Here are examples of the Python API numpy.sum taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
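For reference before the examples: numpy.sum reduces an array to the sum of its elements, optionally along one or more axes, and counts True entries when given a boolean array. A minimal sketch of the basic call patterns:

import numpy as np

a = np.array([[1, 2], [3, 4]])
print(np.sum(a))          # 10: sum over all elements
print(np.sum(a, axis=0))  # [4 6]: column sums
print(np.sum(a, axis=1))  # [3 7]: row sums
print(np.sum(a > 2))      # 2: booleans counted as 0/1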
8822 Examples
5
Source : audioFeatureExtraction.py
with MIT License
from jaflo
def stSpectralFlux(X, Xprev):
    """
    Computes the spectral flux feature of the current frame
    ARGUMENTS:
        X:        the abs(fft) of the current frame
        Xprev:    the abs(fft) of the previous frame
    """
    # compute the spectral flux as the sum of square distances:
    sumX = numpy.sum(X + eps)
    sumPrevX = numpy.sum(Xprev + eps)
    F = numpy.sum((X / sumX - Xprev / sumPrevX) ** 2)
    return F
def stSpectralRollOff(X, c, fs):
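A minimal usage sketch for the function above; eps here is an assumed stand-in for the module-level constant in the source:

import numpy

eps = 1e-8  # assumed small constant, matching the module's eps in spirit

X = numpy.abs(numpy.fft.rfft(numpy.random.randn(1024)))
Xprev = numpy.abs(numpy.fft.rfft(numpy.random.randn(1024)))
print(stSpectralFlux(X, Xprev))  # small non-negative flux value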
5
Source : audioFeatureExtraction.py
with Apache License 2.0
from jim-schwoebel
def stSpectralFlux(X, Xprev):
    """
    Computes the spectral flux feature of the current frame
    ARGUMENTS:
        X:        the abs(fft) of the current frame
        Xprev:    the abs(fft) of the previous frame
    """
    # compute the spectral flux as the sum of square distances:
    sumX = numpy.sum(X + eps)
    sumPrevX = numpy.sum(Xprev + eps)
    F = numpy.sum((X / sumX - Xprev / sumPrevX) ** 2)
    return F
def stSpectralRollOff(X, c, fs):
5
Source : geometry_elements.py
with MIT License
from ngageoint
def get_centroid(self):
    """
    Gets the centroid of the polygon - note that this may not actually lie in
    the polygon interior for a non-convex polygon. This will result in an
    undefined value if the polygon is degenerate.

    Returns
    -------
    numpy.ndarray
    """
    arr = self._coordinates[:-1, 0]*self._coordinates[1:, 1] - \
          self._coordinates[1:, 0]*self._coordinates[:-1, 1]
    area = 0.5*numpy.sum(arr)  # signed area
    x = numpy.sum(0.5*(self._coordinates[:-1, 0] + self._coordinates[1:, 0])*arr)
    y = numpy.sum(0.5*(self._coordinates[:-1, 1] + self._coordinates[1:, 1])*arr)
    return numpy.array([x, y], dtype=numpy.float64)/(3*area)
@property
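A self-contained sketch of the same shoelace-formula centroid, assuming a closed coordinate ring (first vertex repeated at the end):

import numpy

coords = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.]])  # unit square

cross = coords[:-1, 0]*coords[1:, 1] - coords[1:, 0]*coords[:-1, 1]
area = 0.5*numpy.sum(cross)  # signed area: 1.0 here
cx = numpy.sum((coords[:-1, 0] + coords[1:, 0])*cross) / (6*area)
cy = numpy.sum((coords[:-1, 1] + coords[1:, 1])*cross) / (6*area)
print(cx, cy)  # 0.5 0.5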
5
Source : utils.py
with MIT License
from OMR-Research
def dice(Seg, GT):
    """ compute dice coefficient between current segmentation result and groundtruth (GT)"""
    sum_GT = numpy.sum(GT)
    sum_Seg = numpy.sum(Seg)
    if (sum_GT + sum_Seg) == 0:
        dice = 1.0
    else:
        dice = (2.0 * numpy.sum(Seg[GT == 1])) / (sum_Seg + sum_GT)
    return dice
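A minimal usage sketch with two small binary masks (values assumed 0/1):

import numpy

seg = numpy.array([[0, 1, 1], [0, 1, 0]])
gt = numpy.array([[0, 1, 0], [0, 1, 1]])
print(dice(seg, gt))  # 2*|intersection| / (|seg| + |gt|) = 4/6 ≈ 0.667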
5
Source : pyjar.py
with GNU General Public License v3.0
from simonrharris
def create_rate_matrix(f, r):
    # convert f and r to Q matrix
    rm = numpy.array([[0, f[0]*r[1], f[0]*r[2], f[0]*r[3]],
                      [f[1]*r[0], 0, f[1]*r[3], f[1]*r[4]],
                      [f[2]*r[1], f[2]*r[3], 0, f[2]*r[5]],
                      [f[3]*r[2], f[3]*r[4], f[3]*r[5], 0]])
    rm[0][0] = numpy.sum(rm[0])*-1
    rm[1][1] = numpy.sum(rm[1])*-1
    rm[2][2] = numpy.sum(rm[2])*-1
    rm[3][3] = numpy.sum(rm[3])*-1
    return rm
def jar(alignment_filename, tree_filename, info_filename, output_prefix, verbose=False):
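The diagonal assignments use numpy.sum over each row so that every row of the Q matrix sums to zero, as a rate matrix requires. A quick check with hypothetical frequency and exchange-rate values:

import numpy

f = [0.25, 0.25, 0.25, 0.25]        # hypothetical base frequencies
r = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]  # hypothetical exchange rates
Q = create_rate_matrix(f, r)
print(numpy.sum(Q, axis=1))         # each row sums to zero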
5
Source : ssim.py
with GNU General Public License v3.0
from usstdqq
def ssim(img1, img2, C1=0.01**2, C2=0.03**2):
    bimg1 = block_view(img1, (4, 4))
    bimg2 = block_view(img2, (4, 4))
    s1 = numpy.sum(bimg1, (-1, -2))
    s2 = numpy.sum(bimg2, (-1, -2))
    ss = numpy.sum(bimg1*bimg1, (-1, -2)) + numpy.sum(bimg2*bimg2, (-1, -2))
    s12 = numpy.sum(bimg1*bimg2, (-1, -2))
    vari = ss - s1*s1 - s2*s2
    covar = s12 - s1*s2
    ssim_map = (2*s1*s2 + C1) * (2*covar + C2) / ((s1*s1 + s2*s2 + C1) * (vari + C2))
    return numpy.mean(ssim_map)
# FIXME there seems to be a problem with this code
def ssim_exact(img1, img2, sd=1.5, C1=0.01**2, C2=0.03**2):
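The (-1, -2) argument above shows that numpy.sum accepts a tuple of axes, collapsing each 4x4 block to a single value. A minimal illustration:

import numpy

blocks = numpy.arange(2*3*4*4).reshape(2, 3, 4, 4)
print(numpy.sum(blocks, (-1, -2)).shape)  # (2, 3): one sum per 4x4 block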
3
Source : data_augment.py
with Apache License 2.0
from 1adrianb
def _select_rand_weights(weight_idx=0, transforms=None):
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one set of weights currently
    rand_weights = _RAND_CHOICE_WEIGHTS_0
    probs = [rand_weights[k] for k in transforms]
    probs /= np.sum(probs)
    return probs
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
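The probs /= np.sum(probs) line works because dividing a Python list by a NumPy scalar broadcasts to an ndarray. A standalone sketch of the pattern (the weight values here are illustrative, not timm's actual weights):

import numpy as np

weights = [3, 1, 1]
probs = weights / np.sum(weights)
print(probs, np.sum(probs))  # [0.6 0.2 0.2] 1.0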
3
Source : misc.py
with Apache License 2.0
from 1adrianb
def params_count(model, ignore_bn=False):
    """
    Compute the number of parameters.
    Args:
        model (model): model to count the number of parameters.
    """
    if not ignore_bn:
        return np.sum([p.numel() for p in model.parameters()]).item()
    else:
        count = 0
        for m in model.modules():
            if not isinstance(m, nn.BatchNorm3d):
                for p in m.parameters(recurse=False):
                    count += p.numel()
        return count
def gpu_mem_usage():
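A quick sanity check of the default branch, assuming PyTorch is installed:

import numpy as np
import torch.nn as nn

model = nn.Linear(10, 2)  # tiny example model
print(np.sum([p.numel() for p in model.parameters()]).item())  # 22 = 10*2 weights + 2 biases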
3
Source : board.py
with MIT License
from 2Bear
def get_legal_moves(self, player):
    own, enemy = self.get_own_and_enemy(player)
    legal_moves_without_pass = bit_to_array(get_legal_moves_bit(own, enemy), config.board_length)
    if np.sum(legal_moves_without_pass) == 0:
        return np.concatenate((legal_moves_without_pass, [1]))
    else:
        return np.concatenate((legal_moves_without_pass, [0]))
left_right_mask = np.uint64(0x7e7e7e7e7e7e7e7e)
3
Source : othello.py
with MIT License
from 2Bear
def print_winner(node):
    black_stones_num = np.sum(node.board.black_array2d)
    white_stones_num = np.sum(node.board.white_array2d)
    if black_stones_num > white_stones_num:
        print("black wins.")
    elif black_stones_num < white_stones_num:
        print("white wins.")
    else:
        print("draw.")
def restore_from_last_checkpoint(session, saver):
3
Source : mlp.py
with MIT License
from 3fon3fonov
def lnL(self, theta, X, Y, e_Y, func):
    # the log-likelihood
    global L, chisqr, wtrms   # a blob
    L = []       # log-likelihood for each instrument
    Ymod = func(X, theta)
    chisqr = []
    wtrms = []   # weighted rms for each instrument
    N = len(X)
    for y, e_y, ymod, lnf in zip(Y, e_Y, Ymod, theta[-N:]):
        # loop over data sets
        sigma2 = e_y**2 + lnf**2
        #weight = 1/sigma2
        chisqr += [np.sum((y-ymod)**2/sigma2)]
        L += [-0.5 * np.sum((y-ymod)**2/sigma2 + np.log(2*pi*sigma2))]
        wtrms += [np.sqrt(np.sum((y-ymod)**2/sigma2)/np.sum(1/sigma2))]
    return sum(L)
def single_freq_fit(self, omega):
3
Source : helper.py
with MIT License
from 4artit
def toc(self):
    self._toc = datetime.datetime.now()
    self._time = (self._toc - self._tic).total_seconds() + np.sum(self._cache)
    self._log.append(self._time)
    self._cache = []
def update(self):
3
Source : helper.py
with MIT License
from 4artit
def _calc_stats(self):
    self._totaltime = np.sum(self._log)
    self._totalnumber = len(self._log)
    self._meantime = np.mean(self._log)
    self._mediantime = np.median(self._log)
    self._mintime = np.min(self._log)
    self._maxtime = np.max(self._log)
    self._stdtime = np.std(self._log)
    self._meanfps = 1./np.mean(self._log)
    self._medianfps = 1./np.median(self._log)
def stop(self):
3
Source : data_set.py
with MIT License
from 4p0pt0Z
def load_audio(self, filename):
    r"""Special audio loading function to make sum (and not average) of stereo files

    Args:
        filename (str): path to audio file

    Returns:
        Re-sampled and converted to mono (sum) audio waveform.
    """
    audio, _ = librosa.core.load(filename, sr=self.config["sampling_rate"], mono=False)
    if audio.ndim > 1:
        audio = np.sum(audio, axis=0)
    return audio
def load_audio_source_files(self, idx):
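The axis=0 sum collapses the channel dimension, mixing stereo to mono by summation rather than averaging. A standalone sketch:

import numpy as np

stereo = np.random.randn(2, 22050)  # (channels, samples)
mono = np.sum(stereo, axis=0)       # channel-wise sum, not mean
print(mono.shape)                   # (22050,)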
3
Source : util.py
with GNU General Public License v3.0
from 4rChon
def exploit_distribution(policy, valid_actions, size):
    # Mask actions
    non_spatial_policy = policy["non_spatial"][-1][valid_actions]
    # Normalize probabilities
    non_spatial_probs = non_spatial_policy/np.sum(non_spatial_policy)
    # Choose from normalized distribution
    act_id = np.random.choice(valid_actions, p=non_spatial_probs)
    target = np.random.choice(np.arange(len(policy["spatial"][-1])), p=policy["spatial"][-1])
    # Resize to provided resolution
    coords = [int(target // size), int(target % size)]
    return act_id, coords
def exploit_max(policy, valid_actions, size):
3
Source : brats.py
with Apache License 2.0
from 920232796
def compute_BraTS_dice(ref, pred):
    """
    ref and pred are binary integer numpy.ndarrays
    :param ref:
    :param pred:
    :return:
    """
    num_ref = np.sum(ref)
    num_pred = np.sum(pred)
    if num_ref == 0:
        if num_pred == 0:
            return 1
        else:
            return 0
    else:
        return dc(pred, ref)
def compute_BraTS_HD95(ref, pred):
3
Source : keras_eval_errors.py
with MIT License
from 921kiyo
def check_confidence_tensor(tensor):
    tol = 1e-04
    if (not (np.abs(np.sum(tensor, axis=1)[0] - 1.)) < tol) or (np.argmin(tensor, axis=1)[0] < 0):
        return False
    return True
def check_confusion_matrix(cm):
3
Source : test_errors.py
with MIT License
from 921kiyo
def check_confidence_tensor(tensor):
    tol = 1e-09
    if (not (np.sum(tensor, axis=1)[0] - 1.) < tol) or (np.argmin(tensor, axis=1)[0] < 0):
        return False
    return True
def check_confusion_matrix(cm):
3
Source : tf_eval_errors.py
with MIT License
from 921kiyo
def check_confidence_tensor(tensor):
    tol = 1e-05
    if (not (np.sum(tensor, axis=1)[0] - 1.) < tol) or (np.argmin(tensor, axis=1)[0] < 0):
        return False
    return True
def check_confusion_matrix(cm):
3
Source : visualise_classifier.py
with BSD 3-Clause "New" or "Revised" License
from a2i2
def calculate_cohen_kappa(confusion_matrix):
    n_classes = confusion_matrix.shape[0]
    sum0 = np.sum(confusion_matrix, axis=0)
    sum1 = np.sum(confusion_matrix, axis=1)
    expected = safe_div(np.outer(sum0, sum1), np.sum(sum0))
    w_mat = np.ones([n_classes, n_classes], dtype=int)  # np.int was removed in NumPy 1.24
    # pylint: disable=unsupported-assignment-operation
    w_mat.flat[:: n_classes + 1] = 0
    k = safe_div(np.sum(w_mat * confusion_matrix), np.sum(w_mat * expected))
    return 1 - k
def calculate_classifier_metrics(y_true, y_pred):
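A self-contained sketch of the same computation on a hypothetical 2x2 confusion matrix, with safe_div replaced by plain division since the sums here are non-zero:

import numpy as np

cm = np.array([[20., 5.],
               [10., 15.]])
sum0, sum1 = np.sum(cm, axis=0), np.sum(cm, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
w = np.ones_like(cm)
w.flat[::cm.shape[0] + 1] = 0                  # zero out the diagonal weights
print(1 - np.sum(w*cm) / np.sum(w*expected))   # 0.4: agreement beyond chance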
3
Source : alignment.py
with MIT License
from a312863063
def find_max_region(bw_img):
    labeled_img, num = label(bw_img, background=0, return_num=True)
    max_label = 0
    max_num = 0
    for i in range(1, num + 1):
        if np.sum(labeled_img == i) > max_num:
            max_num = np.sum(labeled_img == i)
            max_label = i
    lcc = (labeled_img == max_label)
    return lcc
def get_mask(img, parse_net):
3
Source : skeleton_decomposition.py
with MIT License
from aAbdz
def order_branch(branch, junction, order):
    e1 = np.sum(branch[0] - junction)**2
    e2 = np.sum(branch[-1] - junction)**2
    if order == 'descend':
        if e1 < e2:
            branch = np.flip(branch, axis=0)
    elif order == 'ascend':
        if e1 > e2:
            branch = np.flip(branch, axis=0)
    return branch
def unique(mylist):
3
Source : test_functions.py
with MIT License
from AaltoPML
def levy_orig(x):
    x = np.asarray_chkfinite(x)
    #print(len(x))  # is same as D
    z = 1 + (x - 1) / 4
    return -((sin( pi * z[0] )**2
              + sum( (z[:-1] - 1)**2 * (1 + 10 * sin( pi * z[:-1] + 1 )**2 ))
              + (z[-1] - 1)**2 * (1 + sin( 2 * pi * z[-1] )**2 ))) \
           + np.abs(60-0)*np.random.normal(0, NOISE_LEVEL)
#...............................................................................
#def levy(x_d,x_minus_d,d):
#    x_ = list(x_minus_d)
#    x_.insert(d-1,float(x_d))
#    return -levy_orig(x_)
def ackley_orig( x, a=20, b=0.2, c=2*pi ):
3
Source : test_functions.py
with MIT License
from AaltoPML
def ackley_orig(x, a=20, b=0.2, c=2*pi):
    x = np.asarray_chkfinite(x)  # ValueError if any NaN or Inf
    n = len(x)
    s1 = sum( x**2 )
    s2 = sum( cos( c * x ))
    return -(-a*exp( -b*sqrt( s1 / n )) - exp( s2 / n ) + a + exp(1)) \
           + np.random.normal(0, NOISE_LEVEL)  #np.abs(21-0)*np.random.normal(0,NOISE_LEVEL)
#def ackley(x_d,x_minus_d,d):
#    x_ = list(x_minus_d)
#    x_.insert(d-1,float(x_d))
#    return -ackley_orig(x_)
def dixonprice_orig( x ): # dp.m
3
Source : test_functions.py
with MIT License
from AaltoPML
def dixonprice_orig(x):  # dp.m
    x = np.asarray_chkfinite(x)
    n = len(x)
    j = np.arange( 2, n+1 )
    x2 = 2 * x**2
    return -(sum( j * (x2[1:] - x[:-1]) **2 ) + (x[0] - 1) **2) + np.random.normal(0, NOISE_LEVEL)
#def dixonprice(x_d,x_minus_d,d):
#    x_ = list(x_minus_d)
#    x_.insert(d-1,float(x_d))
#    return -dixonprice_orig(x_)
#Six-Hump Camel: x in [-3,3] and y in [-2,2]
def sixhump_camel_orig(x):
3
Source : gp_model.py
with MIT License
from AaltoPML
def T(self, f, theta, Sigma_inv_=None):
    if Sigma_inv_ is None:
        Sigma_inv_ = self.Sigma_inv
    sumPhi = self.sum_Phi_vec(0, f, theta[0])
    T = -0.5*f.T.dot(Sigma_inv_).dot(f) - np.sum(sumPhi)/self.m
    return T
def T_grad(self,f,theta,Sigma_inv_=None):
3
Source : utils.py
with BSD 3-Clause "New" or "Revised" License
from abatten
def normalise_to_pdf(hist, bin_widths):
    """Normalise a histogram to a probability density function."""
    if np.sum(hist) < 1e-16:
        pdf = np.zeros(len(hist))
    else:
        pdf = hist/bin_widths/np.sum(hist)
    return pdf
def linear_interpolate_pdfs(sample, xvals, pdfs):
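A quick check that the normalization integrates to one:

import numpy as np

hist = np.array([2., 6., 2.])
widths = np.array([0.5, 0.5, 0.5])
pdf = hist / widths / np.sum(hist)
print(np.sum(pdf * widths))  # 1.0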
3
Source : quantile_regression.py
with MIT License
from Abdulmajid-Murad
def quantile_score(y_true, y_pred, alpha):
    score = XGBQuantile.quantile_cost(x=y_true - y_pred, alpha=alpha)
    score = np.sum(score)
    return score
@staticmethod
3
Source : quantile_regression.py
with MIT License
from Abdulmajid-Murad
def get_split_gain(gradient, hessian, l=1):
    split_gain = list()
    for i in range(gradient.shape[0]):
        split_gain.append(np.sum(gradient[:i])**2/(np.sum(hessian[:i])+l)
                          + np.sum(gradient[i:])**2/(np.sum(hessian[i:])+l)
                          - np.sum(gradient)**2/(np.sum(hessian)+l))
    return np.array(split_gain)
def root_mean_squared_error(y_true , y_pred):
3
Source : structure.py
with MIT License
from abelcarreras
def number_of_electrons(self):
    """
    returns the total number of electrons

    :return: number of total electrons
    """
    return int(np.sum(self.get_atomic_numbers()) + self.charge)
@property
3
Source : utils.py
with MIT License
from abelcarreras
def get_occupied_electrons(configuration, structure):
    # works for closed shell only
    alpha_e = np.sum([int(c) for c in configuration['alpha']])
    beta_e = np.sum([int(c) for c in configuration['beta']])
    hole = 0 if configuration['hole'] == '' else 1
    part = 0 if configuration['part'] == '' else 1
    return (structure.number_of_electrons + structure.charge - (alpha_e + beta_e + part - hole))//2
def get_inertia(structure):
3
Source : shot_batch_sampler.py
with MIT License
from abhi4ssj
def get_class_slices(labels, i):
    num_slices, H, W = labels.shape
    thresh = 0.005
    total_slices = labels == i
    pixel_sum = np.sum(total_slices, axis=(1, 2)).squeeze()
    pixel_sum = pixel_sum / (H * W)
    threshold_list = [idx for idx, slice in enumerate(pixel_sum) if slice > thresh]
    return threshold_list
def get_index_dict(labels, lab_list):
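A standalone sketch of the per-slice class fraction computed above:

import numpy as np

labels = np.zeros((4, 8, 8), dtype=int)
labels[1, :4, :] = 2                          # class 2 fills half of slice 1
frac = np.sum(labels == 2, axis=(1, 2)) / (8 * 8)
print(frac)                                   # [0.  0.5 0.  0. ]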
3
Source : shot_batch_sampler.py
with MIT License
from abhi4ssj
def get_index_dict(labels, lab_list):
    index_list = {i: get_class_slices(labels, i) for i in lab_list}
    p = [1 - (len(val) / len(labels)) for val in index_list.values()]
    p = p / np.sum(p)
    return index_list, p
class OneShotBatchSampler:
3
Source : SpectralClustering.py
with MIT License
from abhinav4192
def SpectralClustering(CKSym, n):
    # This is direct port of JHU vision lab code. Could probably use sklearn SpectralClustering.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans
    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_
if __name__ == "__main__":
3
Source : featureExtraction.py
with MIT License
from abhirooptalasila
def spectral_flux(fft_magnitude, previous_fft_magnitude):
    """Computes the spectral flux feature of the current frame

    Args:
        fft_magnitude : the abs(fft) of the current frame
        previous_fft_magnitude : the abs(fft) of the previous frame
    """
    # compute the spectral flux as the sum of square distances:
    fft_sum = np.sum(fft_magnitude + eps)
    previous_fft_sum = np.sum(previous_fft_magnitude + eps)
    sp_flux = np.sum(
        (fft_magnitude / fft_sum - previous_fft_magnitude /
         previous_fft_sum) ** 2)
    return sp_flux
def spectral_rolloff(signal, c):
3
Source : nn.py
with GNU General Public License v3.0
from acamero
def get_trainable_count(model):
    trainable_params = int(np.sum(
        [K.count_params(p) for p in set(model.trainable_weights)]))
    return trainable_params
class RNNBuilder(NNBuilder):
3
Source : poc.py
with GNU General Public License v3.0
from acamero
def _decode_solution(self, x):
    if self.verbose: print(x)
    poly = self.solution_decoder(x)
    if self.verbose: print(self._poly_to_str(poly))
    fn = lambda x: [c * x**ix for ix, c in enumerate(poly)]
    poly_fn = lambda x: np.sum(fn(x))
    return poly_fn
def fit(
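The decoded polynomial is evaluated by summing c * x**ix over the coefficient list. A minimal sketch with an illustrative coefficient list:

import numpy as np

poly = [1.0, 0.0, 2.0]  # represents 1 + 2*x**2
poly_fn = lambda x: np.sum([c * x**ix for ix, c in enumerate(poly)])
print(poly_fn(3.0))     # 19.0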
3
Source : rnn.py
with GNU General Public License v3.0
from acamero
def __init__(self, layers, weights=None, dense_activation='tanh', verbose=0):
    self.model = self._build_model(layers, dense_activation)
    if weights:
        self.model.set_weights(weights)
    self.trainable_params = int(np.sum([K.count_params(p) for p in set(self.model.trainable_weights)]))
    if verbose:
        self.model.summary()
        #self.model_to_png("model.png")
def _build_model(self, layers, dense_activation):
3
Source : rnn.py
with GNU General Public License v3.0
from acamero
def __init__(self, layers, weights=None, dense_activation='tanh'):
    self.model = self._build_model(layers, dense_activation)
    if weights:
        self.model.set_weights(weights)
    self.trainable_params = int(np.sum([K.count_params(p) for p in set(self.model.trainable_weights)]))
    self.model.summary()
    #self.model_to_png("model.png")
def _build_model(self, layers, dense_activation):
3
Source : detection_exporter.py
with Apache License 2.0
from Accenture
def update_timeseries(self, detections, frame_no, frame_offset=0):
    sum_confidence = np.sum([d.confidence if not math.isnan(d.confidence)
                             else d.detection_history[-1]
                             for d in detections])
    self._num_detections[self._video_idx] += len(detections)
    current_timestamp = (frame_no + frame_offset) / self.fps
    last_time_point = self._detection_timeseries[self._video_idx][-1][0] \
        if self._detection_timeseries[self._video_idx] else 0
    if math.isclose(last_time_point, current_timestamp):
        # increment existing point in time series
        self._detection_timeseries[self._video_idx][-1][1] += sum_confidence
    else:
        # add new detection to the time series
        self._detection_timeseries[self._video_idx].append([current_timestamp, sum_confidence])
def add_detections_at_frame(self, detections, frame_no):
3
Source : mob.py
with Apache License 2.0
from Accenture
def score_vote_aabb(aabbs, scores):
    normalized_scores = scores / np.sum(scores)
    return np.average(aabbs, axis=0, weights=normalized_scores)
def spatial_vote_aabb(aabbs, scores=None):
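A standalone sketch of the score-weighted box vote; the box coordinates and scores here are made up:

import numpy as np

aabbs = np.array([[0., 0., 10., 10.],
                  [2., 2., 12., 12.]])
scores = np.array([3., 1.])
print(np.average(aabbs, axis=0, weights=scores / np.sum(scores)))
# [ 0.5  0.5 10.5 10.5]: the higher-scoring box dominates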
3
Source : estimators_adaptive.py
with MIT License
from acids-ircam
def calc_probs(t_index, unique_inverse, label, b, b1, len_unique_a):
    """Calculate the p(x|T) and p(y|T)"""
    indexs = unique_inverse == t_index
    p_y_ts = np.sum(label[indexs], axis=0) / label[indexs].shape[0]
    unique_array_internal, unique_counts_internal = \
        np.unique(b[indexs], return_index=False, return_inverse=False, return_counts=True)
    indexes_x = np.where(np.in1d(b1, b[indexs]))
    p_x_ts = np.zeros(len_unique_a)
    p_x_ts[indexes_x] = unique_counts_internal / float(sum(unique_counts_internal))
    return p_x_ts, p_y_ts
def mutual_information_sampling(data, bins, pys1, pxs, label, b, b1, len_unique_a, p_YgX, unique_inverse_x,
3
Source : estimators_eet.py
with MIT License
from acids-ircam
def tcd(xs, base=2):
    xis = [entropyd(column(xs, i), base) for i in range(0, len(xs[0]))]
    hx = entropyd(xs, base)
    return np.sum(xis) - hx
def ctcd(xs, y, base=2):
3
Source : metrics.py
with MIT License
from acl21
def params_count(model):
    """Computes the number of parameters."""
    return np.sum([p.numel() for p in model.parameters()]).item()
def flops_count(model):
3
Source : tf_util.py
with MIT License
from AcutronicRobotics
def __init__(self, var_list, dtype=tf.float32):
    assigns = []
    shapes = list(map(var_shape, var_list))
    total_size = np.sum([intprod(shape) for shape in shapes])
    self.theta = theta = tf.placeholder(dtype, [total_size])
    start = 0
    assigns = []
    for (shape, v) in zip(shapes, var_list):
        size = intprod(shape)
        assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
        start += size
    self.op = tf.group(*assigns)
def __call__(self, theta):
3
Source : numpy_routines.py
with GNU General Public License v3.0
from ad12
def sum_np(x, axis=None, dtype=None, keepdims=False, initial=np._NoValue, where=np._NoValue):
    """See :func:`numpy.sum`."""
    return reduce_array_op(
        np.sum, x, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, where=where
    )
@implements(np.mean)
3
Source : sampler.py
with MIT License
from adammoss
def _rejection_prior_sample(self, loglstar, num_trials=None):
    if num_trials is None:
        ncall = 0
        while True:
            x = self.sample_prior(1)
            logl, derived = self.loglike(x)
            ncall += 1
            if logl > loglstar:
                break
    else:
        x = self.sample_prior(num_trials)
        logl, derived = self.loglike(x)
        ncall = num_trials / np.sum(logl > loglstar)
    return x, logl, derived, ncall
def _rejection_flow_sample(
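The acceptance-rate estimate above relies on the fact that summing a boolean array counts its True entries:

import numpy as np

logl = np.array([-3.2, -1.0, -2.5, -0.4])
print(np.sum(logl > -2.0))  # 2: number of samples above the threshold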
3
Source : _binary_approximation.py
with GNU Lesser General Public License v3.0
from adbuerger
def _check_sos1_constraint_fulfilled(self) -> None:
    sums = np.sum(self._b_rel, axis=0)
    tol = self._clamped * self._binary_threshold + (self._b_rel.shape[0] + 1) * np.finfo(sums.dtype).eps
    if np.any(np.logical_or(sums > 1.0 + tol, sums < 1.0 - tol)):
        warnings.warn("The sum of relaxed binary controls per time point " + \
                      "must be exactly 1.\nThis seems not to be the case for the data " + \
                      "you provided.\npycombina might not work as expected.")
def _set_problem_size_reduction(self, reduce_problem_size_before_solve: bool) -> None:
3
Source : dataframe_quality.py
with MIT License
from ADGEfficiency
def check_duplicate_rows(df, verbose=True):
    duplicated_bools = df.duplicated()
    num = np.sum(duplicated_bools)
    print('{} row duplicates'.format(num))
    if verbose:
        df[duplicated_bools].head(3)
    return df[df.duplicated(keep=False)]
def check_nans(df, verbose=True):
3
Source : whittle.py
with MIT License
from AdityaMate
def fastCavg(x1, x2, c0=None, Tpass=np.identity(2), Tact=np.identity(2)):
    x1 = int(x1)
    x2 = int(x2)
    T = [Tpass, Tact]
    q = ((x1*getB(x2, T[1][0][1], T))/(1 - getB(x1, T[1][1][1], T)) + x2)**-1
    p = q*(getB(x2, T[1][0][1], T)/(1 - getB(x1, T[1][1][1], T)))
    if c0:
        cavg = p*(x1 - np.sum([getB(i, T[1][1][1], T) for i in range(1, x1+1)])) \
             + q*(x2 - np.sum([getB(j, T[1][0][1], T) for j in range(1, x2+1)])) + (p+q)*c0
        return cavg, cavg - (p+q)*c0, (p+q)
    else:
        # return slope, intercept
        return (p+q), p*(x1 - np.sum([getB(i, T[1][1][1], T) for i in range(1, x1+1)])) \
                    + q*(x2 - np.sum([getB(j, T[1][0][1], T) for j in range(1, x2+1)]))
def Cavg(x1, x2, c0=None, Tpass=np.identity(2), Tact=np.identity(2), ba=[], bna=[]):