Here are examples of the Python API numpy.true_divide, taken from open source projects. The snippets below illustrate the most common and useful usage patterns.
52 Examples
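As a baseline before the project snippets: np.true_divide is the ufunc behind Python's / operator on arrays, and it always produces a floating-point result, even for integer inputs (unlike np.floor_divide, which backs //). A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])           # integer dtype
b = np.array([2, 2, 2])
print(np.true_divide(a, b))       # [0.5 1.  1.5] -- promoted to float64
print(a / b)                      # identical; the / operator dispatches to true_divide
print(np.floor_divide(a, b))      # [0 1 1] -- the integer counterpart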
Source : core.py
with GNU General Public License v3.0
from Artikash
def __itruediv__(self, other):
    "True divide self by other in-place."
    other_data = getdata(other)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    # self._mask = mask_or(self._mask, new_mask)
    self._mask |= new_mask
    ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
    return self
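The masked-array method above protects in-place division by filling invalid denominators before the ufunc runs. A stripped-down sketch of the same guard, without the numpy.ma machinery (all names here are illustrative, not from the project):

import numpy as np

data = np.array([4.0, 9.0, 16.0])
denom = np.array([2.0, 0.0, 4.0])
bad = denom == 0                      # domain mask: entries where division is unsafe
safe = np.where(bad, 1.0, denom)      # fill unsafe entries with a harmless value
out = np.true_divide(data, safe)      # divide everywhere without warnings
out[bad] = np.nan                     # then mark the masked slots explicitly
print(out)                            # [ 2. nan  4.]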
Source : GMM_IO.py
with MIT License
from CHPGenetics
def clr_norm(data_df):
    for hto in data_df.columns.values:
        compensated_values = data_df.loc[:, hto].values + 1
        gmean = stats.gmean(compensated_values)
        # print(gmean)
        data_df.loc[:, hto] = np.log(np.true_divide(compensated_values, gmean))
    return data_df
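For context, CLR (centered log-ratio) normalization divides each count by the geometric mean of its column before taking the log, so values become comparable across columns. A hedged, self-contained reproduction (the DataFrame and column names are invented for illustration):

import numpy as np
import pandas as pd
from scipy import stats

data_df = pd.DataFrame({"HTO_1": [5.0, 120.0, 3.0], "HTO_2": [40.0, 2.0, 88.0]})
for hto in data_df.columns.values:
    compensated_values = data_df.loc[:, hto].values + 1   # +1 avoids log(0)
    gmean = stats.gmean(compensated_values)
    data_df.loc[:, hto] = np.log(np.true_divide(compensated_values, gmean))
print(data_df)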
Source : lax_reference.py
with Apache License 2.0
from google
def padtype_to_pads(in_shape, filter_shape, window_strides, padding):
    if padding.upper() == 'SAME':
        out_shape = np.ceil(np.true_divide(in_shape, window_strides)).astype(int)
        pad_sizes = [_max((out_size - 1) * stride + filter_size - in_size, 0)
                     for out_size, stride, filter_size, in_size
                     in zip(out_shape, window_strides, filter_shape, in_shape)]
        return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
    else:
        return [(0, 0)] * len(in_shape)
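The 'SAME' branch works because ceil(in/stride) is exactly the number of output positions needed to cover the input, and any padding deficit is split as evenly as possible between the two sides. A small worked example (shapes chosen arbitrarily):

import numpy as np

in_shape, filter_shape, strides = (5,), (3,), (2,)
out_shape = np.ceil(np.true_divide(in_shape, strides)).astype(int)             # ceil(5 / 2) = 3
pad = max((out_shape[0] - 1) * strides[0] + filter_shape[0] - in_shape[0], 0)  # 4 + 3 - 5 = 2
print(out_shape, (pad // 2, pad - pad // 2))                                   # [3] (1, 1)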
Source : statistics.py
with Apache License 2.0
from hasanirtiza
def __init__(self, result):
    self.result = result
    self.mr = 1.0 - np.true_divide(result.tp, result.nof_gts)  # 1 - recall
    self.fppi = np.true_divide(result.fp, result.nof_imgs)  # false positives per image
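Both attributes are plain elementwise ratios over cumulative detection counts: miss rate is 1 - TP/GT and FPPI is FP/images. A numeric check (counts invented; 100 ground truths and 500 images assumed):

import numpy as np

tp = np.array([50, 80, 95])                # cumulative true positives
fp = np.array([10, 40, 200])               # cumulative false positives
mr = 1.0 - np.true_divide(tp, 100)         # [0.5  0.2  0.05]
fppi = np.true_divide(fp, 500)             # [0.02 0.08 0.4 ]
print(mr, fppi)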
Source : stretch.py
with BSD 3-Clause "New" or "Revised" License
from holzschu
def __call__(self, values, clip=True, out=None):
    values = _prepare(values, clip=clip, out=out)
    np.true_divide(values, self.a, out=values)
    np.arcsinh(values, out=values)
    np.true_divide(values, np.arcsinh(1. / self.a), out=values)
    return values
Source : stretch.py
with BSD 3-Clause "New" or "Revised" License
from holzschu
def __call__(self, values, clip=True, out=None):
    values = _prepare(values, clip=clip, out=out)
    np.true_divide(values, self.a, out=values)
    np.sinh(values, out=values)
    np.true_divide(values, np.sinh(1. / self.a), out=values)
    return values
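The sinh stretch mirrors the arcsinh stretch above: both divide by a, apply the transform in place, and renormalize so an input of 1 maps exactly to 1. A quick standalone check of that endpoint normalization and of how the arcsinh form can be algebraically undone (a chosen arbitrarily):

import numpy as np

a = 0.1
values = np.array([0.0, 0.5, 1.0])
stretched = np.arcsinh(values / a) / np.arcsinh(1.0 / a)
recovered = np.sinh(stretched * np.arcsinh(1.0 / a)) * a   # inverts the stretch
print(stretched[-1], recovered)                            # 1.0 [0.  0.5 1. ]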
Source : objectives.py
with MIT License
from jefkine
def derivative(self, predictions, targets, np_type):
    """
    Applies the BinaryCrossEntropy derivative to the predictions and targets provided

    Args:
        predictions (numpy.array): the predictions numpy array
        targets (numpy.array): the targets numpy array

    Returns:
        numpy.array: the output of the BinaryCrossEntropy derivative for the predictions and targets
    """
    clipped_predictions, clipped_divisor = super(BinaryCrossEntropy, self).clip(predictions)
    return np.true_divide((clipped_predictions - targets), clipped_divisor)
Source : objectives.py
with MIT License
from jefkine
def loss(self, predictions, targets, np_type):
    """
    Applies the KLDivergence loss to the predictions and targets provided

    Args:
        predictions (numpy.array): the predictions numpy array
        targets (numpy.array): the targets numpy array

    Returns:
        numpy.array: the output of the KLDivergence loss for the predictions and targets
    """
    targets = super(KLDivergence, self).add_fuzz_factor(targets)
    predictions = super(KLDivergence, self).add_fuzz_factor(predictions)
    return np.sum(np.multiply(targets, np.log(np.true_divide(targets, predictions))), axis=1)
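The loss is the standard discrete KL divergence, sum(targets * log(targets / predictions)) per row; the fuzz factor keeps both arrays strictly positive so the log and the division are safe. A direct numeric check with made-up distributions:

import numpy as np

eps = 1e-7                                     # stand-in for the fuzz factor
targets = np.array([[0.7, 0.2, 0.1]]) + eps
predictions = np.array([[0.5, 0.3, 0.2]]) + eps
kl = np.sum(np.multiply(targets, np.log(np.true_divide(targets, predictions))), axis=1)
print(kl)                                      # small positive value; 0 only if the rows match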
Source : optimizers.py
with MIT License
from jefkine
def update(self, weights, grads, epoch_num, batch_num, batch_size):
    self.weights = weights
    self.grads = np.true_divide(grads, batch_size)
    if self.cache is None:
        self.cache = np.zeros_like(self.grads)
    self.cache += np.square(self.grads)
    self.weights -= np.multiply(
        super(AdaGrad, self).get_learning_rate(epoch_num),
        np.true_divide(self.grads, np.sqrt(self.cache) + self.epsilon)
    )
    return self.weights
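AdaGrad divides each gradient by the square root of its accumulated squared history, so the first step for every weight has magnitude close to the learning rate regardless of gradient scale. A standalone sketch of a single update (the learning rate, epsilon, and gradients are invented):

import numpy as np

w = np.array([1.0, 1.0])
cache = np.zeros_like(w)
grads = np.true_divide(np.array([0.5, 2.0]), 32)   # batch-averaged gradients
cache += np.square(grads)
w -= 0.01 * np.true_divide(grads, np.sqrt(cache) + 1e-8)
print(w)   # both weights moved by ~0.01 despite very different gradient sizes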
Source : optimizers.py
with MIT License
from jefkine
def update(self, weights, grads, epoch_num, batch_num, batch_size):
    self.weights = weights
    self.grads = np.true_divide(grads, batch_size)  # grads
    if self.cache is None:
        self.cache = np.zeros_like(self.weights)
    # exponentially weighted moving average of squared gradients (RMSProp);
    # the original parenthesization dropped the sum between the two terms
    self.cache = np.multiply(self.rho, self.cache) + np.multiply(1 - self.rho, np.square(self.grads))
    self.weights -= np.multiply(self.lr, np.true_divide(self.grads, (np.sqrt(self.cache) + self.epsilon)))
    return self.weights
Source : dataset.py
with MIT License
from kozistr
def img_scaling(img, scale='0,1'):
    if scale == '0,1':
        try:
            img /= 255.
        except TypeError:  # ufunc 'true_divide' output could not be coerced to the integer input's dtype
            img = np.true_divide(img, 255.0, casting='unsafe')
    elif scale == '-1,1':
        try:
            img = (img / 127.5) - 1.
        except TypeError:
            img = np.true_divide(img, 127.5, casting='unsafe') - 1.
    else:
        raise ValueError("[-] Only '0,1' or '-1,1' please - (%s)" % scale)
    return img
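The try/except exists because in-place true division mutates the array's buffer: for an integer image, /= would have to write floats into an integer buffer, which numpy refuses under its default casting rule, raising the TypeError caught above. A minimal reproduction:

import numpy as np

img = np.arange(4, dtype=np.uint8)        # typical raw image dtype
try:
    img /= 255.                           # in-place: cannot coerce float64 into uint8
except TypeError:
    img = np.true_divide(img, 255.0, casting='unsafe')
print(img.dtype, img)                     # float64 [0.  0.0039...  0.0078...  0.0117...]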
Source : score.py
with MIT License
from LAL
def score_event(truth, submission):
    """Compute the TrackML event score for a single event.

    Parameters
    ----------
    truth : pandas.DataFrame
        Truth information. Must have hit_id, particle_id, and weight columns.
    submission : pandas.DataFrame
        Proposed hit/track association. Must have hit_id and track_id columns.
    """
    tracks = _analyze_tracks(truth, submission)
    purity_rec = numpy.true_divide(tracks['major_nhits'], tracks['nhits'])
    purity_maj = numpy.true_divide(tracks['major_nhits'], tracks['major_particle_nhits'])
    good_track = (0.5 < purity_rec) & (0.5 < purity_maj)
    return tracks['major_weight'][good_track].sum()
Source : utils.py
with MIT License
from llSourcell
def to_chroma_np(bar, is_normalize=True):
    chroma = bar.reshape(bar.shape[0], bar.shape[1], 12, 7, bar.shape[3]).sum(axis=3)
    if is_normalize:
        chroma_max = chroma.max(axis=(1, 2, 3), keepdims=True)
        chroma_min = chroma.min(axis=(1, 2, 3), keepdims=True)
        return np.true_divide(chroma + chroma_min, chroma_max - chroma_min + 1e-15)
    else:
        return chroma
Source : asinhstretchsigned.py
with GNU General Public License v3.0
from lsst-dm
def __call__(self, values, clip=True, out=None):
    values = _prepare(values, clip=clip, out=out)
    values *= 2.
    values -= 1.
    np.true_divide(values, self.a, out=values)
    np.sinh(values, out=values)
    np.true_divide(values, np.sinh(1. / self.a), out=values)
    values += 1.
    values /= 2.
    return values
Source : functions.py
with GNU General Public License v3.0
from nrc-cnrc
def true_divide(x1, x2):
    """returns x1 / x2
    """
    return _bcallg(np.true_divide, x1, x2)
Source : ufunc_test.py
with BSD 3-Clause "New" or "Revised" License
from spcl
def test_ufunc_maximum_nan_ff(A: dace.float32[10], B: dace.float32[10]):
    C = np.true_divide(A, 0)
    return np.maximum(C, B)
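This test and the three that follow all create inf and NaN values on purpose: dividing a float array by zero yields ±inf for nonzero numerators and NaN for 0/0 under IEEE semantics. They differ only in the second ufunc: maximum/minimum propagate NaNs, while fmax/fmin ignore them. A quick illustration of that difference:

import numpy as np

with np.errstate(divide='ignore', invalid='ignore'):
    C = np.true_divide(np.array([1.0, 0.0]), 0)   # [inf nan]
print(np.maximum(C, 5.0))                         # [inf nan] -- NaN propagates
print(np.fmax(C, 5.0))                            # [inf  5.] -- NaN is skipped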
Source : ufunc_test.py
with BSD 3-Clause "New" or "Revised" License
from spcl
def test_ufunc_fmax_nan_ff(A: dace.float32[10], B: dace.float32[10]):
    C = np.true_divide(A, 0)
    return np.fmax(C, B)
Source : ufunc_test.py
with BSD 3-Clause "New" or "Revised" License
from spcl
def test_ufunc_minimum_nan_ff(A: dace.float32[10], B: dace.float32[10]):
    C = np.true_divide(A, 0)
    return np.minimum(C, B)
Source : ufunc_test.py
with BSD 3-Clause "New" or "Revised" License
from spcl
def test_ufunc_fmin_nan_ff(A: dace.float32[10], B: dace.float32[10]):
    C = np.true_divide(A, 0)
    return np.fmin(C, B)
Source : mh.py
with Apache License 2.0
from uber-research
def binary_posterior(P0, P1):
    '''Get posterior on P(case 1|x) given likelihoods for case 0 and case 1, P0
    and P1, resp.
    '''
    posterior_1 = np.true_divide(P1, P1 + P0)
    return posterior_1
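With only two hypotheses, normalizing the likelihoods gives the posterior directly (assuming equal priors). For instance:

import numpy as np

P0 = np.array([0.2, 0.5])            # likelihoods under case 0
P1 = np.array([0.6, 0.5])            # likelihoods under case 1
print(np.true_divide(P1, P1 + P0))   # [0.75 0.5 ]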
Source : equivalencies.py
with BSD 3-Clause "New" or "Revised" License
from yt-project
def _convert(self, x, new_dims):
    from unyt import physical_constants as pc
    if new_dims == dimensionless:
        beta = np.true_divide(x, pc.clight, out=self._get_out(x))
        beta2 = np.multiply(beta, beta, out=self._get_out(x))
        inv_gamma_2 = np.subtract(1, beta2, out=self._get_out(x))
        inv_gamma = np.sqrt(inv_gamma_2, out=self._get_out(x))
        gamma = np.true_divide(1.0, inv_gamma, out=self._get_out(x))
        return gamma
    elif new_dims == velocity:
        gamma2 = np.multiply(x, x, out=self._get_out(x))
        inv_gamma_2 = np.true_divide(1, gamma2, out=self._get_out(x))
        beta2 = np.subtract(1, inv_gamma_2, out=self._get_out(x))
        beta = np.sqrt(beta2, out=self._get_out(x))
        return np.multiply(pc.clight, beta, out=self._get_out(x))
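Unwound from the unyt scratch buffers, the dimensionless branch is just the Lorentz factor gamma = 1 / sqrt(1 - (v/c)^2). A standalone evaluation (c hardcoded here for illustration):

import numpy as np

c = 299792458.0                       # speed of light in m/s
v = np.array([0.0, 0.5 * c, 0.9 * c])
beta = np.true_divide(v, c)
gamma = np.true_divide(1.0, np.sqrt(1.0 - beta * beta))
print(np.round(gamma, 4))             # [1.     1.1547 2.2942]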
Source : equivalencies.py
with BSD 3-Clause "New" or "Revised" License
from yt-project
def _convert(self, x, new_dims):
    from unyt import physical_constants as pc
    return np.true_divide(pc.h_mks / pc.clight, x, out=self._get_out(x))
Source : base.py
with GNU General Public License v3.0
from adityaprakash-bobby
def _divide(self, other, true_divide=False, rdivide=False):
    if isscalarlike(other):
        if rdivide:
            if true_divide:
                return np.true_divide(other, self.todense())
            else:
                return np.divide(other, self.todense())
        if true_divide and np.can_cast(self.dtype, np.float_):
            return self.astype(np.float_)._mul_scalar(1./other)
        else:
            r = self._mul_scalar(1./other)
            scalar_dtype = np.asarray(other).dtype
            if (np.issubdtype(self.dtype, np.integer) and
                    np.issubdtype(scalar_dtype, np.integer)):
                return r.astype(self.dtype)
            else:
                return r
    elif isdense(other):
        if not rdivide:
            if true_divide:
                return np.true_divide(self.todense(), other)
            else:
                return np.divide(self.todense(), other)
        else:
            if true_divide:
                return np.true_divide(other, self.todense())
            else:
                return np.divide(other, self.todense())
    elif isspmatrix(other):
        if rdivide:
            return other._divide(self, true_divide, rdivide=False)
        self_csr = self.tocsr()
        if true_divide and np.can_cast(self.dtype, np.float_):
            return self_csr.astype(np.float_)._divide_sparse(other)
        else:
            return self_csr._divide_sparse(other)
    else:
        return NotImplemented
Source : main.py
with MIT License
from alan-toledo
def backward_prop(data, labels, params, forward_prop_func):
    """
    Implement the backward propagation gradient computation step for a neural network

    Args:
        data: A numpy array containing the input
        labels: A 2d numpy array containing the labels
        params: A dictionary mapping parameter names to numpy arrays with the parameters.
            This numpy array will contain W1, b1, W2 and b2
            W1 and b1 represent the weights and bias for the hidden layer of the network
            W2 and b2 represent the weights and bias for the output layer of the network
        forward_prop_func: A function that follows the forward_prop API above

    Returns:
        A dictionary of strings to numpy arrays where each key represents the name of a weight
        and the values represent the gradient of the loss with respect to that weight.
        In particular, it should have 4 elements:
            W1, W2, b1, and b2
    """
    # *** START CODE HERE ***
    (z, output, _) = forward_prop(data, labels, params)
    gradients = {}
    dscores = output - labels
    dscores = np.true_divide(dscores, float(labels.shape[0]))  # (1000, 10)
    # (300, 1000) * (1000, 10)
    gradients['W2'] = np.dot(z.T, dscores)  # (300, 10)
    gradients['b2'] = np.sum(dscores, axis=0, keepdims=True)  # (1, 10)
    # (1000, 10) * (10, 300)
    dhidden = np.dot(dscores, params['W2'].T)  # (1000, 300)
    dhidden = np.multiply(dhidden, z * (1.0 - z))  # (1000, 300) * (1000, 300)
    # (784, 1000) * (1000, 300)
    gradients['W1'] = np.dot(data.T, dhidden)  # (784, 300)
    gradients['b1'] = np.sum(dhidden, axis=0, keepdims=True)  # (1, 300)
    return gradients
    # *** END CODE HERE ***
Source : main.py
with MIT License
from alan-toledo
def backward_prop_regularized(data, labels, params, forward_prop_func, reg):
    """
    Implement the backward propagation gradient computation step for a neural network

    Args:
        data: A numpy array containing the input
        labels: A 2d numpy array containing the labels
        params: A dictionary mapping parameter names to numpy arrays with the parameters.
            This numpy array will contain W1, b1, W2 and b2
            W1 and b1 represent the weights and bias for the hidden layer of the network
            W2 and b2 represent the weights and bias for the output layer of the network
        forward_prop_func: A function that follows the forward_prop API above
        reg: The regularization strength (lambda)

    Returns:
        A dictionary of strings to numpy arrays where each key represents the name of a weight
        and the values represent the gradient of the loss with respect to that weight.
        In particular, it should have 4 elements:
            W1, W2, b1, and b2
    """
    # *** START CODE HERE ***
    (z, output, _) = forward_prop(data, labels, params)
    gradients = {}
    dscores = output - labels
    dscores = np.true_divide(dscores, float(labels.shape[0]))
    dCE = dscores
    # (300, 1000) * (1000, 10)
    gradients['W2'] = np.dot(z.T, dCE)  # (300, 10)
    gradients['W2'] = gradients['W2'] + 2.0 * float(reg) * params['W2']  # (300, 10)
    gradients['b2'] = np.sum(dCE, axis=0, keepdims=True)  # (1, 10)
    # (1000, 10) * (10, 300)
    dhidden = np.dot(dCE, params['W2'].T)  # (1000, 300)
    dhidden = np.multiply(dhidden, z * (1.0 - z))  # (1000, 300) * (1000, 300)
    # (784, 1000) * (1000, 300)
    gradients['W1'] = np.dot(data.T, dhidden)  # (784, 300)
    gradients['W1'] = gradients['W1'] + 2.0 * float(reg) * params['W1']
    gradients['b1'] = np.sum(dhidden, axis=0, keepdims=True)  # (1, 300)
    return gradients
    # *** END CODE HERE ***
Source : frames.py
with MIT License
from alexandrosstergiou
def frames_extractor(file, frame_num, step):
    frames = []
    try:
        clip = VideoFileClip(file)
    except:
        if 'clip' in locals():
            clip.close()
        print('FFMPEG COULD NOT IMPORT: ' + file)
        return
    count = 1
    i = 0
    for frame in clip.iter_frames():
        if (i >= frame_num):
            break
        y, x, c = frame.shape
        img = frame[y // 2 - 112:y // 2 + 112, x // 2 - 112:x // 2 + 112]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.true_divide(img, 255.0)
        if (count % step == 0):
            frames.append(copy.deepcopy(img))
            i += 1
        count += 1
    clip.close()
    del clip.reader
    del clip
    if (len(frames) == frame_num):
        return frames
    else:
        return
Source : function_base.py
with MIT License
from alvarobartt
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Returns the qth percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : array_like of float
        Percentile or sequence of percentiles to compute, which must be between
        0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array.

        .. versionchanged:: 1.9.0
            A tuple of axes is supported
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow the input array `a` to be modified by intermediate
        calculations, to save memory. In this case, the contents of the input
        `a` after this function completes is undefined.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to
        use when the desired percentile lies between two data points
        ``i < j``:

        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
          is the fractional part of the index surrounded by ``i``
          and ``j``.
        * 'lower': ``i``.
        * 'higher': ``j``.
        * 'nearest': ``i`` or ``j``, whichever is nearest.
        * 'midpoint': ``(i + j) / 2``.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except with q in the range [0, 1].

    Notes
    -----
    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
    the two nearest neighbors as well as the `interpolation` parameter
    will determine the percentile if the normalized ranking does not
    match the location of ``q`` exactly. This function is the same as
    the median if ``q=50``, the same as the minimum if ``q=0`` and the
    same as the maximum if ``q=100``.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10, 7, 4],
           [ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([[ 6.5, 4.5, 2.5]])
    >>> np.percentile(a, 50, axis=1)
    array([ 7., 2.])
    >>> np.percentile(a, 50, axis=1, keepdims=True)
    array([[ 7.],
           [ 2.]])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=out)
    array([[ 6.5, 4.5, 2.5]])
    >>> m
    array([[ 6.5, 4.5, 2.5]])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a == b)

    The different types of interpolation can be visualized graphically:

    .. plot::

        import matplotlib.pyplot as plt

        a = np.arange(4)
        p = np.linspace(0, 100, 6001)
        ax = plt.gca()
        lines = [
            ('linear', None),
            ('higher', '--'),
            ('lower', '--'),
            ('nearest', '-.'),
            ('midpoint', '-.'),
        ]
        for interpolation, style in lines:
            ax.plot(
                p, np.percentile(a, p, interpolation=interpolation),
                label=interpolation, linestyle=style)
        ax.set(
            title='Interpolation methods for list: ' + str(a),
            xlabel='Percentile',
            ylabel='List item returned',
            yticks=a)
        ax.legend()
        plt.show()

    """
    q = np.true_divide(q, 100.0)  # handles the asarray for us too
    if not _quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")
    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, interpolation, keepdims)
Source : ecp_utils.py
with Apache License 2.0
from Ascend
def evaluate(difficulty, ignore_other_vru, results_path, det_path, gt_path, det_method_name,
             use_cache, eval_type='pedestrian'):
    """Evaluate to calculate lamr."""
    pkl_path = os.path.join(results_path,
                            'ignore={}_difficulty={}_evaltype={}.pkl'.format(ignore_other_vru,
                                                                             difficulty, eval_type))
    if os.path.exists(pkl_path) and use_cache:
        result = Result.load_from_disc(pkl_path)
    else:
        data = ECPDataset.load_gt_det(gt_path, det_path)
        evaluator = create_evaluator(data, difficulty, ignore_other_vru, eval_type)
        result = evaluator.result
        result.save_to_disc(pkl_path)
    mr = 1.0 - np.true_divide(result.tp, result.nof_gts)
    recall = np.true_divide(result.tp, result.nof_gts)
    fppi = np.true_divide(result.fp, result.nof_imgs)
    title = 'difficulty={}, ignore_other_vru={}, evaltype={}'.format(difficulty, ignore_other_vru,
                                                                     eval_type)
    label = 'lamr: {}'.format(lamr(recall, fppi))
    fig = create_lamr(title, label, mr, fppi)
    filename = 'lamr_ignore={}_difficulty={}_evaltype={}'.format(ignore_other_vru, difficulty,
                                                                 eval_type)
    fig.savefig(os.path.join(results_path, '{}.pdf'.format(filename)))
    fig.savefig(os.path.join(results_path, '{}.png'.format(filename)))
    return lamr(recall, fppi)
Source : lane_metric.py
with Apache License 2.0
from Ascend
def evaluate_core(*, gt_lanes, pr_lanes, gt_wh, pr_wh, hyperp):
    """Core function of evaluate for every image.

    :param gt_lanes: groundtruth lanes of an image
    :type gt_lanes: a list of lanes in an image
    :param pr_lanes: predicted lanes of an image
    :type pr_lanes: a list of lanes in an image
    :return: a dict containing a series of parameters, which are:
        gt_num: groundtruth lane number of an image
        pr_num: predicted lane number of an image
        hit_num: the matched number of groundtruth and predicted lanes
        gt_curr_num: groundtruth current lane number of an image
        pr_curr_num: predicted current lane number of an image
        hit_curr_num: the matched number of groundtruth and predicted lanes in the current domain
        left_error: the error of the current left matched lane on the x axis
        right_error: the error of the current right matched lane on the x axis
        census_error: the error of matched lanes on the x axis
    :rtype: dict
    """
    gt_num = len(gt_lanes)
    pr_num = len(pr_lanes)
    hit_num = 0
    if gt_num > 0 and pr_num > 0:
        iou_thresh = hyperp['iou_thresh']
        new_height = hyperp['eval_height']
        new_width = hyperp['eval_width']
        gt_y_ratio = np.true_divide(gt_wh['height'], new_height)
        gt_x_ratio = np.true_divide(gt_wh['width'], new_width)
        pr_y_ratio = np.true_divide(pr_wh['height'], new_height)
        pr_x_ratio = np.true_divide(pr_wh['width'], new_width)
        # resize lanes and interp lanes,
        # all the gt and pr are mapped to the src img, so the scale ratio is the same,
        # note that the scale ratio is not a factor but a divisor
        # print('gt_lane', gt_lanes)
        gt_lanes = list(map(lambda lane: resize_lane(lane, gt_x_ratio, gt_y_ratio), gt_lanes))
        pr_lanes = list(map(lambda lane: resize_lane(lane, pr_x_ratio, pr_y_ratio), pr_lanes))
        sorted_gt_lanes = gt_lanes
        sorted_pr_lanes = pr_lanes
        iou_mat = np.zeros((gt_num, pr_num))
        for (index_gt, gt_lane), (index_pr, pr_lane) in product(enumerate(sorted_gt_lanes), enumerate(sorted_pr_lanes)):
            iou_mat[index_gt][index_pr] = calc_iou(gt_lane, pr_lane, hyperp)
        # match_idx = Munkres().compute(make_cost_matrix(iou_mat, lambda iou: float(1.0 - iou)))
        cost_matrix = 1 - np.array(iou_mat)
        match_index_list = linear_sum_assignment(cost_matrix)
        for gt_index, pr_index in zip(*match_index_list):
            iou_val = iou_mat[gt_index][pr_index]
            if iou_val > iou_thresh:
                hit_num += 1
    return dict(gt_num=gt_num, pr_num=pr_num, hit_num=hit_num)
Source : function_base.py
with Apache License 2.0
from aws-samples
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : array_like of float
        Percentile or sequence of percentiles to compute, which must be between
        0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array.

        .. versionchanged:: 1.9.0
            A tuple of axes is supported
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow the input array `a` to be modified by intermediate
        calculations, to save memory. In this case, the contents of the input
        `a` after this function completes is undefined.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to
        use when the desired percentile lies between two data points
        ``i < j``:

        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
          is the fractional part of the index surrounded by ``i``
          and ``j``.
        * 'lower': ``i``.
        * 'higher': ``j``.
        * 'nearest': ``i`` or ``j``, whichever is nearest.
        * 'midpoint': ``(i + j) / 2``.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except with q in the range [0, 1].

    Notes
    -----
    Given a vector ``V`` of length ``N``, the q-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
    the two nearest neighbors as well as the `interpolation` parameter
    will determine the percentile if the normalized ranking does not
    match the location of ``q`` exactly. This function is the same as
    the median if ``q=50``, the same as the minimum if ``q=0`` and the
    same as the maximum if ``q=100``.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10, 7, 4],
           [ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([[ 6.5, 4.5, 2.5]])
    >>> np.percentile(a, 50, axis=1)
    array([ 7., 2.])
    >>> np.percentile(a, 50, axis=1, keepdims=True)
    array([[ 7.],
           [ 2.]])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=out)
    array([[ 6.5, 4.5, 2.5]])
    >>> m
    array([[ 6.5, 4.5, 2.5]])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a == b)

    The different types of interpolation can be visualized graphically:

    .. plot::

        import matplotlib.pyplot as plt

        a = np.arange(4)
        p = np.linspace(0, 100, 6001)
        ax = plt.gca()
        lines = [
            ('linear', None),
            ('higher', '--'),
            ('lower', '--'),
            ('nearest', '-.'),
            ('midpoint', '-.'),
        ]
        for interpolation, style in lines:
            ax.plot(
                p, np.percentile(a, p, interpolation=interpolation),
                label=interpolation, linestyle=style)
        ax.set(
            title='Interpolation methods for list: ' + str(a),
            xlabel='Percentile',
            ylabel='List item returned',
            yticks=a)
        ax.legend()
        plt.show()

    """
    q = np.true_divide(q, 100.0)  # handles the asarray for us too
    if not _quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")
    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, interpolation, keepdims)
Source : utils.py
with MIT License
from clinplayer
def scale_vertex_by_bbox(v, source1, source2, target1, target2):
    s, s_ = source1, source2
    t, t_ = target1, target2
    cs = 0.5 * (source1 + source2)
    ct = 0.5 * (target1 + target2)
    move = t - s
    dt = t - t_
    ds = s - s_
    for i in range(3):
        if ds[i] == 0:
            ds[i] = 1
    scale = np.true_divide(dt, ds)
    cs = cs * scale
    move = ct - cs
    v = v * scale + move
    return v
Source : function_base.py
with Apache License 2.0
from dashanji
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : array_like of float
        Percentile or sequence of percentiles to compute, which must be between
        0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array.

        .. versionchanged:: 1.9.0
            A tuple of axes is supported
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow the input array `a` to be modified by intermediate
        calculations, to save memory. In this case, the contents of the input
        `a` after this function completes is undefined.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to
        use when the desired percentile lies between two data points
        ``i < j``:

        * 'linear': ``i + (j - i) * fraction``, where ``fraction``
          is the fractional part of the index surrounded by ``i``
          and ``j``.
        * 'lower': ``i``.
        * 'higher': ``j``.
        * 'nearest': ``i`` or ``j``, whichever is nearest.
        * 'midpoint': ``(i + j) / 2``.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except with q in the range [0, 1].

    Notes
    -----
    Given a vector ``V`` of length ``N``, the q-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
    the two nearest neighbors as well as the `interpolation` parameter
    will determine the percentile if the normalized ranking does not
    match the location of ``q`` exactly. This function is the same as
    the median if ``q=50``, the same as the minimum if ``q=0`` and the
    same as the maximum if ``q=100``.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10, 7, 4],
           [ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([7., 2.])
    >>> np.percentile(a, 50, axis=1, keepdims=True)
    array([[7.],
           [2.]])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=out)
    array([6.5, 4.5, 2.5])
    >>> m
    array([6.5, 4.5, 2.5])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([7., 2.])
    >>> assert not np.all(a == b)

    The different types of interpolation can be visualized graphically:

    .. plot::

        import matplotlib.pyplot as plt

        a = np.arange(4)
        p = np.linspace(0, 100, 6001)
        ax = plt.gca()
        lines = [
            ('linear', None),
            ('higher', '--'),
            ('lower', '--'),
            ('nearest', '-.'),
            ('midpoint', '-.'),
        ]
        for interpolation, style in lines:
            ax.plot(
                p, np.percentile(a, p, interpolation=interpolation),
                label=interpolation, linestyle=style)
        ax.set(
            title='Interpolation methods for list: ' + str(a),
            xlabel='Percentile',
            ylabel='List item returned',
            yticks=a)
        ax.legend()
        plt.show()

    """
    q = np.true_divide(q, 100)
    q = asanyarray(q)  # undo any decay that the ufunc performed (see gh-13105)
    if not _quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")
    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, interpolation, keepdims)
Source : snippet.py
with Apache License 2.0
from dockerizeme
def true_divide(left, right):
    return _maybewrap(np.true_divide(_usable(left), _usable(right)),
                      left, right)
Source : heat_pump.py
with MIT License
from ElsevierSoftwareX
def __init__(self, environment, p_th_nom, cop=None, eta=0.36, t_max=55.0,
             lower_activation_limit=0, t_flow=55.0):
    simu_horizon = environment.timer.simu_horizon
    (t_ambient,) = environment.weather.getWeatherForecast(getTAmbient=True)
    ts = environment.timer.time_in_year()
    t_ambient = t_ambient[ts:ts + simu_horizon]
    if cop is None:
        cop = eta * np.true_divide((t_flow + 273.15), (t_flow - t_ambient))
        cop = np.nan_to_num(cop)
        cop[cop < 0] = 0
    elif isinstance(cop, (int, float)):
        cop = np.full(simu_horizon, cop)
    elif not isinstance(cop, np.ndarray):
        raise TypeError(
            "Unknown type for `cop`: {}. Must be `numpy.ndarray`, `int` "
            "or `float`".format(type(cop))
        )
    super().__init__(environment, t_ambient, t_flow, [], [], cop, t_max, lower_activation_limit)
    self._long_id = "HP_" + self._id_string
    self.cop = cop
    self.p_th_nom = p_th_nom
    self.activation_constr = LowerActivationLimit(self, "p_th_heat", lower_activation_limit, -p_th_nom)
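The default COP model above is a fixed fraction (eta) of the Carnot limit, with the flow temperature converted to Kelvin in the numerator: COP = eta * (t_flow + 273.15) / (t_flow - t_ambient). A standalone evaluation with invented ambient temperatures:

import numpy as np

eta, t_flow = 0.36, 55.0                     # defaults from the constructor
t_ambient = np.array([-5.0, 10.0, 20.0])     # invented forecast, in degrees C
cop = eta * np.true_divide(t_flow + 273.15, t_flow - t_ambient)
print(np.round(cop, 2))                      # [1.97 2.63 3.38] -- warmer air, better COP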
Source : indictor.py
with Apache License 2.0
from Frank-qlu
def bias(data, n=5):
    import numpy as np
    '''
    Deviation rate (BIAS)

    Parameters
    ------
    data : pandas.DataFrame
        stock data obtained via get_h_data
    n : int
        statistics window length, default 5

    return
    -------
    BIAS : numpy.ndarray <numpy.float64>
        the BIAS indicator
    '''
    MA = ma(data, n)
    CLOSES = data["close"]
    BIAS = (np.true_divide((CLOSES - MA), MA)) * (100 / 100)
    return BIAS
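Note that the `* (100 / 100)` factor above is a no-op; the conventional BIAS indicator multiplies the relative deviation from the moving average by 100 to get a percentage. A self-contained version of that conventional form (prices invented, with a plain rolling mean standing in for the project's ma helper):

import numpy as np
import pandas as pd

closes = pd.Series([10.0, 10.5, 11.0, 10.8, 11.2])
ma = closes.rolling(3).mean()                       # stand-in for ma(data, n)
bias_pct = np.true_divide(closes - ma, ma) * 100    # percent deviation from the MA
print(bias_pct.round(2))                            # NaN, NaN, 4.76, 0.31, 1.82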
Source : indictor.py
with Apache License 2.0
from Frank-qlu
def obv(data):
    import numpy as np
    '''
    On Balance Volume (OBV) indicator

    Parameters
    ------
    data : pandas.DataFrame
        stock data obtained via get_h_data

    return
    -------
    OBV : numpy.ndarray <numpy.float64>
        the OBV indicator
    '''
    tmp = np.true_divide(((data["close"] - data["low"]) - (data["high"] - data["close"])), (data["high"] - data["low"]))
    OBV = tmp * data["volume"]
    return OBV
Source : statistics.py
with Apache License 2.0
from hasanirtiza
def log_avg_mr_reference_implementation(self):
    ys = np.true_divide(self.result.tp, self.result.nof_gts)  # recall
    xs = np.true_divide(self.result.fp, self.result.nof_imgs)  # false positives per image
    ref = np.power(10, np.linspace(-2, 0, 9))
    result = np.zeros(ref.shape)
    for i in range(len(ref)):
        j = np.argwhere(xs <= ref[i]).flatten()
        if j.size:
            # "[...] (for curves that end before reaching a given FPPI
            # rate, the minimum miss rate achieved is used)"
            result[i] = ys[j[-1]]
        else:
            # if the first detection is a tp, the minimal fppi is zero and we never execute this else
            # case (assuming the fppi ref points are > 0)
            # if the first detection is a fp, the minimal fppi is 1/nof_imgs; we execute this else
            # case if 1/nof_imgs > min(fppi ref points)
            # this part corresponds to xs[0] = -inf and ys[0] = 0
            result[i] = 0  # unnecessary since result[i] is 0 anyway...
    # The 1e-10 is needed if a miss rate of 0 (equal to a recall of 1, as the result variable stores recall)
    # is reached.
    return np.exp(np.mean(np.log(np.maximum(1e-10, 1 - result))))
Source : ndarithmetic.py
with BSD 3-Clause "New" or "Revised" License
from holzschu
def divide(self, operand, operand2=None, **kwargs):
    return self._prepare_then_do_arithmetic(np.true_divide, operand,
                                            operand2, **kwargs)
Source : objectives.py
with MIT License
from jefkine
def derivative(self, predictions, targets, np_type):
    """
    Applies the KLDivergence derivative to the predictions and targets provided

    Args:
        predictions (numpy.array): the predictions numpy array
        targets (numpy.array): the targets numpy array

    Returns:
        numpy.array: the output of the KLDivergence derivative for the predictions and targets
    """
    targets = super(KLDivergence, self).add_fuzz_factor(targets)
    predictions = super(KLDivergence, self).add_fuzz_factor(predictions)
    d_log_diff = np.multiply((predictions - targets), (np.log(np.true_divide(targets, predictions))))
    return np.multiply((1 + np.log(np.true_divide(targets, predictions))), d_log_diff)
Source : optimizers.py
with MIT License
from jefkine
def update(self, weights, grads, epoch_num, batch_num, batch_size):
    self.weights = weights
    self.grads = np.true_divide(grads, batch_size)
    self.t = batch_num
    if self.m is None:
        self.m = np.zeros_like(self.weights)
    if self.u is None:
        self.u = np.zeros_like(self.weights)
    lr_t = np.true_divide(super(Adamax, self).get_learning_rate(epoch_num),
                          1. - np.power(self.beta1, self.t))
    m_hat = np.multiply(self.beta1, self.m) + np.multiply((1. - self.beta1), self.grads)
    u_hat = np.maximum(np.multiply(self.beta2, self.u), np.abs(self.grads))
    self.weights -= np.true_divide(np.multiply(lr_t, m_hat), (u_hat + self.epsilon))
    return self.weights
Source : optimizers.py
with MIT License
from jefkine
def update(self, weights, grads, epoch_num, batch_num, batch_size):
    self.weights = weights
    self.grads = np.true_divide(grads, batch_size)  # grads
    if self.cache is None:
        self.cache = np.zeros_like(self.weights)
    if self.delta is None:
        self.delta = np.zeros_like(self.weights)
    self.cache = np.multiply(self.rho, self.cache) + np.multiply(1 - self.rho, np.square(self.grads))
    RMSE_grad = np.sqrt(self.cache + self.epsilon)
    RMSE_delta = np.sqrt(self.delta + self.epsilon)
    update = np.multiply(self.grads, np.true_divide(RMSE_delta, RMSE_grad))
    self.weights -= np.multiply(super(Adadelta, self).get_learning_rate(epoch_num), update)
    self.delta = np.multiply(self.rho, self.delta) + np.multiply((1 - self.rho), np.square(update))
    return self.weights
Source : dataset.py
with MIT License
from Kshitij-Ambilduke
def __getitem__(self, idx):
    data = self.questions[idx]
    # Each call to __getitem__ from the dataloader returns a Sample class object, which is
    # collated by our special batch collator into a SampleList -- basically
    # an attribute-based batch, in layman's terms
    current_sample = Sample()
    question = data["question"]
    tokens = tokenize(question, keep=[";", ","], remove=["?", "."])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]
    processed = self.answer_processor({"answers": [data["answer"]]})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"]
    image_path = os.path.join(self.image_path, data["image_filename"])
    image = np.true_divide(Image.open(image_path).convert("RGB"), 255)
    image = image.astype(np.float32)
    current_sample.image = torch.from_numpy(image.transpose(2, 0, 1))
    return current_sample
Source : dataset.py
with MIT License
from Kshitij-Ambilduke
def __getitem__(self, idx):
    data = self.questions[idx]
    current_sample = Sample()
    question = data["question"]
    tokens = tokenize(question, keep=[";", ","], remove=["?", "."])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]
    processed = self.answer_processor({'answers': [data["answer"]]})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"]
    # print(processed["answers_scores"])
    # print(type(current_sample.answers))
    image_path = os.path.join(self.image_path, data["image_name"])
    image = np.true_divide(Image.open(image_path).convert("RGB"), 255)
    image = image.astype(np.float32)
    current_sample.image = torch.from_numpy(image.transpose(2, 0, 1))
    return current_sample
Source : handler.py
with BSD 3-Clause "New" or "Revised" License
from mm842
def calibrate_mrevdynamics_lobster_rf(
        ticker_str,
        date_str,
        time_start_data,
        time_end_data,
        time_start_calc,
        time_end_calc,
        num_levels_data,
        num_levels_calc,
        ntimepoints_grid,
        ntimesteps_cal,
        ntimesteps_nextcal,
        cal_to_average=False,
        cal_to_average_classic=False,
):
    """Calibrates a mean-reverting model to order book volume loaded from LOBSTER data.
    -----------
    args:
        ticker_str,
        date_str,
        time_start_data,
        time_end_data,
        time_start_calc,
        time_end_calc,
        num_levels_data,
        num_levels_calc,
        ntimepoints_grid,
        ntimesteps_cal,
        ntimesteps_nextcal,
        cal_to_average=False          calibration to total volume (if False) in the first buckets, or to the average
        cal_to_average_classic=False  calibration to total volume (if False) in the first buckets, or to the average - by just averaging after extraction
    """
    # read files from lobster to uniform grid
    lobreader = lobr.LOBSTERReader(
        ticker_str,
        date_str,
        str(time_start_data),
        str(time_end_data),
        str(num_levels_data),
        str(time_start_calc),
        str(time_end_calc)
    )
    print('Extracting total volume process on uniform grid.')
    if cal_to_average:
        dt, time_stamps, volume_bid, volume_ask = lobreader.load_marketdepth(
            num_observations=ntimepoints_grid,
            num_levels_calc_str=str(num_levels_calc),
            write_output=False
        )
    else:
        dt, time_stamps, volume_bid, volume_ask = lobreader.load_ordervolume(
            num_observations=ntimepoints_grid,
            num_levels_calc_str=str(num_levels_calc),
            write_output=False
        )
    if cal_to_average_classic:
        volume_bid = np.true_divide(volume_bid, num_levels_calc)
        volume_ask = np.true_divide(volume_ask, num_levels_calc)
    print("Finished.")
    print('Start calibration on time frame')
    # Create calibrator object with id inherited from lobster notation and estimator for correlation based on realized covariance
    ov_cal = cal.OrderVolumeCalibrator(
        calibratorid=lobreader.create_filestr(identifier_str="cal_ordervolume",
                                              num_levels=str(num_levels_calc)),
        estimator_dynamics=est.estimate_recgamma_diff,
        estimator_corr=est.estimate_log_corr_rv
    )
    ov_cal.calibrate_running_frame(
        time_stamps[0],
        dt,
        volume_bid,
        volume_ask,
        ntimesteps_cal,
        ntimesteps_nextcal
    )
    # save history as csv file
    print('Calibration finished. Saving csv file.')
    ov_cal.savef_history(csv=True)
    print('Calibration history saved.')
    # create plots
    lobp.plot_calibration_history_volume(ov_cal.history, filename=ov_cal.calibratorid,
                                         titlestr=" ".join(
                                             (ticker_str, date_str, str(num_levels_calc))))
    print('Plots saved')
Source : event_detection.py
with BSD 3-Clause "New" or "Revised" License
from mxochicale
def get_fixation_list(gaze, errors, xi, yi, ti, fixation_radius_threshold, fixation_duration_threshold, pupil_diameter):
    n, m = gaze.shape
    fixations = []
    fixation = []  # single fixation, to be appended to fixations
    counter = 0  # number of points in the fixation
    sumx = 0  # used to compute the center of a fixation in x and y direction
    sumy = 0
    distance = 0  # captures the distance of a current sample from the fixation center
    i = 0  # iterates through the gaze samples
    while i < n - 1:
        x = gaze[i, xi]
        y = gaze[i, yi]
        if counter == 0:
            # ignore erroneous samples before a fixation
            if errors[i]:
                i += 1
                continue
            centerx = x
            centery = y
        else:
            centerx = np.true_divide(sumx, counter)
            centery = np.true_divide(sumy, counter)
        if not errors[i]:  # only update distance if the current sample is not erroneous
            distance = np.sqrt((x - centerx) * (x - centerx) + (y - centery) * (y - centery))
        if distance > fixation_radius_threshold:  # start new fixation
            if gaze[(i - 1), ti] - gaze[(i - counter), ti] >= fixation_duration_threshold:
                start_index = i - counter + 1
                end_index = i - 1 - 1
                # discard fixations with more than 50% erroneous samples
                percentage_error = np.sum(errors[start_index:(end_index + 1)]) / float(end_index - start_index)
                if percentage_error >= 0.5:
                    if errors[i]:
                        i += 1
                        counter = 0
                    else:
                        counter = 1
                        sumx = x
                        sumy = y
                    continue
                gaze_indices = np.arange(start_index, end_index + 1)[np.logical_not(errors[start_index:(end_index + 1)])]
                start_index = gaze_indices[0]
                end_index = gaze_indices[-1]
                gazex = gaze[start_index:(end_index + 1), xi][np.logical_not(errors[start_index:(end_index + 1)])]
                gazey = gaze[start_index:(end_index + 1), yi][np.logical_not(errors[start_index:(end_index + 1)])]
                gazet = gaze[start_index:(end_index + 1), ti][np.logical_not(errors[start_index:(end_index + 1)])]
                # extract fixation characteristics
                fixation.append(np.mean(gazex))  # 0-1. mean x, y
                fixation.append(np.mean(gazey))
                fixation.append(np.var(gazex))  # 2-3. var x, y
                fixation.append(np.var(gazey))
                fixation.append(gazet[0])  # 4-5. t_start, t_end
                fixation.append(gazet[-1])
                fixation.append(gaze_indices[0])  # 6-7. index_start, index_end
                fixation.append(gaze_indices[-1])
                ds = ((pupil_diameter[start_index:(end_index + 1), 1] + pupil_diameter[start_index:(end_index + 1), 2]) / 2.)[np.logical_not(errors[start_index:(end_index + 1)])]
                fixation.append(np.mean(ds))  # 8. mean pupil diameter
                fixation.append(np.var(ds))  # 9. var pupil diameter
                succ_dx = gazex[1:] - gazex[:-1]
                succ_dy = gazey[1:] - gazey[:-1]
                succ_angles = np.arctan2(succ_dy, succ_dx)
                fixation.append(np.mean(succ_angles))  # 10. mean successive angle
                fixation.append(np.var(succ_angles))  # 11. var successive angle
                fixations.append(fixation)
                assert len(fixation) == len(gs.fixations_list_labels)
                # set up new fixation
                fixation = []
            if errors[i]:
                i += 1
                counter = 0
            else:
                counter = 1
                sumx = x
                sumy = y
        else:
            if not errors[i]:
                counter += 1
                sumx += x
                sumy += y
            i += 1
    return fixations
Source : mhgan.py
with MIT License
from nardeas
def generate_enhanced(self, sess, data_sampler, noise_sampler, count=1, k=100, squeeze=True):
    '''
    Draws <count> number of enhanced samples from the Generator with the
    Metropolis-Hastings algorithm.
    '''
    # Draw samples and epsilon values, compute scores
    scores, epsilon, samples = sess.run([
        self.scores,
        self.u,
        self.generator_output_tensor
    ], feed_dict={
        self.generator_input_tensor: noise_sampler(shape=[
            count * k,
            *self.generator_input_shape
        ]),
        # Calibration scores from real data
        self.discriminator_input_tensor: data_sampler(
            count
        ),
        self.c: count,
        self.k: k
    })
    # Metropolis-Hastings GAN algorithm
    selected = []
    for i in range(count):
        x = 0
        for x_next in range(k):
            Pd1 = scores[i][x]
            Pd2 = scores[i][x_next]
            alpha = np.fmin(1., np.true_divide((1. / Pd1 - 1.), (1. / Pd2 - 1.)))
            # Will ignore NaNs
            if epsilon[i][x_next] <= alpha:
                x = x_next
        # Avoid samples from the calibration distribution
        x += int(x == 0)
        selected.append(samples[x])
    selected = np.asarray(selected)
    if squeeze and selected.ndim > 3:
        return selected.squeeze(axis=3)
    return selected
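The acceptance rule is the Metropolis-Hastings ratio expressed through calibrated discriminator scores: alpha = min(1, (1/D(x) - 1) / (1/D(x') - 1)), so a candidate scored higher than the current sample is always accepted. A scalar check:

import numpy as np

Pd1, Pd2 = 0.4, 0.6   # current and candidate discriminator scores
alpha = np.fmin(1., np.true_divide(1. / Pd1 - 1., 1. / Pd2 - 1.))
print(alpha)          # 1.0 -- (1/0.4 - 1) / (1/0.6 - 1) = 1.5 / 0.667 > 1

Pd1, Pd2 = 0.4, 0.3
alpha = np.fmin(1., np.true_divide(1. / Pd1 - 1., 1. / Pd2 - 1.))
print(alpha)          # ~0.643 -- a worse candidate is accepted only with this probability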
Source : sergio.py
with GNU General Public License v3.0
from PayamDiba
def init_gene_bin_conc_(self, level):
    """
    Initializes the concentration of all genes in the input level.
    Note: calculate_half_response_ should be run before this method.
    """
    currGenes = self.level2verts_[level]
    for g in currGenes:
        if g[0].Type == 'MR':
            allBinRates = self.graph_[g[0].ID]['rates']
            for bIdx, rate in enumerate(allBinRates):
                g[bIdx].append_Conc(np.true_divide(rate, self.decayVector_[g[0].ID]))
        else:
            params = self.graph_[g[0].ID]['params']
            for bIdx in range(self.nBins_):
                rate = 0
                for interTuple in params:
                    meanExp = self.meanExpression[interTuple[0], bIdx]
                    rate += np.abs(interTuple[1]) * self.hill_(meanExp, interTuple[3], interTuple[2], interTuple[1] < 0)
                g[bIdx].append_Conc(np.true_divide(rate, self.decayVector_[g[0].ID]))
Source : sergio.py
with GNU General Public License v3.0
from PayamDiba
def calculate_ssConc_(self):
    """
    This function calculates the steady-state concentrations of both unspliced and spliced RNA in the given bin (cell type).
    Note that this steady-state concentration will be used to initialize the U and S concentrations of this bin (if it's a master bin) and its children (if any).
    Half responses are also computed here by calling their function.
    """
    for level in range(self.maxLevels_, -1, -1):
        for binID in range(self.nBins_):
            currGenes = self.level2verts_[level]
            for g in currGenes:
                if g[0].Type == 'MR':
                    currRate = self.graph_[g[0].ID]['rates'][binID]
                    self.binDict[binID][g[0].ID] = gene(g[0].ID, 'MR', binID)
                    self.binDict[binID][g[0].ID].set_ss_conc_U(np.true_divide(currRate, self.decayVector_[g[0].ID]))
                    self.binDict[binID][g[0].ID].set_ss_conc_S(self.ratioSp_[g[0].ID] * np.true_divide(currRate, self.decayVector_[g[0].ID]))
                else:
                    params = self.graph_[g[0].ID]['params']
                    currRate = 0
                    for interTuple in params:
                        meanExp = self.meanExpression[interTuple[0], binID]
                        currRate += np.abs(interTuple[1]) * self.hill_(meanExp, interTuple[3], interTuple[2], interTuple[1] < 0)
                        # if binID == 0 and g[0].ID == 0:
                        #     print(meanExp)
                        #     print(interTuple[3])
                        #     print(interTuple[2])
                        #     print(interTuple[1])
                        #     print(self.hill_(meanExp, interTuple[3], interTuple[2], interTuple[1] < 0))
                    self.binDict[binID][g[0].ID] = gene(g[0].ID, 'T', binID)
                    self.binDict[binID][g[0].ID].set_ss_conc_U(np.true_divide(currRate, self.decayVector_[g[0].ID]))
                    self.binDict[binID][g[0].ID].set_ss_conc_S(self.ratioSp_[g[0].ID] * np.true_divide(currRate, self.decayVector_[g[0].ID]))
                # NOTE This is our assumption for dynamics simulations --> we estimate the mean expression of g in b with the steady-state concentration of U_g in b
                self.meanExpression[g[0].ID, binID] = self.binDict[binID][g[0].ID].ss_U_
                # if binID == 0 and g[0].ID == 0:
                #     print(currRate)
                #     print(self.decayVector_[g[0].ID])
        if level > 0:
            self.calculate_half_response_(level - 1)
Source : ufunc_test.py
with BSD 3-Clause "New" or "Revised" License
from spcl
def test_ufunc_true_divide_ff(A: dace.float32[10], B: dace.float32[10]):
    return np.true_divide(A, B)
Source : ufunc_test.py
with BSD 3-Clause "New" or "Revised" License
from spcl
def test_ufunc_true_divide_uu(A: dace.uint32[10], B: dace.uint32[10]):
    return np.true_divide(A, B)
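A closing note on dtypes, which is what check_dtype in these tests verifies: np.true_divide promotes integer inputs, so even two uint32 arrays yield float64 (and the non_zero=True decorator argument on the unsigned test avoids dividing integers by zero). For reference:

import numpy as np

A = np.array([1, 2], dtype=np.uint32)
B = np.array([4, 8], dtype=np.uint32)
print(np.true_divide(A, B).dtype)   # float64 -- integer inputs are always promoted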