Here are examples of the Python API numpy.percentile, taken from open source projects. Each entry lists the source file, license, and project it comes from, followed by the snippet itself.

756 Examples
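Before the project examples, a minimal sketch of the basic call, with illustrative values not drawn from any project below:

import numpy as np

data = [1, 2, 3, 4, 5]
# a single percentile returns a scalar
print(np.percentile(data, 50))        # 3.0 (the median)
# a sequence of percentiles returns an array
print(np.percentile(data, [25, 75]))  # [2. 4.]
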
Source: leaf.py
with BSD 2-Clause "Simplified" License
from 0x9900

def reject_outliers(data, magnitude=1.8):
    """Reject the statistical outliers from a list"""
    q25, q75 = np.percentile(data, [25, 75])
    iqr = q75 - q25
    qmin = q25 - (iqr * magnitude)
    qmax = q75 + (iqr * magnitude)
    return [x for x in data if qmin <= x <= qmax]

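A hypothetical call of the helper above, with made-up sample values: the IQR fence drops the one extreme point and keeps the rest.

samples = [9.8, 10.1, 10.0, 9.9, 55.0]  # 55.0 lies far outside the IQR fence
print(reject_outliers(samples))         # [9.8, 10.1, 10.0, 9.9]
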
Source: tools.py
with MIT License
from aaron-h-code

def remove_outliers(arr, alpha=1.5):
    q1 = np.percentile(arr, 25)
    q3 = np.percentile(arr, 75)
    iqr = q3 - q1
    valid_range = (q1 - alpha * iqr, q3 + alpha * iqr)
    outliers_idx = []
    for i in range(len(arr)):
        if arr[i] > valid_range[1] or arr[i] < valid_range[0]:
            outliers_idx.append(i)
    return outliers_idx

Source: label_nodes.py
with BSD 3-Clause "New" or "Revised" License
from abaxi

def summarize_jacc_dict(j_dict):
    '''
    input: j_dict, dict node --> [jaccard score dist.]
    output: None
    updates j_dict in place: each [jaccard score dist.] is converted to
    its [0th, 25th, 50th, 75th, 100th percentile] values
    '''
    for node in j_dict.keys():
        v = j_dict[node]
        v = numpy.percentile(v, [0, 25, 50, 75, 100])
        j_dict[node] = v

Source: test_function_base.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_linear(self):
    # Test defaults
    assert_equal(np.percentile(range(10), 50), 4.5)
    # explicitly specify interpolation_method 'linear' (the default)
    assert_equal(np.percentile(range(10), 50,
                               interpolation='linear'), 4.5)

Source: test_function_base.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_lower_higher(self):
    # interpolation_method 'lower'/'higher'
    assert_equal(np.percentile(range(10), 50,
                               interpolation='lower'), 4)
    assert_equal(np.percentile(range(10), 50,
                               interpolation='higher'), 5)

Source: test_function_base.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_midpoint(self):
    assert_equal(np.percentile(range(10), 51,
                               interpolation='midpoint'), 4.5)
    assert_equal(np.percentile(range(11), 51,
                               interpolation='midpoint'), 5.5)
    assert_equal(np.percentile(range(11), 50,
                               interpolation='midpoint'), 5)

Source: test_function_base.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_nearest(self):
    assert_equal(np.percentile(range(10), 51,
                               interpolation='nearest'), 5)
    assert_equal(np.percentile(range(10), 49,
                               interpolation='nearest'), 4)

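The tests above exercise each interpolation mode on the same kind of input. A minimal side-by-side sketch (note: NumPy 1.22 renamed the interpolation keyword to method; the older spelling is kept here to match the tests):

import numpy as np

for mode in ("linear", "lower", "higher", "midpoint", "nearest"):
    print(mode, np.percentile(range(10), 51, interpolation=mode))
# linear 4.59, lower 4, higher 5, midpoint 4.5, nearest 5
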
Source: test_function_base.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_exception(self):
    assert_raises(ValueError, np.percentile, [1, 2], 56,
                  interpolation='foobar')
    assert_raises(ValueError, np.percentile, [1], 101)
    assert_raises(ValueError, np.percentile, [1], -1)
    assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])
    assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])

Source: test_nanfunctions.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_result_values(self):
    tgt = [np.percentile(d, 28) for d in _rdat]
    res = np.nanpercentile(_ndat, 28, axis=1)
    assert_almost_equal(res, tgt)
    # Transpose the array to fit the output convention of numpy.percentile
    tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
    res = np.nanpercentile(_ndat, (28, 98), axis=1)
    assert_almost_equal(res, tgt)

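A minimal illustration of why the test reaches for np.nanpercentile rather than np.percentile (the array values are invented for the example):

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])
print(np.percentile(a, 50, axis=1))     # [nan nan] -- NaNs propagate (with a RuntimeWarning)
print(np.nanpercentile(a, 50, axis=1))  # [2.  4.5] -- NaNs are ignored per row
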
Source: test_quantile.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_quantile_interpolation(self):
    # see gh-10174
    # interpolation = linear (default case)
    q = self.ts.quantile(0.1, interpolation='linear')
    assert q == np.percentile(self.ts.dropna(), 10)
    q1 = self.ts.quantile(0.1)
    assert q1 == np.percentile(self.ts.dropna(), 10)
    # test with and without the interpolation keyword
    assert q == q1

Source: test_window.py
with GNU General Public License v3.0
from adityaprakash-bobby

def test_rolling_quantile_np_percentile(self):
    # #9413: Tests that rolling window's quantile default behavior
    # is analogous to NumPy's percentile
    row = 10
    col = 5
    idx = pd.date_range('20100101', periods=row, freq='B')
    df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
    df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
    np_percentile = np.percentile(df, [25, 50, 75], axis=0)
    tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))

Source: _continuous_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

def _fitstart(self, data, args=None):
    # Initialize ML guesses using quartiles instead of moments.
    p25, p50, p75 = np.percentile(data, [25, 50, 75])
    return p50, (p75 - p25)/2

cauchy = cauchy_gen(name='cauchy')

Source: hypermapper.py
with GNU General Public License v3.0
from AICONSlab

def cutoff_img(in_file, cutoff_percents, out_file):
    print("\n thresholding ...")
    img = nib.load(in_file)
    data = img.get_data()
    cutoff_low = np.percentile(data, cutoff_percents)
    cutoff_high = np.percentile(data, 100 - cutoff_percents)
    print(cutoff_low)
    print(cutoff_high)
    new_data = data.copy()
    new_data[new_data > cutoff_high] = cutoff_high
    new_data[new_data < cutoff_low] = cutoff_low
    nib.save(nib.Nifti1Image(new_data, img.affine), out_file)

Source: context.py
with MIT License
from Akegarasu

def _recognize_fan_count(img: Image) -> int:
    cv_img = imagetools.cv_image(img.convert("L"))
    cv_img = imagetools.level(
        cv_img, np.percentile(cv_img, 1), np.percentile(cv_img, 90)
    )
    _, binary_img = cv2.threshold(cv_img, 50, 255, cv2.THRESH_BINARY_INV)
    if os.getenv("DEBUG") == __name__:
        cv2.imshow("cv_img", cv_img)
        cv2.imshow("binary_img", binary_img)
        cv2.waitKey()
        cv2.destroyAllWindows()
    text = ocr.text(imagetools.pil_image(binary_img))
    return int(text.rstrip("人").replace(",", ""))

Source: game_data.py
with MIT License
from Akegarasu

def _recognize_fan_count(img: PIL.Image.Image) -> int:
    cv_img = imagetools.cv_image(imagetools.resize(img.convert("L"), height=32))
    cv_img = imagetools.level(
        cv_img, np.percentile(cv_img, 1), np.percentile(cv_img, 90)
    )
    _, binary_img = cv2.threshold(cv_img, 60, 255, cv2.THRESH_BINARY_INV)
    if os.getenv("DEBUG") == __name__:
        cv2.imshow("cv_img", cv_img)
        cv2.imshow("binary_img", binary_img)
        cv2.waitKey()
        cv2.destroyAllWindows()
    text = ocr.text(imagetools.pil_image(binary_img))
    return int(text.rstrip("人").replace(",", ""))

Source: stats.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute

def iqr(X):
    """Numba interquartile range function for a single time series."""
    sorted = X.copy()
    sorted.sort()
    return np.percentile(sorted, 75) - np.percentile(sorted, 25)

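For comparison, a plain-NumPy sketch of the same quantity; np.percentile does not require sorted input, so the copy-and-sort above is presumably for Numba's nopython mode rather than for correctness:

import numpy as np

x = np.array([7.0, 1.0, 4.0, 9.0, 2.0])
q75, q25 = np.percentile(x, [75, 25])
print(q75 - q25)  # 5.0, the same IQR that iqr(x) returns
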
Source: test_mlab.py
with MIT License
from alvarobartt

def test_prctile(input, percentile):
    with pytest.warns(MatplotlibDeprecationWarning):
        assert_allclose(mlab.prctile(input, percentile),
                        np.percentile(input, percentile))

Source: stainNorm_Macenko.py
with MIT License
from ameliajimenez

def transform(self, I):
    I = ut.standardize_brightness(I)
    stain_matrix_source = get_stain_matrix(I)
    source_concentrations = ut.get_concentrations(I, stain_matrix_source)
    maxC_source = np.percentile(source_concentrations, 99, axis=0).reshape((1, 2))
    maxC_target = np.percentile(self.target_concentrations, 99, axis=0).reshape((1, 2))
    source_concentrations *= (maxC_target / maxC_source)
    return (255 * np.exp(-1 * np.dot(source_concentrations, self.stain_matrix_target).reshape(I.shape))).astype(
        np.uint8)

Source: stain_utils.py
with MIT License
from ameliajimenez

def standardize_brightness(I):
    """
    Rescale image I so that its 90th-percentile intensity maps to 255,
    clipping the result to [0, 255].
    :param I: input image array
    :return: brightness-standardized uint8 image
    """
    p = np.percentile(I, 90)
    return np.clip(I * 255.0 / p, 0, 255).astype(np.uint8)

Source: dw.py
with BSD 2-Clause "Simplified" License
from amerand

def sparse2Dsigma(x, order=4, nsigma=10, retT=False):
    """
    x is a 2D image of shape (2**N, 2**N).
    Zero out coefficients whose magnitude is below nsigma*sigma at each
    nonzero frequency, where sigma is estimated as half the 16th-84th
    percentile range.
    """
    S = struct2D(x, order=order)
    for i in filter(lambda k: k.startswith('f'), S.keys()):
        if float(i.split('_')[0][1:]) > 0:
            sigma = 0.5*(np.percentile(S[i], 84) -
                         np.percentile(S[i], 16))
            mask = np.abs(S[i]) >= nsigma*sigma
            S[i] *= mask
    return structInv2D(S, order=order, retT=retT)

Source: compute_onmt_data_stats.py
with MIT License
from andreamad8

def compute_ntokens_percentiles(
    input_txt_files: List[str], percentiles: List[int]
) -> List[int]:
    """Computes the percentiles of sequence lengths."""
    ntokens_array: List[int] = sum(
        [
            [len(line.strip().split()) for line in open(input_txt)]
            for input_txt in input_txt_files
        ],
        [],
    )
    return np.percentile(ntokens_array, percentiles).tolist()

Source: clustering.py
with MIT License
from AndreFCruz

def cluster_by_affinity_propagation(document, predictions, percentile_preference=99):
    def mention_contains_proper_noun(mention):
        for token in mention.tokens:
            if token.is_proper_noun():
                return True
        return False
    affinity_matrix = generate_affinity_matrix(document, predictions)
    # preference = [1 if mention_contains_proper_noun(mention) else 0.5 for mention in document.mentions]
    preference = [np.percentile(predictions, percentile_preference) for _ in range(len(document.mentions))]
    _, cluster_labels = affinity_propagation(affinity_matrix, preference=preference)
    return cluster_labels_to_entity_clusters(cluster_labels)

Source: recognition.py
with GNU General Public License v3.0
from aqntks

def contrast_grey(img):
    high = np.percentile(img, 90)
    low = np.percentile(img, 10)
    return (high - low)/np.maximum(10, high + low), high, low

Source: dataset.py
with GNU General Public License v3.0
from aqntks

def contrast_grey(img):
    high = np.percentile(img, 90)
    low = np.percentile(img, 10)
    return (high - low)/(high + low), high, low

Source: computeMisclosure.py
with GNU General Public License v3.0
from aria-tools

def __imgClipValues__(self, img, percentiles):
    '''
    Find values at which to clip the images (min/max) based on histogram percentiles.
    '''
    clipValues = {}
    clipValues['min'], clipValues['max'] = np.percentile(img.flatten(), percentiles)
    return clipValues

Source: demo.py
with MIT License
from AsahiLiu

def preprocess_point_cloud(point_cloud):
    ''' Prepare the numpy point cloud (N,3) for forward pass '''
    point_cloud = point_cloud[:, 0:3]  # do not use color for now
    floor_height = np.percentile(point_cloud[:, 2], 0.99)
    height = point_cloud[:, 2] - floor_height
    point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)], 1)  # (N,4) or (N,7)
    point_cloud = random_sampling(point_cloud, FLAGS.num_point)
    pc = np.expand_dims(point_cloud.astype(np.float32), 0)  # (1,40000,4)
    return pc

Source: procS1StackRTC.py
with GNU General Public License v3.0
from asfadmin

def get2sigmacutoffs(fi):
    (x, y, trans, proj, data) = saa.read_gdal_file(saa.open_gdal_file(fi))
    top = np.percentile(data, 98)
    data[data > top] = top
    stddev = np.std(data)
    mean = np.mean(data)
    lo = mean - 2*stddev
    hi = mean + 2*stddev
    return lo, hi

Source: AutoEncoder.py
with The Unlicense
from AshwathSalimath

def outliers_iqr(ys):
    quartile_1, quartile_3 = np.percentile(ys, [25, 75])
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return np.where((ys > upper_bound) | (ys < lower_bound))

outliers_iqr_np = outliers_iqr(iot_error_np)

Source: _rgr.py
with MIT License
from AtrCheema

def nrmse_ipercentile(self, q1=25, q2=75) -> float:
    """
    RMSE normalized by the interpercentile range of true values. This is least sensitive to outliers.
    q1: any integer between 1 and 99
    q2: any integer between 2 and 100. Should be greater than q1.
    Reference: Pontius et al., 2008.
    """
    q1 = np.percentile(self.true, q1)
    q3 = np.percentile(self.true, q2)
    iqr = q3 - q1
    return float(self.rmse() / iqr)

Source: test_window.py
with Apache License 2.0
from aws-samples

def test_rolling_quantile_np_percentile(self):
    # #9413: Tests that rolling window's quantile default behavior
    # is analogous to NumPy's percentile
    row = 10
    col = 5
    idx = pd.date_range('20100101', periods=row, freq='B')
    df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
    df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
    np_percentile = np.percentile(df, [25, 50, 75], axis=0)
    tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))

Source: util.py
with MIT License
from bei181

def normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,
                                cmap=CMAP_DEFAULT):
    """Converts a depth map to an RGB image."""
    # Convert to disparity.
    disp = 1.0 / (depth + 1e-6)
    if normalizer is not None:
        disp /= normalizer
    else:
        disp /= (np.percentile(disp, pc) + 1e-6)
    disp = np.clip(disp, 0, 1)
    disp = gray2rgb(disp, cmap=cmap)
    keep_h = int(disp.shape[0] * (1 - crop_percent))
    disp = disp[:keep_h]
    return disp

Source: test_statstools.py
with MIT License
from birforce

def test_robust_kurtosis_ab(self):
    # Test custom alpha, beta in kr3
    x = self.kurtosis_x
    alpha, beta = (10.0, 45.0)
    kurtosis = robust_kurtosis(self.kurtosis_x, ab=(alpha, beta), excess=False)
    num = np.mean(x[x > np.percentile(x, 100.0 - alpha)]) - np.mean(x[x < np.percentile(x, alpha)])
    denom = np.mean(x[x > np.percentile(x, 100.0 - beta)]) - np.mean(x[x < np.percentile(x, beta)])
    assert_almost_equal(kurtosis[2], num/denom)

Source: test_statstools.py
with MIT License
from birforce

def test_robust_kurtosis_dg(self):
    # Test custom delta, gamma in kr4
    x = self.kurtosis_x
    delta, gamma = (10.0, 45.0)
    kurtosis = robust_kurtosis(self.kurtosis_x, dg=(delta, gamma), excess=False)
    q = np.percentile(x, [delta, 100.0-delta, gamma, 100.0-gamma])
    assert_almost_equal(kurtosis[3], (q[1] - q[0]) / (q[3] - q[2]))

Source: medical_image_process.py
with MIT License
from black0017

def percentile_clip(img_numpy, min_val=0.1, max_val=99.8):
    """
    Intensity normalization based on percentiles.
    Clips the range based on the percentile values.
    :param min_val: should be in the range [0,100]
    :param max_val: should be in the range [0,100]
    :return: intensity-normalized image
    """
    low = np.percentile(img_numpy, min_val)
    high = np.percentile(img_numpy, max_val)
    img_numpy[img_numpy < low] = low
    img_numpy[img_numpy > high] = high
    return img_numpy

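A hypothetical call of percentile_clip; the random image is illustrative, and since the function clips in place, a copy is passed to preserve the original:

import numpy as np

img = np.random.default_rng(0).normal(100.0, 20.0, size=(64, 64))
clipped = percentile_clip(img.copy(), min_val=1, max_val=99)
print(clipped.min(), clipped.max())  # now bounded by img's 1st/99th percentiles
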
Source: contract.py
with BSD 3-Clause "New" or "Revised" License
from BlueBrain

def expand(x_in, y_in, xy):
    cog = xy.mean(axis=0)
    bary = BarycentricCoordinates(x_in, y_in)
    A = bary.cart2bary(xy[:, 0], xy[:, 1])
    req_factor = numpy.percentile(A, 95, axis=0) / 1.33
    if numpy.sum(req_factor > 1) >= 2:
        ijk = numpy.argsort(req_factor)
        fac = numpy.minimum(numpy.mean(req_factor[ijk[1:]]), 2.5)
        x_in, y_in = _around_point(x_in, y_in, ijk[0], cog, fac)
    return x_in, y_in

Source: metrics.py
with BSD 3-Clause "New" or "Revised" License
from BPHO-Salk

def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
    """Percentile-based image normalization."""
    mi = np.percentile(x, pmin, axis=axis, keepdims=True)
    ma = np.percentile(x, pmax, axis=axis, keepdims=True)
    return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)

Source: utils.py
with MIT License
from brianhie

def print_cell_types(cell_types, intensity):
    # Print most intense cell types in cluster.
    intense_qtile = np.percentile(intensity, 95)
    cluster_types = cell_types[intensity > intense_qtile]
    for cell_type, count in Counter(cluster_types).most_common():
        print('{} ({})'.format(cell_type, count))
    sys.stdout.flush()

Source: selection.py
with Apache License 2.0
from broadinstitute

def populate_weight_stats(selector, stats, weights, quantiles=[5.0, 10.0, 20.0, 50.0, 90.0, 95.0]):
    q5, q10, q20, q50, q90, q95 = np.percentile(weights, quantiles).tolist()
    s = "5/10/20/50/90/95: {:.2e} {:.2e} {:.2e} {:.2e} {:.2e} {:.2e}"
    stats['Weight quantiles'] = s.format(q5, q10, q20, q50, q90, q95)
    s = "mean/std/min/max: {:.2e} {:.2e} {:.2e} {:.2e}"
    stats['Weight moments'] = s.format(weights.mean().item(), weights.std().item(),
                                       weights.min().item(), weights.max().item())
    T, T_burnin = selector.T, selector.T_burnin
    elapsed_time = time.time() - selector.ts[0]
    stats['Elapsed MCMC time'] = "{:.1f} seconds".format(elapsed_time)
    stats['Mean iteration time'] = "{:.3f} ms".format(1000.0 * elapsed_time / (T + T_burnin))
    stats['Number of retained samples'] = T
    stats['Number of burn-in samples'] = T_burnin

Source: categorical.py
with MIT License
from buds-lab

def draw_quartiles(self, ax, data, support, density, center, split=False):
    """Draw the quartiles as lines at width of density."""
    q25, q50, q75 = np.percentile(data, [25, 50, 75])
    self.draw_to_density(ax, center, q25, support, density, split,
                         linewidth=self.linewidth,
                         dashes=[self.linewidth * 1.5] * 2)
    self.draw_to_density(ax, center, q50, support, density, split,
                         linewidth=self.linewidth,
                         dashes=[self.linewidth * 3] * 2)
    self.draw_to_density(ax, center, q75, support, density, split,
                         linewidth=self.linewidth,
                         dashes=[self.linewidth * 1.5] * 2)

Source: categorical.py
with MIT License
from buds-lab

def _lv_outliers(self, vals, k):
    """Find the outliers based on the letter value depth."""
    perc_ends = (100*(0.5**(k+2)), 100*(1 - 0.5**(k+2)))
    edges = np.percentile(vals, perc_ends)
    lower_out = vals[np.where(vals < edges[0])[0]]
    upper_out = vals[np.where(vals > edges[1])[0]]
    return np.concatenate((lower_out, upper_out))

Source: test_categorical.py
with MIT License
from buds-lab

def test_draw_quartiles(self):
    kws = self.default_kws.copy()
    kws.update(dict(y="y", data=self.df, inner=None))
    p = cat._ViolinPlotter(**kws)
    _, ax = plt.subplots()
    p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
    for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
        _, y = line.get_xydata().T
        npt.assert_array_equal(y, [val, val])

Source: test_matrix.py
with MIT License
from buds-lab

def test_robust_vlims(self):
    kws = self.default_kws.copy()
    kws["robust"] = True
    p = mat._HeatMapper(self.df_unif, **kws)
    nt.assert_equal(p.vmin, np.percentile(self.x_unif, 2))
    nt.assert_equal(p.vmax, np.percentile(self.x_unif, 98))

Source: plate.py
with MIT License
from BuysDB

def cell_counts_to_dataframe(self, cell_counts, mux, name='raw_reads'):
    df = pd.DataFrame({name: cell_counts})
    offset = 0  # Offset is zero for all protocols since 0.1.12
    format = 384 if ('384' in mux or mux.startswith('CS2')) else 96
    df['col'] = [index2well[format]
                 [(offset + int(x.rsplit('_')[-1]))][1] for x in df.index]
    df['row'] = [-rows.index(index2well[format]
                 [(offset + int(x.rsplit('_')[-1]))][0]) for x in df.index]
    df['size'] = (df[name] / np.percentile(df[name], 99) * 200)
    return df

Source: mcmc.py
with MIT License
from C-bowman

def estimate_burn_in(self):
    # first get an estimate based on when the chain first reaches
    # the top 1% of log-probabilities
    prob_estimate = argmax(self.probs > percentile(self.probs, 99))
    # now we find the point at which the proposal width for each parameter
    # starts to deviate significantly from the current value
    width_estimates = []
    for p in self.params:
        vals = abs((array(p.sigma_values)[::-1] / p.sigma) - 1.0)
        chks = array(p.sigma_checks)[::-1]
        first_true = chks[argmax(vals > 0.15)]
        width_estimates.append(first_true)
    width_estimate = mean(width_estimates)
    return int(max(prob_estimate, width_estimate))

Source: mcmc.py
with MIT License
from C-bowman

def estimate_burn_in(self):
    # first get an estimate based on when the chain first reaches
    # the top 1% of log-probabilities
    prob_estimate = argmax(self.probs > percentile(self.probs, 99))
    # now we find the point at which the proposal width for each parameter
    # starts to deviate significantly from the current value
    epsl = abs((array(self.ES.epsilon_values)[::-1] / self.ES.epsilon) - 1.0)
    chks = array(self.ES.epsilon_checks)[::-1]
    epsl_estimate = chks[argmax(epsl > 0.15)] * self.ES.accept_rate
    return int(min(max(prob_estimate, epsl_estimate), 0.9 * self.n))

Source: degrade.py
with Apache License 2.0
from Calamari-OCR

def binary_blur(image, sigma, noise=0.0):
    p = percent_black(image)
    blurred = cv.GaussianBlur(image, (0, 0), sigmaX=sigma, borderType=cv.BORDER_REFLECT)
    if noise > 0:
        blurred += np.random.randn(*blurred.shape) * noise
    t = np.percentile(blurred, p)
    return np.array(blurred > t, "f")

Source: visualization.py
with MIT License
from calico

def make_rgb_channel(data, hue):
    def normalize(arr):
        arr = arr - np.percentile(arr, 5)
        return np.clip(arr / np.percentile(arr, 97), 0, 1)
    color_data = np.zeros(list(data.shape) + [3])
    color_data[..., 0] = hue
    color_data[..., 1] = 1
    color_data[..., 2] = normalize(data)
    return colors.hsv_to_rgb(color_data)

Source: sits_func.py
with GNU General Public License v3.0
from charlotte-pel

def computingMinMax(X, per=2):
    min_per = np.percentile(X, per, axis=(0, 1))
    max_per = np.percentile(X, 100 - per, axis=(0, 1))
    return min_per, max_per

Source: readingsits.py
with GNU General Public License v3.0
from charlotte-pel

def computingMinMax(X, per=2):
    min_per = np.percentile(X, per, axis=(0, 1))
    max_per = np.percentile(X, 100 - per, axis=(0, 1))
    return min_per, max_per

Source: post_proc.py
with MIT License
from chengzhag

def mean_percentile(vec, p1=25, p2=75):
    vmin = np.percentile(vec, p1)
    vmax = np.percentile(vec, p2)
    return vec[(vmin <= vec) & (vec <= vmax)].mean()