Here are examples of the Python API numpy.percentile, taken from open source projects. Each snippet shows how a real project calls the function.
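Before the project examples, a minimal sketch of the basic call on a toy array; q is given in percent (0-100) and linear interpolation between data points is the default:

import numpy as np

data = np.arange(11)                 # 0, 1, ..., 10
print(np.percentile(data, 50))       # 5.0 -- the median
print(np.percentile(data, [5, 95]))  # [0.5 9.5] via linear interpolation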
Example 1
def percentiles(self, metric_key):
    # if fewer than 10 points, don't save percentiles
    if self.percentiles_Ns(metric_key)["n_total"] < 10:
        return None
    # expand the accumulations
    elements = list(self.metric_counters[metric_key].elements())
    # add the zeros
    n_total = self.percentiles_Ns(metric_key)["n_total"]
    n_zero = self.percentiles_Ns(metric_key)["n_zero"]
    if n_zero:
        elements += [0 for i in range(n_zero)]
    percentiles = numpy.percentile(elements, q=range(101))
    percentiles = [int(round(p, 0)) for p in percentiles]
    return percentiles
Example 2
Project: moss Source File: mosaic.py
def plot_contours(self, cmap, levels=8, linewidths=1):
    """Plot the statistical volume as a contour map."""
    slices = self.stat_img.get_data()[self.x_slice,
                                      self.y_slice,
                                      self.z_slice].transpose(2, 0, 1)
    if isinstance(cmap, list):
        levels = len(cmap)
        cmap = mpl.colors.ListedColormap(cmap)
    vmin, vmax = np.percentile(slices, [1, 99])
    for slice, ax in zip(slices, self.axes.flat):
        try:
            ax.contour(np.rot90(slice), levels, cmap=cmap,
                       vmin=vmin, vmax=vmax, linewidths=linewidths)
        except ValueError:
            pass
Example 3
def scale(x, mask=None, limits=None):
    """Scale an array as is done in the MWP paper:
    sqrt transform of data clipped at the 5th and 99.8th percentiles.
    """
    limits = limits or [5, 99.8]
    if mask is None:
        lo, hi = np.percentile(x, limits)
    else:
        lo, hi = np.percentile(x[mask], limits)
    x = (np.clip(x, lo, hi) - lo) / (hi - lo)
    return (np.sqrt(x) * 255).astype(np.uint8)
Example 4
Project: repo-tools Source File: transitions_kpi.py
def make_percentile(qper):
    """
    Returns a percentile function for the given numeric qper.
    qper: Float in the range [0, 100]. Percentile to compute, which must be
        between 0 and 100 inclusive.
    """
    def percentile(time_spent):
        """
        Returns the qth percentile of the tickets.
        """
        seconds_spent = map(datetime.timedelta.total_seconds, time_spent)
        raw_result = numpy.percentile(seconds_spent, qper)
        return datetime.timedelta(seconds=raw_result)
    return percentile
Example 5
def scale(training, scale_frac):
    """Gets the scaling factors for each dimension from the training data."""
    top = np.ravel(np.percentile(training, (1.0 + scale_frac) * 50.0, axis=0))
    bottom = np.ravel(np.percentile(training, (1.0 - scale_frac) * 50.0,
                                    axis=0))
    scales = top - bottom
    return scales
Example 6
Project: osf.io Source File: file_sizes.py
def size_percentiles():
    sizes = walk_collect(settings.UPLOADS_PATH, size_helper)
    cutoffs = range(2, 102, 2)
    percentiles = np.percentile(
        [size[-1] / 1024 / 1024 for size in sizes],
        cutoffs,
    )
    return tabulate.tabulate(
        zip(cutoffs, percentiles),
        headers=['Percentile', 'Size (MiB)'],
    )
Example 7
def quantile(x, q, weights=None):
    """
    Like numpy.percentile, but:
    * Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
    * scalar q not supported (q must be iterable)
    * optional weights on x
    """
    if weights is None:
        return np.percentile(x, [100. * qi for qi in q])
    else:
        idx = np.argsort(x)
        xsorted = x[idx]
        cdf = np.add.accumulate(weights[idx])
        cdf /= cdf[-1]
        return np.interp(q, cdf, xsorted).tolist()
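A quick sanity check for this helper (a sketch of my own, not from the project): with uniform weights the weighted CDF path should roughly reproduce np.percentile, up to interpolation details at the edges.

import numpy as np

x = np.random.rand(1000)
q = [0.25, 0.5, 0.75]
print(quantile(x, q))                           # delegates to np.percentile
print(quantile(x, q, weights=np.ones_like(x)))  # CDF interpolation, close but not identical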
Example 8
Project: quality-assessment-protocol Source File: dvars.py
def robust_stdev(func, interp="fraction"):
    """
    Compute a robust estimate of the standard deviation.
    """
    lower_qs = np.percentile(func, 25, axis=0)
    upper_qs = np.percentile(func, 75, axis=0)
    # note: won't work on roxy with scipy == 0.9
    #lower_qs = stats.scoreatpercentile(func, 25, interpolation_method=interp, axis=0)
    #upper_qs = stats.scoreatpercentile(func, 75, interpolation_method=interp, axis=0)
    stdev = (upper_qs - lower_qs)/1.349
    return stdev
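The 1.349 divisor is the interquartile range of a standard normal distribution, so this IQR-based estimate recovers sigma for Gaussian data while resisting outliers. A minimal check of that constant (my own sketch, assuming synthetic normal data):

import numpy as np

x = np.random.normal(0.0, 1.0, 100000)
q25, q75 = np.percentile(x, [25, 75])
print((q75 - q25) / 1.349)   # approximately 1.0, the true standard deviation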
Example 9
Project: SciDB-Py Source File: test_basic.py
def test_percentile_attribute_check():
    x = sdb.arange(5)
    y = sdb.arange(5) * 2
    z = join(x, y)
    expected = np.percentile(y.toarray(), 50)
    actual = sdb.percentile(z, 50, att=z.att_names[1])
    assert_allclose(expected, actual)
Example 10
def process(self):
    square = np.array(self.input.buffer) ** 2
    threshold = np.percentile(square, 98)
    if np.max(self.input.buffer[:self.input.new_samples]) > threshold:
        self.beat.append([1.0])
        print 'beat event'
    else:
        self.beat.append([0.0])
    self.beat.process()
Example 11
Project: nanown Source File: stats.py
def septasummary(values, distance=25):
    left2 = 50-distance
    left3 = 50-(distance/2.0)
    left1 = left2/2.0
    right2 = 50+distance
    right3 = 50+(distance/2.0)
    right1 = (right2+100)/2.0
    l1,l2,l3,m,r3,r2,r1 = numpy.percentile(values, (left1,left2,left3,50,right3,right2,right1))
    return (l1+l2+l3+m+r3+r2+r1)/7.0
Example 12
Project: energywise Source File: plotter_new.py
def _add_fig_overthresh(pdf, d, size, fontsize):
    #overthresh
    kwhs, kwhs_oriflag = d["kwhs"]
    thresh = np.percentile(kwhs[kwhs_oriflag], 99)
    overtimes = gen_over_thresh(d, thresh)
    ot_fig = plt.figure(figsize = size)
    for i, p in enumerate(overtimes):
        if i >= 9: break
        over_fig = ot_fig.add_subplot(3, 3, i + 1)
        make_strange_per_fig(d, over_fig, p)
    ot_fig.suptitle("Times in the top 1%%\n (>%.2fkwhs)" % thresh, fontsize = 24)
    extract_legend(ot_fig)
    plt.subplots_adjust(hspace = .35, wspace = .5)
    plt.savefig(pdf, format = 'pdf')
Example 13
Project: Shazam Source File: peakpicker.py
def find_thres(spectrogram, percentile, base):
    "Find the peak picking threshold for a particular spectrogram"
    dim = spectrogram.shape
    window = spectrogram[0:dim[0], base:dim[1]]
    threshold = np.percentile(window, percentile)
    return threshold
Example 14
def test_no_nans(self):
    data = self.data[:-1]
    bounds = contrast_stretching(data, 1)
    nose.tools.assert_tuple_equal(bounds, (
        np.percentile(data, 0.5),
        np.percentile(data, 99.5)))
Example 15
Project: Dracula Source File: modelio.py
def get_max_word_count(path):
    t = get_tweet_words(path)
    m = [len(t[c]) for c in t]
    m = int(numpy.percentile(m, 99))
    #m = int(numpy.median([len(t[c]) for c in t]))
    logging.debug("get_max_word_count('%s') = %d", path, m)
    return m
Example 16
Project: mit-tab Source File: compute_stats.py
def speaks_stats(self, speaks, header):
    if len(speaks) > 0:
        percentiles = np.percentile(speaks, [0, 25, 50, 75, 100])
        mean, std = round(np.mean(speaks), 2), round(np.std(speaks), 2)
    else:
        percentiles = [0]*5
        mean, std = 0, 0
    print "{0} ".format(header),
    print (("| %05.2f "*5) + "|") % tuple(percentiles),"|",
    print (("| %05.2f "*2) + "|") % (mean, std)
    return percentiles, mean, std
Example 17
Project: cesium Source File: amplitude.py
def flux_percentile_ratio(x, percentile_range, base=10., exponent=-0.4):
    """A ratio of ((50+x) flux percentile - (50-x) flux percentile) /
    (95 flux percentile - 5 flux percentile), where x = percentile_range/2.
    Assumes data is log-scaled; by default we assume inputs are scaled as
    x=10^(-0.4*y), corresponding to units of magnitudes. Computations are
    performed on the corresponding linear-scale values.
    """
    linear_scale_data = base ** (exponent * x)
    y_high, y_low, y_95, y_5 = np.percentile(linear_scale_data,
                                             [50 + percentile_range / 2.,
                                              50 - percentile_range / 2., 95, 5])
    return (y_high - y_low) / (y_95 - y_5)
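A hypothetical usage sketch with synthetic magnitudes (values are mine, not from the project); percentile_range=40 compares the 30th and 70th flux percentiles against the 5th-95th spread:

import numpy as np

mags = np.random.normal(15.0, 0.1, 500)   # synthetic magnitude measurements
print(flux_percentile_ratio(mags, 40))    # (70th - 30th) / (95th - 5th) in flux space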
Example 18
def percentile(self, n, default=None):
    """
    Calculate the Nth percentile value over the time series.
    :param int n: Integer value of the percentile to calculate.
    :param default: Value to return as a default should the calculation not be possible.
    :return: Float representing the Nth percentile value or `None`.
    """
    return numpy.asscalar(numpy.percentile(self.values, n)) if self.values else default
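Note that numpy.asscalar was deprecated in NumPy 1.16 and removed in 1.23; on current NumPy the same conversion is spelled with .item(). A sketch of the equivalent return line (not the project's code):

return numpy.percentile(self.values, n).item() if self.values else default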
Example 19
Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_nanfunctions.py
Function: test_result_values
def test_result_values(self):
    tgt = [np.percentile(d, 28) for d in _rdat]
    res = np.nanpercentile(_ndat, 28, axis=1)
    assert_almost_equal(res, tgt)
    tgt = [np.percentile(d, (28, 98)) for d in _rdat]
    res = np.nanpercentile(_ndat, (28, 98), axis=1)
    assert_almost_equal(res, tgt)
Example 20
def test_percentile():
    def check(array, percentiles):
        expected = np.percentile(array, percentiles)
        actual = sdb.percentile(sdb.from_array(array), percentiles)
        assert_allclose(expected, actual)
    x = np.arange(11)
    yield check, x, [50]
    yield check, x, 50
    yield check, x, 43.2
    yield check, x.astype(float), 43.2
    yield check, x, [1, 5, 100]
Example 21
Project: dask Source File: percentile.py
@wraps(np.percentile)
def _percentile(a, q, interpolation='linear'):
    if not len(a):
        return None
    if isinstance(q, Iterator):
        q = list(q)
    if str(a.dtype) == 'category':
        result = np.percentile(a.codes, q, interpolation=interpolation)
        import pandas as pd
        return pd.Categorical.from_codes(result, a.categories, a.ordered)
    if np.issubdtype(a.dtype, np.datetime64):
        a2 = a.astype('i8')
        result = np.percentile(a2, q, interpolation=interpolation)
        return result.astype(a.dtype)
    if not np.issubdtype(a.dtype, np.number):
        interpolation = 'nearest'
    return np.percentile(a, q, interpolation=interpolation)
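Note that NumPy 1.22 renamed the interpolation= keyword to method=, so this wrapper matches older NumPy releases. A usage sketch for the datetime64 branch, assuming the wrapper and its module imports are in scope:

import numpy as np

dates = np.array(['2000-01-01', '2000-06-01', '2001-01-01'], dtype='M8[D]')
print(_percentile(dates, 50))   # median computed via the int64 round-trip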
Example 22
Project: unshred Source File: lines.py
def _get_median_angle(lines):
    angles = []
    for x1, y1, x2, y2 in lines:
        c = complex(x2, -y2) - complex(x1, -y1)
        angle = cmath.phase(c)
        angles.append(angle)
    # Not np.median to avoid averaging middle elements.
    median_angle = numpy.percentile(angles, .5)
    return _normalize_angle(median_angle, [-math.pi / 2, math.pi / 2], math.pi)
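Caution: np.percentile takes q on a 0-100 scale, so the .5 above evaluates the 0.5th percentile, not the median the comment describes. If a median without averaging is intended, something like this would do it (my reading, not the project's code):

median_angle = numpy.percentile(angles, 50, interpolation='nearest')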
Example 23
Project: xarray Source File: test_plot.py
def test_robust(self):
    cmap_params = _determine_cmap_params(self.data, robust=True)
    self.assertEqual(cmap_params['vmin'], np.percentile(self.data, 2))
    self.assertEqual(cmap_params['vmax'], np.percentile(self.data, 98))
    self.assertEqual(cmap_params['cmap'].name, 'viridis')
    self.assertEqual(cmap_params['extend'], 'both')
    self.assertIsNone(cmap_params['levels'])
    self.assertIsNone(cmap_params['norm'])
Example 24
Project: Topological-Anomaly-Detection Source File: TADClassifier.py
def trim_adjacency_matrix(adj, r=None, rq=.1):
    """
    Given a condensed distance matrix (i.e. of the kind outputted by pdist),
    returns a copy of the distance matrix where all entries greater than 'r'
    are set to zero. If 'r' is not provided, evaluates the 'rq' quantile of the
    input distances and uses that as a heuristic for 'r'. Default behavior is
    to use the 10th percentile of distances as 'r'.
    """
    if r is None:
        r = np.percentile(adj, 100*rq)
    print "r:", r
    adj2 = adj.copy()
    adj2[adj>r] = 0
    return adj2, r
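A usage sketch, assuming SciPy's pdist supplies the condensed distance matrix (the sample points are mine):

import numpy as np
from scipy.spatial.distance import pdist

points = np.random.rand(20, 2)
dists = pdist(points)                      # condensed pairwise distances
trimmed, r = trim_adjacency_matrix(dists)  # keeps only distances <= the 10th-percentile radius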
Example 25
Project: tvb-library Source File: fcd_matrix.py
def spectral_dbscan(fcd, n_dim=2, eps=0.3, min_samples=50):
    fcd = fcd - fcd.min()
    se = SpectralEmbedding(n_dim, affinity="precomputed")
    xi = se.fit_transform(fcd)
    pd = pdist(xi)
    eps = np.percentile(pd, 100 * eps)
    db = DBSCAN(eps=eps, min_samples=min_samples).fit(xi)
    return xi.T, db.labels_
Example 26
Project: nanown Source File: stats.py
def quadsummary(values, distance=25):
    left1 = 50-distance
    left2 = (left1+50)/2.0
    right1 = 50+distance
    right2 = (right1+50)/2.0
    l1,l2,r2,r1 = numpy.percentile(values, (left1,left2,right2,right1))
    #print(left1,left2,left3,50,right3,right2,right1)
    #print(l1,l2,l3,m,r3,r2,r1)
    return (l1+l2+r2+r1)/4.0
Example 27
Project: socialsent Source File: lexicons.py
def make_concreteness_lexicon(top=75, bottom=25):
    raw_scores = {}
    fp = open(constants.LEXICONS + "concreteness/raw_ratings.csv")
    fp.readline()
    for line in fp:
        info = line.split(",")
        if len(info[0].split()) > 1:
            continue
        raw_scores[info[0]] = float(info[2])
    pos_thresh = np.percentile(raw_scores.values(), top)
    neg_thresh = np.percentile(raw_scores.values(), bottom)
    polarities = {}
    label_func = lambda s : 1 if s > pos_thresh else -1 if s < neg_thresh else 0
    for word, score in raw_scores.iteritems():
        polarities[word] = label_func(score)
    util.write_json(polarities, constants.PROCESSED_LEXICONS + "concreteness.json")
Example 28
Project: ggplot Source File: utils.py
def calc_n_bins(series):
    "https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width"
    q75, q25 = np.percentile(series, [75, 25])
    iqr = q75 - q25
    h = (2 * iqr) / (len(series)**(1/3.))
    k = (series.max() - series.min()) / h
    return k
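This is the Freedman-Diaconis rule: bin width h = 2*IQR/n^(1/3), bin count k = range/h. NumPy now ships the same rule, which makes a handy cross-check (a sketch assuming synthetic normal data):

import numpy as np

series = np.random.normal(size=1000)
k = calc_n_bins(series)
edges = np.histogram_bin_edges(series, bins='fd')   # built-in Freedman-Diaconis
print(round(k), len(edges) - 1)                     # comparable bin counts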
Example 29
Project: nanown Source File: stats.py
def multiBoxTest(params, greater, samples):
    uc = [s['unusual_packet'] for s in samples]
    rest = [s['other_packet'] for s in samples]
    uc_high,uc_low = numpy.percentile(uc, (params['high'],params['low']))
    rest_high,rest_low = numpy.percentile(rest, (params['high'],params['low']))
    if uc_high < rest_low:
        if greater:
            return -1
        else:
            return 1
    if rest_high < uc_low:
        if greater:
            return 1
        else:
            return -1
    return 0
Example 30
Project: intrinsic Source File: image_util.py
def rescale_for_display(image, mask_nz=None, percentile=99.9):
    """Rescales an image so that a particular percentile is mapped to pure
    white."""
    if mask_nz is None:
        return image / np.percentile(image, percentile)
    else:
        return image / np.percentile(image[mask_nz], percentile)
Example 31
Project: EyeTab Source File: image_utils.py
def measure_blurriness_LoG(img):
    """ Blurriness measure
    """
    grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur_img = cv2.GaussianBlur(grey_img, (3, 3), 0)
    LoG_img = cv2.Laplacian(blur_img, cv2.CV_16S, ksize=5, scale=15, delta=0)
    thresh_val = np.percentile(LoG_img, 90)
    av_edge_strength = np.mean(LoG_img[LoG_img > thresh_val])
    return av_edge_strength
Example 32
Project: tdigest Source File: test_tdigest.py
@pytest.mark.parametrize("percentile_range", [[0, 7], [27, 47], [39, 66], [81, 99], [77, 100], [0, 100]])
@pytest.mark.parametrize("data_size", [100, 1000, 5000])
def test_trimmed_mean(self, percentile_range, data_size):
    p1 = percentile_range[0]
    p2 = percentile_range[1]
    t = TDigest()
    x = random.random(size=data_size)
    t.batch_update(x)
    tm_actual = t.trimmed_mean(p1, p2)
    tm_expected = x[bitwise_and(x >= percentile(x, p1), x <= percentile(x, p2))].mean()
    testing.assert_allclose(tm_actual, tm_expected, rtol=0.01, atol=0.01)
Example 33
def test_nans(self):
    data = self.data[:-1]
    bounds = contrast_stretching(self.data, 1)
    nose.tools.assert_tuple_equal(bounds, (
        np.percentile(data, 0.5),
        np.percentile(data, 99.5)))
Example 34
def benchmark(name, func, debug=False):
    times = []
    for n in range(0, REPEATS):
        for i, url in enumerate(URLS):
            u = url.strip()
            if debug:
                print u
            t = clock()
            func(u)
            times.append(clock() - t)
    row = [name, sum(times), mean(times), median(times), percentile(times, 90)]
    print row
    data.append(row)
Example 35
def filter(self, *data):
    lens = [len(seq) for seq in data[0]]
    if self.percentile > 0:
        max_len = np.percentile(lens, self.percentile)
        max_len = np.clip(max_len, self.min_max_len, self.max_len)
    else:
        max_len = self.max_len
    valid_idxs = [i for i, l in enumerate(lens) if l <= max_len]
    if len(data) == 1:
        return list_index(data[0], valid_idxs)
    else:
        return tuple([list_index(d, valid_idxs) for d in data])
Example 36
Project: cesium Source File: amplitude.py
def percent_difference_flux_percentile(x, base=10., exponent=-0.4):
    """Difference between the 95th and 5th percentiles of the data, expressed
    as a percentage of the median value.
    See Eyer (2005) arXiv:astro-ph/0511458v1, Evans & Belokurov (2005) (there
    the 98th and 2nd percentiles are used).
    Assumes data is log-scaled; by default we assume inputs are scaled as
    x=10^(-0.4*y), corresponding to units of magnitudes. Computations are
    performed on the corresponding linear-scale values.
    """
    linear_scale_data = base ** (exponent * x)
    y_95, y_50, y_5 = np.percentile(linear_scale_data, [95, 50, 5])
    return (y_95 - y_5) / y_50
Example 37
Project: kameleon-mcmc Source File: true_mmd_samples.py
def read_samples():
    f = open("/home/dino/git/mmdIIDTrueSamples.dat", "r")
    H0_samples = load(f)
    HA_samples = load(f)
    gaussian1 = load(f)
    gaussian2 = load(f)
    f.close()
    print 'P1:', gaussian1.mu, gaussian1.L.dot(gaussian1.L.T)
    print 'P2:', gaussian2.mu, gaussian2.L.dot(gaussian2.L.T)
    hist((H0_samples,HA_samples),50)
    th=percentile(H0_samples,95)
    print 'expected Type II: ', sum(HA_samples<th)/float(len(HA_samples))
    #print H0_samples
    #print HA_samples
    show()
    return None
Example 38
Project: Dracula Source File: modelio.py
def get_max_word_length(path):
    t = get_tweet_words(path)
    m = 0
    d = []
    for c in t:
        for w in t[c]:
            d.append(len(w))
            if len(w) >= m:
                m = len(w)
                logging.debug('length: %s, %d', w, m)
    m = numpy.percentile(d, 99)
    logging.debug("get_max_word_length('%s') = %d", path, m)
    return m
Example 39
Project: librosa Source File: test_effects.py
def test_hpss():
    y, sr = librosa.load(__EXAMPLE_FILE)
    y_harm, y_perc = librosa.effects.hpss(y)
    # Make sure that the residual energy is generally small
    y_residual = y - y_harm - y_perc
    rms_orig = librosa.feature.rmse(y=y)
    rms_res = librosa.feature.rmse(y=y_residual)
    assert np.percentile(rms_orig, 0.01) > np.percentile(rms_res, 0.99)
Example 40
Project: statsmodels Source File: test_statstools.py
def test_robust_kurtosis_ab(self):
    """Test custom alpha, beta in kr3"""
    x = self.kurtosis_x
    alpha, beta = (10.0, 45.0)
    kurtosis = robust_kurtosis(self.kurtosis_x, ab=(alpha,beta), excess=False)
    num = np.mean(x[x>np.percentile(x,100.0 - alpha)]) - np.mean(x[x<np.percentile(x,alpha)])
    denom = np.mean(x[x>np.percentile(x,100.0 - beta)]) - np.mean(x[x<np.percentile(x,beta)])
    assert_almost_equal(kurtosis[2], num/denom)
Example 41
Project: seaborn Source File: test_matrix.py
def test_robust_sequential_vlims(self):
    kws = self.default_kws.copy()
    kws["robust"] = True
    p = mat._HeatMapper(self.df_unif, **kws)
    nt.assert_equal(p.vmin, np.percentile(self.x_unif, 2))
    nt.assert_equal(p.vmax, np.percentile(self.x_unif, 98))
Example 42
Project: statsmodels Source File: test_statstools.py
def test_robust_kurtosis_dg(self):
    """Test custom delta, gamma in kr4"""
    x = self.kurtosis_x
    delta, gamma = (10.0, 45.0)
    kurtosis = robust_kurtosis(self.kurtosis_x, dg=(delta,gamma), excess=False)
    q = np.percentile(x,[delta, 100.0-delta, gamma, 100.0-gamma])
    assert_almost_equal(kurtosis[3], (q[1] - q[0]) / (q[3] - q[2]))
Example 43
def deprocess_image(x, contrast_percent=0.0, resize=None):
    x = vgg16.img_from_vgg(x)
    if contrast_percent:
        min_x, max_x = np.percentile(x, (contrast_percent, 100 - contrast_percent))
        x = (x - min_x) * 255.0 / (max_x - min_x)
    x = np.clip(x, 0, 255)
    if resize:
        x = imresize(x, resize, interp='bicubic')
    return x.astype('uint8')
Example 44
Project: nanown Source File: stats.py
def ubersummary(values, distance=25):
    left2 = 50-distance
    left3 = 50-(distance/2.0)
    left1 = left2/2.0
    right2 = 50+distance
    right3 = 50+(distance/2.0)
    right1 = (right2+100)/2.0
    l1,l2,l3,r3,r2,r1 = numpy.percentile(values, (left1,left2,left3,right3,right2,right1))
    #print(l1,l2,l3,m,r3,r2,r1)
    return (l1+l2*4+l3+r3+r2*4+r1)/12.0
Example 45
Project: bcbio-nextgen Source File: callable.py
def _analysis_block_stats(regions, samples):
    """Provide statistics on sizes and number of analysis blocks.
    """
    prev = None
    between_sizes = []
    region_sizes = []
    for region in regions:
        if prev and prev.chrom == region.chrom:
            between_sizes.append(region.start - prev.end)
        region_sizes.append(region.end - region.start)
        prev = region
    def descriptive_stats(xs):
        if len(xs) < 2:
            return xs
        parts = ["min: %s" % min(xs),
                 "5%%: %s" % numpy.percentile(xs, 5),
                 "25%%: %s" % numpy.percentile(xs, 25),
                 "median: %s" % numpy.percentile(xs, 50),
                 "75%%: %s" % numpy.percentile(xs, 75),
                 "95%%: %s" % numpy.percentile(xs, 95),
                 "99%%: %s" % numpy.percentile(xs, 99),
                 "max: %s" % max(xs)]
        return "\n".join([" " + x for x in parts])
    logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) +
                "Block sizes:\n%s\n" % descriptive_stats(region_sizes) +
                "Between block sizes:\n%s\n" % descriptive_stats(between_sizes))
    if len(region_sizes) == 0:
        raise ValueError("No callable regions found in: %s" %
                         (", ".join([dd.get_sample_name(x) for x in samples])))
Example 46
Project: nanown Source File: stats.py
def midsummary(values, distance=25):
    #return (numpy.percentile(values, 50-distance) + numpy.percentile(values, 50+distance))/2.0
    l,h = numpy.percentile(values, (50-distance,50+distance))
    return (l+h)/2.0
Example 47
Project: mpop Source File: HRWimage.py
def HRWscatterplot( HRW_data, title='', hrw_channels=None, min_correlation=None, cloud_type=None, color_mode='direction'):
    ## get an empty figure with transparent background, no axis and no margins outside the diagram
    # fig = plt.figure()
    import pylab
    fig = pylab.figure()
    ax = plt.subplot(111)
    ax.set_yscale("log", nonposy='clip')
    plt.scatter(HRW_data.wind_speed, HRW_data.pressure/100, s=5, c=HRW_data.wind_direction, alpha=0.5, edgecolor='none')
    pylab.title(title)
    pylab.ylim([1000,100])
    plt.yticks([1000,900,800,700,600,500,400,300,200,100], ['1000','900','800','700','600','500','400','300','200','100'], rotation='horizontal')
    p = percentile(HRW_data.wind_speed, 95)
    vmax = (round(p/10)+1)*10
    print "... vmax:", vmax
    plt.plot([0,vmax], [680,680], color='g')
    plt.plot([0,vmax], [440,440], color='b')
    pylab.xlim([0,vmax])
    ax.set_xlabel('HRW [m/s]')
    ax.set_ylabel('p [hPa]')
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('wind direction')
    return fig2img ( fig )
Example 48
Project: mpop Source File: image.py
def stretch_hist_equalize(self, ch_nb):
    """Stretch the current image's colors by performing histogram
    equalization on channel *ch_nb*.
    """
    LOG.info("Perform a histogram equalized contrast stretch.")
    if(self.channels[ch_nb].size ==
       np.ma.count_masked(self.channels[ch_nb])):
        LOG.warning("Nothing to stretch !")
        return
    arr = self.channels[ch_nb]
    nwidth = 2048.0
    carr = arr.compressed()
    cdf = np.arange(0.0, 1.0, 1 / nwidth)
    LOG.debug("Make histogram bins having equal amount of data, " +
              "using numpy percentile function:")
    bins = percentile(carr, list(cdf * 100))
    res = np.ma.empty_like(arr)
    res.mask = np.ma.getmaskarray(arr)
    res[~res.mask] = np.interp(carr, bins, cdf)
    self.channels[ch_nb] = res
Example 49
def __init__(self, data, quantile_threshold, taumax,
             eventsynctype="directedES", non_local=False,
             node_weight_type="surface", silence_level=0):
    """
    Initialize an instance of EventSynchronizationClimateNetwork.
    For other applications of event synchronization networks please use
    the event synchronization class directly.
    :type data: :class:`..climate.ClimateData`
    :arg data: The climate data used for network construction.
    :type quantile_threshold: float between 0 and 1
    :arg quantile_threshold: values above will be treated as events
    :arg int taumax: Maximum dynamical delay
    :type eventsynctype: str
    :arg eventsynctype: one of "directedES", "symmetricES" or
        "antisymmetricES" [default: "directedES"]
    :arg bool non_local: Determines whether links between spatially close
        nodes should be suppressed.
    :arg str node_weight_type: The type of geographical node weight to be
        used.
    :arg int silence_level: The inverse level of verbosity of the object.
    """
    etypes = ["directedES", "symmetricES", "antisymmetricES"]
    if eventsynctype not in etypes:
        raise IOError("wrong eventsynctype...\n" +
                      "Available options: '%s', '%s' or '%s'" %
                      (etypes[0], etypes[1], etypes[2]))
    self.__eventsynctype = eventsynctype
    self.directed = self.__eventsynctype != "symmetricES"
    eventmatrix = data.observable() > np.percentile(data.observable(),
                                                    quantile_threshold*100,
                                                    axis=0)
    EventSynchronization.__init__(self, eventmatrix.astype(int), taumax)
    eventsyncmatrix = getattr(self, self.__eventsynctype)()
    ClimateNetwork.__init__(self, grid=data.grid,
                            similarity_measure=eventsyncmatrix,
                            threshold=0,
                            non_local=non_local,
                            directed=self.directed,
                            node_weight_type=node_weight_type,
                            silence_level=silence_level)
Example 50
Project: mpop Source File: image.py
def stretch_linear(self, ch_nb, cutoffs=(0.005, 0.005)):
    """Stretch linearly the contrast of the current image on channel
    *ch_nb*, using *cutoffs* for left and right trimming.
    """
    LOG.debug("Perform a linear contrast stretch.")
    if((self.channels[ch_nb].size ==
        np.ma.count_masked(self.channels[ch_nb])) or
       self.channels[ch_nb].min() == self.channels[ch_nb].max()):
        LOG.warning("Nothing to stretch !")
        return
    arr = self.channels[ch_nb]
    carr = arr.compressed()
    LOG.debug("Calculate the histogram percentiles: ")
    LOG.debug("Left and right percentiles: " +
              str(cutoffs[0] * 100) + " " + str(cutoffs[1] * 100))
    left, right = percentile(carr, [cutoffs[0] * 100,
                                    100. - cutoffs[1] * 100])
    delta_x = (right - left)
    LOG.debug("Interval: left=%f, right=%f width=%f"
              % (left, right, delta_x))
    if delta_x > 0.0:
        self.channels[ch_nb] = np.ma.array((arr - left) / delta_x,
                                           mask=arr.mask)
    else:
        self.channels[ch_nb] = np.ma.zeros(arr.shape)
        LOG.warning("Unable to make a contrast stretch!")