Here are examples of the Python API numpy.ptp, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
5 Examples
3
Example 1
Project: pygmi Source File: graphs.py
def update_scatter(self, x, y):
    """Redraw the scatter plot of *x* vs. *y*.

    The axes are cleared and recreated, and the view limits are padded by
    10% of each coordinate's peak-to-peak range so points do not sit on
    the plot border.

    Parameters
    ----------
    x, y : array_like
        Coordinate sequences of equal length.
    """
    self.figure.clear()
    self.axes = self.figure.add_subplot(111)
    # Hoist the paddings so np.ptp runs once per axis instead of twice.
    xpad = 0.1 * np.ptp(x)
    ypad = 0.1 * np.ptp(y)
    xmin = min(x) - xpad
    xmax = max(x) + xpad
    ymin = min(y) - ypad
    ymax = max(y) + ypad
    self.axes.scatter(x, y)
    self.axes.axis([xmin, xmax, ymin, ymax])
    self.axes.set_xlabel("Number of Classes")
    self.figure.canvas.draw()
3
Example 2
Project: scenesim Source File: picker.py
def _make_connector(self, parent, points, extent, name):
    """Build a connector node sized to the bounding box of *points*."""
    conn = self.base_connector.copy()
    # Per-axis extent of the point cloud (peak-to-peak along each axis).
    size = Vec3(*np.ptp(points, axis=0))
    grown = size + extent
    # Box center: minimum corner plus half the size.
    center = Point3(*(np.min(points, axis=0) + size / 2.))
    props = dict(name=name, scale=grown, pos=center)
    conn.apply_prop(props, other=self.scene)
    conn.wrtReparentTo(parent)
    return conn
0
Example 3
Project: trackpy Source File: motion.py
def diagonal_size(single_trajectory, pos_columns=None, t_column='frame'):
    """Measure the diagonal size of a trajectory.

    Parameters
    ----------
    single_trajectory : DataFrame
        DataFrame containing a single trajectory.
    pos_columns : list, optional
        Names of the position columns; defaults to ['x', 'y'].
    t_column : str, optional
        Name of the time/frame column; defaults to 'frame'.

    Returns
    -------
    float
        Length of the diagonal of the rectangular box containing the
        trajectory.

    Examples
    --------
    >>> diagonal_size(single_trajectory)
    >>> many_trajectories.groupby('particle').agg(tp.diagonal_size)
    >>> many_trajectories.groupby('particle').filter(lambda x: tp.diagonal_size(x) > 5)
    """
    if pos_columns is None:
        pos_columns = ['x', 'y']
    pos = single_trajectory.set_index(t_column)[pos_columns]
    # The per-column peak-to-peak ranges are the bounding box edges; the
    # diagonal is the Euclidean norm of those edges.
    return np.sqrt(np.sum(pos.apply(np.ptp)**2))
0
Example 4
Project: pycog Source File: rdm.py
def plot_stimulus_duration(trialsfile, plot, **kwargs):
    """
    Plot percent correct as a function of stimulus duration, one curve per
    nonzero coherence level.

    Parameters
    ----------
    trialsfile : str
        Path of the trials file to load with `load_trials`.
    plot : plot object
        Target axes-like object exposing `plot`, `lim`, and `ylim`.
    **kwargs
        Optional style overrides: 'lw' (line width), 'ms' (marker size),
        'mew' (marker edge width).
    """
    from pycog.datatools import partition

    # Load trials
    trials, ntrials = load_trials(trialsfile)

    #-------------------------------------------------------------------------------------
    # Compute psychometric performance by stimulus duration
    #-------------------------------------------------------------------------------------

    # coh -> ([correct flags], [stimulus durations]); zero coherence is skipped.
    correct_duration_by_coh = {}
    for i, trial in enumerate(trials):
        info = trial['info']

        # Coherence
        coh = info['coh']
        if coh == 0:
            continue

        # Correct, stimulus duration
        correct = 1*(get_choice(trial) == info['choice'])
        correct_duration_by_coh.setdefault(coh, ([], []))[0].append(correct)
        # Duration is the peak-to-peak span of the stimulus epoch.
        correct_duration_by_coh[coh][1].append(np.ptp(info['epochs']['stimulus']))

    # Was duplicated in the original; one initialization suffices.
    correct_by_coh = {}
    for coh, (correct, duration) in correct_duration_by_coh.items():
        # Bin trials by duration, then compute percent correct per bin.
        Xbins, Ybins, Xedges, _ = partition(np.asarray(duration), np.asarray(correct),
                                            nbins=10)
        correct_by_coh[coh] = ((Xedges[:-1] + Xedges[1:])/2,
                               [100*np.sum(Ybin > 0)*safe_divide(len(Ybin))
                                for Ybin in Ybins])

    #-------------------------------------------------------------------------------------
    # Plot
    #-------------------------------------------------------------------------------------

    lineprop = {'lw': kwargs.get('lw', 1)}
    dataprop = {'ms': kwargs.get('ms', 6),
                'mew': kwargs.get('mew', 0)}

    cohs = sorted(correct_by_coh)
    xall = []
    for coh in cohs:
        stim, correct = correct_by_coh[coh]

        plot.plot(stim, correct, color=colors[coh], label='{}\%'.format(SCALE*coh),
                  **lineprop)
        plot.plot(stim, correct, 'o', mfc=colors[coh], **dataprop)
        xall.append(stim)

    plot.lim('x', xall)
    plot.ylim(50, 100)
0
Example 5
Project: pycog Source File: rdm.py
def sort_trials_stim_onset(trialsfile, sortedfile):
    """Average correct-trial responses per condition, aligned to stimulus onset.

    Conditions are (coherence, in_out) pairs. The time axis is taken from
    the trial with the longest stimulus epoch and shifted so t=0 is
    stimulus onset. The result, a tuple ``(t, sorted_trials)`` mapping each
    condition to a (nunits, ntime) mean-response array, is pickled to
    *sortedfile*.

    Parameters
    ----------
    trialsfile : str
        Path of the trials file to load with `load_trials`.
    sortedfile : str
        Output path for the pickled (t, sorted_trials) tuple.
    """
    # Load trials
    trials, ntrials = load_trials(trialsfile)

    # Get unique (coherence, in_out) conditions.
    # NOTE: the original also accumulated a `cohs` list that was never
    # used; it has been removed.
    conds = []
    for trial in trials:
        info = trial['info']
        conds.append((info['coh'], info['in_out']))
    conds = list(set(conds))

    #-------------------------------------------------------------------------------------
    # Prepare for averaging
    #-------------------------------------------------------------------------------------

    # Number of units
    nunits = trials[0]['r'].shape[0]

    # Number of time points: use the trial with the longest stimulus epoch
    # (peak-to-peak span) so every other trial fits within the window.
    stimulus = [np.ptp(trial['info']['epochs']['stimulus']) for trial in trials]
    idx = np.argmax(stimulus)

    trial = trials[idx]
    t = trial['t']
    w = np.where(t <= trial['info']['epochs']['stimulus'][1])[0][-1] + 1
    # Shift so t=0 is stimulus onset.
    t = t[:w] - trial['info']['epochs']['stimulus'][0]
    ntime = len(t)

    #-------------------------------------------------------------------------------------
    # Average across conditions
    #-------------------------------------------------------------------------------------

    sorted_trials = {c: np.zeros((nunits, ntime)) for c in conds}
    ntrials_by_cond = {c: np.zeros(ntime) for c in conds}
    for trial in trials:
        info = trial['info']

        # Include only correct trials
        coh = info['coh']
        choice = get_choice(trial)
        if choice != info['choice']:
            continue

        # Accumulate responses up to the end of this trial's stimulus epoch.
        t_i = trial['t']
        w_i = np.where(t_i <= info['epochs']['stimulus'][1])[0][-1] + 1

        c = (info['coh'], info['in_out'])
        sorted_trials[c][:,:w_i] += trial['r'][:,:w_i]
        ntrials_by_cond[c][:w_i] += 1
    for c in conds:
        # Per-timepoint mean; safe_divide guards against zero trial counts.
        sorted_trials[c] *= np.array([safe_divide(x) for x in ntrials_by_cond[c]])

    # Save
    with open(sortedfile, 'wb') as f:
        pickle.dump((t, sorted_trials), f, pickle.HIGHEST_PROTOCOL)

    print(("[ {}.sort_trials_stim_onset ]"
           " Trials sorted and aligned to stimulus onset, saved to {}")
          .format(THIS, sortedfile))