Here are examples of the Python API numpy.max, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
166 Examples
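Before the project examples, here is a minimal sketch of the call itself (toy values, purely illustrative): numpy.max returns the maximum of an array, over the flattened array by default or along a given axis.

import numpy as np

a = np.array([[1, 5, 3],
              [4, 2, 6]])

print(np.max(a))          # 6  -> maximum of the flattened array
print(np.max(a, axis=0))  # [4 5 6]  -> column-wise maxima
print(np.max(a, axis=1))  # [5 6]  -> row-wise maxima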
Example 1
Project: EDeN Source File: rna_constrained_subopt.py
def max_difference_subselection(seqs, scores=None, max_num=None):
    # extract difference matrix
    diff_matrix = difference_matrix(seqs)
    size = len(seqs)
    m = np.max(diff_matrix) + 1
    # iterate size - k times, i.e. until only k instances are left
    for t in range(size - max_num):
        # find pairs with smallest difference
        (min_i, min_j) = np.unravel_index(np.argmin(diff_matrix), diff_matrix.shape)
        # choose instance with highest score
        if scores[min_i] > scores[min_j]:
            id = min_i
        else:
            id = min_j
        # remove instance with highest score by setting all its pairwise differences to max value
        diff_matrix[id, :] = m
        diff_matrix[:, id] = m
    # extract surviving elements, i.e. elements that have 0 on the diagonal
    return np.array([i for i, x in enumerate(np.diag(diff_matrix)) if x == 0])
Example 2
def max(self, name, ng=0):
    """
    return the maximum of the variable name in the domain's valid region
    """
    n = self.vars.index(name)
    g = self.grid
    return np.max(self.data[n, g.ilo-ng:g.ihi+1+ng, g.jlo-ng:g.jhi+1+ng])
Example 3
def __init__(self, kern_list):
    for k in kern_list:
        assert isinstance(k, Kern), "can only add Kern instances"
    input_dim = np.max([k.input_dim if type(k.active_dims) is slice else np.max(k.active_dims) + 1 for k in kern_list])
    Kern.__init__(self, input_dim=input_dim)
    # add kernels to a list, flattening out instances of this class therein
    self.kern_list = []
    for k in kern_list:
        if isinstance(k, self.__class__):
            self.kern_list.extend(k.kern_list)
        else:
            self.kern_list.append(k)
    # generate a set of suitable names and add the kernels as attributes
    names = make_kernel_names(self.kern_list)
    [setattr(self, name, k) for name, k in zip(names, self.kern_list)]
Example 4
Project: histwords Source File: plothelper.py
def get_ccdf(deg_hist, x_min=1):
    cum_counts = [0]
    # note: passing keys() straight to np.max assumes Python 2, where it is a list
    degs = range(x_min, np.max(deg_hist.keys()))
    total_sum = 0
    for deg in degs:
        if deg in deg_hist:
            deg_count = deg_hist[deg]
        else:
            deg_count = 0
        total_sum += deg_count
        cum_counts.append(cum_counts[-1] + deg_count)
    return np.array(degs), 1 - np.array(cum_counts[1:]) / float(total_sum)
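A quick run on a toy degree histogram (values made up; note that range() silently drops the top degree, a quirk of the code above):

import numpy as np

deg_hist = {1: 5, 2: 3, 4: 2}  # degree -> node count
degs, ccdf = get_ccdf(deg_hist)
print(degs)  # [1 2 3]
print(ccdf)  # [0.375 0.    0.   ]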
Example 5
Project: spectralDNS Source File: OrrSommerfeld_eig.py
def create_interpolation_arrays(self):
    # Normalize and create necessary arrays for interpolation
    self.phi = zeros(self.N, complex)
    self.phi[1:-1] = squeeze(self.eigvectors[:, self.nx])
    self.phi = self.phi / max(abs(real(self.phi)))
    # Compute and store derivative of phi
    self.dphidy = dot(self.D, self.phi)
    self.dphidy[:self.N:self.N-1] = 0.
    #self.dphidy[-1] = 0.
    # Create interpolation vector and barycentric weights for later use
    self.f = zeros(self.N, float)
    self.w = (-1.)**(arange(self.N))
    self.w[:self.N:(self.N-1)] /= 2.
Example 6
Project: RMG-Py Source File: species.py
def getSymmetryNumber(self):
    """
    Get the symmetry number for the species, which is the highest symmetry number amongst
    its resonance isomers. This function is currently used for website purposes and testing only as it
    requires additional calculateSymmetryNumber calls.
    """
    cython.declare(symmetryNumber=cython.int)
    symmetryNumber = numpy.max([mol.getSymmetryNumber() for mol in self.molecule])
    return symmetryNumber
Example 7
Project: Attentive_reader Source File: attentive_reader.py
def prepare_data_sents(seqs_x, seqs_y):
    # x: a list of sentences
    lengths_s = []
    lengths_w = []
    for seq in seqs_x:
        lengths_s.append(len(seq))
        lengths_w_tmp = []
        for w in seq:
            lengths_w_tmp.append(len(w))
        lengths_w.append(lengths_w_tmp)
    lengths_y = [len(s) for s in seqs_y]
    n_samples = len(seqs_x)
    maxlen_s = numpy.max(lengths_s) + 1
    maxlen_w = numpy.max([lw for lws in lengths_w for lw in lws]) + 1
    maxlen_y = numpy.max(lengths_y) + 1
    x = numpy.zeros((maxlen_w, maxlen_s, n_samples)).astype('uint32')
    y = numpy.zeros((maxlen_y, n_samples)).astype('uint32')
    x_mask = numpy.zeros((maxlen_w, maxlen_s, n_samples)).astype('float32')
    y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
    for idx, [s_x, s_y] in enumerate(zip(seqs_x, seqs_y)):
        for i in xrange(lengths_s[idx]):
            x[:lengths_w[idx][i], i, idx] = numpy.array(s_x[i], dtype='uint32')
            x_mask[:lengths_w[idx][i], i, idx] = 1.
        if x[:, :, idx].sum((0, 1)) <= 1:
            import ipdb; ipdb.set_trace()
            warnings.warn("There is a problem with the data at this index.")
        y[:lengths_y[idx], idx] = s_y
        y_mask[:lengths_y[idx], idx] = 1.
    return x, x_mask, y, y_mask, maxlen_w, maxlen_s, maxlen_y
Example 8
def on_key_down(self, key_code):
    if key_code == wx.WXK_DELETE or \
       key_code == wx.WXK_NUMPAD_DELETE or \
       (key_code == wx.WXK_BACK and sys.is_darwin()):
        if self._show_delete_menu:
            if self._object is not None:
                self.on_delete_object(None)
                self.queue_refresh()
    if key_code == wx.WXK_DOWN:
        if wx.GetKeyState(wx.WXK_SHIFT):
            self._zoom *= 1.2
            if self._zoom > numpy.max(self._machine_size) * 3:
                self._zoom = numpy.max(self._machine_size) * 3
        elif wx.GetKeyState(wx.WXK_CONTROL):
            self._z_offset += 5
        else:
            self._pitch -= 15
        self.queue_refresh()
    elif key_code == wx.WXK_UP:
        if wx.GetKeyState(wx.WXK_SHIFT):
            self._zoom /= 1.2
            if self._zoom < 1:
                self._zoom = 1
        elif wx.GetKeyState(wx.WXK_CONTROL):
            self._z_offset -= 5
        else:
            self._pitch += 15
        self.queue_refresh()
    elif key_code == wx.WXK_LEFT:
        self._yaw -= 15
        self.queue_refresh()
    elif key_code == wx.WXK_RIGHT:
        self._yaw += 15
        self.queue_refresh()
    elif key_code == wx.WXK_NUMPAD_ADD or key_code == wx.WXK_ADD or \
         key_code == ord('+') or key_code == ord('='):
        self._zoom /= 1.2
        if self._zoom < 1:
            self._zoom = 1
        self.queue_refresh()
    elif key_code == wx.WXK_NUMPAD_SUBTRACT or key_code == wx.WXK_SUBTRACT or \
         key_code == ord('-'):
        self._zoom *= 1.2
        if self._zoom > numpy.max(self._machine_size) * 3:
            self._zoom = numpy.max(self._machine_size) * 3
        self.queue_refresh()
    elif key_code == wx.WXK_HOME:
        self._yaw = 30
        self._pitch = 60
        self.queue_refresh()
    elif key_code == wx.WXK_PAGEUP:
        self._yaw = 0
        self._pitch = 0
        self.queue_refresh()
    elif key_code == wx.WXK_PAGEDOWN:
        self._yaw = 0
        self._pitch = 90
        self.queue_refresh()
    elif key_code == wx.WXK_END:
        self._yaw = 90
        self._pitch = 90
        self.queue_refresh()
    if key_code == wx.WXK_CONTROL:
        self._move_vertical = True
Example 9
Project: pycollada Source File: lineset.py
def __init__(self, sources, material, index, xmlnode=None):
    """A LineSet should not be created manually. Instead, call the
    :meth:`collada.geometry.Geometry.createLineSet` method after
    creating a geometry instance.
    """
    if len(sources) == 0:
        raise DaeIncompleteError('A line set needs at least one input for vertex positions')
    if 'VERTEX' not in sources:
        raise DaeIncompleteError('Line set requires vertex input')
    # find max offset
    max_offset = max([max([input[0] for input in input_type_array])
                      for input_type_array in sources.values() if len(input_type_array) > 0])
    self.sources = sources
    self.material = material
    self.index = index
    self.indices = self.index
    self.nindices = max_offset + 1
    self.index.shape = (-1, 2, self.nindices)
    self.nlines = len(self.index)
    if len(self.index) > 0:
        self._vertex = sources['VERTEX'][0][4].data
        self._vertex_index = self.index[:, :, sources['VERTEX'][0][0]]
        self.maxvertexindex = numpy.max(self._vertex_index)
        checkSource(sources['VERTEX'][0][4], ('X', 'Y', 'Z'),
                    self.maxvertexindex)
    else:
        self._vertex = None
        self._vertex_index = None
        self.maxvertexindex = -1
    if 'NORMAL' in sources and len(sources['NORMAL']) > 0 \
            and len(self.index) > 0:
        self._normal = sources['NORMAL'][0][4].data
        self._normal_index = self.index[:, :, sources['NORMAL'][0][0]]
        self.maxnormalindex = numpy.max(self._normal_index)
        checkSource(sources['NORMAL'][0][4], ('X', 'Y', 'Z'),
                    self.maxnormalindex)
    else:
        self._normal = None
        self._normal_index = None
        self.maxnormalindex = -1
    if 'TEXCOORD' in sources and len(sources['TEXCOORD']) > 0 \
            and len(self.index) > 0:
        self._texcoordset = tuple([texinput[4].data
                                   for texinput in sources['TEXCOORD']])
        self._texcoord_indexset = tuple([self.index[:, :, sources['TEXCOORD'][i][0]]
                                         for i in xrange(len(sources['TEXCOORD']))])
        self.maxtexcoordsetindex = [numpy.max(tex_index)
                                    for tex_index in self._texcoord_indexset]
        for i, texinput in enumerate(sources['TEXCOORD']):
            checkSource(texinput[4], ('S', 'T'), self.maxtexcoordsetindex[i])
    else:
        self._texcoordset = tuple()
        self._texcoord_indexset = tuple()
        self.maxtexcoordsetindex = -1
    if xmlnode is not None:
        self.xmlnode = xmlnode
        """ElementTree representation of the line set."""
    else:
        self.index.shape = (-1)
        acclen = len(self.index)
        txtindices = ' '.join(map(str, self.index.tolist()))
        self.index.shape = (-1, 2, self.nindices)
        self.xmlnode = E.lines(count=str(self.nlines),
                               material=self.material)
        all_inputs = []
        for semantic_list in self.sources.values():
            all_inputs.extend(semantic_list)
        for offset, semantic, sourceid, set, src in all_inputs:
            inpnode = E.input(offset=str(offset), semantic=semantic,
                              source=sourceid)
            if set is not None:
                inpnode.set('set', str(set))
            self.xmlnode.append(inpnode)
        self.xmlnode.append(E.p(txtindices))
Example 10
Project: pycollada Source File: triangleset.py
def __init__(self, sources, material, index, xmlnode=None):
    """A TriangleSet should not be created manually. Instead, call the
    :meth:`collada.geometry.Geometry.createTriangleSet` method after
    creating a geometry instance.
    """
    if len(sources) == 0:
        raise DaeIncompleteError('A triangle set needs at least one input for vertex positions')
    if 'VERTEX' not in sources:
        raise DaeIncompleteError('Triangle set requires vertex input')
    max_offset = max([max([input[0] for input in input_type_array])
                      for input_type_array in sources.values()
                      if len(input_type_array) > 0])
    self.material = material
    self.index = index
    self.indices = self.index
    self.nindices = max_offset + 1
    self.index.shape = (-1, 3, self.nindices)
    self.ntriangles = len(self.index)
    self.sources = sources
    if len(self.index) > 0:
        self._vertex = sources['VERTEX'][0][4].data
        self._vertex_index = self.index[:, :, sources['VERTEX'][0][0]]
        self.maxvertexindex = numpy.max(self._vertex_index)
        checkSource(sources['VERTEX'][0][4], ('X', 'Y', 'Z'), self.maxvertexindex)
    else:
        self._vertex = None
        self._vertex_index = None
        self.maxvertexindex = -1
    if 'NORMAL' in sources and len(sources['NORMAL']) > 0 and len(self.index) > 0:
        self._normal = sources['NORMAL'][0][4].data
        self._normal_index = self.index[:, :, sources['NORMAL'][0][0]]
        self.maxnormalindex = numpy.max(self._normal_index)
        checkSource(sources['NORMAL'][0][4], ('X', 'Y', 'Z'), self.maxnormalindex)
    else:
        self._normal = None
        self._normal_index = None
        self.maxnormalindex = -1
    if 'TEXCOORD' in sources and len(sources['TEXCOORD']) > 0 and len(self.index) > 0:
        self._texcoordset = tuple([texinput[4].data for texinput in sources['TEXCOORD']])
        self._texcoord_indexset = tuple([self.index[:, :, sources['TEXCOORD'][i][0]]
                                         for i in xrange(len(sources['TEXCOORD']))])
        self.maxtexcoordsetindex = [numpy.max(tex_index) for tex_index in self._texcoord_indexset]
        for i, texinput in enumerate(sources['TEXCOORD']):
            checkSource(texinput[4], ('S', 'T'), self.maxtexcoordsetindex[i])
    else:
        self._texcoordset = tuple()
        self._texcoord_indexset = tuple()
        self.maxtexcoordsetindex = -1
    if 'TEXTANGENT' in sources and len(sources['TEXTANGENT']) > 0 and len(self.index) > 0:
        self._textangentset = tuple([texinput[4].data for texinput in sources['TEXTANGENT']])
        self._textangent_indexset = tuple([self.index[:, :, sources['TEXTANGENT'][i][0]]
                                           for i in xrange(len(sources['TEXTANGENT']))])
        self.maxtextangentsetindex = [numpy.max(tex_index) for tex_index in self._textangent_indexset]
        for i, texinput in enumerate(sources['TEXTANGENT']):
            checkSource(texinput[4], ('X', 'Y', 'Z'), self.maxtextangentsetindex[i])
    else:
        self._textangentset = tuple()
        self._textangent_indexset = tuple()
        self.maxtextangentsetindex = -1
    if 'TEXBINORMAL' in sources and len(sources['TEXBINORMAL']) > 0 and len(self.index) > 0:
        self._texbinormalset = tuple([texinput[4].data for texinput in sources['TEXBINORMAL']])
        self._texbinormal_indexset = tuple([self.index[:, :, sources['TEXBINORMAL'][i][0]]
                                            for i in xrange(len(sources['TEXBINORMAL']))])
        self.maxtexbinormalsetindex = [numpy.max(tex_index) for tex_index in self._texbinormal_indexset]
        for i, texinput in enumerate(sources['TEXBINORMAL']):
            checkSource(texinput[4], ('X', 'Y', 'Z'), self.maxtexbinormalsetindex[i])
    else:
        self._texbinormalset = tuple()
        self._texbinormal_indexset = tuple()
        self.maxtexbinormalsetindex = -1
    if xmlnode is not None:
        self.xmlnode = xmlnode
    else:
        self._recreateXmlNode()
Example 11
Project: orange Source File: orngProjectionPursuit.py
def draw_scatter_hist(x, y, fileName="lala.png"):
    from matplotlib.ticker import NullFormatter
    nullfmt = NullFormatter()  # no labels
    clf()
    # definitions for the axes
    left, width = 0.1, 0.65
    bottom, height = 0.1, 0.65
    bottom_h = left_h = left + width + 0.02
    rect_scatter = [left, bottom, width, height]
    rect_histx = [left, bottom_h, width, 0.2]
    rect_histy = [left_h, bottom, 0.2, height]
    # start with a rectangular Figure
    figure(1, figsize=(8, 8))
    axScatter = axes(rect_scatter)
    axHistx = axes(rect_histx)
    axHisty = axes(rect_histy)
    # no labels
    axHistx.xaxis.set_major_formatter(nullfmt)
    axHisty.yaxis.set_major_formatter(nullfmt)
    # the scatter plot:
    axScatter.scatter(x, y)
    # now determine nice limits by hand:
    binwidth = 0.25
    xymax = numpy.max([numpy.max(np.fabs(x)), numpy.max(np.fabs(y))])
    lim = (int(xymax / binwidth) + 1) * binwidth
    axScatter.set_xlim((-lim, lim))
    axScatter.set_ylim((-lim, lim))
    bins = numpy.arange(-lim, lim + binwidth, binwidth)
    axHistx.hist(x, bins=bins)
    axHisty.hist(y, bins=bins, orientation='horizontal')
    axHistx.set_xlim(axScatter.get_xlim())
    axHisty.set_ylim(axScatter.get_ylim())
    savefig(fileName)
Example 12
Project: APGL Source File: Util.py
@staticmethod
def fitDiscretePowerLaw(x, xmins=None):
    """
    Take a sample of discrete data points which are drawn from a power law probability
    distribution (p(x) = x^-alpha / zeta(alpha, xmin)) and return the exponent.
    If xmins is supplied then it searches through the set of xmins rather than
    using all possible xmins. Most of the time it helps to keep xmins low.
    Returns the goodness of fit, best alpha and xmin. If there is only 1 unique
    value of x then -1, -1, min(x) is returned.
    """
    xmax = numpy.max(x)
    if xmins is None:
        xmin = numpy.max(numpy.array([numpy.min(x), 1]))
        xmins = numpy.arange(xmin, xmax)
    # Note that x must have at least 2 unique elements
    if xmins.shape[0] == 0:
        return -1, -1, numpy.min(x)
    alphas = numpy.arange(1.5, 3.5, 0.01)
    ksAlpha = numpy.zeros((xmins.shape[0], 2))
    for j in range(xmins.shape[0]):
        xmin = xmins[j]
        z = x[x >= xmin]
        n = z.shape[0]
        sumLogx = numpy.sum(numpy.log(z))
        likelyhoods = numpy.zeros(alphas.shape[0])
        for i in range(alphas.shape[0]):
            likelyhoods[i] = -n*numpy.log(scipy.special.zeta(alphas[i], xmin)) - alphas[i]*sumLogx
        k = numpy.argmax(likelyhoods)
        # Compute KS statistic
        cdf = numpy.cumsum(numpy.bincount(z)[xmin:xmax]/float(n))
        fit = numpy.arange(xmin, xmax)**-alphas[k] / scipy.special.zeta(alphas[k], xmin)
        fit = numpy.cumsum(fit)
        ksAlpha[j, 0] = numpy.max(numpy.abs(cdf - fit))
        ksAlpha[j, 1] = alphas[k]
    i = numpy.argmin(ksAlpha[:, 0])
    return ksAlpha[i, 0], ksAlpha[i, 1], xmins[i]
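A hedged usage sketch (assuming the enclosing Util class is importable from APGL; the sample below is made up, so the fitted numbers are only illustrative):

import numpy
from apgl.util.Util import Util  # import path assumed

x = numpy.array([1, 1, 1, 1, 2, 2, 3, 5, 8, 13])
ks, alpha, xmin = Util.fitDiscretePowerLaw(x)
print(ks, alpha, xmin)  # KS goodness of fit, fitted exponent, chosen xmin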
Example 13
Project: scikit-beam Source File: save_powder_output.py
def gsas_writer(tth, intensity, output_name, mode=None,
                err=None, dir_path=None):
    """
    Save diffraction intensities into .gsas file format
    Parameters
    ----------
    tth : ndarray
        twotheta values (degrees) shape (N, ) array
    intensity : ndarray
        intensity values shape (N, ) array
    output_name : str
        name for the saved output diffraction intensities
    mode : {'STD', 'ESD', 'FXYE'}, optional
        GSAS file formats, could be 'STD', 'ESD', 'FXYE'
    err : ndarray, optional
        error value of intensity shape (N, ) array;
        if err is None then mode will be 'STD'
    dir_path : str, optional
        new directory path to save the output data files
        eg: /Data/experiments/data/
    """
    # save output diffraction intensities into .gsas file extension.
    ext = '.gsas'
    _validate_input(tth, intensity, err, ext)
    file_path = _create_file_path(dir_path, output_name, ext)
    max_intensity = 999999
    log_scale = np.floor(np.log10(max_intensity / np.max(intensity)))
    log_scale = min(log_scale, 0)
    scale = 10 ** int(log_scale)
    lines = []
    title = 'Angular Profile'
    title += ': %s' % output_name
    title += ' scale=%g' % scale
    title = title[:80]
    lines.append("%-80s" % title)
    i_bank = 1
    n_chan = len(intensity)
    # two-theta0 and dtwo-theta in centidegrees
    tth0_cdg = tth[0] * 100
    dtth_cdg = (tth[-1] - tth[0]) / (len(tth) - 1) * 100
    if err is None:
        mode = 'STD'
    if mode == 'STD':
        n_rec = int(np.ceil(n_chan / 10.0))
        l_bank = ("BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f STD" %
                  (i_bank, n_chan, n_rec, tth0_cdg, dtth_cdg, 0, 0))
        lines.append("%-80s" % l_bank)
        lrecs = ["%2i%6.0f" % (1, ii * scale) for ii in intensity]
        for i in range(0, len(lrecs), 10):
            lines.append("".join(lrecs[i:i + 10]))
    elif mode == 'ESD':
        n_rec = int(np.ceil(n_chan / 5.0))
        l_bank = ("BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f ESD"
                  % (i_bank, n_chan, n_rec, tth0_cdg, dtth_cdg, 0, 0))
        lines.append("%-80s" % l_bank)
        l_recs = ["%8.0f%8.0f" % (ii, ee * scale)
                  for ii, ee in zip(intensity, err)]
        for i in range(0, len(l_recs), 5):
            lines.append("".join(l_recs[i:i + 5]))
    elif mode == 'FXYE':
        n_rec = n_chan
        l_bank = ("BANK %5i %8i %8i CONST %9.5f %9.5f %9.5f %9.5f FXYE" %
                  (i_bank, n_chan, n_rec, tth0_cdg, dtth_cdg, 0, 0))
        lines.append("%-80s" % l_bank)
        l_recs = [
            "%22.10f%22.10f%24.10f" % (xx * scale, yy * scale, ee * scale)
            for xx, yy, ee in zip(tth, intensity, err)]
        for i in range(len(l_recs)):
            lines.append("%-80s" % l_recs[i])
    else:
        raise ValueError(" Define the GSAS file type ")
    lines[-1] = "%-80s" % lines[-1]
    rv = "\r\n".join(lines) + "\r\n"
    with open(file_path, 'wt') as f:
        f.write(rv)
Example 14
def find_baselines(self):
    d_hist = self.d_gauss_hist
    gmaxval = np.max(d_hist)
    maxloc = np.argmax(d_hist)
    peakthresh = gmaxval / 10.0
    zerothresh = gmaxval / 50.0
    inpeak = False
    min_dist_in_peak = self.best_harmonic / 2.0
    self.base_lines = []
    log("Max Hist: {:.2f} Peakthresh: {:.2f} Zerothresh: {:.2f} Min Dist in Peak: {:.2f}"
        "".format(gmaxval, peakthresh, zerothresh, min_dist_in_peak))
    for irow, val in enumerate(d_hist):
        if not inpeak:
            if val > peakthresh:
                inpeak = True
                maxval = val
                maxloc = irow
                mintosearch = irow + min_dist_in_peak
                log('\ntransition to in-peak: mintosearch : ', mintosearch, end='')
                # accept no zeros between i and i+mintosearch
        else:  # in peak, look for max
            if val > maxval:
                maxval = val
                maxloc = irow
                mintosearch = irow + min_dist_in_peak
                log('\nMoved mintosearch to', mintosearch, end='')
            elif irow > mintosearch and val <= zerothresh:
                # leave peak and save the last baseline found
                inpeak = False
                log('\nFound baseline #', maxloc, end='')
                self.base_lines.append(maxloc)
                log(' @{}'.format(irow), end='')
    if inpeak:
        self.base_lines.append(maxloc)
        log('\nFound baseline #', maxloc, end='')
    self.num_lines = len(self.base_lines)
Example 15
Project: scikit-image Source File: hough_transform.py
def hough_line_peaks(hspace, angles, dists, min_distance=9, min_angle=10,
                     threshold=None, num_peaks=np.inf):
    """Return peaks in Hough transform.
    Identifies most prominent lines separated by a certain angle and distance
    in a Hough transform. Non-maximum suppression with different sizes is
    applied separately in the first (distances) and second (angles) dimension
    of the Hough space to identify peaks.
    Parameters
    ----------
    hspace : (N, M) array
        Hough space returned by the `hough_line` function.
    angles : (M,) array
        Angles returned by the `hough_line` function. Assumed to be continuous
        (`angles[-1] - angles[0] == PI`).
    dists : (N,) array
        Distances returned by the `hough_line` function.
    min_distance : int
        Minimum distance separating lines (maximum filter size for first
        dimension of Hough space).
    min_angle : int
        Minimum angle separating lines (maximum filter size for second
        dimension of Hough space).
    threshold : float
        Minimum intensity of peaks. Default is `0.5 * max(hspace)`.
    num_peaks : int
        Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
        return `num_peaks` coordinates based on peak intensity.
    Returns
    -------
    hspace, angles, dists : tuple of array
        Peak values in Hough space, angles and distances.
    Examples
    --------
    >>> from skimage.transform import hough_line, hough_line_peaks
    >>> from skimage.draw import line
    >>> img = np.zeros((15, 15), dtype=np.bool_)
    >>> rr, cc = line(0, 0, 14, 14)
    >>> img[rr, cc] = 1
    >>> rr, cc = line(0, 14, 14, 0)
    >>> img[cc, rr] = 1
    >>> hspace, angles, dists = hough_line(img)
    >>> hspace, angles, dists = hough_line_peaks(hspace, angles, dists)
    >>> len(angles)
    2
    """
    hspace = hspace.copy()
    rows, cols = hspace.shape
    if threshold is None:
        threshold = 0.5 * np.max(hspace)
    distance_size = 2 * min_distance + 1
    angle_size = 2 * min_angle + 1
    hspace_max = ndimage.maximum_filter1d(hspace, size=distance_size, axis=0,
                                          mode='constant', cval=0)
    hspace_max = ndimage.maximum_filter1d(hspace_max, size=angle_size, axis=1,
                                          mode='constant', cval=0)
    mask = (hspace == hspace_max)
    hspace *= mask
    hspace_t = hspace > threshold
    label_hspace = measure.label(hspace_t)
    props = measure.regionprops(label_hspace, hspace_max)
    # Sort the list of peaks by intensity, not left-right, so larger peaks
    # in Hough space cannot be arbitrarily suppressed by smaller neighbors
    props = sorted(props, key=lambda x: x.max_intensity)[::-1]
    coords = np.array([np.round(p.centroid) for p in props], dtype=int)
    hspace_peaks = []
    dist_peaks = []
    angle_peaks = []
    # relative coordinate grid for local neighbourhood suppression
    dist_ext, angle_ext = np.mgrid[-min_distance:min_distance + 1,
                                   -min_angle:min_angle + 1]
    for dist_idx, angle_idx in coords:
        accum = hspace[dist_idx, angle_idx]
        if accum > threshold:
            # absolute coordinate grid for local neighbourhood suppression
            dist_nh = dist_idx + dist_ext
            angle_nh = angle_idx + angle_ext
            # no reflection for distance neighbourhood
            dist_in = np.logical_and(dist_nh > 0, dist_nh < rows)
            dist_nh = dist_nh[dist_in]
            angle_nh = angle_nh[dist_in]
            # reflect angles and assume angles are continuous, e.g.
            # (..., 88, 89, -90, -89, ..., 89, -90, -89, ...)
            angle_low = angle_nh < 0
            dist_nh[angle_low] = rows - dist_nh[angle_low]
            angle_nh[angle_low] += cols
            angle_high = angle_nh >= cols
            dist_nh[angle_high] = rows - dist_nh[angle_high]
            angle_nh[angle_high] -= cols
            # suppress neighbourhood
            hspace[dist_nh, angle_nh] = 0
            # add current line to peaks
            hspace_peaks.append(accum)
            dist_peaks.append(dists[dist_idx])
            angle_peaks.append(angles[angle_idx])
    hspace_peaks = np.array(hspace_peaks)
    dist_peaks = np.array(dist_peaks)
    angle_peaks = np.array(angle_peaks)
    if num_peaks < len(hspace_peaks):
        idx_maxsort = np.argsort(hspace_peaks)[::-1][:num_peaks]
        hspace_peaks = hspace_peaks[idx_maxsort]
        dist_peaks = dist_peaks[idx_maxsort]
        angle_peaks = angle_peaks[idx_maxsort]
    return hspace_peaks, angle_peaks, dist_peaks
Example 16
Project: scipy Source File: miobase.py
def matdims(arr, oned_as='column'):
    """
    Determine equivalent MATLAB dimensions for given array
    Parameters
    ----------
    arr : ndarray
        Input array
    oned_as : {'column', 'row'}, optional
        Whether 1-D arrays are returned as MATLAB row or column matrices.
        Default is 'column'.
    Returns
    -------
    dims : tuple
        Shape tuple, in the form MATLAB expects it.
    Notes
    -----
    We had to decide what shape a 1 dimensional array would be by
    default. ``np.atleast_2d`` thinks it is a row vector. The
    default for a vector in MATLAB (e.g. ``>> 1:12``) is a row vector.
    Versions of scipy up to and including 0.11 resulted (accidentally)
    in 1-D arrays being read as column vectors. For the moment, we
    maintain the same tradition here.
    Examples
    --------
    >>> matdims(np.array(1)) # numpy scalar
    (1, 1)
    >>> matdims(np.array([1])) # 1d array, 1 element
    (1, 1)
    >>> matdims(np.array([1,2])) # 1d array, 2 elements
    (2, 1)
    >>> matdims(np.array([[2],[3]])) # 2d array, column vector
    (2, 1)
    >>> matdims(np.array([[2,3]])) # 2d array, row vector
    (1, 2)
    >>> matdims(np.array([[[2,3]]])) # 3d array, rowish vector
    (1, 1, 2)
    >>> matdims(np.array([])) # empty 1d array
    (0, 0)
    >>> matdims(np.array([[]])) # empty 2d
    (0, 0)
    >>> matdims(np.array([[[]]])) # empty 3d
    (0, 0, 0)
    Optional argument flips 1-D shape behavior.
    >>> matdims(np.array([1,2]), 'row') # 1d array, 2 elements
    (1, 2)
    The argument has to make sense though
    >>> matdims(np.array([1,2]), 'bizarre')
    Traceback (most recent call last):
        ...
    ValueError: 1D option "bizarre" is strange
    """
    shape = arr.shape
    if shape == ():  # scalar
        return (1, 1)
    if reduce(operator.mul, shape) == 0:  # zero elements
        return (0,) * np.max([arr.ndim, 2])
    if len(shape) == 1:  # 1D
        if oned_as == 'column':
            return shape + (1,)
        elif oned_as == 'row':
            return (1,) + shape
        else:
            raise ValueError('1D option "%s" is strange'
                             % oned_as)
    return shape
Example 17
Project: scipy Source File: _numdiff.py
def _sparse_difference(fun, x0, f0, h, use_one_sided,
                       structure, groups, method):
    m = f0.size
    n = x0.size
    row_indices = []
    col_indices = []
    fractions = []
    n_groups = np.max(groups) + 1
    for group in range(n_groups):
        # Perturb variables which are in the same group simultaneously.
        e = np.equal(group, groups)
        h_vec = h * e
        if method == '2-point':
            x = x0 + h_vec
            dx = x - x0
            df = fun(x) - f0
            # The result is written to columns which correspond to perturbed
            # variables.
            cols, = np.nonzero(e)
            # Find all non-zero elements in selected columns of Jacobian.
            i, j, _ = find(structure[:, cols])
            # Restore column indices in the full array.
            j = cols[j]
        elif method == '3-point':
            # Here we do conceptually the same but separate one-sided
            # and two-sided schemes.
            x1 = x0.copy()
            x2 = x0.copy()
            mask_1 = use_one_sided & e
            x1[mask_1] += h_vec[mask_1]
            x2[mask_1] += 2 * h_vec[mask_1]
            mask_2 = ~use_one_sided & e
            x1[mask_2] -= h_vec[mask_2]
            x2[mask_2] += h_vec[mask_2]
            dx = np.zeros(n)
            dx[mask_1] = x2[mask_1] - x0[mask_1]
            dx[mask_2] = x2[mask_2] - x1[mask_2]
            f1 = fun(x1)
            f2 = fun(x2)
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
            mask = use_one_sided[j]
            df = np.empty(m)
            rows = i[mask]
            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
            rows = i[~mask]
            df[rows] = f2[rows] - f1[rows]
        elif method == 'cs':
            f1 = fun(x0 + h_vec*1.j)
            df = f1.imag
            dx = h_vec
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
        else:
            raise ValueError("Never be here.")
        # All that's left is to compute the fraction. We store i, j and
        # fractions as separate arrays and later construct coo_matrix.
        row_indices.append(i)
        col_indices.append(j)
        fractions.append(df[i] / dx[j])
    row_indices = np.hstack(row_indices)
    col_indices = np.hstack(col_indices)
    fractions = np.hstack(fractions)
    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
    return csr_matrix(J)
Example 18
Project: mlxtend Source File: tf_multilayerperceptron.py
def _fit(self, X, y, init_params=True):
    self._check_target_array(y)
    n_batches = self.minibatches
    if y.shape[0] % n_batches != 0:
        raise AttributeError("Training set size %d cannot"
                             " be divided into %d minibatches without"
                             " remainder" % (y.shape[0], n_batches))
    # Construct the Graph
    g = tf.Graph()
    with g.as_default():
        self.optimizer_ = self._init_optimizer(self.optimizer)
        if init_params:
            if self.n_classes is None:
                self.n_classes = np.max(y) + 1
            self._n_features = X.shape[1]
            self._weight_maps, self._bias_maps = self._layermapping(
                n_features=self._n_features,
                n_classes=self.n_classes,
                hidden_layers=self.hidden_layers)
            tf_weights, tf_biases = self._init_params_from_layermapping(
                weight_maps=self._weight_maps,
                bias_maps=self._bias_maps,
                activations=self.activations)
            self.cost_ = []
        else:
            tf_weights, tf_biases = self._reuse_weights(
                weights=self.w_,
                biases=self.b_)
        # Prepare the training data
        y_enc = self._one_hot(y, self.n_classes, dtype=np.float)
        n_idx = list(range(y.shape[0]))
        tf_X = tf.convert_to_tensor(value=X, dtype=self.dtype)
        tf_y = tf.convert_to_tensor(value=y_enc, dtype=self.dtype)
        tf_idx = tf.placeholder(tf.int32,
                                shape=[int(y.shape[0] / n_batches)])
        X_batch = tf.gather(params=tf_X, indices=tf_idx)
        y_batch = tf.gather(params=tf_y, indices=tf_idx)
        # Setup the graph for minimizing cross entropy cost
        net = self._predict(tf_X=X_batch,
                            tf_weights=tf_weights,
                            tf_biases=tf_biases,
                            activations=self.activations,
                            dropout=True)
        # Define loss and optimizer
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(net,
                                                                y_batch)
        cost = tf.reduce_mean(cross_entropy)
        train = self.optimizer_.minimize(cost,
                                         global_step=self.global_step_)
        # Initializing the variables
        init = tf.initialize_all_variables()
    # random seed for shuffling and dropout
    rgen = np.random.RandomState(self.random_seed)
    # Launch the graph
    with tf.Session(graph=g) as sess:
        sess.run(init)
        self.init_time_ = time()
        for epoch in range(self.epochs):
            if self.minibatches > 1:
                n_idx = rgen.permutation(n_idx)
            minis = np.array_split(n_idx, self.minibatches)
            costs = []
            for idx in minis:
                _, c = sess.run([train, cost], feed_dict={tf_idx: idx})
                costs.append(c)
            avg_cost = np.mean(costs)
            self.cost_.append(avg_cost)
            self._print_progress(iteration=epoch + 1,
                                 n_iter=self.epochs,
                                 cost=avg_cost)
        self.w_ = {k: tf_weights[k].eval() for k in tf_weights}
        self.b_ = {k: tf_biases[k].eval() for k in tf_biases}
Example 19
Project: cartopy Source File: img_nest.py
def __init__(self, name, crs, collections, _ancestry=None):
    """
    Represents a complex nest of ImageCollections.
    On construction, the image collections are scanned for ancestry,
    leading to fast image finding capabilities.
    A complex (and time consuming to create) NestedImageCollection instance
    can be saved as a pickle file and subsequently be (quickly) restored.
    There is a simplified creation interface for NestedImageCollection;
    see ``from_configuration`` for more detail.
    Args:
    * name:
        The name of the nested image collection.
    * crs:
        The native :class:`~cartopy.crs.Projection` of all the image
        collections.
    * collections:
        A list of one or more :class:`~cartopy.io.img_nest.ImageCollection`
        instances.
    """
    # NOTE: all collections must have the same crs.
    _names = set([collection.name for collection in collections])
    assert len(_names) == len(collections), \
        'The collections must have unique names.'
    self.name = name
    self.crs = crs
    self._collections_by_name = {collection.name: collection
                                 for collection in collections}

    def sort_func(c):
        return np.max([image.bbox().area for image in c.images])

    self._collections = sorted(collections, key=sort_func, reverse=True)
    self._ancestry = {}
    """
    maps (collection name, image) to a list of children
    (collection name, image).
    """
    if _ancestry is not None:
        self._ancestry = _ancestry
    else:
        parent_wth_children = zip(self._collections,
                                  self._collections[1:])
        for parent_collection, collection in parent_wth_children:
            for parent_image in parent_collection.images:
                for image in collection.images:
                    if self._is_parent(parent_image, image):
                        # append the child image to the parent's ancestry
                        key = (parent_collection.name, parent_image)
                        self._ancestry.setdefault(key, []).append(
                            (collection.name, image))
Example 20
Project: mlxtend Source File: tf_softmax.py
def _fit(self, X, y, init_params=True):
    self._check_target_array(y)
    n_batches = self.minibatches
    if y.shape[0] % n_batches != 0:
        raise AttributeError("Training set size %d cannot"
                             " be divided into %d minibatches without"
                             " remainder" % (y.shape[0], n_batches))
    # Construct the Graph
    g = tf.Graph()
    with g.as_default():
        if init_params:
            if self.n_classes is None:
                self.n_classes = np.max(y) + 1
            self._n_features = X.shape[1]
            tf_w_, tf_b_ = self._initialize_weights(
                n_features=self._n_features,
                n_classes=self.n_classes)
            self.cost_ = []
            self.train_acc_ = []
            self.valid_acc_ = []
        else:
            tf_w_ = tf.Variable(self.w_)
            tf_b_ = tf.Variable(self.b_)
        # Prepare the training data
        y_enc = self._one_hot(y, self.n_classes, dtype=np.float)
        tf_X = tf.convert_to_tensor(value=X, dtype=self.dtype)
        tf_y = tf.convert_to_tensor(value=y_enc, dtype=self.dtype)
        tf_idx = tf.placeholder(tf.int32,
                                shape=[int(y.shape[0] / n_batches)])
        X_batch = tf.gather(params=tf_X, indices=tf_idx)
        y_batch = tf.gather(params=tf_y, indices=tf_idx)
        # Setup the graph for minimizing cross entropy cost
        net = tf.matmul(X_batch, tf_w_) + tf_b_
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(net,
                                                                y_batch)
        cost = tf.reduce_mean(cross_entropy)
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=self.eta)
        train = optimizer.minimize(cost)
        # Initializing the variables
        init = tf.initialize_all_variables()
    # Launch the graph
    with tf.Session(graph=g) as sess:
        sess.run(init)
        rgen = np.random.RandomState(self.random_seed)
        self.init_time_ = time()
        for epoch in range(self.epochs):
            costs = []
            for idx in self._yield_minibatches_idx(
                    rgen=rgen,
                    n_batches=self.minibatches,
                    data_ary=y,
                    shuffle=True):
                _, c = sess.run([train, cost], feed_dict={tf_idx: idx})
                costs.append(c)
            avg_cost = np.mean(costs)
            self.cost_.append(avg_cost)
            # compute prediction accuracy
            train_acc = self._accuracy(y, tf_X, tf_w_, tf_b_)
            self.train_acc_.append(train_acc)
            if self.print_progress:
                self._print_progress(iteration=epoch + 1,
                                     n_iter=self.epochs,
                                     cost=avg_cost)
        self.w_ = tf_w_.eval()
        self.b_ = tf_b_.eval()
Example 21
def _prepare_batch(self, sequences):
    """Transposes a list of sequences into a list of time steps. Then
    returns word ID, file ID, and mask matrices in a format suitable to be
    input to the neural network.
    The first returned matrix contains the word IDs, the second identifies
    the file in case of multiple training files, and the third contains a
    mask that defines which elements are past the sequence end. Where the
    other values are valid, the mask matrix contains ones.
    All returned matrices have the same shape. The first dimension is the
    time step, i.e. the index to a word in a sequence. The second dimension
    selects the sequence. In other words, the first row is the first word of
    each sequence and so on.
    :type sequences: list of lists
    :param sequences: list of sequences, each of which is a list of word
                      IDs
    :rtype: three ndarrays
    :returns: word ID, file ID, and mask matrix
    """
    num_sequences = len(sequences)
    batch_length = numpy.max([len(s) for s in sequences])
    unk_id = self.vocabulary.word_to_id['<unk>']
    shape = (batch_length, num_sequences)
    word_ids = numpy.ones(shape, numpy.int64) * unk_id
    mask = numpy.zeros(shape, numpy.int8)
    file_ids = numpy.zeros(shape, numpy.int8)
    for i, sequence in enumerate(sequences):
        length = len(sequence)
        sequence_word_ids = numpy.ones(length, numpy.int64) * unk_id
        sequence_file_ids = numpy.zeros(length, numpy.int8)
        for index, (word, file_id) in enumerate(sequence):
            if word in self.vocabulary.word_to_id:
                sequence_word_ids[index] = self.vocabulary.word_to_id[word]
            sequence_file_ids[index] = file_id
        word_ids[:length, i] = sequence_word_ids
        mask[:length, i] = 1
        file_ids[:length, i] = sequence_file_ids
    return word_ids, file_ids, mask
Example 22
Project: homer Source File: systems.py
def try_join_system(page, i):
    """ Try joining system i with the system below it.
    Update page.systems, returning True,
    or return False if no barlines connect. """
    if (len(page.systems[i]['barlines']) == 0
            or len(page.systems[i+1]['barlines']) == 0):
        return False
    # Match each barline x1 in the top system to x0 in the bottom system.
    # If no matching barline within staff_dist, then extend the barline
    # vertically to the height of the system.
    system0 = page.systems[i]['barlines']
    system1 = page.systems[i+1]['barlines']
    system0_x1 = system0[:, 1, 0]
    system1_x0 = system1[:, 0, 0]
    pairs = util.merge(system0_x1, system1_x0, np.max(page.staff_dist))
    if len(pairs) == 0:
        return False
    ispair = (pairs >= 0).all(axis=1)
    if ispair.sum() == 0:
        # Must have at least one actual pair to test other possible ones
        return False
    s0_nopair = (pairs[:, 0] >= 0) & ~ispair
    s1_nopair = (pairs[:, 1] >= 0) & ~ispair
    new_systems = np.zeros((len(pairs), 2, 2), np.int32)
    new_systems[ispair, 0] = system0[pairs[ispair, 0], 0]
    new_systems[ispair, 1] = system1[pairs[ispair, 1], 1]
    new_systems[s0_nopair, 0] = system0[pairs[s0_nopair, 0], 0]
    new_systems[s0_nopair, 1] = system0[pairs[s0_nopair, 0], 1]
    new_systems[s0_nopair, 1, 1] = system1[:, 1, 1].mean()
    new_systems[s1_nopair, 1] = system1[pairs[s1_nopair, 1], 1]
    new_systems[s1_nopair, 0] = system1[pairs[s1_nopair, 1], 0]
    new_systems[s1_nopair, 0, 1] = system1[:, 0, 1].mean()
    actual_systems = verify_barlines(page,
                                     page.systems[i]['start'],
                                     page.systems[i+1]['stop'], new_systems)
    if len(actual_systems):
        # Combine systems i and i+1
        page.systems[i] = dict(barlines=actual_systems,
                               start=page.systems[i]['start'],
                               stop=page.systems[i+1]['stop'])
        del page.systems[i+1]
        return True
    else:
        return False
Example 23
Project: BioSPPy Source File: clustering.py
def coassoc_partition(coassoc=None, k=0, linkage='average'):
    """Extract the consensus partition from a co-association matrix using
    hierarchical agglomerative methods.
    Parameters
    ----------
    coassoc : array
        Co-association matrix.
    k : int, optional
        Number of clusters to extract; if 0 uses the life-time criterion.
    linkage : str, optional
        Linkage criterion for final partition extraction; one of 'average',
        'complete', 'single', or 'weighted'.
    Returns
    -------
    clusters : dict
        Dictionary with the sample indices (rows from 'data') for each found
        cluster; outliers have key -1; clusters are assigned integer keys
        starting at 0.
    """
    # check inputs
    if coassoc is None:
        raise TypeError("Please specify the input co-association matrix.")
    if linkage not in ['average', 'complete', 'single', 'weighted']:
        raise ValueError("Unknown linkage criterion '%r'." % linkage)
    N = len(coassoc)
    if k > N:
        raise ValueError("Number of clusters 'k' is higher than the number"
                         " of input samples.")
    if k < 0:
        k = 0
    # convert coassoc to condensed format, dissimilarity
    mx = np.max(coassoc)
    D = metrics.squareform(mx - coassoc)
    # build linkage
    Z = sch.linkage(D, method=linkage)
    # extract clusters
    if k == 0:
        # life-time
        labels = _life_time(Z, N)
    else:
        labels = sch.fcluster(Z, k, 'maxclust')
    # get cluster indices
    clusters = _extract_clusters(labels)
    return utils.ReturnTuple((clusters,), ('clusters',))
Example 24
Project: auto-sklearn Source File: plot_utils.py
def plot_summed_wins_of_optimizers(trial_list_per_dataset,
                                   name_list_per_dataset,
                                   save="", cut=sys.maxint,
                                   figsize=(16, 4), legend_ncols=3,
                                   colors=None, linewidth=3,
                                   markers=None, markersize=6):
    # TODO colors should be a function handle which returns an Iterable!
    # This is a hack
    cut = 50
    optimizers, summed_wins_of_optimizer = get_summed_wins_of_optimizers(
        trial_list_per_dataset, name_list_per_dataset)
    # Make a copy of the colors iterator, because we need it more than once!
    if colors is not None:
        if not isinstance(colors, itertools.cycle):
            raise TypeError()
        else:
            color_values = list()
            for i in range(10):
                r, g, b = colors.next()
                color_values.append((r, g, b))
    if markers is not None:
        if not isinstance(markers, itertools.cycle):
            raise TypeError()
        else:
            marker_values = list()
            for i in range(10):
                marker_values.append(markers.next())
    ############################################################################
    # Plot statistics
    for opt1_idx, key in enumerate(optimizers):
        fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, dpi=600,
                                       figsize=figsize)
        if colors is None:
            colors_ = plot_util.get_plot_colors()
        else:
            colors_ = itertools.cycle(color_values)
        if markers is None:
            markers_ = plot_util.get_empty_iterator()
        else:
            markers_ = itertools.cycle(marker_values)
        y_max = 0.
        for opt2_idx, key2 in enumerate(optimizers):
            if opt1_idx == opt2_idx:
                continue
            y = []
            y1 = []
            for i in range(0, cut+1):
                y.append(summed_wins_of_optimizer[i][opt1_idx, opt2_idx]
                         / len(trial_list_per_dataset) * 100)
                y1.append(- summed_wins_of_optimizer[i][opt2_idx, opt1_idx]
                          / len(trial_list_per_dataset) * 100)
            y_max_tmp = max(np.max(y), np.max(np.abs(y1)))
            y_max_tmp = np.ceil(y_max_tmp * 10) / 10.
            y_max = max(y_max_tmp, y_max)
            label = "%s vs %s" % (key, key2)
            label = label.replace("learned", "d_c").replace("l1", "d_p")
            color = colors_.next()
            marker = markers_.next()
            ax0.plot(range(0, cut+1), y, color=color, label=label,
                     linewidth=linewidth, marker=marker, markersize=markersize)
            ax1.plot(range(0, cut+1), y1, color=color, label=label,
                     linewidth=linewidth, marker=marker, markersize=markersize)
        #handles, labels = ax1.get_legend_handles_labels()
        #fig.legend(handles, labels, loc="upper center", fancybox=True,
        #           ncol=legend_ncols, shadow=True)
        ax0.set_xlim((0, cut))
        ax0.set_ylim((0, y_max))
        ax0.set_ylabel("Significant wins (%)")
        ax1.set_xlim((0, cut))
        ax1.set_ylim((-y_max, 0))
        ax1.set_ylabel("Significant losses (%)")
        yticklabels = ax1.get_yticks().tolist()
        #print yticklabels, [item.get_text() for item in yticklabels]
        ax1.set_yticklabels([-int(item) for item in yticklabels])
        ax1.legend(loc="best", fancybox=True, ncol=legend_ncols, shadow=True)
        plt.tight_layout()
        #plt.subplots_adjust(top=0.85)
        plt.xlabel("#Function evaluations")
        if save != "":
            plt.savefig(save % key, facecolor='w', edgecolor='w',
                        orientation='portrait', papertype=None, format=None,
                        transparent=False, bbox_inches="tight", pad_inches=0.1)
        else:
            plt.show()
        plt.close(fig)
Example 25
Project: pysb Source File: anneal_sundials.py
def tenninetycomp(outlistnorm, arglist, xpsamples=1.0):
    """ Determine Td and Ts. Td calculated at time when signal goes up to 10%.
    Ts calculated as signal(90%) - signal(10%). Then a chi-square is calculated.
    outlistnorm: the outlist from anneal_odesolve
    arglist: simaxis, Tdxp, varTdxp, Tsxp, varTsxp
    xpsamples
    """
    xarr = outlistnorm[0]  # this assumes the first column of the array is time
    yarr = outlistnorm[arglist[0]]  # the argument passed should be the axis
    Tdxp = arglist[1]
    varTdxp = arglist[2]
    Tsxp = arglist[3]
    varTsxp = arglist[4]
    # make a B-spline representation of the xarr and yarr
    tck = scipy.interpolate.splrep(xarr, yarr)
    t, c, k = tck
    tenpt = numpy.max(yarr) * .1  # the ten percent point in y-axis
    ntypt = numpy.max(yarr) * .9  # the 90 percent point in y-axis
    # lower the spline at the abscissa
    xten = scipy.interpolate.sproot((t, c-tenpt, k))[0]
    xnty = scipy.interpolate.sproot((t, c-ntypt, k))[0]
    # now compare w the input data, Td, and Ts
    Tdsim = xten  # the Td is the point where the signal crosses 10%; should be the midpoint???
    Tssim = xnty - xten
    # calculate chi-sq as
    #
    #              1                            1
    #   obj = ----------(Tdsim - Tdxp)^2 + ----------(Tssim - Tsxp)^2
    #         2*var_Tdxp                   2*var_Tsxp
    #
    obj = ((1./varTdxp) * (Tdsim - Tdxp)**2.) + ((1./varTsxp) * (Tssim - Tsxp)**2.)
    #obj *= xpsamples
    print("OBJOUT-10-90:(%g,%g):%g" % (Tdsim, Tssim, obj))
    return obj
Example 26
Project: attention-lvcsr Source File: test_pool.py
@staticmethod
def numpy_max_pool_2d_stride(input, ds, ignore_border=False, st=None,
                             mode='max'):
    '''Helper function, implementing pool_2d in pure numpy.
    This function provides the st input to indicate the stride size
    for the pooling regions. If not indicated, st == ds.'''
    if len(input.shape) < 2:
        raise NotImplementedError('input should have at least 2 dim,'
                                  ' shape is %s'
                                  % str(input.shape))
    if st is None:
        st = ds
    img_rows = input.shape[-2]
    img_cols = input.shape[-1]
    out_r = 0
    out_c = 0
    if img_rows - ds[0] >= 0:
        out_r = (img_rows - ds[0]) // st[0] + 1
    if img_cols - ds[1] >= 0:
        out_c = (img_cols - ds[1]) // st[1] + 1
    if not ignore_border:
        if out_r > 0:
            if img_rows - ((out_r - 1) * st[0] + ds[0]) > 0:
                rr = img_rows - out_r * st[0]
                if rr > 0:
                    out_r += 1
        else:
            if img_rows > 0:
                out_r += 1
        if out_c > 0:
            if img_cols - ((out_c - 1) * st[1] + ds[1]) > 0:
                cr = img_cols - out_c * st[1]
                if cr > 0:
                    out_c += 1
        else:
            if img_cols > 0:
                out_c += 1
    out_shp = list(input.shape[:-2])
    out_shp.append(out_r)
    out_shp.append(out_c)
    func = numpy.max
    if mode == 'sum':
        func = numpy.sum
    elif mode != 'max':
        func = numpy.average
    output_val = numpy.zeros(out_shp)
    for k in numpy.ndindex(*input.shape[:-2]):
        for i in range(output_val.shape[-2]):
            ii_st = i * st[0]
            ii_end = builtins.min(ii_st + ds[0], img_rows)
            for j in range(output_val.shape[-1]):
                jj_st = j * st[1]
                jj_end = builtins.min(jj_st + ds[1], img_cols)
                patch = input[k][ii_st:ii_end, jj_st:jj_end]
                output_val[k][i, j] = func(patch)
    return output_val
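A quick hedged check of the helper above (assuming its body is available as a plain function, i.e. pulled out of its test class; the input values are made up):

import numpy

inp = numpy.arange(16).reshape(4, 4)
# 2x2 windows with stride 2: each patch contributes its maximum
out = numpy_max_pool_2d_stride(inp, ds=(2, 2), st=(2, 2))
print(out)
# [[ 5.  7.]
#  [13. 15.]]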
Example 27
def test_rings():
    center = (100., 100.)
    img_dim = (200, 205)
    first_q = 10.
    delta_q = 5.
    num_rings = 7  # number of Q rings
    one_step_q = 5.0
    step_q = [2.5, 3.0, 5.8]
    # test when there is same spacing between rings
    edges = roi.ring_edges(first_q, width=delta_q, spacing=one_step_q,
                           num_rings=num_rings)
    print("edges there is same spacing between rings ", edges)
    label_array = roi.rings(edges, center, img_dim)
    print("label_array there is same spacing between rings", label_array)
    label_mask, pixel_list = roi.extract_label_indices(label_array)
    # number of pixels per ROI
    num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
    num_pixels = num_pixels[1:]
    # test when there is same spacing between rings
    edges = roi.ring_edges(first_q, width=delta_q, spacing=2.5,
                           num_rings=num_rings)
    print("edges there is same spacing between rings ", edges)
    label_array = roi.rings(edges, center, img_dim)
    print("label_array there is same spacing between rings", label_array)
    label_mask, pixel_list = roi.extract_label_indices(label_array)
    # number of pixels per ROI
    num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
    num_pixels = num_pixels[1:]
    # test when there is different spacing between rings
    edges = roi.ring_edges(first_q, width=delta_q, spacing=step_q,
                           num_rings=4)
    print("edges when there is different spacing between rings", edges)
    label_array = roi.rings(edges, center, img_dim)
    print("label_array there is different spacing between rings", label_array)
    label_mask, pixel_list = roi.extract_label_indices(label_array)
    # number of pixels per ROI
    num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
    num_pixels = num_pixels[1:]
    # test when there is no spacing between rings
    edges = roi.ring_edges(first_q, width=delta_q, num_rings=num_rings)
    print("edges", edges)
    label_array = roi.rings(edges, center, img_dim)
    print("label_array", label_array)
    label_mask, pixel_list = roi.extract_label_indices(label_array)
    # number of pixels per ROI
    num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
    num_pixels = num_pixels[1:]
    # Did we draw the right number of rings?
    print(np.unique(label_array))
    actual_num_rings = len(np.unique(label_array)) - 1
    assert_equal(actual_num_rings, num_rings)
    # Does each ring have more pixels than the last, being larger?
    ring_areas = np.bincount(label_array.ravel())[1:]
    area_comparison = np.diff(ring_areas)
    print(area_comparison)
    areas_monotonically_increasing = np.all(area_comparison > 0)
    assert_true(areas_monotonically_increasing)
    # Test various illegal inputs
    assert_raises(ValueError,
                  lambda: roi.ring_edges(1, 2))  # need num_rings
    # width incompatible with num_rings
    assert_raises(ValueError,
                  lambda: roi.ring_edges(1, [1, 2, 3], num_rings=2))
    # too few spacings
    assert_raises(ValueError,
                  lambda: roi.ring_edges(1, [1, 2, 3], [1]))
    # too many spacings
    assert_raises(ValueError,
                  lambda: roi.ring_edges(1, [1, 2, 3], [1, 2, 3]))
    # num_rings conflicts with width, spacing
    assert_raises(ValueError,
                  lambda: roi.ring_edges(1, [1, 2, 3], [1, 2], 5))
    w_edges = [[5, 7], [1, 2]]
    assert_raises(ValueError, roi.rings, w_edges, center=(4, 4),
                  shape=(20, 20))
Example 28
Project: qutip Source File: utilities.py
def clebsch(j1, j2, j3, m1, m2, m3):
    """Calculates the Clebsch-Gordan coefficient
    for coupling (j1,m1) and (j2,m2) to give (j3,m3).
    Parameters
    ----------
    j1 : float
        Total angular momentum 1.
    j2 : float
        Total angular momentum 2.
    j3 : float
        Total angular momentum 3.
    m1 : float
        z-component of angular momentum 1.
    m2 : float
        z-component of angular momentum 2.
    m3 : float
        z-component of angular momentum 3.
    Returns
    -------
    cg_coeff : float
        Requested Clebsch-Gordan coefficient.
    """
    if m3 != m1 + m2:
        return 0
    vmin = int(np.max([-j1 + j2 + m3, -j1 + m1, 0]))
    vmax = int(np.min([j2 + j3 + m1, j3 - j1 + j2, j3 + m3]))
    C = np.sqrt((2.0 * j3 + 1.0) * factorial(j3 + j1 - j2) *
                factorial(j3 - j1 + j2) * factorial(j1 + j2 - j3) *
                factorial(j3 + m3) * factorial(j3 - m3) /
                (factorial(j1 + j2 + j3 + 1) *
                 factorial(j1 - m1) * factorial(j1 + m1) *
                 factorial(j2 - m2) * factorial(j2 + m2)))
    S = 0
    for v in range(vmin, vmax + 1):
        S += (-1.0) ** (v + j2 + m2) / factorial(v) * \
             factorial(j2 + j3 + m1 - v) * factorial(j1 - m1 + v) / \
             factorial(j3 - j1 + j2 - v) / factorial(j3 + m3 - v) / \
             factorial(v + j1 - j2 - m3)
    C = C * S
    return C
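As a sanity check, a hedged usage sketch (import path assumed from the source file shown above): coupling two spin-1/2 particles reproduces the textbook coefficients.

from qutip.utilities import clebsch  # assumed import path

# stretched state |1, 1> from |1/2, 1/2>|1/2, 1/2>: coefficient is exactly 1
print(clebsch(0.5, 0.5, 1.0, 0.5, 0.5, 1.0))   # 1.0
# |1, 0> mixes the two m1 + m2 = 0 products with weight 1/sqrt(2)
print(clebsch(0.5, 0.5, 1.0, 0.5, -0.5, 0.0))  # ~0.7071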
Example 29
def paintEvent(self, evt):
    # get the widget dimensions
    orig_width = self.width()
    orig_height = self.height()
    # fill perc % of the widget
    perc = 1
    width = int(orig_width * perc)
    height = int(orig_height * perc)
    # get the starting origin
    x_orig = int((orig_width - width) / 2)
    # we want to start at the bottom and draw up.
    y_orig = orig_height - int((orig_height - height) / 2)
    # a running x-position
    running_pos = x_orig
    # calculate the number of bars
    nbars = len(self.counts)
    # calculate the bar widths; this complication is
    # necessary because integer truncation severely cripples
    # the layout.
    remainder = width % nbars
    bar_width = [int(width / nbars)] * nbars
    for i in range(remainder):
        bar_width[i] += 1
    paint = QPainter()
    paint.begin(self)
    # determine the scaling factor
    max_val = np.max(self.counts)
    scale = 1. * height / max_val
    # determine if we have a colormap and drop into the appropriate
    # loop.
    if hasattr(self.colormap[0], '__iter__'):
        # assume we have a colormap
        for i in range(len(self.counts)):
            bar_height = self.counts[i]
            r, g, b = self.colormap[i]
            paint.setPen(QColor(r, g, b))
            paint.setBrush(QColor(r, g, b))
            paint.drawRect(running_pos, y_orig, bar_width[i],
                           -bar_height)
            running_pos += bar_width[i]
    else:
        # we have a tuple
        r, g, b = self.colormap
        paint.setPen(QColor(r, g, b))
        paint.setBrush(QColor(r, g, b))
        for i in range(len(self.counts)):
            bar_height = self.counts[i] * scale
            paint.drawRect(running_pos, y_orig, bar_width[i],
                           -bar_height)
            running_pos += bar_width[i]
    paint.end()
Example 30
Project: meshtool Source File: make_atlases.py
def makeAtlases(mesh):
    # get a mapping from path to actual image, since theoretically you could have
    # the same image file in multiple image nodes
    unique_images = {}
    image_scales = {}
    for cimg in mesh.images:
        path = cimg.path
        if path not in unique_images:
            unique_images[path] = cimg.pilimage
            image_scales[path] = (1, 1)
    # get a mapping from texture coordinates to all of the images they get bound to
    tex2img = getTexcoordToImgMapping(mesh)
    # don't consider any texcoords that are used more than once with different images.
    # could probably have an algorithm that takes this into account, but it would
    # require some complex groupings. any images referenced have to also not be
    # considered for atlasing, as well as any tex coords that reference them.
    # also filter out any images that are >= 1024 in either dimension
    texs_to_delete = []
    imgs_to_delete = []
    for texset, imgpaths in tex2img.iteritems():
        valid_range = False
        if len(imgpaths) == 1:
            texarray = mesh.geometries[texset.geom_id] \
                           .primitives[texset.prim_index] \
                           .texcoordset[texset.texcoordset_index]
            width, height = unique_images[imgpaths[0]].size
            tile_x = int(numpy.ceil(numpy.max(texarray[:, 0])))
            tile_y = int(numpy.ceil(numpy.max(texarray[:, 1])))
            stretched_width = tile_x * width
            stretched_height = tile_y * height
            # allow tiling of texcoords if the final tiled image is <= MAX_TILING_DIMENSION
            if numpy.min(texarray) < 0.0:
                valid_range = False
            elif stretched_width > MAX_TILING_DIMENSION or stretched_height > MAX_TILING_DIMENSION:
                valid_range = False
            else:
                valid_range = True
            if valid_range:
                scale_x, scale_y = image_scales[imgpaths[0]]
                scale_x = max(scale_x, tile_x)
                scale_y = max(scale_y, tile_y)
                image_scales[imgpaths[0]] = (scale_x, scale_y)
        if len(imgpaths) > 1 or not valid_range:
            for imgpath in imgpaths:
                if imgpath not in imgs_to_delete:
                    imgs_to_delete.append(imgpath)
            texs_to_delete.append(texset)
    for imgpath, pilimg in unique_images.iteritems():
        if max(pilimg.size) > MAX_IMAGE_DIMENSION and imgpath not in imgs_to_delete:
            imgs_to_delete.append(imgpath)
    for imgpath in imgs_to_delete:
        for texset, imgpaths in tex2img.iteritems():
            if imgpaths[0] == imgpath and texset not in texs_to_delete:
                texs_to_delete.append(texset)
        del unique_images[imgpath]
    for texset in texs_to_delete:
        del tex2img[texset]
    # now make a mapping between images and the tex coords that have to be
    # updated if it is atlased
    img2texs = {}
    for imgpath in unique_images:
        img2texs[imgpath] = []
        for texset, imgpaths in tex2img.iteritems():
            if imgpaths[0] == imgpath:
                img2texs[imgpath].append(texset)
    for path, pilimg in unique_images.iteritems():
        tile_x, tile_y = image_scales[path]
        width, height = pilimg.size
        if tile_x > 1 or tile_y > 1:
            if 'A' in pilimg.getbands():
                imgformat = 'RGBA'
                initval = (0, 0, 0, 255)
            else:
                imgformat = 'RGB'
                initval = (0, 0, 0)
            tiled_img = Image.new(imgformat, (width*tile_x, height*tile_y), initval)
            for x in range(tile_x):
                for y in range(tile_y):
                    tiled_img.paste(pilimg, (x*width, y*height))
            pilimg = tiled_img
            width, height = pilimg.size
        # round down to power of 2
        width = int(math.pow(2, int(math.log(width, 2))))
        height = int(math.pow(2, int(math.log(height, 2))))
        if (width, height) != pilimg.size:
            pilimg = pilimg.resize((width, height), Image.ANTIALIAS)
        unique_images[path] = pilimg
    group1, group2 = splitAlphas(unique_images)
    to_del = combinePacks(packImages(mesh, img2texs, group1, image_scales),
                          packImages(mesh, img2texs, group2, image_scales))
    if to_del is not None:
        for geom, primindices in to_del.iteritems():
            for i in sorted(primindices, reverse=True):
                del geom.primitives[i]
Example 31
@classmethod
def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
    """Create a HBInfo instance from an existing sparse matrix.
    Parameters
    ----------
    m : sparse matrix
        the HBInfo instance will derive its parameters from m
    title : str
        Title to put in the HB header
    key : str
        Key
    mxtype : HBMatrixType
        type of the input matrix
    fmt : dict
        not implemented
    Returns
    -------
    hb_info : HBInfo instance
    """
    pointer = m.indptr
    indices = m.indices
    values = m.data
    nrows, ncols = m.shape
    nnon_zeros = m.nnz
    if fmt is None:
        # +1 because HB use one-based indexing (Fortran), and we will write
        # the indices/pointer as such
        pointer_fmt = IntFormat.from_number(np.max(pointer+1))
        indices_fmt = IntFormat.from_number(np.max(indices+1))
        if values.dtype.kind in np.typecodes["AllFloat"]:
            values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
        elif values.dtype.kind in np.typecodes["AllInteger"]:
            values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
        else:
            raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
    else:
        raise NotImplementedError("fmt argument not supported yet.")
    if mxtype is None:
        if not np.isrealobj(values):
            raise ValueError("Complex values not supported yet")
        if values.dtype.kind in np.typecodes["AllInteger"]:
            tp = "integer"
        elif values.dtype.kind in np.typecodes["AllFloat"]:
            tp = "real"
        else:
            raise NotImplementedError("type %s for values not implemented"
                                      % values.dtype)
        mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
    else:
        raise ValueError("mxtype argument not handled yet.")

    def _nlines(fmt, size):
        nlines = size // fmt.repeat
        if nlines * fmt.repeat != size:
            nlines += 1
        return nlines

    pointer_nlines = _nlines(pointer_fmt, pointer.size)
    indices_nlines = _nlines(indices_fmt, indices.size)
    values_nlines = _nlines(values_fmt, values.size)
    total_nlines = pointer_nlines + indices_nlines + values_nlines
    return cls(title, key,
               total_nlines, pointer_nlines, indices_nlines, values_nlines,
               mxtype, nrows, ncols, nnon_zeros,
               pointer_fmt.fortran_format, indices_fmt.fortran_format,
               values_fmt.fortran_format)
Example 32
def _fit(self, X, y, init_params=True):
self._check_target_array(y)
if init_params:
self._decr_eta = self.eta
if self.n_classes is None:
self.n_classes = np.max(y) + 1
self._n_features = X.shape[1]
self._weight_maps, self._bias_maps = self._layermapping(
n_features=self._n_features,
n_classes=self.n_classes,
hidden_layers=self.hidden_layers)
self.w_, self.b_ = self._init_params_from_layermapping(
weight_maps=self._weight_maps,
bias_maps=self._bias_maps,
random_seed=self.random_seed)
self.cost_ = []
if self.momentum != 0.0:
prev_grad_b_1 = np.zeros(shape=self.b_['1'].shape)
prev_grad_w_1 = np.zeros(shape=self.w_['1'].shape)
prev_grad_b_out = np.zeros(shape=self.b_['out'].shape)
prev_grad_w_out = np.zeros(shape=self.w_['out'].shape)
y_enc = self._one_hot(y=y, n_labels=self.n_classes, dtype=np.float)
self.init_time_ = time()
rgen = np.random.RandomState(self.random_seed)
for i in range(self.epochs):
for idx in self._yield_minibatches_idx(
rgen=rgen,
n_batches=self.minibatches,
data_ary=y,
shuffle=True):
net_1, act_1, net_out, act_out = self._feedforward(X[idx])
# GRADIENTS VIA BACKPROPAGATION
# [n_samples, n_classlabels]
sigma_out = act_out - y_enc[idx]
# [n_samples, n_hidden]
sigmoid_derivative_1 = act_1 * (1.0 - act_1)
# [n_samples, n_classlabels] dot [n_classlabels, n_hidden]
# -> [n_samples, n_hidden]
sigma_1 = (np.dot(sigma_out, self.w_['out'].T) *
sigmoid_derivative_1)
# [n_features, n_samples] dot [n_samples, n_hidden]
# -> [n_features, n_hidden]
grad_W_1 = np.dot(X[idx].T, sigma_1)
grad_B_1 = np.sum(sigma_1, axis=0)
# [n_hidden, n_samples] dot [n_samples, n_classlabels]
# -> [n_hidden, n_classlabels]
grad_W_out = np.dot(act_1.T, sigma_out)
grad_B_out = np.sum(sigma_out, axis=0)
# LEARNING RATE ADJUSTMENTS
self._decr_eta /= (1.0 + self.decrease_const * i)
# REGULARIZATION AND WEIGHT UPDATES
dW_1 = (self._decr_eta * grad_W_1 +
self._decr_eta * self.l2 * self.w_['1'])
dW_out = (self._decr_eta * grad_W_out +
self._decr_eta * self.l2 * self.w_['out'])
dB_1 = self._decr_eta * grad_B_1
dB_out = self._decr_eta * grad_B_out
self.w_['1'] -= dW_1
self.b_['1'] -= dB_1
self.w_['out'] -= dW_out
self.b_['out'] -= dB_out
if self.momentum != 0.0:
self.w_['1'] -= self.momentum * prev_grad_w_1
self.b_['1'] -= self.momentum * prev_grad_b_1
self.w_['out'] -= self.momentum * prev_grad_w_out
self.b_['out'] -= self.momentum * prev_grad_b_out
prev_grad_b_1 = grad_B_1
prev_grad_w_1 = grad_W_1
prev_grad_b_out = grad_B_out
prev_grad_w_out = grad_W_out
net_1, act_1, net_out, act_out = self._feedforward(X)
cross_ent = self._cross_entropy(output=act_out, y_target=y_enc)
cost = self._compute_cost(cross_ent)
self.cost_.append(cost)
if self.print_progress:
self._print_progress(iteration=i + 1,
n_iter=self.epochs,
cost=cost)
return self
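A minimal sketch of the label-handling idiom above, assuming integer class labels starting at 0: np.max(y) + 1 infers the number of classes, which then sizes the one-hot encoding.

import numpy as np

y = np.array([0, 2, 1, 2, 0])
n_classes = np.max(y) + 1           # labels are assumed to run 0..K-1
one_hot = np.zeros((y.size, n_classes))
one_hot[np.arange(y.size), y] = 1.0
print(one_hot.shape)                # (5, 3)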
0
Example 33
Project: crosscat Source File: convergence_test_utils.py
def ARI_CrossCat(Xc, Xrv, XRc, XRrv):
''' Adjusted Rand Index (ARI) calculation for a CrossCat clustered table
To calculate ARI based on the CrossCat partition, each cell in the
table is considered as an instance to be assigned to a cluster. A cluster
is defined by both the view index AND the category index. In other words,
if, and only if, two cells, regardless of which columns and rows they belong
to, are lumped into the same view and category, the two cells are considered
to be in the same cluster.
For a table of size Nrow x Ncol
Xc: (1 x Ncol) array of view assignment for each column.
Note: It is assumed that the view indices are consecutive integers
starting from 0. Hence, the number of views is equal to highest
view index plus 1.
Xrv: (Nrow x Nview) array where each row is the assignment of categories for the
corresponding row in the data table. The i-th element in a row
corresponds to the category assignment of the i-th view of that row.
XRc and XRrv have the same format as Xc and Xrv respectively.
The ARI index is calculated from the comparison of the table clustering
defined by (XRc, XRrv) and (Xc, Xrv).
'''
Xrv = Xrv.T
XRrv = XRrv.T
# Find the highest category index of all views
max_cat_index = numpy.max(Xrv)
# re-assign category indices so that they have different values in
# different views
Xrv = Xrv + numpy.arange(0,Xrv.shape[1])*(max_cat_index+1)
# similarly for the reference partition
max_cat_index = numpy.max(XRrv)
XRrv = XRrv + numpy.arange(0,XRrv.shape[1])*(max_cat_index+1)
# Table clustering assignment for the first partition
CellClusterAssgn = numpy.zeros((Xrv.shape[0], Xc.size))
for icol in range(Xc.size):
CellClusterAssgn[:,icol]=Xrv[:,Xc[icol]]
# Flatten the table to a 1-D array compatible with the ARI function
CellClusterAssgn = CellClusterAssgn.reshape(CellClusterAssgn.size)
# Table clustering assignment for the second partition
RefCellClusterAssgn = numpy.zeros((Xrv.shape[0], Xc.size))
for icol in range(Xc.size):
RefCellClusterAssgn[:,icol]=XRrv[:,XRc[icol]]
# Flatten the table
RefCellClusterAssgn = RefCellClusterAssgn.reshape(RefCellClusterAssgn.size)
# Compare the two partitions using ARI
ARI = calc_ari(RefCellClusterAssgn, CellClusterAssgn)
ARI_viewonly = calc_ari(Xc, XRc)
return ARI, ARI_viewonly
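A minimal sketch of the re-indexing trick above, assuming only NumPy: adding view_index * (np.max(labels) + 1) to each column makes category indices unique across views, so equal integers in different views no longer collide.

import numpy as np

Xrv = np.array([[0, 1],
                [1, 0],
                [0, 0]])                 # rows x views
offset = np.max(Xrv) + 1                 # 2, larger than any category index
unique = Xrv + np.arange(Xrv.shape[1]) * offset
print(unique)                            # [[0 3] [1 2] [0 2]]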
0
Example 34
def tps_rpm_bij(x_nd, y_md, n_iter=20, reg_init=.1, reg_final=.001, rad_init=.1, rad_final=.005,
rot_reg = 1e-3, plotting = False, plot_cb = None, x_weights = None, y_weights = None,
outlierprior = .1, outlierfrac = 2e-1, vis_cost_xy = None, return_corr=False):
"""
TPS-RPM algorithm, mostly as described by Chui and Rangarajan
reg_init/reg_final: regularization on curvature
rad_init/rad_final: radius for correspondence calculation (meters)
plotting: 0 means don't plot. integer n means plot every n iterations
interest_pts are points in either scene where we want a lower prior of outliers
x_weights: rescales the weights of the forward tps fitting of the last iteration
y_weights: same as x_weights, but for the backward tps fitting
"""
_,d=x_nd.shape
regs = loglinspace(reg_init, reg_final, n_iter)
rads = loglinspace(rad_init, rad_final, n_iter)
f = ThinPlateSpline(d)
scale = (np.max(y_md,axis=0) - np.min(y_md,axis=0)) / (np.max(x_nd,axis=0) - np.min(x_nd,axis=0))
f.lin_ag = np.diag(scale) # align the mins and max
f.trans_g = np.median(y_md,axis=0) - np.median(x_nd,axis=0) * scale # align the medians
# do a coarse search through rotations
# fit_rotation(f, x_nd, y_md)
g = ThinPlateSpline(d)
g.lin_ag = np.diag(1./scale)
g.trans_g = -np.diag(1./scale).dot(f.trans_g)
# set up outlier priors for source and target scenes
n, _ = x_nd.shape
m, _ = y_md.shape
x_priors = np.ones(n)*outlierprior
y_priors = np.ones(m)*outlierprior
# r_N = None
for i in range(n_iter):
xwarped_nd = f.transform_points(x_nd)
ywarped_md = g.transform_points(y_md)
fwddist_nm = ssd.cdist(xwarped_nd, y_md,'euclidean')
invdist_nm = ssd.cdist(x_nd, ywarped_md,'euclidean')
r = rads[i]
prob_nm = np.exp( -(fwddist_nm + invdist_nm) / (2*r) )
if vis_cost_xy is not None:
pi = np.exp( -vis_cost_xy )
pi /= pi.max() # rescale the maximum probability to be 1. effectively, the outlier priors are multiplied by a
# visual prior of 1 (since the outlier points have a visual prior of 1 with any point)
prob_nm *= pi
corr_nm, r_N, _ = balance_matrix3(prob_nm, 10, x_priors, y_priors, outlierfrac) # edit final value to change outlier percentage
corr_nm += 1e-9
wt_n = corr_nm.sum(axis=1)
wt_m = corr_nm.sum(axis=0)
xtarg_nd = (corr_nm/wt_n[:,None]).dot(y_md)
ytarg_md = (corr_nm/wt_m[None,:]).T.dot(x_nd)
if plotting and i%plotting==0:
plot_cb(x_nd, y_md, xtarg_nd, corr_nm, wt_n, f)
if i == (n_iter-1):
if x_weights is not None:
wt_n=wt_n*x_weights
if y_weights is not None:
wt_m=wt_m*y_weights
f = fit_ThinPlateSpline(x_nd, xtarg_nd, bend_coef = regs[i], wt_n=wt_n, rot_coef = rot_reg)
g = fit_ThinPlateSpline(y_md, ytarg_md, bend_coef = regs[i], wt_n=wt_m, rot_coef = rot_reg)
f._cost = tps_cost(f.lin_ag, f.trans_g, f.w_ng, f.x_na, xtarg_nd, regs[i], wt_n=wt_n)/wt_n.mean()
g._cost = tps_cost(g.lin_ag, g.trans_g, g.w_ng, g.x_na, ytarg_md, regs[i], wt_n=wt_m)/wt_m.mean()
if return_corr:
return (f, g), corr_nm
return f,g
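A minimal sketch of the initialization used above: per-axis extents from np.max(..., axis=0) and np.min(..., axis=0) give a diagonal scaling that maps the source bounding box onto the target one (the toy point clouds here are made up).

import numpy as np

x_nd = np.random.rand(100, 3)              # source points
y_md = np.random.rand(80, 3) * 2.0 + 1.0   # target points, scaled and shifted
scale = ((np.max(y_md, axis=0) - np.min(y_md, axis=0)) /
         (np.max(x_nd, axis=0) - np.min(x_nd, axis=0)))
trans = np.median(y_md, axis=0) - np.median(x_nd, axis=0) * scale
x_aligned = x_nd.dot(np.diag(scale)) + trans   # roughly overlays the target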
0
Example 35
Project: scot Source File: ooapi.py
def get_surrogate_connectivity(self, measure_name, repeats=100, plot=False, random_state=None):
""" Calculate spectral connectivity measure under the assumption of no actual connectivity.
Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity
distribution if there was no causal structure in the data.
Parameters
----------
measure_name : str
Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures.
repeats : int, optional
How many surrogate samples to take.
Returns
-------
measure : array, shape = [`repeats`, n_channels, n_channels, nfft]
Values of the connectivity measure for each surrogate.
See Also
--------
:func:`scot.connectivity_statistics.surrogate_connectivity` : Calculates surrogate connectivity
"""
cs = surrogate_connectivity(measure_name, self.activations_[:, :, self.trial_mask_],
self.var_, self.nfft_, repeats, random_state=random_state)
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sb = self.get_surrogate_connectivity('absS', repeats)
sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway
su = np.percentile(sb, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
cu = np.percentile(cs, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cs, fig
return cs
0
Example 36
Project: pyspeckit Source File: mapplot.py
def makeplane(self, estimator=np.nanmean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
a .fits filename will be read using pyfits (so you can make your own cover figure)
an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
# estimator is NOT duck-typed
if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif isinstance(estimator, six.string_types):
if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
0
Example 37
def tps_rpm_bij(x_nd, y_md, f_solver_factory=None, g_solver_factory=None,
n_iter=settings.N_ITER, em_iter=settings.EM_ITER,
reg_init=settings.REG[0], reg_final=settings.REG[1],
rad_init=settings.RAD[0], rad_final=settings.RAD[1],
rot_reg=settings.ROT_REG,
outlierprior=settings.OUTLIER_PRIOR, outlierfrac=settings.OURLIER_FRAC,
prior_prob_nm=None, callback=None):
_, d = x_nd.shape
regs = loglinspace(reg_init, reg_final, n_iter)
rads = loglinspace(rad_init, rad_final, n_iter)
f = ThinPlateSpline(d)
scale = (np.max(y_md,axis=0) - np.min(y_md,axis=0)) / (np.max(x_nd,axis=0) - np.min(x_nd,axis=0))
f.lin_ag = np.diag(scale) # align the mins and max
f.trans_g = np.median(y_md,axis=0) - np.median(x_nd,axis=0) * scale # align the medians
g = ThinPlateSpline(d)
g.lin_ag = np.diag(1./scale)
g.trans_g = -np.diag(1./scale).dot(f.trans_g)
# set up outlier priors for source and target scenes
n, _ = x_nd.shape
m, _ = y_md.shape
x_priors = np.ones(n)*outlierprior
y_priors = np.ones(m)*outlierprior
# set up custom solver if solver factory is specified
if f_solver_factory is None:
fsolve = None
else:
fsolve = f_solver_factory.get_solver(x_nd, rot_reg)
if g_solver_factory is None:
gsolve = None
else:
gsolve = g_solver_factory.get_solver(y_md, rot_reg)
for i, (reg, rad) in enumerate(zip(regs, rads)):
for i_em in range(em_iter):
xwarped_nd = f.transform_points(x_nd)
ywarped_md = g.transform_points(y_md)
fwddist_nm = ssd.cdist(xwarped_nd, y_md, 'sqeuclidean')
invdist_nm = ssd.cdist(x_nd, ywarped_md, 'sqeuclidean')
prob_nm = np.exp( -((1/n) * fwddist_nm + (1/m) * invdist_nm) / (2*rad * (1/n + 1/m)) )
if prior_prob_nm is not None:
prob_nm *= prior_prob_nm
corr_nm, _, _ = balance_matrix3(prob_nm, 10, x_priors, y_priors, outlierfrac) # edit final value to change outlier percentage
xtarg_nd, wt_n = prepare_fit_ThinPlateSpline(x_nd, y_md, corr_nm)
ytarg_md, wt_m = prepare_fit_ThinPlateSpline(x_nd, y_md, corr_nm, fwd=False)
if fsolve is None:
f = ThinPlateSpline.create_from_optimization(x_nd, xtarg_nd, reg, rot_reg, wt_n)
else:
fsolve.solve(wt_n, xtarg_nd, reg, f)
if gsolve is None:
g = ThinPlateSpline.create_from_optimization(y_md, ytarg_md, reg, rot_reg, wt_m)
else:
gsolve.solve(wt_m, ytarg_md, reg, g)
if callback:
callback(i, i_em, x_nd, y_md, xtarg_nd, corr_nm, wt_n, f, g, corr_nm, rad)
return f, g, corr_nm
0
Example 38
Project: Py6S Source File: radiosonde.py
@classmethod
def _import_from_arrays(cls, pressure, altitude, temperature, mixing_ratio, base_profile):
"""Import radiosonde data from a set of arrays containing various radiosonde parameters.
This routine deals with all of the interpolation and combining required for use in 6S.
The arguments must be:
* `pressure` in hPa or millibars
* `altitude` in km
* `temperature` in celsius
* `mixing_ratio` in g/kg
This returns an atmospheric profile suitable for storing in s.atmos_profile.
"""
# Interpolate to 6S levels
max_alt = np.max(altitude)
interp_altitudes = cls.sixs_altitudes[cls.sixs_altitudes < max_alt]
f_interp_pressure = interp1d(altitude, pressure, bounds_error=False, fill_value=pressure[0])
f_interp_temp = interp1d(altitude, temperature, bounds_error=False, fill_value=temperature[0])
f_interp_mixrat = interp1d(altitude, mixing_ratio, bounds_error=False, fill_value=mixing_ratio[0])
int_pres = f_interp_pressure(interp_altitudes)
int_temp = f_interp_temp(interp_altitudes)
int_mixrat = f_interp_mixrat(interp_altitudes)
base_profile_index = base_profile - 1
# Convert units (temperature from C -> K and mixing ratio to density)
int_temp = cls._celsius_to_kelvin(int_temp)
int_water = cls._mixing_ratio_to_density(int_pres, int_temp, int_mixrat)
# Get the rest of the profile from the base profiles
rest_of_pres = cls.pressure_profiles[base_profile_index]
rest_of_pres = rest_of_pres[cls.sixs_altitudes >= max_alt]
rest_of_temp = cls.temp_profiles[base_profile_index]
rest_of_temp = rest_of_temp[cls.sixs_altitudes >= max_alt]
rest_of_water = cls.water_density_profiles[base_profile_index]
rest_of_water = rest_of_water[cls.sixs_altitudes >= max_alt]
final_pressure = np.hstack((int_pres, rest_of_pres))
final_temp = np.hstack((int_temp, rest_of_temp))
final_water = np.hstack((int_water, rest_of_water))
final_ozone = cls.ozone_density_profiles[base_profile_index]
params = {'altitude': cls.sixs_altitudes,
'pressure': final_pressure,
'temperature': final_temp,
'water': final_water,
'ozone': final_ozone}
return AtmosProfile.RadiosondeProfile(params)
0
Example 39
Project: HPOlib Source File: plotTraceWithStd_perTime.py
def fill_trajectories(trace_list, times_list):
""" Each trajectory need to has the exact same number of entries and timestamps"""
# We need to define the max value = what is measured before the first evaluation
max_value = np.max([np.max(ls) for ls in trace_list])
number_exp = len(trace_list)
new_trajectories = list()
new_times = list()
for i in range(number_exp):
new_trajectories.append(list())
new_times.append(list())
# noinspection PyUnusedLocal
counter = [1 for i in range(number_exp)]
finish = False
# We need to insert the max values in the beginning and the min values in the end
for i in range(number_exp):
trace_list[i].insert(0, max_value)
trace_list[i].append(np.min(trace_list[i]))
times_list[i].insert(0, 0)
times_list[i].append(sys.maxsize)
# Add all possible time values
while not finish:
min_idx = np.argmin([times_list[idx][counter[idx]] for idx in range(number_exp)])
counter[min_idx] += 1
for idx in range(number_exp):
new_times[idx].append(times_list[min_idx][counter[min_idx] - 1])
new_trajectories[idx].append(trace_list[idx][counter[idx] - 1])
# Check if we're finished
for i in range(number_exp):
finish = True
if counter[i] < len(trace_list[i]) - 1:
finish = False
break
times = new_times
trajectories = new_trajectories
tmp_times = list()
# Sanitize lists and delete double entries
for i in range(number_exp):
tmp_times = list()
tmp_traj = list()
for t in range(len(times[i]) - 1):
if times[i][t+1] != times[i][t] and not np.isnan(times[i][t]):
tmp_times.append(times[i][t])
tmp_traj.append(trajectories[i][t])
tmp_times.append(times[i][-1])
tmp_traj.append(trajectories[i][-1])
times[i] = tmp_times
trajectories[i] = tmp_traj
# We need only one list for all times
times = tmp_times
return trajectories, times
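A minimal sketch of the first line of the function above: taking np.max of per-list maxima handles ragged trajectories, which a single np.max over the nested list would reject in recent NumPy because the inner lists have different lengths.

import numpy as np

trace_list = [[3.0, 2.5, 2.5], [4.0, 1.0], [2.0, 2.0, 1.5, 1.5]]
max_value = np.max([np.max(ls) for ls in trace_list])  # global maximum
print(max_value)                                       # 4.0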
0
Example 40
def kmeans_init(X, Y, n_labels, n_hidden_states, latent_node_features=False):
all_feats = []
# iterate over samples
for x, y in zip(X, Y):
# first, get neighbor counts from nodes
if len(x) == 3:
features, edges, n_hidden = x
elif len(x) == 4:
# edge features are discarded
features, edges, _, n_hidden = x
else:
raise ValueError("Something is fishy!")
n_visible = features.shape[0]
if latent_node_features:
n_visible -= n_hidden
if np.max(edges) != n_hidden + n_visible - 1:
raise ValueError("Edges don't add up")
labels_one_hot = np.zeros((n_visible, n_labels), dtype=int)
y = y.ravel()
gx = np.ogrid[:n_visible]
labels_one_hot[gx, y] = 1
graph = sparse.coo_matrix((np.ones(edges.shape[0]), edges.T),
(n_visible + n_hidden, n_visible + n_hidden))
graph = (graph + graph.T)[-n_hidden:, :n_visible]
neighbors = graph * labels_one_hot.reshape(n_visible, -1)
# normalize (for borders)
neighbors /= np.maximum(neighbors.sum(axis=1)[:, np.newaxis], 1)
all_feats.append(neighbors)
all_feats_stacked = np.vstack(all_feats)
try:
km = KMeans(n_clusters=n_hidden_states)
except TypeError:
# for old versions :-/
km = KMeans(k=n_hidden_states)
km.fit(all_feats_stacked)
H = []
for y, feats in zip(Y, all_feats):
H.append(np.hstack([y, km.predict(feats) + n_labels]))
return H
0
Example 41
Project: Neural-Net-Bayesian-Optimization Source File: optimizer.py
def select_multiple(self, cap=5):
""" Identify multiple points.
"""
# Rank order by expected improvement
train_Y = self.__dataset[:, -1:]
prediction = self.__pred
hi_ci = self.__hi_ci
sig = abs((hi_ci - prediction)/2)
gamma = (prediction - np.max(train_Y)) / sig
ei = sig*(gamma*stats.norm.cdf(gamma) + stats.norm.pdf(gamma))
if np.max(ei) <= 0:
# If no good points, do pure exploration
sig_order = np.argsort(-sig, axis=0)
select_indices = sig_order[:cap, 0].tolist()
else:
ei_order = np.argsort(-1*ei, axis=0)
select_indices = [ei_order[0, 0]]
for candidate in ei_order[:, 0]:
keep = True
for selected_index in select_indices:
keep = keep*self.check_point(selected_index, candidate)
if keep and ei[candidate, 0] > 0:
select_indices.append(candidate)
if len(select_indices) == cap: # Number of points to select
break
if len(select_indices) < cap:
# If not enough good points, append with exploration
sig_order = np.argsort(-sig, axis=0)
add_indices = sig_order[:(cap-len(select_indices)), 0].tolist()
select_indices.extend(add_indices)
index = np.argmax(ei)
self.__gamma = gamma
self.__ei = ei
return np.atleast_2d(self.__domain[select_indices, :])
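A minimal sketch of the expected-improvement expression used above, assuming scipy.stats and treating np.max(train_Y) as the incumbent best observation (the candidate means and standard deviations below are made up).

import numpy as np
from scipy import stats

train_Y = np.array([[0.2], [0.8], [0.5]])
prediction = np.array([[0.9], [0.6]])   # posterior means at candidates
sig = np.array([[0.1], [0.3]])          # posterior standard deviations
gamma = (prediction - np.max(train_Y)) / sig
ei = sig * (gamma * stats.norm.cdf(gamma) + stats.norm.pdf(gamma))
print(ei.ravel())                       # larger values are more promising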
0
Example 42
Project: PYPOWER Source File: t_is.py
def t_is(got, expected, prec=5, msg=''):
"""Tests if two matrices are identical to some tolerance.
Increments the global test count and if the maximum difference
between corresponding elements of C{got} and C{expected} is less
than 10**(-C{prec}) then it increments the passed tests count,
otherwise increments the failed tests count. Prints 'ok' or 'not ok'
followed by the MSG, unless the global variable t_quiet is true.
Intended to be called between calls to C{t_begin} and C{t_end}.
@author: Ray Zimmerman (PSERC Cornell)
"""
if isinstance(got, int) or isinstance(got, float):
got = array([got], float)
elif isinstance(got, list) or isinstance(got, tuple):
got = array(got, float)
if isinstance(expected, int) or isinstance(expected, float):
expected = array([expected], float)
elif isinstance(expected, list) or isinstance(expected, tuple):
expected = array(expected, float)
if (got.shape == expected.shape) or (expected.shape == (0,)):
got_minus_expected = got - expected
max_diff = max(max(abs(got_minus_expected)))
condition = ( max_diff < 10**(-prec) )
else:
condition = False
max_diff = 0
t_ok(condition, msg)
if (not condition and not TestGlobals.t_quiet):
s = ''
if max_diff != 0:
idx = nonzero(~(abs(got_minus_expected) < 10**(-prec)))  # elementwise negation; `not` fails on arrays
if len(idx) == 1: # 1D array
idx = (idx[0], zeros( len(got_minus_expected) ))
i, j = idx
k = i + (j-1) * expected.shape[0]
got = got.flatten()
expected = expected.flatten()
got_minus_expected = got_minus_expected.flatten()
kk = argmax( abs(got_minus_expected[ k.astype(int) ]) )
s += ' row col got expected got - exp\n'
s += '------- ------ ---------------- ---------------- ----------------'
for u in range(len(i)):
s += '\n%6d %6d %16g %16g %16g' % \
(i[u], j[u], got[k[u]], expected[k[u]], got_minus_expected[k[u]])
if u == kk:
s += ' *'
s += '\nmax diff @ (%d,%d) = %g > allowed tol of %g\n\n' % \
(i[kk], j[kk], max_diff, 10**(-prec))
else:
s += ' dimension mismatch:\n'
if len(got.shape) == 1: # 1D array
s += ' got: %d\n' % got.shape
else:
s += ' got: %d x %d\n' % got.shape
if len(expected.shape) == 1: # 1D array
s += ' expected: %d\n' % expected.shape
else:
s += ' expected: %d x %d\n' % expected.shape
print(s)
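A minimal sketch of the core comparison, with matrices_match as a hypothetical reduction of t_is: np.max over the absolute elementwise difference gives the single worst error, which is then held against the tolerance.

import numpy as np

def matrices_match(got, expected, prec=5):
    got = np.asarray(got, dtype=float)
    expected = np.asarray(expected, dtype=float)
    if got.shape != expected.shape:
        return False
    max_diff = np.max(np.abs(got - expected))   # largest elementwise error
    return max_diff < 10 ** (-prec)

print(matrices_match([[1.0, 2.0]], [[1.0, 2.0 + 1e-7]]))  # True at prec=5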
0
Example 43
Project: RLScore Source File: reader.py
def read_sparse(fname, fdim=None):
"""Reads in a sparse n x m matrix from a file with n rows.
Format is of the type 0:1.5 3:4.2 7:1.1 ...
with each line containing index:value pairs with indices
ranging from 0...n_features-1, and only indices with non-zero values
being present in the file.
Parameters
----------
fname : string
input file name
fdim: int
number of dimensions, if None estimated from data file
Returns
-------
X : sparse matrix (csr)
"""
#each row represents an instance, each column a feature
f = open(fname)
rows = []
columns = []
values = []
linecounter = 0
for line in f:
linecounter += 1
#Empty lines and commented lines are passed over
if len(line.strip()) == 0 or line[0] == '#':
print "Warning: no inputs on line %d" % linecounter
continue
line = line.split("#",1)
attributes = line[0].split()
previous = -1
#Attributes indices must be positive integers in an ascending order,
#and the values must be real numbers.
for att_val in attributes:
if len(att_val.split(":")) != 2:
raise Exception("Error when reading in feature file: feature:value pair %s on line %d is not well-formed\n" % (att_val, linecounter))
index, value = att_val.split(":")
try:
index = int(index)
value = float(value)
if value != 0. and (fdim is None or index < fdim):
columns.append(index)
rows.append(linecounter-1)
values.append(value)
except ValueError:
raise Exception("Error when reading in feature file: feature:value pair %s on line %d is not well-formed\n" % (att_val, linecounter))
if not index > previous:
raise Exception("Error when reading in feature file: line %d features must be in ascending order\n" % (linecounter))
previous = index
#That's all folks
if fdim is None:
X = sparse.coo_matrix((values,(rows,columns)), dtype=float64)
else:
rdim = np.max(rows)+1
X = sparse.coo_matrix((values,(rows,columns)), (rdim, fdim), dtype=float64)
X = X.tocsr()
f.close()
return X
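A minimal sketch of the shape inference at the end: with zero-based row coordinates, np.max(rows) + 1 is the row count needed to build the COO matrix (the toy triplets are made up).

import numpy as np
from scipy import sparse

rows = [0, 0, 2]
columns = [1, 3, 0]
values = [1.5, 4.2, 1.1]
rdim = np.max(rows) + 1      # zero-based indices, hence the +1
X = sparse.coo_matrix((values, (rows, columns)), (rdim, 4)).tocsr()
print(X.shape)               # (3, 4)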
0
Example 44
def plot(self, burn_in=0, names=None, annotate=True, exclude_ranefs=False,
hide_transformed=True, kind='trace', **kwargs):
'''
Plots posterior distributions and sample traces. Basically a wrapper
for pm.traceplot() plus some niceties, based partly on code from:
https://pymc-devs.github.io/pymc3/notebooks/GLM-model-selection.html
Args:
burn_in (int): Number of initial samples to exclude before
summary statistics are computed.
names (list): Optional list of variable names to summarize.
annotate (bool): If True (default), add lines marking the
posterior means, write the posterior means next to the
lines, and add factor level names for fixed factors with
more than one distribution on the traceplot.
exclude_ranefs (bool): If True, do not show trace plots for
individual random effects. Defaults to False.
hide_transformed (bool): If True (default), do not show trace
plots for internally transformed variables.
kind (str): Either 'trace' (default) or 'priors'. If 'priors',
this just internally calls Model.plot()
'''
if kind == 'priors':
return self.model.plot()
# if no 'names' specified, filter out unwanted variables
if names is None:
names = self._filter_names(names, exclude_ranefs, hide_transformed)
# compute means for all variables and factors
if annotate:
kwargs['lines'] = {param: self.trace[param][burn_in:].mean() for param in names}
# factors (i.e., fixed terms with shape > 1) must be handled separately
factors = {}
for fix in self.model.fixed_terms.values():
if 'b_'+fix.name in names and len(fix.levels)>1:
# remove factor from dictionary of lines to plot
kwargs['lines'].pop('b_'+fix.name)
# add factor and its column means to dictionary of factors
factors.update({'b_'+fix.name:
{':'.join(re.findall(r'\[([^]]+)\]', x)):
self.trace['b_'+fix.name][burn_in:].mean(0)[i]
for i,x in enumerate(fix.levels)}})
# make the traceplot
ax = pm.traceplot(self.trace[burn_in:], varnames=names,
figsize=(12,len(names)*1.5), **kwargs)
if annotate:
# add lines and annotation for the factors
for f in factors.keys():
for lev in factors[f]:
# draw line
ax[names.index(f),0].axvline(x=factors[f][lev], color="r", lw=1.5)
# write the mean
ax[names.index(f),0].annotate('{:.2f}'.format(factors[f][lev]),
xy=(factors[f][lev],0), xycoords='data', xytext=(5,10),
textcoords='offset points', rotation=90, va='bottom',
fontsize='large', color='#AA0022')
# write the factor level name
ax[names.index(f),0].annotate(lev,
xy=(factors[f][lev],0), xycoords='data', xytext=(-11,5),
textcoords='offset points', rotation=90, va='bottom',
fontsize='large', color='#AA0022')
# add lines and annotation for the rest of the variables
for v in kwargs['lines'].keys():
ax[names.index(v),0].annotate('{:.2f}'.format(kwargs['lines'][v]),
xy=(kwargs['lines'][v],0), xycoords='data', xytext=(5,10),
textcoords='offset points', rotation=90, va='bottom',
fontsize='large', color='#AA0022')
# For binomial models with n_trials = 1 (most common use case),
# tell user which event is being modeled
if self.model.family.name=='binomial' and np.max(self.model.y.data) < 1.01:
event = next(i for i,x in enumerate(self.model.y.data.flatten()) if x>.99)
warnings.warn('Modeling the probability that {}==\'{}\''.format(
self.model.y.name, str(self.model.data[self.model.y.name][event])))
return ax
0
Example 45
Project: scikit-beam Source File: calibration.py
def estimate_d_blind(name, wavelength, bin_centers, ring_average,
window_size, max_peak_count, thresh):
"""
Estimate the sample-detector distance
Given a radially integrated calibration image return an estimate for
the sample-detector distance. This function does not require a
rough estimate of what d should be.
For the peaks found the detector-sample distance is estimated via
.. math ::
D = \\frac{r}{\\tan 2\\theta}
where :math:`r` is the distance in mm from the calibrated center
to the ring on the detector and :math:`D` is the distance from
the sample to the detector.
Parameters
----------
name : str
The name of the calibration standard. Used to look up the
expected peak location
Valid options: $name_ops
wavelength : float
The wavelength of scattered x-ray in nm
bin_centers : array
The distance from the calibrated center to the center of
the ring's annulus in mm
ring_average : array
The average intensity in the given ring of an azimuthally integrated
powder pattern. In counts [arb]
window_size : int
The number of elements on either side of a local maximum to
use for locating and refining peaks. Candidates are identified
as a relative maximum in a window sized (2*window_size + 1) and
the same window is used for fitting the peaks to refine the location.
max_peak_count : int
Use at most this many peaks
thresh : float
Fraction of maximum peak height
Returns
-------
dist_sample : float
The detector-sample distance in mm. This is the mean of the estimate
from all of the peaks used.
std_dist_sample : float
The standard deviation of d computed from the peaks used.
"""
# get the calibration standard
cal = calibration_standards[name]
# find the local maximums
cands = scipy.signal.argrelmax(ring_average, order=window_size)[0]
# filter local maximums by size
cands = filter_peak_height(ring_average, cands,
thresh*np.max(ring_average), window=window_size)
# TODO insert peak identification validation. This might be better than
# improving the threshold value.
# refine the locations of the peaks
peaks_x, peaks_y = peak_refinement(bin_centers, ring_average, cands,
window_size, refine_log_quadratic)
# compute tan(2theta) for the expected peaks
tan2theta = np.tan(cal.convert_2theta(wavelength))
# figure out how many peaks we can look at
slc = slice(0, np.min([len(tan2theta), len(peaks_x), max_peak_count]))
# estimate the sample-detector distance for each of the peaks
d_array = (peaks_x[slc] / tan2theta[slc])
return np.mean(d_array), np.std(d_array)
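A minimal sketch of the peak-height filter, assuming scipy.signal: candidates from argrelmax are kept only when they exceed a fraction of np.max of the trace, which is presumably what filter_peak_height does with its threshold argument.

import numpy as np
import scipy.signal

ring_average = np.array([0.1, 1.0, 0.2, 0.4, 0.3, 2.0, 0.1])
cands = scipy.signal.argrelmax(ring_average, order=1)[0]  # local maxima
thresh = 0.3
keep = cands[ring_average[cands] > thresh * np.max(ring_average)]
print(keep)   # only peaks taller than 30% of the global maximum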
0
Example 46
def maximize(self, variables, inplace=True):
"""
Maximizes the factor with respect to `variables`.
Parameters
----------
variables: list, array-like
List of variables with respect to which factor is to be maximized
inplace: boolean
If inplace=True it will modify the factor itself, otherwise it returns
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
... 0.00, 0.00, 0.15, 0.21, 0.09, 0.18])
>>> phi.variables
['x1','x2','x3']
>>> phi.maximize(['x2'])
>>> phi.variables
['x1', 'x3']
>>> phi.cardinality
array([3, 2])
>>> phi.values
array([[ 0.25, 0.35],
[ 0.05, 0.07],
[ 0.15, 0.21]])
"""
if isinstance(variables, six.string_types):
raise TypeError("variables: Expected type list or array-like, got type str")
phi = self if inplace else self.copy()
for var in variables:
if var not in phi.variables:
raise ValueError("{var} not in scope.".format(var=var))
var_indexes = [phi.variables.index(var) for var in variables]
index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))
phi.variables = [phi.variables[index] for index in index_to_keep]
phi.cardinality = phi.cardinality[index_to_keep]
phi.values = np.max(phi.values, axis=tuple(var_indexes))
if not inplace:
return phi
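A minimal sketch of the reduction at the heart of maximize: np.max with a tuple of axes collapses exactly the dimensions of the maximized-out variables, leaving the max-marginal over the rest.

import numpy as np

values = np.arange(12).reshape(3, 2, 2)   # cardinalities [3, 2, 2]
reduced = np.max(values, axis=(1,))       # maximize out the middle variable
print(reduced.shape)                      # (3, 2)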
0
Example 47
Project: BioSPPy Source File: eda.py
def basic_scr(signal=None, sampling_rate=1000.):
"""Basic method to extract Skin Conductivity Responses (SCR) from an
EDA signal.
Follows the approach in [Gamb08]_.
Parameters
----------
signal : array
Input filtered EDA signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
Returns
-------
onsets : array
Indices of the SCR onsets.
peaks : array
Indices of the SCR peaks.
amplitudes : array
SCR pulse amplitudes.
References
----------
.. [Gamb08] Hugo Gamboa, "Multi-modal Behavioral Biometrics Based on HCI
and Electrophysiology", PhD thesis, Instituto Superior T{\'e}cnico, 2008
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# find extrema
pi, _ = st.find_extrema(signal=signal, mode='max')
ni, _ = st.find_extrema(signal=signal, mode='min')
# sanity check
if len(pi) == 0 or len(ni) == 0:
raise ValueError("Could not find SCR pulses.")
# pair vectors
if ni[0] < pi[0]:
ni = ni[1:]
if pi[-1] > ni[-1]:
pi = pi[:-1]
if len(pi) > len(ni):
pi = pi[:-1]
li = min(len(pi), len(ni))
i1 = pi[:li]
i3 = ni[:li]
# indices
i0 = i1 - (i3 - i1) / 2.
if i0[0] < 0:
i0[0] = 0
# amplitude
a = np.array([np.max(signal[i1[i]:i3[i]]) for i in range(li)])  # per-pulse peak amplitude
# output
args = (i3, i1, a)
names = ('onsets', 'peaks', 'amplitudes')
return utils.ReturnTuple(args, names)
0
Example 48
Project: RoBO Source File: epmgp.py
def lt_factor(s, l, M, V, mp, p, gamma):
cVc = (V[l, l] - 2 * V[s, l] + V[s, s]) / 2.0
Vc = (V[:, l] - V[:, s]) / sq2
cM = (M[l] - M[s]) / sq2
cVnic = np.max([cVc / (1 - p * cVc), 0])
cmni = cM + cVnic * (p * cM - mp)
z = cmni / np.sqrt(cVnic)
if np.isnan(z):
z = -np.inf
e, lP, exit_flag = log_relative_gauss(z)
if exit_flag == 0:
alpha = e / np.sqrt(cVnic)
# beta = alpha * (alpha + cmni / cVnic);
# r = beta * cVnic / (1 - cVnic * beta);
beta = alpha * (alpha * cVnic + cmni)
r = beta / (1 - beta)
# new message
pnew = r / cVnic
mpnew = r * (alpha + cmni / cVnic) + alpha
# update terms
dp = np.max([-p + eps, gamma * (pnew - p)]) # at worst, remove message
dmp = np.max([-mp + eps, gamma * (mpnew - mp)])
d = np.max([dmp, dp]) # for convergence measures
pnew = p + dp
mpnew = mp + dmp
# project out to marginal
Vnew = V - dp / (1 + dp * cVc) * np.outer(Vc, Vc)
Mnew = M + (dmp - cM * dp) / (1 + dp * cVc) * Vc
if np.any(np.isnan(Vnew)):
raise Exception("an error occurs while running expectation "
"propagation in entropy search. "
"Resulting variance contains NaN")
# there is a problem here when z is very large
logS = lP - 0.5 * (np.log(beta) - np.log(pnew) - np.log(cVnic))\
+ (alpha * alpha) / (2 * beta) * cVnic
elif exit_flag == -1:
d = np.nan
Mnew = 0
Vnew = 0
pnew = 0
mpnew = 0
logS = -np.inf
elif exit_flag == 1:
d = 0
# remove message from marginal:
# new message
pnew = 0
mpnew = 0
# update terms
dp = -p # at worst, remove message
dmp = -mp
d = max([dmp, dp]) # for convergence measures
# project out to marginal
Vnew = V - dp / (1 + dp * cVc) * (np.outer(Vc, Vc))
Mnew = M + (dmp - cM * dp) / (1 + dp * cVc) * Vc
logS = 0
return Mnew, Vnew, pnew, mpnew, logS, d
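A note on the idiom np.max([a, b]) used repeatedly above: for scalars it reduces the pair to the larger value, coinciding with np.maximum(a, b); for arrays the two calls differ, as this sketch shows.

import numpy as np

a, b = -0.3, 0.0
print(np.max([a, b]))     # 0.0 -- the pair reduced to one scalar
x = np.array([1.0, 5.0])
y = np.array([4.0, 2.0])
print(np.max([x, y]))     # 5.0 -- one global maximum over both arrays
print(np.maximum(x, y))   # [4. 5.] -- elementwise maximum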
0
Example 49
Project: VASPy Source File: electro.py
def plot_contour3d(self, **kwargs):
'''
Use mayavi.mlab to plot a 3D contour.
Parameters
---------
kwargs: {
'maxct' : float,max contour number,
'nct' : int, number of contours,
'opacity' : float, opacity of contour,
'widths' : tuple of int
number of replications along the x, y, z axes,
}
'''
if not mayavi_installed:
self.__logger.warning("Mayavi is not installed on your device.")
return
# set parameters
widths = kwargs['widths'] if 'widths' in kwargs else (1, 1, 1)
elf_data, grid = self.expand_data(self.elf_data, self.grid, widths)
# import pdb; pdb.set_trace()
maxdata = np.max(elf_data)
maxct = kwargs['maxct'] if 'maxct' in kwargs else maxdata
# check maxct
if maxct > maxdata:
self.__logger.warning("maxct is larger than %f", maxdata)
opacity = kwargs['opacity'] if 'opacity' in kwargs else 0.6
nct = kwargs['nct'] if 'nct' in kwargs else 5
# plot surface
surface = mlab.contour3d(elf_data)
# set surface attrs
surface.actor.property.opacity = opacity
surface.contour.maximum_contour = maxct
surface.contour.number_of_contours = nct
# reverse axes labels
mlab.axes(xlabel='z', ylabel='y', zlabel='x') # is this an mlab argument-order issue?
mlab.outline()
mlab.show()
return
0
Example 50
Project: scikit-beam Source File: image.py
def construct_circ_avg_image(radii, intensities, dims=None, center=None,
pixel_size=(1, 1), left=0, right=0):
""" Constructs a 2D image from circular averaged data
where radii are given in units of pixels.
Normally, data will be taken from circular_average and used to
re-interpolate into an image.
Parameters
----------
radii : 1D array of floats
the radii (must be in pixels)
intensities : 1D array of floats
the intensities for the radii
dims : 2 tuple of floats, optional
[dy, dx] (row, col)
dy, dx are the dimensions in row,col format. If the dims are not set, it
will assume the dimensions to be (2*maxr+1) where maxr is the maximum
radius. Note in the case of rectangular pixels (pixel_size not 1:1)
that the `maxr` value will be different in each dimension
center: 2 tuple of floats, optional
[y0, x0] (row, col)
y0, x0 is the center (in row,col format)
if not set, assumes the center is (dims[0]-1)/2., (dims[1]-1)/2.
pixel_size : tuple, optional
The size of a pixel (in a real unit, like mm).
argument order should be (pixel_height, pixel_width)
default is (1, 1)
left : float, optional
pixels smaller than the minimum radius are set to this value
(set to None for the value at the minimum radius)
right : float, optional
pixels larger than the maximum radius are set to this value
(set to None for the value at the maximum radius)
Returns
-------
IMG : the interpolated circular averaged image
See Also
--------
circular_average : compute circular average of an image
Pixels smaller than the minimum radius are set to the value at that
minimum radius.
Pixels larger than the maximum radius are set to zero.
bin_grid : Bin and integrate an image, given the radial array of pixels
Useful for nonlinear spacing (Ewald curvature)
Notes
-----
Some pixels may not be filled if the dimensions chosen are too large.
Run this code again on a list of values equal to 1 to obtain a mask
(and set left=0 and right=0).
"""
if dims is None:
if center is not None:
raise ValueError("Specifying a dims but not a center does not "
"make sense and may lead to unexpected results.")
# round up, also take into account pixel size change
maxr_y, maxr_x = (int(np.max(radii/pixel_size[0])+.5),
int(np.max(radii/pixel_size[1])+.5))
dims = 2*maxr_y+1, 2*maxr_x+1
if center is None:
center = (dims[0]-1)/2., (dims[1] - 1)/2.
radial_val = utils.radial_grid(center, dims, pixel_size)
CIMG = np.interp(radial_val, radii, intensities, right=0)
return CIMG
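A minimal sketch of the default-dimension logic, assuming square pixels: np.max(radii) fixes the largest radius, and the canvas is sized so that radius fits on every side of the center pixel.

import numpy as np

radii = np.array([1.0, 2.5, 4.2])
maxr = int(np.max(radii) + 0.5)       # round the largest radius
dims = (2 * maxr + 1, 2 * maxr + 1)   # center pixel plus maxr on each side
center = ((dims[0] - 1) / 2., (dims[1] - 1) / 2.)
print(dims, center)                   # (9, 9) (4.0, 4.0)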