Here are examples of the Python API numpy.abs taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
161 Examples
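Before the project examples, a minimal, self-contained sketch of what numpy.abs itself does (the array values are illustrative only): it returns the element-wise absolute value, and for complex input it returns the magnitude.

import numpy as np

x = np.array([-3.5, -1.0, 0.0, 2.0])   # illustrative values
print(np.abs(x))                        # [3.5 1.  0.  2. ]

z = np.array([3 + 4j])                  # complex input: np.abs gives the magnitude
print(np.abs(z))                        # [5.]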
Example 1 (2 votes)
Project: sherpa Source File: sample.py
def get_scales(self, fit, myscales=None):
    scales = []
    thawedpars = [par for par in fit.model.pars if not par.frozen]
    if myscales is None:
        oldestmethod = fit.estmethod
        covar = Covariance()
        covar.config['sigma'] = self.sigma
        fit.estmethod = Covariance()
        try:
            r = fit.est_errors()
        finally:
            fit.estmethod = oldestmethod
        for par, val, lo, hi in izip(thawedpars, r.parvals, r.parmins, r.parmaxes):
            scale = None
            if lo is not None and hi is not None:
                scale = numpy.abs(lo)
            else:
                warning("Covariance failed for '%s', trying Confidence..." %
                        par.fullname)
                conf = Confidence()
                conf.config['sigma'] = self.sigma
                fit.estmethod = conf
                try:
                    t = fit.est_errors(parlist=(par,))
                    if t.parmins[0] is not None and t.parmaxes[0] is not None:
                        scale = numpy.abs(t.parmins[0])
                    else:
                        if t.parmins[0] is None and t.parmaxes[0] is not None:
                            scale = numpy.abs(t.parmaxes[0])
                        else:
                            warning('1 sigma bounds for parameter ' +
                                    par.fullname +
                                    ' could not be found, using soft limit minimum')
                            if numpy.abs(par.min) == 0.0:
                                scale = 1.0e-16
                            else:
                                scale = numpy.abs(par.min)
                finally:
                    fit.estmethod = oldestmethod
            scales.append(scale)
    else:
        if not numpy.iterable(myscales):
            raise TypeError(
                "scales option must be iterable of length %d " % len(thawedpars))
        scales = list(map(abs, myscales))
    scales = numpy.asarray(scales).transpose()
    return scales
Example 2 (2 votes)
Project: blendercam Source File: basrelief.py
def problemAreas(br):
    t = time.time()
    i = bpy.data.images[br.source_image_name]
    silh_thres = br.silhouette_threshold
    recover_silh = br.recover_silhouettes
    silh_scale = br.silhouette_scale
    MINS = br.min_gridsize
    smoothiterations = br.smooth_iterations
    vcycleiterations = br.vcycle_iterations
    linbcgiterations = br.linbcg_iterations
    useplanar = br.use_planar
    # scale down before:
    if br.gradient_scaling_mask_use:
        m = bpy.data.images[br.gradient_scaling_mask_name]
        # mask=nar=imagetonumpy(m)
    # if br.scale_down_before_use:
    #     i.scale(int(i.size[0]*br.scale_down_before), int(i.size[1]*br.scale_down_before))
    #     if br.gradient_scaling_mask_use:
    #         m.scale(int(m.size[0]*br.scale_down_before), int(m.size[1]*br.scale_down_before))
    nar = imagetonumpy(i)
    # return
    if br.gradient_scaling_mask_use:
        mask = imagetonumpy(m)
    # put image to scale
    tonemap(nar)
    nar = 1 - nar  # reverse z buffer + add something
    print(nar.min(), nar.max())
    gx = nar.copy()
    gx.fill(0)
    gx[:-1, :] = nar[1:, :] - nar[:-1, :]
    gy = nar.copy()
    gy.fill(0)
    gy[:, :-1] = nar[:, 1:] - nar[:, :-1]
    # it's ok, we can treat neg and positive silh separately here:
    a = br.attenuation
    planar = nar < (nar.min() + 0.0001)  # numpy.logical_or(silhxplanar, silhyplanar)
    # sqrt for silhouettes recovery:
    sqrarx = numpy.abs(gx)
    for iter in range(0, br.silhouette_exponent):
        sqrarx = numpy.sqrt(sqrarx)
    sqrary = numpy.abs(gy)
    for iter in range(0, br.silhouette_exponent):
        sqrary = numpy.sqrt(sqrary)
    # detect and also recover silhouettes:
    silhxpos = gx > silh_thres
    gx = gx * (-silhxpos) + recover_silh * (silhxpos * silh_thres * silh_scale) * sqrarx
    silhxneg = gx < -silh_thres
    gx = gx * (-silhxneg) - recover_silh * (silhxneg * silh_thres * silh_scale) * sqrarx
    silhx = numpy.logical_or(silhxpos, silhxneg)
    gx = gx * silhx + (1.0 / a * numpy.log(1. + a * (gx))) * (-silhx)  # attenuate
    # if br.fade_distant_objects:
    #     gx *= (nar)
    #     gy *= (nar)
    silhypos = gy > silh_thres
    gy = gy * (-silhypos) + recover_silh * (silhypos * silh_thres * silh_scale) * sqrary
    silhyneg = gy < -silh_thres
    gy = gy * (-silhyneg) - recover_silh * (silhyneg * silh_thres * silh_scale) * sqrary
    silhy = numpy.logical_or(silhypos, silhyneg)  # both silh
    gy = gy * silhy + (1.0 / a * numpy.log(1. + a * (gy))) * (-silhy)  # attenuate
    # now scale slopes...
    if br.gradient_scaling_mask_use:
        gx *= mask
        gy *= mask
    divg = gx + gy
    divga = numpy.abs(divg)
    divgp = divga > silh_thres / 4.0
    divgp = 1 - divgp
    for a in range(0, 2):
        atimes(divgp, divga)
        divga = divgp
    numpytoimage(divga, 'problem')
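problemAreas builds its image gradients with simple forward differences before taking numpy.abs of them; a hedged, standalone sketch of just that step (the tiny array is made up for illustration):

import numpy as np

nar = np.array([[0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0],
                [3.0, 3.0, 3.0]])      # illustrative height field

gx = np.zeros_like(nar)
gx[:-1, :] = nar[1:, :] - nar[:-1, :]  # forward difference along axis 0
print(np.abs(gx))                      # gradient magnitudes fed to the silhouette logic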
Example 3 (2 votes)
Project: blendercam Source File: basrelief.py
def relief(br):
    t = time.time()
    i = bpy.data.images[br.source_image_name]
    silh_thres = br.silhouette_threshold
    recover_silh = br.recover_silhouettes
    silh_scale = br.silhouette_scale
    MINS = br.min_gridsize
    smoothiterations = br.smooth_iterations
    vcycleiterations = br.vcycle_iterations
    linbcgiterations = br.linbcg_iterations
    useplanar = br.use_planar
    # scale down before:
    if br.gradient_scaling_mask_use:
        m = bpy.data.images[br.gradient_scaling_mask_name]
        # mask=nar=imagetonumpy(m)
    # if br.scale_down_before_use:
    #     i.scale(int(i.size[0]*br.scale_down_before), int(i.size[1]*br.scale_down_before))
    #     if br.gradient_scaling_mask_use:
    #         m.scale(int(m.size[0]*br.scale_down_before), int(m.size[1]*br.scale_down_before))
    nar = imagetonumpy(i)
    # return
    if br.gradient_scaling_mask_use:
        mask = imagetonumpy(m)
    # put image to scale
    tonemap(nar)
    nar = 1 - nar  # reverse z buffer + add something
    print(nar.min(), nar.max())
    gx = nar.copy()
    gx.fill(0)
    gx[:-1, :] = nar[1:, :] - nar[:-1, :]
    gy = nar.copy()
    gy.fill(0)
    gy[:, :-1] = nar[:, 1:] - nar[:, :-1]
    # it's ok, we can treat neg and positive silh separately here:
    a = br.attenuation
    planar = nar < (nar.min() + 0.0001)  # numpy.logical_or(silhxplanar, silhyplanar)
    # sqrt for silhouettes recovery:
    sqrarx = numpy.abs(gx)
    for iter in range(0, br.silhouette_exponent):
        sqrarx = numpy.sqrt(sqrarx)
    sqrary = numpy.abs(gy)
    for iter in range(0, br.silhouette_exponent):
        sqrary = numpy.sqrt(sqrary)
    # detect and also recover silhouettes:
    silhxpos = gx > silh_thres
    gx = gx * (-silhxpos) + recover_silh * (silhxpos * silh_thres * silh_scale) * sqrarx
    silhxneg = gx < -silh_thres
    gx = gx * (-silhxneg) - recover_silh * (silhxneg * silh_thres * silh_scale) * sqrarx
    silhx = numpy.logical_or(silhxpos, silhxneg)
    gx = gx * silhx + (1.0 / a * numpy.log(1. + a * (gx))) * (-silhx)  # attenuate
    # if br.fade_distant_objects:
    #     gx *= (nar)
    #     gy *= (nar)
    silhypos = gy > silh_thres
    gy = gy * (-silhypos) + recover_silh * (silhypos * silh_thres * silh_scale) * sqrary
    silhyneg = gy < -silh_thres
    gy = gy * (-silhyneg) - recover_silh * (silhyneg * silh_thres * silh_scale) * sqrary
    silhy = numpy.logical_or(silhypos, silhyneg)  # both silh
    gy = gy * silhy + (1.0 / a * numpy.log(1. + a * (gy))) * (-silhy)  # attenuate
    # now scale slopes...
    if br.gradient_scaling_mask_use:
        gx *= mask
        gy *= mask
    #
    # print(silhx)
    # silhx=abs(gx)>silh_thres
    # gx=gx*(-silhx)
    # silhy=abs(gy)>silh_thres
    # gy=gy*(-silhy)
    divg = gx + gy
    divg[1:, :] = divg[1:, :] - gx[:-1, :]  # subtract x
    divg[:, 1:] = divg[:, 1:] - gy[:, :-1]  # subtract y
    if br.detail_enhancement_use:  # fourier stuff here! disabled by now
        print("detail enhancement")
        rows, cols = gx.shape
        crow, ccol = rows // 2, cols // 2  # integer division: these are used as array indices below
        # dist=int(br.detail_enhancement_freq*gx.shape[0]/(2))
        # bandwidth=.1
        # dist=
        divgmin = divg.min()
        divg += divgmin
        divgf = numpy.fft.fft2(divg)
        divgfshift = numpy.fft.fftshift(divgf)
        # mspectrum = 20*numpy.log(numpy.abs(divgfshift))
        # numpytoimage(mspectrum,'mspectrum')
        mask = divg.copy()
        pos = numpy.array((crow, ccol))
        # bpy.context.scene.view_settings.curve_mapping.initialize()
        # cur=bpy.context.scene.view_settings.curve_mapping.curves[0]

        def filterwindow(x, y, cx=0, cy=0):  # , curve=None):
            return abs((cx - x)) + abs((cy - y))
            # v=(abs((cx-x)/(cx))+abs((cy-y)/(cy)))
            # return v

        mask = numpy.fromfunction(filterwindow, divg.shape, cx=crow, cy=ccol)  # , curve=cur)
        mask = numpy.sqrt(mask)
        # for x in range(mask.shape[0]):
        #     for y in range(mask.shape[1]):
        #         mask[x,y]=cur.evaluate(mask[x,y])
        maskmin = mask.min()
        maskmax = mask.max()
        mask = (mask - maskmin) / (maskmax - maskmin)
        mask *= br.detail_enhancement_amount
        mask += 1 - mask.max()
        # mask+=1
        mask[crow - 1:crow + 1, ccol - 1:ccol + 1] = 1  # to preserve basic frequencies.
        # numpytoimage(mask,'mask')
        divgfshift = divgfshift * mask
        divgfshift = numpy.fft.ifftshift(divgfshift)
        divg = numpy.abs(numpy.fft.ifft2(divgfshift))
        divg -= divgmin
        divg = -divg
        print("detail enhancement finished")
    levels = 0
    mins = min(nar.shape[0], nar.shape[1])
    while mins >= MINS:
        levels += 1
        mins = mins / 2
    target = numpy.zeros_like(divg)
    solve_pde_multigrid(divg, target, vcycleiterations, linbcgiterations,
                        smoothiterations, mins, levels, useplanar, planar)
    tonemap(target)
    ipath = bpy.path.abspath(i.filepath)[:-len(bpy.path.basename(i.filepath))] + br.output_image_name + '.exr'
    numpysave(target, ipath)
    t = time.time() - t
    print('total time:' + str(t) + '\n')
Example 4 (1 vote)
Project: qgisSpaceSyntaxToolkit Source File: current_flow_betweenness_subset.py
def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
                                                    normalized=True,
                                                    weight='weight',
                                                    dtype=float, solver='lu'):
    """Compute current-flow betweenness centrality for edges using subsets
    of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph
    sources : list of nodes
      Nodes to use as sources for current
    targets : list of nodes
      Nodes to use as sinks for current
    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b = b/(n-1)(n-2), where
      n is the number of nodes in G.
    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.
    dtype : data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.
    solver : string (default='lu')
      Type of linear solver to use for computing the flow matrix.
      Options are "full" (uses most memory), "lu" (recommended), and
      "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of edge tuples with betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian. For a full matrix this is `O(n^3)`, but using
    sparse methods you can achieve `O(nm{\sqrt k})`, where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)`, where `w` is the width of the sparse
    Laplacian matrix. The worst case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy '
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy '
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('edge_current_flow_betweenness_centrality '
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    mapping = dict(zip(ordering, range(n)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = dict.fromkeys(H.edges(), 0.0)
    if normalized:
        nb = (n - 1.0) * (n - 2.0)  # normalization factor
    else:
        nb = 2.0
    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                    solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                betweenness[e] += 0.5 * np.abs(row[i] - row[j])
        betweenness[e] /= nb
    return dict(((ordering[s], ordering[t]), v)
                for (s, t), v in betweenness.items())
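A hedged usage sketch for the function above, assuming a recent NetworkX (which exposes it as nx.edge_current_flow_betweenness_centrality_subset) with SciPy installed; the graph and the source/target subsets are made up for illustration:

import networkx as nx

G = nx.grid_2d_graph(3, 3)               # small connected test graph
sources = [(0, 0)]                        # illustrative subsets
targets = [(2, 2)]
ecf = nx.edge_current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True)
# edges carrying the most current between the subsets
print(sorted(ecf.items(), key=lambda kv: -kv[1])[:3])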
Example 5 (0 votes)
Project: qutip Source File: steadystate.py
def steadystate(A, c_op_list=[], **kwargs):
    """Calculates the steady state for quantum evolution subject to the
    supplied Hamiltonian or Liouvillian operator and (if given a Hamiltonian) a
    list of collapse operators.

    If the user passes a Hamiltonian then it, along with the list of collapse
    operators, will be converted into a Liouvillian operator in Lindblad form.

    Parameters
    ----------
    A : qobj
        A Hamiltonian or Liouvillian operator.
    c_op_list : list
        A list of collapse operators.
    method : str {'direct', 'eigen', 'iterative-gmres',
                  'iterative-lgmres', 'iterative-bicgstab', 'svd', 'power',
                  'power-gmres', 'power-lgmres', 'power-bicgstab'}
        Method for solving the underlying linear equation. Direct LU solver
        'direct' (default), sparse eigenvalue problem 'eigen',
        iterative GMRES method 'iterative-gmres', iterative LGMRES method
        'iterative-lgmres', iterative BICGSTAB method 'iterative-bicgstab',
        SVD 'svd' (dense), or inverse-power method 'power'. The iterative
        power methods 'power-gmres', 'power-lgmres', 'power-bicgstab' use
        the same solvers as their direct counterparts.
    return_info : bool, optional, default = False
        Return a dictionary of solver-specific information about the
        solution and how it was obtained.
    sparse : bool, optional, default = True
        Solve for the steady state using sparse algorithms. If set to False,
        the underlying Liouvillian operator will be converted into a dense
        matrix. Use only for 'smaller' systems.
    use_rcm : bool, optional, default = False
        Use reverse Cuthill-Mckee reordering to minimize fill-in in the
        LU factorization of the Liouvillian.
    use_wbm : bool, optional, default = False
        Use Weighted Bipartite Matching reordering to make the Liouvillian
        diagonally dominant. This is useful for iterative preconditioners
        only, and is set to ``True`` by default when finding a preconditioner.
    weight : float, optional
        Sets the size of the elements used for adding the unity trace condition
        to the linear solvers. This is set to the average abs value of the
        Liouvillian elements if not specified by the user.
    x0 : ndarray, optional
        ITERATIVE ONLY. Initial guess for solution vector.
    maxiter : int, optional, default=1000
        ITERATIVE ONLY. Maximum number of iterations to perform.
    tol : float, optional, default=1e-12
        ITERATIVE ONLY. Tolerance used for terminating solver.
    permc_spec : str, optional, default='COLAMD'
        ITERATIVE ONLY. Column ordering used internally by superLU for the
        'direct' LU decomposition method. Options include 'COLAMD' and
        'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
        unless explicitly specified.
    use_precond : bool, optional, default = False
        ITERATIVE ONLY. Use an incomplete sparse LU decomposition as a
        preconditioner for the 'iterative' GMRES and BICG solvers.
        Speeds up convergence time by orders of magnitude in many cases.
    M : {sparse matrix, dense matrix, LinearOperator}, optional
        ITERATIVE ONLY. Preconditioner for A. The preconditioner should
        approximate the inverse of A. Effective preconditioning can
        dramatically improve the rate of convergence for iterative methods.
        If no preconditioner is given and ``use_precond = True``, then one
        is generated automatically.
    fill_factor : float, optional, default = 100
        ITERATIVE ONLY. Specifies the fill ratio upper bound (>=1) of the iLU
        preconditioner. Lower values save memory at the cost of longer
        execution times and a possible singular factorization.
    drop_tol : float, optional, default = 1e-4
        ITERATIVE ONLY. Sets the threshold for the magnitude of preconditioner
        elements that should be dropped. Can be reduced for a coarser
        factorization at the cost of an increased number of iterations, and a
        possible singular factorization.
    diag_pivot_thresh : float, optional, default = None
        ITERATIVE ONLY. Sets the threshold between [0,1] for which diagonal
        elements are considered acceptable pivot points when using a
        preconditioner. A value of zero forces the pivot to be the diagonal
        element.
    ILU_MILU : str, optional, default = 'smilu_2'
        ITERATIVE ONLY. Selects the incomplete LU decomposition algorithm
        used in creating the preconditioner. Should only be used by
        advanced users.

    Returns
    -------
    dm : qobj
        Steady state density matrix.
    info : dict, optional
        Dictionary containing solver-specific information about the solution.

    Notes
    -----
    The SVD method works only for dense operators (i.e. small systems).
    """
    ss_args = _default_steadystate_args()
    for key in kwargs.keys():
        if key in ss_args.keys():
            ss_args[key] = kwargs[key]
        else:
            raise Exception(
                "Invalid keyword argument '" + key + "' passed to steadystate.")

    # Set column perm to NATURAL if using RCM and not specified by user
    if ss_args['use_rcm'] and ('permc_spec' not in kwargs.keys()):
        ss_args['permc_spec'] = 'NATURAL'

    # Create & check Liouvillian
    A = _steadystate_setup(A, c_op_list)

    # Set weight parameter to avg abs val in L if not set explicitly
    if 'weight' not in kwargs.keys():
        ss_args['weight'] = np.mean(np.abs(A.data.data.max()))
        ss_args['info']['weight'] = ss_args['weight']

    if ss_args['method'] == 'direct':
        if ss_args['sparse']:
            return _steadystate_direct_sparse(A, ss_args)
        else:
            return _steadystate_direct_dense(A, ss_args)

    elif ss_args['method'] == 'eigen':
        return _steadystate_eigen(A, ss_args)

    elif ss_args['method'] in ['iterative-gmres',
                               'iterative-lgmres', 'iterative-bicgstab']:
        return _steadystate_iterative(A, ss_args)

    elif ss_args['method'] == 'svd':
        return _steadystate_svd_dense(A, ss_args)

    elif ss_args['method'] in ['power', 'power-gmres',
                               'power-lgmres', 'power-bicgstab']:
        return _steadystate_power(A, ss_args)

    else:
        raise ValueError('Invalid method argument for steadystate.')
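A hedged usage sketch for steadystate, assuming QuTiP is installed; the damped-oscillator system and the decay rate are illustrative only:

from qutip import destroy, steadystate

N = 10                          # illustrative Hilbert-space cutoff
a = destroy(N)
H = a.dag() * a                 # harmonic oscillator Hamiltonian
c_ops = [0.1 * a]               # single collapse operator, made-up rate
rho_ss = steadystate(H, c_ops, method='direct')
print(rho_ss.tr())              # a density matrix: trace should be ~1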
Example 6 (0 votes)
def endIter(self):
    # After reaching target misfit with l2-norm, switch to IRLS (mode:2)
    if self.invProb.phi_d < self.target and self.mode == 1:
        print("Convergence with smooth l2-norm regularization: Start IRLS steps...")
        self.mode = 2

        # Either use the supplied epsilon, or fix based on distribution of
        # model values
        if getattr(self, 'eps', None) is None:
            self.reg.eps_p = np.percentile(np.abs(self.invProb.curModel), self.prctile)
        else:
            self.reg.eps_p = self.eps[0]

        if getattr(self, 'eps', None) is None:
            self.reg.eps_q = np.percentile(
                np.abs(self.reg.regmesh.cellDiffxStencil *
                       (self.reg.mapping * self.invProb.curModel)), self.prctile)
        else:
            self.reg.eps_q = self.eps[1]

        self.reg.norms = self.norms
        self.coolingFactor = 1.
        self.coolingRate = 1
        self.iterStart = self.opt.iter
        self.phi_d_last = self.invProb.phi_d
        self.phi_m_last = self.invProb.phi_m_last
        self.reg.l2model = self.invProb.curModel
        self.reg.curModel = self.invProb.curModel

        print("L[p qx qy qz]-norm : " + str(self.reg.norms))
        print("eps_p: " + str(self.reg.eps_p) + " eps_q: " + str(self.reg.eps_q))

    if getattr(self, 'f_old', None) is None:
        self.f_old = self.reg.eval(self.invProb.curModel)  # self.invProb.evalFunction(self.invProb.curModel, return_g=False, return_H=False)

    # Beta Schedule
    if self.opt.iter > 0 and self.opt.iter % self.coolingRate == 0:
        if self.debug:
            print('BetaSchedule is cooling Beta. Iteration: {0:d}'.format(self.opt.iter))
        self.invProb.beta /= self.coolingFactor

    # Only update after GN iterations
    if (self.opt.iter - self.iterStart) % self.minGNiter == 0 and self.mode == 2:
        self.IRLSiter += 1

        phim_new = self.reg.eval(self.invProb.curModel)
        self.f_change = np.abs(self.f_old - phim_new) / self.f_old
        print("Regularization decrease: {0:6.3e}".format(self.f_change))

        # Check for maximum number of IRLS cycles
        if self.IRLSiter == self.maxIRLSiter:
            print("Reached maximum number of IRLS cycles: {0:d}".format(self.maxIRLSiter))
            self.opt.stopNextIteration = True
            return

        # Check if the function has changed enough
        if self.f_change < self.f_min_change and self.IRLSiter > 1:
            print("Minimum decrease in regularization. End of IRLS")
            self.opt.stopNextIteration = True
            return
        else:
            self.f_old = phim_new

        # # Cool the threshold parameter if required
        # if getattr(self, 'factor', None) is not None:
        #     eps = self.reg.eps / self.factor
        #
        #     if getattr(self, 'eps_min', None) is not None:
        #         self.reg.eps = np.max([self.eps_min, eps])
        #     else:
        #         self.reg.eps = eps

        # Get phi_m at the end of current iteration
        self.phi_m_last = self.invProb.phi_m_last

        # Reset the regularization matrices so that it is
        # recalculated for current model
        self.reg._Wsmall = None
        self.reg._Wx = None
        self.reg._Wy = None
        self.reg._Wz = None

        # Update the model used for the IRLS weights
        self.reg.curModel = self.invProb.curModel

        # Temporarily set gamma to 1. to get raw phi_m
        self.reg.gamma = 1.

        # Compute new model objective function value
        phim_new = self.reg.eval(self.invProb.curModel)

        # Update gamma to scale the regularization between IRLS iterations
        self.reg.gamma = self.phi_m_last / phim_new

        # Reset the regularization matrices again for new gamma
        self.reg._Wsmall = None
        self.reg._Wx = None
        self.reg._Wy = None
        self.reg._Wz = None

        # Check if misfit is within the tolerance, otherwise scale beta
        val = self.invProb.phi_d / (self.survey.nD * 0.5)
        if np.abs(1. - val) > self.beta_tol:
            self.invProb.beta = self.invProb.beta * self.survey.nD * 0.5 / self.invProb.phi_d
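The IRLS threshold eps_p above is simply a percentile of the absolute model values; that one-liner in isolation (the model vector and percentile setting are made up):

import numpy as np

m = np.array([-0.5, 0.01, 0.2, -2.0, 0.03])   # illustrative model values
prctile = 90                                   # hypothetical percentile setting
eps_p = np.percentile(np.abs(m), prctile)
print(eps_p)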
Example 7 (0 votes)
Project: bayespy Source File: plot.py
def _hinton(W, error=None, vmax=None, square=False, axes=None):
    """
    Draws a Hinton diagram for visualizing a weight matrix.

    Temporarily disables matplotlib interactive mode if it is on,
    otherwise this takes forever.

    Originally copied from
    http://wiki.scipy.org/Cookbook/Matplotlib/HintonDiagrams
    """
    if axes is None:
        axes = plt.gca()
    W = misc.atleast_nd(W, 2)
    (height, width) = W.shape
    if not vmax:
        # vmax = 2**np.ceil(np.log(np.max(np.abs(W)))/np.log(2))
        if error is not None:
            vmax = np.max(np.abs(W) + error)
        else:
            vmax = np.max(np.abs(W))

    axes.fill(0.5 + np.array([0, width, width, 0]),
              0.5 + np.array([0, 0, height, height]),
              'gray')
    if square:
        axes.set_aspect('equal')
    axes.set_ylim(0.5, height + 0.5)
    axes.set_xlim(0.5, width + 0.5)
    axes.set_xticks([])
    axes.set_yticks([])
    axes.invert_yaxis()
    for x in range(width):
        for y in range(height):
            _x = x + 1
            _y = y + 1
            w = W[y, x]
            _w = np.abs(w)
            if w > 0:
                _c = 'white'
            else:
                _c = 'black'
            if error is not None:
                e = error[y, x]
                if e < 0:
                    print(e, _w, vmax)
                    raise Exception("BUG? Negative error")
                if _w + e > vmax:
                    print(e, _w, vmax)
                    raise Exception("BUG? Value+error greater than max")
                _rectangle(axes,
                           _x,
                           _y,
                           min(1, np.sqrt((_w + e) / vmax)),
                           min(1, np.sqrt((_w + e) / vmax)),
                           edgecolor=_c,
                           fill=False)
            _blob(axes, _x, _y, min(1, _w / vmax), _c)
Example 8 (0 votes)
Project: statsmodels Source File: regressionplots.py
def influence_plot(results, external=True, alpha=.05, criterion="cooks",
                   size=48, plot_alpha=.75, ax=None, **kwargs):
    """
    Plot of influence in regression. Plots studentized resids vs. leverage.

    Parameters
    ----------
    results : results instance
        A fitted model.
    external : bool
        Whether to use externally or internally studentized residuals. It is
        recommended to leave external as True.
    alpha : float
        The alpha value to identify large studentized residuals. Large means
        abs(resid_studentized) > t.ppf(1-alpha/2, dof=results.df_resid)
    criterion : str {'DFFITS', 'Cooks'}
        Which criterion to base the size of the points on. Options are
        DFFITS or Cook's D.
    size : float
        The range of `criterion` is mapped to 10**2 - size**2 in points.
    plot_alpha : float
        The `alpha` of the plotted points.
    ax : matplotlib Axes instance
        An instance of a matplotlib Axes.

    Returns
    -------
    fig : matplotlib figure
        The matplotlib figure that contains the Axes.

    Notes
    -----
    Row labels are annotated for observations in which the leverage, measured
    by the diagonal of the hat matrix, is high or the residuals are large, as
    the combination of large residuals and a high influence value indicates an
    influence point. The value of large residuals can be controlled using the
    `alpha` parameter. Large leverage points are identified as
    hat_i > 2 * (df_model + 1)/nobs.
    """
    fig, ax = utils.create_mpl_ax(ax)
    infl = results.get_influence()

    if criterion.lower().startswith('coo'):
        psize = infl.cooks_distance[0]
    elif criterion.lower().startswith('dff'):
        psize = np.abs(infl.dffits[0])
    else:
        raise ValueError("Criterion %s not understood" % criterion)

    # scale the variables
    # TODO: what is the correct scaling and the assumption here?
    # we want plots to be comparable across different plots
    # so we would need to use the expected distribution of criterion probably
    old_range = np.ptp(psize)
    new_range = size**2 - 8**2
    psize = (psize - psize.min()) * new_range / old_range + 8**2

    leverage = infl.hat_matrix_diag
    if external:
        resids = infl.resid_studentized_external
    else:
        resids = infl.resid_studentized_internal

    from scipy import stats

    cutoff = stats.t.ppf(1. - alpha / 2, results.df_resid)
    large_resid = np.abs(resids) > cutoff
    large_leverage = leverage > _high_leverage(results)
    large_points = np.logical_or(large_resid, large_leverage)

    ax.scatter(leverage, resids, s=psize, alpha=plot_alpha)

    # add point labels
    labels = results.model.data.row_labels
    if labels is None:
        labels = lrange(len(resids))
    ax = utils.annotate_axes(np.where(large_points)[0], labels,
                             lzip(leverage, resids),
                             lzip(-(psize / 2)**.5, (psize / 2)**.5), "x-large",
                             ax)

    # TODO: make configurable or let people do it ex-post?
    font = {"fontsize": 16, "color": "black"}
    ax.set_ylabel("Studentized Residuals", **font)
    ax.set_xlabel("H Leverage", **font)
    ax.set_title("Influence Plot", **font)
    return fig
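A hedged usage sketch for influence_plot, which statsmodels exposes from statsmodels.graphics.regressionplots; the regression data here are synthetic and purely illustrative:

import numpy as np
import statsmodels.api as sm
from statsmodels.graphics.regressionplots import influence_plot

rng = np.random.default_rng(0)                 # synthetic data
x = rng.normal(size=50)
y = 2 * x + rng.normal(size=50)
results = sm.OLS(y, sm.add_constant(x)).fit()

fig = influence_plot(results, criterion="cooks")
fig.savefig("influence.png")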
Example 9 (0 votes)
Project: pyspeckit Source File: plotters.py
def reset_limits(self, xmin=None, xmax=None, ymin=None, ymax=None,
                 reset_xlimits=True, reset_ylimits=True, ypeakscale=1.2,
                 silent=None, use_window_limits=False, **kwargs):
    """
    Automatically or manually reset the plot limits
    """
    # if not use_window_limits: use_window_limits = False
    if self.debug:
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        print(zip(args, values))

    if use_window_limits:
        # this means DO NOT reset!
        # it simply sets self.[xy][min/max] = current value
        self.set_limits_from_visible_window()
    else:
        if silent is not None:
            self.silent = silent

        # if self.xmin and self.xmax:
        if (reset_xlimits or self.Spectrum.xarr.min().value < self.xmin or
                self.Spectrum.xarr.max().value > self.xmax):
            if not self.silent:
                warn("Resetting X-axis min/max because the plot is out of bounds.")
            self.xmin = None
            self.xmax = None
        if xmin is not None:
            self.xmin = u.Quantity(xmin, self._xunit)
        elif self.xmin is None:
            self.xmin = u.Quantity(self.Spectrum.xarr.min().value, self._xunit)
        if xmax is not None:
            self.xmax = u.Quantity(xmax, self._xunit)
        elif self.xmax is None:
            self.xmax = u.Quantity(self.Spectrum.xarr.max().value, self._xunit)

        xpixmin = np.argmin(np.abs(self.Spectrum.xarr.value - self.xmin.value))
        xpixmax = np.argmin(np.abs(self.Spectrum.xarr.value - self.xmax.value))
        if xpixmin > xpixmax:
            xpixmin, xpixmax = xpixmax, xpixmin
        elif xpixmin == xpixmax:
            if reset_xlimits:
                raise Exception("Infinite recursion error. Maybe there are no valid data?")
            if not self.silent:
                warn("ERROR: the X axis limits specified were invalid. Resetting.")
            self.reset_limits(reset_xlimits=True, ymin=ymin, ymax=ymax,
                              reset_ylimits=reset_ylimits,
                              ypeakscale=ypeakscale, **kwargs)
            return

        if self.ymin and self.ymax:
            # this is utter nonsense....
            if (np.nanmax(self.Spectrum.data) < self.ymin or
                    np.nanmin(self.Spectrum.data) > self.ymax or reset_ylimits):
                if not self.silent and not reset_ylimits:
                    warn("Resetting Y-axis min/max because the plot is out of bounds.")
                self.ymin = None
                self.ymax = None

        if ymin is not None:
            self.ymin = ymin
        elif self.ymin is None:
            yminval = np.nanmin(self.Spectrum.data[xpixmin:xpixmax])
            # Increase the range fractionally. This means dividing a positive #, multiplying a negative #
            if yminval < 0:
                self.ymin = float(yminval) * float(ypeakscale)
            else:
                self.ymin = float(yminval) / float(ypeakscale)

        if ymax is not None:
            self.ymax = ymax
        elif self.ymax is None:
            ymaxval = (np.nanmax(self.Spectrum.data[xpixmin:xpixmax]) - self.ymin)
            if ymaxval > 0:
                self.ymax = float(ymaxval) * float(ypeakscale) + self.ymin
            else:
                self.ymax = float(ymaxval) / float(ypeakscale) + self.ymin

        self.ymin += self.offset
        self.ymax += self.offset

    self.axis.set_xlim(self.xmin.value if hasattr(self.xmin, 'value') else self.xmin,
                       self.xmax.value if hasattr(self.xmax, 'value') else self.xmax)
    self.axis.set_ylim(self.ymin, self.ymax)
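The xpixmin/xpixmax computation above is the standard nearest-value idiom: np.argmin(np.abs(arr - value)) returns the index of the sample closest to value. Standalone, with made-up axis values:

import numpy as np

xarr = np.linspace(0.0, 10.0, 101)        # illustrative axis
target = 3.14
idx = np.argmin(np.abs(xarr - target))    # index of the closest sample
print(idx, xarr[idx])                     # 31 3.1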
Example 10 (0 votes)
Project: pylon Source File: estimator.py
def run(self):
    """ Solves a state estimation problem.
    """
    case = self.case
    baseMVA = case.base_mva
    buses = self.case.connected_buses
    branches = case.online_branches
    generators = case.online_generators
    meas = self.measurements

    # Update indices.
    self.case.index_buses()
    self.case.index_branches()

    # Index buses.
    # ref = [b._i for b in buses if b.type == REFERENCE]
    pv = [b._i for b in buses if b.type == PV]
    pq = [b._i for b in buses if b.type == PQ]

    # Build admittance matrices.
    Ybus, Yf, Yt = case.Y

    # Prepare initial guess.
    V0 = self.getV0(self.v_mag_guess, buses, generators)

    # Start the clock.
    t0 = time()

    # Initialise SE.
    converged = False
    i = 0
    V = V0
    Va = angle(V0)
    Vm = abs(V0)

    nb = Ybus.shape[0]
    f = [b.from_bus._i for b in branches]
    t = [b.to_bus._i for b in branches]
    nonref = pv + pq

    # Form measurement vector.
    z = array([m.value for m in meas])

    # Form measurement index vectors.
    idx_zPf = [m.b_or_l._i for m in meas if m.type == PF]
    idx_zPt = [m.b_or_l._i for m in meas if m.type == PT]
    idx_zQf = [m.b_or_l._i for m in meas if m.type == QF]
    idx_zQt = [m.b_or_l._i for m in meas if m.type == QT]
    idx_zPg = [m.b_or_l._i for m in meas if m.type == PG]
    idx_zQg = [m.b_or_l._i for m in meas if m.type == QG]
    idx_zVm = [m.b_or_l._i for m in meas if m.type == VM]
    idx_zVa = [m.b_or_l._i for m in meas if m.type == VA]

    def col(seq):
        return [[k] for k in seq]

    # Create inverse of covariance matrix with all measurements.
    # full_scale = 30
    # sigma = [
    #     0.02 * abs(Sf) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(St) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(Sbus) + 0.0052 * full_scale * ones(nb,1),
    #     0.2 * pi/180 * 3*ones(nb,1),
    #     0.02 * abs(Sf) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(St) + 0.0052 * full_scale * ones(nbr,1),
    #     0.02 * abs(Sbus) + 0.0052 * full_scale * ones(nb,1),
    #     0.02 * abs(V0) + 0.0052 * 1.1 * ones(nb,1),
    # ] ./ 3

    # Get R inverse matrix.
    sigma_vector = r_[
        self.sigma[0] * ones(len(idx_zPf)),
        self.sigma[1] * ones(len(idx_zPt)),
        self.sigma[2] * ones(len(idx_zQf)),
        self.sigma[3] * ones(len(idx_zQt)),
        self.sigma[4] * ones(len(idx_zPg)),
        self.sigma[5] * ones(len(idx_zQg)),
        self.sigma[6] * ones(len(idx_zVm)),
        self.sigma[7] * ones(len(idx_zVa))
    ]
    sigma_squared = sigma_vector**2
    rsig = range(len(sigma_squared))
    Rinv = csr_matrix((1.0 / sigma_squared, (rsig, rsig)))

    # Do Newton iterations.
    while (not converged) and (i < self.max_iter):
        i += 1

        # Compute estimated measurement.
        Sfe = V[f] * conj(Yf * V)
        Ste = V[t] * conj(Yt * V)

        # Compute net injection at generator buses.
        gbus = [g.bus._i for g in generators]
        Sgbus = V[gbus] * conj(Ybus[gbus, :] * V)
        # inj S + local Sd
        Sd = array([complex(b.p_demand, b.q_demand) for b in buses])
        Sgen = (Sgbus * baseMVA + Sd) / baseMVA

        z_est = r_[
            Sfe[idx_zPf].real,
            Ste[idx_zPt].real,
            Sfe[idx_zQf].imag,
            Ste[idx_zQt].imag,
            Sgen[idx_zPg].real,
            Sgen[idx_zQg].imag,
            abs(V[idx_zVm]),
            angle(V[idx_zVa])
        ]

        # Get H matrix.
        dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V)
        dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, _, _ = case.dSbr_dV(Yf, Yt, V)

        # Get sub-matrix of H relating to line flow.
        dPF_dVa = dSf_dVa.real  # from end
        dQF_dVa = dSf_dVa.imag
        dPF_dVm = dSf_dVm.real
        dQF_dVm = dSf_dVm.imag
        dPT_dVa = dSt_dVa.real  # to end
        dQT_dVa = dSt_dVa.imag
        dPT_dVm = dSt_dVm.real
        dQT_dVm = dSt_dVm.imag
        # Get sub-matrix of H relating to generator output.
        dPG_dVa = dSbus_dVa[gbus, :].real
        dQG_dVa = dSbus_dVa[gbus, :].imag
        dPG_dVm = dSbus_dVm[gbus, :].real
        dQG_dVm = dSbus_dVm[gbus, :].imag
        # Get sub-matrix of H relating to voltage angle.
        dVa_dVa = csr_matrix((ones(nb), (range(nb), range(nb))))
        dVa_dVm = csr_matrix((nb, nb))
        # Get sub-matrix of H relating to voltage magnitude.
        dVm_dVa = csr_matrix((nb, nb))
        dVm_dVm = csr_matrix((ones(nb), (range(nb), range(nb))))

        h = [(col(idx_zPf), dPF_dVa, dPF_dVm),
             (col(idx_zQf), dQF_dVa, dQF_dVm),
             (col(idx_zPt), dPT_dVa, dPT_dVm),
             (col(idx_zQt), dQT_dVa, dQT_dVm),
             (col(idx_zPg), dPG_dVa, dPG_dVm),
             (col(idx_zQg), dQG_dVa, dQG_dVm),
             (col(idx_zVm), dVm_dVa, dVm_dVm),
             (col(idx_zVa), dVa_dVa, dVa_dVm)]

        H = vstack([hstack([dVa[idx, nonref], dVm[idx, nonref]])
                    for idx, dVa, dVm in h if len(idx) > 0])

        # Compute update step.
        J = H.T * Rinv * H
        F = H.T * Rinv * (z - z_est)  # evaluate F(x)
        dx = spsolve(J, F)

        # Check for convergence.
        normF = linalg.norm(F, Inf)
        if self.verbose:
            logger.info("Iteration [%d]: Norm of mismatch: %.3f" %
                        (i, normF))
        if normF < self.tolerance:
            converged = True

        # Update voltage.
        npvpq = len(nonref)
        Va[nonref] = Va[nonref] + dx[:npvpq]
        Vm[nonref] = Vm[nonref] + dx[npvpq:2 * npvpq]
        V = Vm * exp(1j * Va)
        Va = angle(V)
        Vm = abs(V)

    # Weighted sum squares of error.
    error_sqrsum = sum((z - z_est)**2 / sigma_squared)

    # Update case with solution.
    case.pf_solution(Ybus, Yf, Yt, V)

    # Stop the clock.
    elapsed = time() - t0

    if self.verbose and converged:
        print("State estimation converged in: %.3fs (%d iterations)" %
              (elapsed, i))

    # self.output_solution(sys.stdout, z, z_est)

    solution = {"V": V, "converged": converged, "iterations": i,
                "z": z, "z_est": z_est, "error_sqrsum": error_sqrsum,
                "elapsed": elapsed}

    return solution
Example 11 (0 votes)
Project: hmf Source File: halofit.py
def halofit(k, delta_k, sigma_8, z, cosmo=None, takahashi=True):
    """
    Implementation of HALOFIT (Smith+2003).

    Parameters
    ----------
    k : array_like
        Wavenumbers [h/Mpc].
    delta_k : array_like
        Dimensionless power (linear) at `k`.
    sigma_8 : float
        RMS linear density fluctuations in spheres of radius 8 Mpc/h at z=0.
    z : float
        Redshift
    cosmo : :class:`hmf.cosmo.Cosmology` instance, optional
        An instance of either the `Cosmology` class provided in the `hmf` package, or
        any subclass of `FLRW` from `astropy`. Default is the default cosmology from
        the :mod:`hmf.cosmo` module.
    takahashi : bool, optional
        Whether to use updated parameters from Takahashi+2012. Otherwise use
        original from Smith+2003.

    Returns
    -------
    nonlinear_delta_k : array_like
        Dimensionless power at `k`, with nonlinear corrections applied.
    """
    if cosmo is None:
        cosmo = csm()

    # Get physical parameters
    rknl, neff, rncur = _get_spec(k, delta_k, sigma_8)

    # Only apply the model to higher wavenumbers
    mask = k > 0.005
    plin = delta_k[mask]
    k = k[mask]

    # Define the cosmology at redshift
    omegamz = cosmo.Om(z)
    omegavz = cosmo.Ode(z)

    w = cosmo.w(z)
    fnu = cosmo.Onu0 / cosmo.Om0

    if takahashi:
        a = 10 ** (1.5222 + 2.8553 * neff + 2.3706 * neff ** 2 +
                   0.9903 * neff ** 3 + 0.2250 * neff ** 4 +
                   - 0.6038 * rncur + 0.1749 * omegavz * (1 + w))
        b = 10 ** (-0.5642 + 0.5864 * neff + 0.5716 * neff ** 2 +
                   - 1.5474 * rncur + 0.2279 * omegavz * (1 + w))
        c = 10 ** (0.3698 + 2.0404 * neff + 0.8161 * neff ** 2 + 0.5869 * rncur)

        gam = 0.1971 - 0.0843 * neff + 0.8460 * rncur
        alpha = np.abs(6.0835 + 1.3373 * neff - 0.1959 * neff ** 2 +
                       - 5.5274 * rncur)
        beta = (2.0379 - 0.7354 * neff + 0.3157 * neff ** 2 +
                1.2490 * neff ** 3 + 0.3980 * neff ** 4 - 0.1682 * rncur +
                fnu * (1.081 + 0.395 * neff ** 2))
        xmu = 0.0
        xnu = 10 ** (5.2105 + 3.6902 * neff)
    else:
        a = 10 ** (1.4861 + 1.8369 * neff + 1.6762 * neff ** 2 +
                   0.7940 * neff ** 3 + 0.1670 * neff ** 4 +
                   - 0.6206 * rncur)
        b = 10 ** (0.9463 + 0.9466 * neff + 0.3084 * neff ** 2 +
                   - 0.94 * rncur)
        c = 10 ** (-0.2807 + 0.6669 * neff + 0.3214 * neff ** 2 - 0.0793 * rncur)

        gam = 0.8649 + 0.2989 * neff + 0.1631 * rncur
        alpha = np.abs(1.3884 + 0.3700 * neff - 0.1452 * neff ** 2)
        beta = (0.8291 + 0.9854 * neff + 0.3401 * neff ** 2)
        xmu = 10 ** (-3.5442 + 0.1908 * neff)
        xnu = 10 ** (0.9589 + 1.2857 * neff)

    if np.abs(1 - omegamz) > 0.01:
        f1a = omegamz ** -0.0732
        f2a = omegamz ** -0.1423
        f3a = omegamz ** 0.0725
        f1b = omegamz ** -0.0307
        f2b = omegamz ** -0.0585
        f3b = omegamz ** 0.0743
        frac = omegavz / (1 - omegamz)
        if takahashi:
            f1 = f1b
            f2 = f2b
            f3 = f3b
        else:
            f1 = frac * f1b + (1 - frac) * f1a
            f2 = frac * f2b + (1 - frac) * f2a
            f3 = frac * f3b + (1 - frac) * f3a
    else:
        f1 = f2 = f3 = 1.0

    y = k / rknl

    ph = a * y ** (f1 * 3) / (1 + b * y ** f2 + (f3 * c * y) ** (3 - gam))
    ph = ph / (1 + xmu / y + xnu * y ** -2) * (1 + fnu * (0.977 - 18.015 * (cosmo.Om0 - 0.3)))
    plinaa = plin * (1 + fnu * 47.48 * k ** 2 / (1 + 1.5 * k ** 2))
    pq = plin * (1 + plinaa) ** beta / (1 + plinaa * alpha) * np.exp(-y / 4.0 - y ** 2 / 8.0)
    pnl = pq + ph

    # We have to copy so the original data is not overwritten, giving unexpected results.
    nonlinear_delta_k = delta_k.copy()
    nonlinear_delta_k[mask] = pnl

    return nonlinear_delta_k
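The closing copy() matters: assigning through the boolean mask into delta_k directly would mutate the caller's array. A tiny illustration of that pattern (values made up):

import numpy as np

delta_k = np.array([1.0, 2.0, 3.0])
mask = delta_k > 1.5

out = delta_k.copy()      # work on a copy so the input stays intact
out[mask] = -1.0
print(delta_k)            # [1. 2. 3.]  (unchanged)
print(out)                # [ 1. -1. -1.]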
Example 12 (0 votes)
def _param_grad_helper(self, dL_dK, X, X2, target):
    """derivative of the covariance matrix with respect to the parameters."""
    if X2 is None:
        X2 = X
    dist = np.abs(X - X2.T)
    ly = 1 / self.lengthscaleY
    lu = np.sqrt(3) / self.lengthscaleU
    # ly=self.lengthscaleY
    # lu=self.lengthscaleU
    dk1theta1 = np.exp(-ly*dist)*2*(-lu)/(lu+ly)**3
    # c=np.sqrt(3)
    # t1=c/lu
    # t2=1/ly
    # dk1theta1=np.exp(-dist*ly)*t2*( (2*c*t2+2*t1)/(c*t2+t1)**2 -2*(2*c*t2*t1+t1**2)/(c*t2+t1)**3 )
    dk2theta1 = 1*(
        np.exp(-lu*dist)*dist*(-ly+2*lu-lu*ly*dist+dist*lu**2)*(ly-lu)**(-2) + np.exp(-lu*dist)*(-2+ly*dist-2*dist*lu)*(ly-lu)**(-2)
        + np.exp(-dist*lu)*(ly-2*lu+ly*lu*dist-dist*lu**2)*2*(ly-lu)**(-3)
        + np.exp(-dist*ly)*2*(ly-lu)**(-2)
        + np.exp(-dist*ly)*2*(2*lu-ly)*(ly-lu)**(-3)
    )
    dk3theta1 = np.exp(-dist*lu)*(lu+ly)**(-2)*((2*lu+ly+dist*lu**2+lu*ly*dist)*(-dist-2/(lu+ly))+2+2*lu*dist+ly*dist)
    dktheta1 = self.varianceU*self.varianceY*(dk1theta1+dk2theta1+dk3theta1)
    dk1theta2 = np.exp(-ly*dist) * ((lu+ly)**(-2)) * ( (-dist)*(2*lu+ly) + 1 + (-2)*(2*lu+ly)/(lu+ly) )
    dk2theta2 = 1*(
        np.exp(-dist*lu)*(ly-lu)**(-2) * ( 1+lu*dist+(-2)*(ly-2*lu+lu*ly*dist-dist*lu**2)*(ly-lu)**(-1) )
        + np.exp(-dist*ly)*(ly-lu)**(-2) * ( (-dist)*(2*lu-ly) -1+(2*lu-ly)*(-2)*(ly-lu)**(-1) )
    )
    dk3theta2 = np.exp(-dist*lu) * (-3*lu-ly-dist*lu**2-lu*ly*dist)/(lu+ly)**3
    dktheta2 = self.varianceU*self.varianceY*(dk1theta2 + dk2theta2 + dk3theta2)
    k1 = np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2
    k2 = (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly)) / (ly-lu)**2
    k3 = np.exp(-lu*dist) * ((1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2)
    dkdvar = k1 + k2 + k3

    target[0] += np.sum(self.varianceY*dkdvar * dL_dK)
    target[1] += np.sum(self.varianceU*dkdvar * dL_dK)
    target[2] += np.sum(dktheta1*(-np.sqrt(3)*self.lengthscaleU**(-2)) * dL_dK)
    target[3] += np.sum(dktheta2*(-self.lengthscaleY**(-2)) * dL_dK)
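The first line of real work above, np.abs(X - X2.T), is the usual broadcasting trick for a 1-D pairwise distance matrix; in isolation (inputs made up):

import numpy as np

X = np.array([[0.0], [1.0], [3.0]])    # column vector of inputs
dist = np.abs(X - X.T)                 # (3, 3) matrix of |x_i - x_j|
print(dist)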
Example 13 (0 votes)
def getSubRectangles(self, ims):
    """ getSubRectangles(ims)

    Calculate the minimal rectangles that need updating each frame.
    Returns a two-element tuple containing the cropped images and a
    list of x-y positions.

    Calculating the subrectangles takes extra time, obviously. However,
    if the image sizes were reduced, the actual writing of the GIF
    goes faster. In some cases applying this method produces a GIF faster.
    """
    # Check image count
    if len(ims) < 2:
        return ims, [(0, 0) for i in ims]

    # We need numpy
    if np is None:
        raise RuntimeError("Need Numpy to calculate sub-rectangles. ")

    # Prepare
    ims2 = [ims[0]]
    xy = [(0, 0)]
    t0 = time.time()

    # Iterate over images
    prev = ims[0]
    for im in ims[1:]:
        # Get difference, sum over colors
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Get begin and end for both dimensions
        X = np.argwhere(diff.sum(0))
        Y = np.argwhere(diff.sum(1))
        # Get rect coordinates
        if X.size and Y.size:
            x0, x1 = X[0], X[-1] + 1
            y0, y1 = Y[0], Y[-1] + 1
        else:  # No change ... make it minimal
            x0, x1 = 0, 2
            y0, y1 = 0, 2
        # Cut out and store
        im2 = im[y0:y1, x0:x1]
        prev = im
        ims2.append(im2)
        xy.append((x0, y0))

    # Done
    # print('%1.2f seconds to determine subrectangles of %i images' %
    #       (time.time()-t0, len(ims2)))
    return ims2, xy
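The heart of getSubRectangles is locating the bounding box of changed pixels from np.abs(im - prev); that step on its own, with made-up frames:

import numpy as np

prev = np.zeros((8, 8))
im = prev.copy()
im[2:4, 5:7] = 1.0                      # pretend a small region changed

diff = np.abs(im - prev)                # per-pixel change magnitude
X = np.argwhere(diff.sum(0))            # columns with any change
Y = np.argwhere(diff.sum(1))            # rows with any change
x0, x1 = int(X[0][0]), int(X[-1][0] + 1)
y0, y1 = int(Y[0][0]), int(Y[-1][0] + 1)
print((x0, x1), (y0, y1))               # (5, 7) (2, 4)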
Example 14 (0 votes)
Project: theano_pyglm Source File: plot_results.py
def plot_spatiotemporal_tuning_curves(s_mean, s_std=None, s_true=None, color=None):
    if 'sharedtuningcurve_provider' in s_mean['latent']:
        tc_mean = s_mean['latent']['sharedtuningcurve_provider']
        if s_std is not None:
            tc_std = s_std['latent']['sharedtuningcurve_provider']

        # Get the stimulus responses
        tc_x = tc_mean['stim_response_x']
        tc_t = tc_mean['stim_response_t']

        # import pdb; pdb.set_trace()
        R = tc_x.shape[-1]
        assert tc_t.shape[-1] == R

        # Plot each tuning curve
        if s_true is not None:
            true_tc_x = s_true['latent']['sharedtuningcurve_provider']['stim_response_x']
            true_tc_t = s_true['latent']['sharedtuningcurve_provider']['stim_response_t']
            true_R = true_tc_x.shape[-1]
            if tc_x.ndim == 3:
                ncols = 3
            else:
                ncols = 2
        else:
            ncols = 2

        for r in range(R):
            # Plot the spatial component of the stimulus response
            plt.subplot(R, ncols, r * ncols + 1)
            if tc_x.ndim == 3:
                px_per_node = 10
                stim_x_max = np.amax(np.abs(tc_x[:, :, r]))
                plt.imshow(np.kron(tc_x[:, :, r], np.ones((px_per_node, px_per_node))),
                           vmin=-stim_x_max, vmax=stim_x_max,
                           extent=[0, 1, 0, 1],
                           cmap='RdGy',
                           interpolation='nearest')
                plt.colorbar()
                plt.title('$f_{%d}(\\Delta x)$' % r)

                if s_true is not None and r < true_R:
                    plt.subplot(R, ncols, r * ncols + 2)
                    px_per_node = 10
                    stim_x_max = np.amax(np.abs(true_tc_x[:, :, r]))
                    plt.imshow(np.kron(true_tc_x[:, :, r], np.ones((px_per_node, px_per_node))),
                               vmin=-stim_x_max, vmax=stim_x_max,
                               extent=[0, 1, 0, 1],
                               cmap='RdGy',
                               interpolation='nearest')
                    plt.colorbar()
                    plt.title('True $f_{%d}(\\Delta x)$' % r)
            elif tc_x.ndim == 2:
                plt.plot(tc_x[:, r], color=color, linestyle='-')

                # Plot the true tuning curve
                if s_true is not None and r < true_R:
                    plt.plot(true_tc_x[:, r], color='k', linestyle='-')

                # If standard deviation is given, plot that as well
                if s_std is not None:
                    stim_x_std = tc_std['stim_response_x'][:, r]
                    plt.plot(tc_x[:, r] + 2 * stim_x_std, color=color, linestyle='--')
                    plt.plot(tc_x[:, r] - 2 * stim_x_std, color=color, linestyle='--')
                plt.title('$f_{%d}(\\Delta x)$' % r)
            else:
                raise Exception('Invalid TC dimension.')

            plt.subplot(R, ncols, (r + 1) * ncols)
            plt.plot(tc_t[:, r], color=color)

            # Plot the true tuning curve
            if s_true is not None and r < true_R:
                plt.plot(true_tc_t[:, r], color='k', linestyle='-')
            if s_std is not None:
                stim_t_std = tc_std['stim_response_t'][:, r]
                plt.plot(tc_t[:, r] + 2 * stim_t_std, color=color, linestyle='--')
                plt.plot(tc_t[:, r] - 2 * stim_t_std, color=color, linestyle='--')
            plt.title('$f_{%d}(\\Delta t)$' % r)
Example 15 (0 votes)
Project: pyrf Source File: numpy_util.py
def calculate_occupied_bw(pow_data, span, occupied_perc):
    """
    Return the occupied bandwidth of a given spectrum, and occupied percentage

    :param pow_data: spectral data to be analyzed
    :type pow_data: list
    :param span: span of the given spectrum (Hz)
    :type span: float
    :param occupied_perc: percentage of the power to be measured
    :type occupied_perc: float
    :returns: float value of the occupied bandwidth
    """
    # 100% of the occupied bandwidth is the full span
    if occupied_perc >= 100.0:
        return span

    # calculate center bin
    pow_list = list(pow_data)
    total_points = len(pow_list)
    mid_point = int(total_points / 2)

    # the number of segments into which to separate the spectrum
    num_divisions = 1500

    # offset the value of the spectrum so the minimum is 0, to prevent
    # negative numbers from offsetting the channel power calculation
    min_offset = np.abs(min(pow_list))
    pow_list = (pow_list + min_offset) * 5

    # calculate total channel power
    total_channel_power = calculate_channel_power(pow_list)
    perc_power = (occupied_perc / 100.0) * total_channel_power

    bisect_val = 1
    prev_bisect = -1

    # calculate channel power at center point, while incrementing the bisect
    # value on each loop to increase the span of the calculation
    while True:
        # print bisect_val, total_points
        section_power = calculate_channel_power(pow_list[mid_point - bisect_val:mid_point + bisect_val])
        if section_power > perc_power:
            break
        if section_power < perc_power:
            prev_bisect = bisect_val
            bisect_val = bisect_val + max(1, int(total_points / num_divisions))

    # calculate occupied bandwidth
    occupied_bw = (float(2 * bisect_val) / float(total_points)) * span
    return occupied_bw
Example 16 (0 votes)
Project: qgisSpaceSyntaxToolkit Source File: current_flow_betweenness_subset.py
def current_flow_betweenness_centrality_subset(G, sources, targets,
                                               normalized=True,
                                               weight='weight',
                                               dtype=float, solver='lu'):
    r"""Compute current-flow betweenness centrality for subsets of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
      A NetworkX graph
    sources : list of nodes
      Nodes to use as sources for current
    targets : list of nodes
      Nodes to use as sinks for current
    normalized : bool, optional (default=True)
      If True the betweenness values are normalized by b = b/(n-1)(n-2), where
      n is the number of nodes in G.
    weight : string or None, optional (default='weight')
      Key for edge data used as the edge weight.
      If None, then use 1 as each edge weight.
    dtype : data type (float)
      Default data type for internal matrices.
      Set to np.float32 for lower memory consumption.
    solver : string (default='lu')
      Type of linear solver to use for computing the flow matrix.
      Options are "full" (uses most memory), "lu" (recommended), and
      "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
       Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
    time [1]_, where `I(n-1)` is the time needed to compute the
    inverse Laplacian. For a full matrix this is `O(n^3)`, but using
    sparse methods you can achieve `O(nm{\sqrt k})`, where `k` is the
    Laplacian matrix condition number.

    The space required is `O(nw)`, where `w` is the width of the sparse
    Laplacian matrix. The worst case is `w=n` for `O(n^2)`.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    from networkx.utils import reverse_cuthill_mckee_ordering
    try:
        import numpy as np
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy '
                          'http://scipy.org/')
    try:
        import scipy
    except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy '
                          'http://scipy.org/')
    if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() '
                               'not defined for digraphs.')
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    n = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # make a copy with integer labels according to rcm ordering
    # this could be done without a copy if we really wanted to
    mapping = dict(zip(ordering, range(n)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = dict.fromkeys(H, 0.0)  # b[v]=0 for v in H
    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype,
                                       solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                betweenness[s] += 0.5 * np.abs(row[i] - row[j])
                betweenness[t] += 0.5 * np.abs(row[i] - row[j])
    if normalized:
        nb = (n - 1.0) * (n - 2.0)  # normalization factor
    else:
        nb = 2.0
    for v in H:
        betweenness[v] = betweenness[v] / nb + 1.0 / (2 - n)
    return dict((ordering[k], v) for k, v in betweenness.items())
Example 17 (0 votes)
def getSubRectangles(self, ims):
    """ getSubRectangles(ims)

    Calculate the minimal rectangles that need updating each frame.
    Returns a two-element tuple containing the cropped images and a
    list of x-y positions.

    Calculating the subrectangles takes extra time, obviously. However,
    if the image sizes were reduced, the actual writing of the GIF
    goes faster. In some cases applying this method produces a GIF faster.
    """
    # Check image count
    if len(ims) < 2:
        return ims, [(0, 0) for i in ims]

    # We need numpy
    if np is None:
        raise RuntimeError("Need Numpy to calculate sub-rectangles. ")

    # Prepare
    ims2 = [ims[0]]
    xy = [(0, 0)]
    t0 = time.time()

    # Iterate over images
    prev = ims[0]
    for im in ims[1:]:
        # Get difference, sum over colors
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Get begin and end for both dimensions
        X = np.argwhere(diff.sum(0))
        Y = np.argwhere(diff.sum(1))
        # Get rect coordinates
        if X.size and Y.size:
            x0, x1 = int(X[0][0]), int(X[-1][0] + 1)
            y0, y1 = int(Y[0][0]), int(Y[-1][0] + 1)
        else:  # No change ... make it minimal
            x0, x1 = 0, 2
            y0, y1 = 0, 2
        # Cut out and store
        im2 = im[y0:y1, x0:x1]
        prev = im
        ims2.append(im2)
        xy.append((x0, y0))

    # Done
    # print('%1.2f seconds to determine subrectangles of %i images' %
    #       (time.time()-t0, len(ims2)))
    return ims2, xy
Example 18 (0 votes)
Project: statsmodels Source File: rootfinding.py
def brentq_expanding(func, low=None, upp=None, args=(), xtol=1e-5,
                     start_low=None, start_upp=None, increasing=None,
                     max_it=100, maxiter_bq=100, factor=10,
                     full_output=False):
    '''find the root of a function in one variable by expanding and brentq

    Assumes function ``func`` is monotonic.

    Parameters
    ----------
    func : callable
        function for which we find the root ``x`` such that ``func(x) = 0``
    low : float or None
        lower bound for brentq
    upp : float or None
        upper bound for brentq
    args : tuple
        optional additional arguments for ``func``
    xtol : float
        parameter x tolerance given to brentq
    start_low : float (negative) or None
        starting bound for expansion with decreasing ``x``. It needs to be
        negative. If None, then it is set to -1.
    start_upp : float (positive) or None
        starting bound for expansion with increasing ``x``. It needs to be
        positive. If None, then it is set to 1.
    increasing : bool or None
        If None, then the function is evaluated at the initial bounds to
        determine whether the function is increasing or not. If increasing is
        True (False), then it is assumed that the function is monotonically
        increasing (decreasing).
    max_it : int
        maximum number of expansion steps.
    maxiter_bq : int
        maximum number of iterations of brentq.
    factor : float
        expansion factor for step of shifting the bounds interval, default is
        10.
    full_output : bool, optional
        If full_output is False, the root is returned. If full_output is True,
        the return value is (x, r), where x is the root, and r is a
        RootResults object.

    Returns
    -------
    x : float
        root of the function, value at which ``func(x) = 0``.
    info : RootResult (optional)
        returned if ``full_output`` is True.
        attributes:

        - start_bounds : starting bounds for expansion stage
        - brentq_bounds : bounds used with ``brentq``
        - iterations_expand : number of iterations in expansion stage
        - converged : True if brentq converged.
        - flag : return status, 'converged' if brentq converged
        - function_calls : number of function calls by ``brentq``
        - iterations : number of iterations in ``brentq``

    Notes
    -----
    If increasing is None, then whether the function is monotonically
    increasing or decreasing is inferred from evaluating the function at the
    initial bounds. This can fail if there is numerically no variation in the
    data in this range. In this case, using different starting bounds or
    directly specifying ``increasing`` can make it possible to move the
    expansion in the right direction.
    '''
    # TODO: rtol is missing, what does it do?
    left, right = low, upp  # alias

    # start_upp first because of possible sl = -1 > upp
    if upp is not None:
        su = upp
    elif start_upp is not None:
        if start_upp < 0:
            raise ValueError('start_upp needs to be positive')
        su = start_upp
    else:
        su = 1.

    if low is not None:
        sl = low
    elif start_low is not None:
        if start_low > 0:
            raise ValueError('start_low needs to be negative')
        sl = start_low
    else:
        sl = min(-1., su - 1.)

    # need sl < su
    if upp is None:
        su = max(su, sl + 1.)

    # increasing or not ?
    if ((low is None) or (upp is None)) and increasing is None:
        assert sl < su  # check during development
        f_low = func(sl, *args)
        f_upp = func(su, *args)

        # special case for F-distribution (symmetric around zero for effect
        # size)
        # chisquare also takes an indefinite time (didn't wait see if it
        # returns)
        if np.max(np.abs(f_upp - f_low)) < 1e-15 and sl == -1 and su == 1:
            sl = 1e-8
            f_low = func(sl, *args)
            increasing = (f_low < f_upp)

        if DEBUG:
            print('symm', sl, su, f_low, f_upp)

        # possibly func returns nan
        delta = su - sl
        if np.isnan(f_low):
            # try just 3 points to find ``increasing``
            # don't change sl because brentq can handle one nan bound
            for fraction in [0.25, 0.5, 0.75]:
                sl_ = sl + fraction * delta
                f_low = func(sl_, *args)
                if not np.isnan(f_low):
                    break
            else:
                raise ValueError('could not determine whether function is ' +
                                 'increasing based on starting interval.' +
                                 '\nspecify increasing or change starting ' +
                                 'bounds')
        if np.isnan(f_upp):
            for fraction in [0.25, 0.5, 0.75]:
                su_ = su + fraction * delta
                f_upp = func(su_, *args)
                if not np.isnan(f_upp):
                    break
            else:
                raise ValueError('could not determine whether function is ' +
                                 'increasing based on starting interval.' +
                                 '\nspecify increasing or change starting ' +
                                 'bounds')

        increasing = (f_low < f_upp)

    if DEBUG:
        print('low, upp', low, upp, func(sl, *args), func(su, *args))
        print('increasing', increasing)
        print('sl, su', sl, su)

    if not increasing:
        sl, su = su, sl
        left, right = right, left

    n_it = 0
    if left is None and sl != 0:
        left = sl
        while func(left, *args) > 0:
            # condition is also false if func returns nan
            right = left
            left *= factor
            if n_it >= max_it:
                break
            n_it += 1
        # left is now such that func(left) < q
    if right is None and su != 0:
        right = su
        while func(right, *args) < 0:
            left = right
            right *= factor
            if n_it >= max_it:
                break
            n_it += 1
        # right is now such that func(right) > q

    if n_it >= max_it:
        # print('Warning: max_it reached')
        # TODO: use Warnings, Note: brentq might still work even with max_it
        f_low = func(sl, *args)
        f_upp = func(su, *args)
        if np.isnan(f_low) and np.isnan(f_upp):
            # can we still get here?
            raise ValueError('max_it reached' +
                             '\nthe function values at both bounds are NaN' +
                             '\nchange the starting bounds, set bounds ' +
                             'or increase max_it')

    res = optimize.brentq(func, left, right, args=args,
                          xtol=xtol, maxiter=maxiter_bq,
                          full_output=full_output)
    if full_output:
        val = res[0]
        info = res[1]
        info.iterations_expand = n_it
        info.start_bounds = (sl, su)
        info.brentq_bounds = (left, right)
        info.increasing = increasing
        return val, info
    else:
        return res
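brentq_expanding ultimately delegates to scipy.optimize.brentq once a bracket is found; for reference, the underlying call on a fixed bracket looks like this (the function is chosen purely for illustration):

from scipy import optimize

f = lambda x: x**3 - 2.0                 # monotonic, root at 2**(1/3)
root = optimize.brentq(f, 0.0, 2.0, xtol=1e-5)
print(root)                              # ~1.2599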
Example 19 (0 votes)
def __init__(self, x, y, metric=("supremum", "supremum"),
normalize=False, lag=0, silence_level=0, **kwds):
"""
Initialize an instance of JointRecurrenceNetwork.
.. note::
For a joint recurrence network, time series x and y need to have the
same length!
Creates an embedding of the given time series x and y, calculates a
joint recurrence plot from the embedding and then creates a Network
object from the joint recurrence plot, interpreting the joint
recurrence matrix as the adjacency matrix of an undirected complex
network.
Either recurrence thresholds ``threshold``/``threshold_std`` or
recurrence rates ``recurrence_rate`` have to be given as keyword
arguments.
Embedding is only supported for scalar time series. If embedding
dimension ``dim`` and delay ``tau`` are **both** given as keyword
arguments, embedding is applied. Multidimensional time series are
processed as is by default.
:type x: 2D Numpy array (time, dimension)
:arg x: The time series x to be analyzed, can be scalar or
multi-dimensional.
:type y: 2D Numpy array (time, dimension)
:arg y: The time series y to be analyzed, can be scalar or
multi-dimensional.
:type metric: tuple of string
:arg metric: The metric for measuring distances in phase space
("manhattan", "euclidean", "supremum"). Give separately for each
time series.
:type normalize: tuple of bool
:arg normalize: Decide whether to normalize the time series to zero
mean and unit standard deviation. Give separately for each time
series.
:arg number lag: To create a delayed version of the JRP.
:arg number silence_level: Inverse level of verbosity of the object.
:type threshold: tuple of number
:keyword threshold: The recurrence threshold keyword for generating the
recurrence plot using a fixed threshold. Give separately for each
time series.
:type threshold_std: tuple of number
:keyword threshold_std: The recurrence threshold keyword for generating
the recurrence plot using a fixed threshold in units of the time
series' STD. Give separately for each time series.
:type recurrence_rate: tuple of number
:keyword recurrence_rate: The recurrence rate keyword for generating
the recurrence plot using a fixed recurrence rate. Give separately
for each time series.
:type dim: tuple of number
:keyword dim: The embedding dimension. Give separately for each time
series.
:type tau: tuple of number
:keyword tau: The embedding delay. Give separately for each time
series.
"""
# Check for consistency
if np.abs(lag) < x.shape[0]:
if x.shape[0] == y.shape[0]:
# Initialize the underlying RecurrencePlot object
JointRecurrencePlot.__init__(self, x, y, metric, normalize,
lag, silence_level, **kwds)
# Set diagonal of JR to zero to avoid self-loops in the joint
# recurrence network
A = self.JR - np.eye((self.N-np.abs(lag)), dtype="int8")
# Create a Network object interpreting the recurrence matrix
# as the graph adjacency matrix. Joint recurrence networks
# are undirected by definition.
Network.__init__(self, A, directed=False,
silence_level=silence_level)
else:
raise ValueError(
"Both time series x and y need to have the same length!")
else:
raise ValueError(
"Delay value (lag) must not exceed length of time series!")
0
Example 20
Project: RLScore Source File: test_kronsvm.py
def test_kronsvm(self):
regparam = 0.01
pyrandom.seed(100)
numpyrandom.seed(100)
X1_train, X2_train, Y_train, rows, cols, X1_test, X2_test, Y_test = load_data(primal=True)
class PrimalCallback(object):
def __init__(self):
self.iter = 0
def callback(self, learner):
X1 = learner.resource_pool['X1']
X2 = learner.resource_pool['X2']
rowind = learner.label_row_inds
colind = learner.label_col_inds
w = learner.W.ravel(order='F')
loss = primal_svm_objective(w, X1, X2, Y_train, rowind, colind, regparam)
print "iteration", self.iter
print "Primal SVM loss", loss
self.iter += 1
def finished(self, learner):
pass
params = {}
params["X1"] = X1_train
params["X2"] = X2_train
params["Y"] = Y_train
params["label_row_inds"] = rows
params["label_col_inds"] = cols
params["maxiter"] = 100
params["inneriter"] = 100
params["regparam"] = regparam
params['callback'] = PrimalCallback()
learner = KronSVM(**params)
P_linear = learner.predictor.predict(X1_test, X2_test)
pyrandom.seed(100)
numpyrandom.seed(100)
K1_train, K2_train, Y_train, rows, cols, K1_test, K2_test, Y_test = load_data(primal=False)
class DualCallback(object):
def __init__(self):
self.iter = 0
self.atol = None
def callback(self, learner):
K1 = learner.resource_pool['K1']
K2 = learner.resource_pool['K2']
rowind = learner.label_row_inds
colind = learner.label_col_inds
loss = dual_svm_objective(learner.A, K1, K2, Y_train, rowind, colind, regparam)
#loss = learner.bestloss
print "iteration", self.iter
print "Dual SVM loss", loss
#model = learner.predictor
self.iter += 1
def finished(self, learner):
pass
params = {}
params["K1"] = K1_train
params["K2"] = K2_train
params["Y"] = Y_train
params["label_row_inds"] = rows
params["label_col_inds"] = cols
params["maxiter"] = 100
params["inneriter"] = 100
params["regparam"] = regparam
params['callback'] = DualCallback()
learner = KronSVM(**params)
P_dual = learner.predictor.predict(K1_test, K2_test)
print(np.max(1. - np.abs(P_linear / P_dual)))
assert np.max(1. - np.abs(P_linear / P_dual)) < 0.001
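The final assertion compares the primal and dual predictions through their maximum relative disagreement. A tiny reproduction of that check, with synthetic vectors standing in for the two predictors:

import numpy as np

rng = np.random.default_rng(0)
p_linear = rng.normal(size=50)
p_dual = p_linear * (1 + 1e-5 * rng.normal(size=50))  # nearly identical

rel_err = np.max(1. - np.abs(p_linear / p_dual))
print(rel_err)
assert rel_err < 0.001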
0
Example 21
def errorbar(self, **kwargs):
"""errorbar plot for a single time series with errors.
Set *columns* keyword to select [x, y, dy] or [x, y, dx, dy],
e.g. ``columns=[0,1,2]``. See :meth:`XVG.plot` for
details. Only a single timeseries can be plotted and the user
needs to select the appropriate columns with the *columns*
keyword.
By default, the data are decimated (see :meth:`XVG.plot`) to
*maxpoints* = 10000 points by averaging the data in *maxpoints*
bins.
x,y,dx,dy data can be plotted with error bars in the x- and
y-dimension (use *filled* = ``False``).
For x,y,dy use *filled* = ``True`` to fill the region between
y±dy. *fill_alpha* determines the transparency of the fill
color. *filled* = ``False`` will draw lines for the error
bars. Additional keywords are passed to
:func:`pylab.errorbar`.
By default, the errors are decimated by plotting the 5% and
95% percentile of the data in each bin. The percentile can be
changed with the *percentile* keyword; e.g. *percentile* = 1
will plot the 1% and 99% percentiles (as will *percentile* =
99).
The *error_method* keyword can be used to compute errors as
the root mean square sum (*error_method* = "rms") across each
bin instead of percentiles ("percentile"). The value of the
keyword *demean* is applied to the decimation of error data
alone.
.. SeeAlso::
:meth:`XVG.plot` lists keywords common to both methods.
"""
import pylab
color = kwargs.pop('color', 'black')
filled = kwargs.pop('filled', True)
fill_alpha = kwargs.pop('fill_alpha', 0.2)
kwargs.setdefault('capsize', 0)
kwargs.setdefault('elinewidth', 1)
kwargs.setdefault('ecolor', color)
kwargs.setdefault('alpha', 0.3)
kwargs.setdefault('fmt', None)
columns = kwargs.pop('columns', Ellipsis) # slice for everything
maxpoints = kwargs.pop('maxpoints', self.maxpoints_default)
transform = kwargs.pop('transform', lambda x: x) # default is identity transformation
method = kwargs.pop('method', "mean")
if method != "mean":
raise NotImplementedError("For errors only method == 'mean' is supported.")
error_method = kwargs.pop('error_method', "percentile") # can also use 'rms' and 'error'
percentile = numpy.abs(kwargs.pop('percentile', 95.))
demean = kwargs.pop('demean', False)
# order: (decimate/smooth o slice o transform)(array)
try:
data = numpy.asarray(transform(self.array))[columns]
except IndexError:
raise MissingDataError("columns {0!r} are not suitable to index the transformed array, possibly not eneough data".format(columns))
if data.shape[-1] == 0:
raise MissingDataError("There is no data to be plotted.")
a = numpy.zeros((data.shape[0], maxpoints), dtype=numpy.float64)
a[0:2] = self.decimate("mean", data[0:2], maxpoints=maxpoints)
error_data = numpy.vstack((data[0], data[2:]))
if error_method == "percentile":
if percentile > 50:
upper_per = percentile
lower_per = 100 - percentile
else:
upper_per = 100 - percentile
lower_per = percentile
# demean generally does not make sense with the percentiles (but for analysing
# the regularised data itself we use this as a flag --- see below!)
upper = a[2:] = self.decimate("percentile", error_data, maxpoints=maxpoints,
per=upper_per, demean=False)[1:]
lower = self.decimate("percentile", error_data, maxpoints=maxpoints,
per=lower_per, demean=False)[1:]
else:
a[2:] = self.decimate(error_method, error_data, maxpoints=maxpoints, demean=demean)[1:]
lower = None
# now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan)
ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a)))
if lower is not None:
mlower = numpy.ma.MaskedArray(lower, mask=numpy.logical_not(numpy.isfinite(lower)))
# finally plot
X = ma[0] # abscissa set separately
Y = ma[1]
try:
kwargs['yerr'] = ma[3]
kwargs['xerr'] = ma[2]
except IndexError:
try:
kwargs['yerr'] = ma[2]
except IndexError:
raise TypeError("Either too few columns selected or data does not have a error column")
if filled:
# can only plot dy
if error_method == "percentile":
if demean:
# signal that we are looking at percentiles of an observable and not error
y1 = mlower[-1]
y2 = kwargs['yerr']
else:
# percentiles of real errors (>0)
y1 = Y - mlower[-1]
y2 = Y + kwargs['yerr']
else:
y1 = Y - kwargs['yerr']
y2 = Y + kwargs['yerr']
pylab.fill_between(X, y1, y2, color=color, alpha=fill_alpha)
else:
if error_method == "percentile":
# errorbars extend to different lengths;
if demean:
kwargs['yerr'] = numpy.vstack((mlower[-1], kwargs['yerr']))
else:
kwargs['yerr'] = numpy.vstack((Y - mlower[-1], Y + kwargs['yerr']))
try:
# xerr only makes sense when the data is a real
# error so we don't even bother with demean=?
kwargs['xerr'] = numpy.vstack((X - mlower[0], X + kwargs['xerr']))
except (KeyError, IndexError):
pass
pylab.errorbar(X, Y, **kwargs)
# clean up args for plot
for kw in "yerr", "xerr", "capsize", "ecolor", "elinewidth", "fmt":
kwargs.pop(kw, None)
kwargs['alpha'] = 1.0
pylab.plot(X, Y, color=color, **kwargs)
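The percentile handling above folds negative inputs with numpy.abs and mirrors values above 50, so that percentile=1 and percentile=99 select the same band. A short sketch of just that logic on toy data:

import numpy as np

percentile = np.abs(-95.0)  # negative input is folded positive
if percentile > 50:
    upper_per, lower_per = percentile, 100 - percentile
else:
    upper_per, lower_per = 100 - percentile, percentile

data = np.random.default_rng(1).normal(size=1000)
lower, upper = np.percentile(data, [lower_per, upper_per])
print(lower_per, upper_per, lower, upper)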
0
Example 22
Project: PYPOWER Source File: t_is.py
def t_is(got, expected, prec=5, msg=''):
"""Tests if two matrices are identical to some tolerance.
Increments the global test count and if the maximum difference
between corresponding elements of C{got} and C{expected} is less
than 10**(-C{prec}) then it increments the passed tests count,
otherwise increments the failed tests count. Prints 'ok' or 'not ok'
followed by the MSG, unless the global variable t_quiet is true.
Intended to be called between calls to C{t_begin} and C{t_end}.
@author: Ray Zimmerman (PSERC Cornell)
"""
if isinstance(got, int) or isinstance(got, float):
got = array([got], float)
elif isinstance(got, list) or isinstance(got, tuple):
got = array(got, float)
if isinstance(expected, int) or isinstance(expected, float):
expected = array([expected], float)
elif isinstance(expected, list) or isinstance(expected, tuple):
expected = array(expected, float)
if (got.shape == expected.shape) or (expected.shape == (0,)):
got_minus_expected = got - expected
max_diff = abs(got_minus_expected).max()
condition = ( max_diff < 10**(-prec) )
else:
condition = False
max_diff = 0
t_ok(condition, msg)
if (not condition and not TestGlobals.t_quiet):
s = ''
if max_diff != 0:
idx = nonzero(abs(got_minus_expected) >= 10**(-prec))
if len(idx) == 1: # 1D array
idx = (idx[0], zeros( len(got_minus_expected) ))
i, j = idx
k = i + (j-1) * expected.shape[0]
got = got.flatten()
expected = expected.flatten()
got_minus_expected = got_minus_expected.flatten()
kk = argmax( abs(got_minus_expected[ k.astype(int) ]) )
s += ' row col got expected got - exp\n'
s += '------- ------ ---------------- ---------------- ----------------'
for u in range(len(i)):
s += '\n%6d %6d %16g %16g %16g' % \
(i[u], j[u], got[k[u]], expected[k[u]], got_minus_expected[k[u]])
if u == kk:
s += ' *'
s += '\nmax diff @ (%d,%d) = %g > allowed tol of %g\n\n' % \
(i[kk], j[kk], max_diff, 10**(-prec))
else:
s += ' dimension mismatch:\n'
if len(got.shape) == 1: # 1D array
s += ' got: %d\n' % got.shape
else:
s += ' got: %d x %d\n' % got.shape
if len(expected.shape) == 1: # 1D array
s += ' expected: %d\n' % expected.shape
else:
s += ' expected: %d x %d\n' % expected.shape
print(s)
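Stripped of the reporting bookkeeping, the core test is a maximum-absolute-difference comparison against 10**(-prec). A minimal version (function name and inputs are illustrative):

import numpy as np

def matrices_match(got, expected, prec=5):
    got = np.asarray(got, float)
    expected = np.asarray(expected, float)
    if got.shape != expected.shape:
        return False
    # identical to within 10**(-prec), elementwise
    return np.max(np.abs(got - expected)) < 10.0**(-prec)

print(matrices_match([[1.0, 2.0]], [[1.0, 2.000001]]))  # True
print(matrices_match([[1.0, 2.0]], [[1.0, 2.1]]))       # False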
0
Example 23
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
Calculating the subrectangles takes extra time. However, because the
image sizes are reduced, the actual writing of the GIF goes faster;
in some cases applying this method produces the final GIF faster overall.
"""
# Check image count
if len(ims) < 2:
return ims, [(0,0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0,0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors
diff = np.abs(im-prev)
if diff.ndim==3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1]+1
y0, y1 = Y[0], Y[-1]+1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1,x0:x1]
prev = im
ims2.append(im2)
xy.append((x0,y0))
# Done
#print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)) )
return ims2, xy
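The subrectangle detection reduces to: absolute frame difference, collapse to per-row and per-column sums, and take the first and last nonzero positions. A compact demonstration on a synthetic pair of frames (note that unsigned integer frames should be cast to a signed dtype before subtracting):

import numpy as np

prev = np.zeros((8, 8))
im = prev.copy()
im[2:5, 3:6] = 1.0  # the only changed patch

diff = np.abs(im - prev)
X = np.argwhere(diff.sum(0))  # columns that changed
Y = np.argwhere(diff.sum(1))  # rows that changed
x0, x1 = int(X.min()), int(X.max()) + 1
y0, y1 = int(Y.min()), int(Y.max()) + 1
print((y0, y1, x0, x1))  # (2, 5, 3, 6)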
0
Example 24
Project: OpenPNM Source File: __Voronoi__.py
def compress_geometry(self, factor=None, preserve_fibres=False):
r"""
Adjust the vertices and recalculate geometry. Save fibre voxels before
and after, then put them back into the image to preserve fibre volume.
Shape will not be conserved. Also makes adjustments to the pore and throat
properties, given an approximate volume change from adding the missing
fibre voxels back in.
Parameters
----------
factor : array_like
List of 3 values, [x,y,z]; two must be one and the other must be
between zero and one, representing the fraction of the domain height
to compress to.
preserve_fibres : boolean
If the fibre image has been generated and used to calculate pore volumes
then preserve fibre volume artificially by adjusting pore and throat sizes
"""
if factor is None:
logger.warning('Please supply a compression factor in the form ' +
'[1,1,CR], with CR < 1')
return
if sp.any(sp.asarray(factor) > 1):
logger.warning('The supplied compression factor is greater than 1, ' +
'the method is not tested for stretching')
return
# uncompressed number of fibre voxels in each pore
fvu = self["pore.fibre_voxels"]
r1 = self["pore.diameter"]/2
# Most likely boundary pores - prevents divide by zero (vol change zero)
r1[r1 == 0] = 1
vo.scale(network=self._net, scale_factor=factor, preserve_vol=False)
self.models.regenerate()
if preserve_fibres and self._voxel_vol:
# compressed number of fibre voxels in each pore
fvc = self["pore.fibre_voxels"]
# amount to adjust pore volumes by
# (based on 1 micron cubed voxels for volume calc)
vol_diff = (fvu-fvc)*1e-18
# don't account for positive volume changes
vol_diff[vol_diff < 0] = 0
pv1 = self["pore.volume"].copy()
self["pore.volume"] -= vol_diff
self["pore.volume"][self["pore.volume"] < 0.0] = 0.0
pv2 = self["pore.volume"].copy()
"Now need to adjust the pore diameters"
from scipy.special import cbrt
rdiff = cbrt(3*np.abs(vol_diff)/(4*sp.pi))
self["pore.diameter"] -= 2*rdiff*sp.sign(vol_diff)
"Now as a crude approximation adjust all the throat areas and diameters"
"by the same ratio as the increase in a spherical pore surface area"
spd = np.ones(len(fvu))
spd[fvu > 0] = (pv2[fvu > 0]/pv1[fvu > 0])**(2/3)
spd[spd > 1.0] = 1.0
tconns = self._net["throat.conns"][self.map_throats(self._net,
self.throats())]
# Need to work out the average volume change for the two pores
# connected by each throat Boundary pores will be connected to a
# throat outside this geometry if there are multiple geoms so get mapping
mapping = self._net.map_pores(self, self._net.pores(),
return_mapping=True)
source = list(mapping['source'])
target = list(mapping['target'])
ta_diff_avg = np.ones(len(tconns))
for i in np.arange(len(tconns)):
np1, np2 = tconns[i]
if np1 in source and np2 in source:
gp1 = target[source.index(np1)]
gp2 = target[source.index(np2)]
ta_diff_avg[i] = (spd[gp1] + spd[gp2]) / 2
elif np1 in source:
gp1 = target[source.index(np1)]
ta_diff_avg[i] = spd[gp1]
elif np2 in source:
gp2 = target[source.index(np2)]
ta_diff_avg[i] = spd[gp2]
self["throat.area"] *= ta_diff_avg
self["throat.area"][self["throat.area"] < 0] = 0
self["throat.diameter"] = 2*sp.sqrt(self["throat.area"]/sp.pi)
self["throat.indiameter"] *= sp.sqrt(ta_diff_avg)
else:
logger.warning('Fibre volume will not be conserved under compression')
# Remove pores with zero throats
topo.trim_occluded_throats(self._net)
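The diameter adjustment above converts a signed volume change into a sphere-equivalent radius change via a cube root. The same arithmetic with numpy only (np.cbrt stands in for scipy.special.cbrt; the volumes are invented):

import numpy as np

vol_diff = np.array([-2.0e-18, 0.0, 5.0e-19])  # signed volume changes, m^3
# V = 4/3 * pi * r^3  =>  r = cbrt(3 V / (4 pi))
rdiff = np.cbrt(3.0 * np.abs(vol_diff) / (4.0 * np.pi))
diameter_shift = -2.0 * rdiff * np.sign(vol_diff)
print(rdiff)
print(diameter_shift)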
0
Example 25
Project: python-acoustics Source File: reflection.py
def plot_reflection_factor(self, filename=None):
"""
Plot reflection factor.
"""
if self.frequency is None:
raise ValueError("No frequency specified.")
if self.angle is None:
raise ValueError("No angle specified.")
try:
n_f = len(self.frequency)
except TypeError:
n_f = 1
try:
n_a = len(self.angle)
except TypeError:
n_a = 1
if n_f==1 and n_a==1:
raise ValueError("Either frequency or angle needs to be a vector.")
elif n_f==1 or n_a==1:
if n_f==1 and n_a>1:# Show R as function of angle for a single frequency.
xlabel = r"$\theta$ in degrees"
elif n_f>1 and n_a==1:# Show R as function of frequency for a single angle.
xlabel = r"$f$ in Hz"
R = self.reflection_factor
fig = plt.figure()
ax0 = fig.add_subplot(211)
ax0.set_title("Magnitude of reflection factor")
ax0.semilogx(self.frequency, np.abs(R))
ax0.set_xlabel(xlabel)
ax0.set_ylabel(r'$\left|R\right|$')
ax0.grid()
ax1 = fig.add_subplot(212)
ax1.set_title("Phase of reflection factor")
ax1.semilogx(self.frequency, np.angle(R))
ax1.set_xlabel(xlabel)
ax1.set_ylabel(r'$\angle R$')
ax1.grid()
elif n_f>1 and n_a>1:# Show 3D or pcolor
R = self.reflection_factor
fig = plt.figure()
#grid = AxesGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=0.1, cbar_mode='each', cbar_location='right')
ax0 = fig.add_subplot(211)
#ax0 = grid[0]
ax0.set_title("Magnitude of reflection factor")
ax0.pcolormesh(self.frequency, self.angle * 180.0 / np.pi, np.abs(R))
#ax0.pcolor(self.angle, self.frequency, np.abs(R))
#ax0.set_xlabel(xlabel)
#ax0.set_ylabel(r'$\left|R\right|$')
ax0.grid()
ax1 = fig.add_subplot(212)
#ax1 = grid[1]
ax1.set_title("Phase of reflection factor")
ax1.pcolormesh(self.frequency, self.angle * 180.0 / np.pi, np.angle(R))
#ax1.pcolor(self.angle, self.frequency, np.angle(R))
#ax0.set_xlabel(xlabel)
#ax0.set_ylabel(r'$\angle R$')
ax1.grid()
else:
raise RuntimeError("Oops...")
#plt.tight_layout()
if filename:
fig.savefig(filename, transparent=True)
else:
return fig
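The two-panel magnitude/phase layout boils down to plotting np.abs(R) and np.angle(R) against frequency. A self-contained sketch with a toy complex reflection factor (matplotlib assumed available):

import numpy as np
import matplotlib.pyplot as plt

f = np.logspace(1, 4, 200)                   # frequency axis, Hz
R = 0.9 * np.exp(-1j * 2 * np.pi * f / 5e3)  # invented complex reflection factor

fig, (ax0, ax1) = plt.subplots(2, 1)
ax0.semilogx(f, np.abs(R))    # magnitude
ax1.semilogx(f, np.angle(R))  # phase
fig.savefig("reflection.png", transparent=True)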
0
Example 26
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
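The heart of the metric is one line: a (possibly weighted) average of absolute residuals. Reproducing the docstring's first example without the scikit-learn wrapper:

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
mae = np.average(np.abs(y_pred - y_true))
print(mae)  # 0.5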
0
Example 27
Project: SHARPpy Source File: skew.py
def dragLine(self, e):
trans_x = (e.x() - self.originx) * self.scale
trans_y = (e.y() - self.originy) * self.scale
tmpc = self.pix_to_tmpc(trans_x, trans_y)
if self.drag_idx is None:
pres = self.pix_to_pres(trans_y)
idx = np.argmin(np.abs(pres - self.pres))
while self.tmpc.mask[idx] or self.dwpc.mask[idx]:
idx += 1
self.drag_idx = idx
else:
idx = self.drag_idx
prof_name, drag_prof = self.drag_prof
if prof_name == 'tmpc':
tmpc = max(tmpc, self.dwpc[idx])
elif prof_name == 'dwpc':
tmpc = min(tmpc, self.tmpc[idx])
# Figure out the bounds of the box we need to update
if idx == 0 or self.pres.mask[idx - 1] or self.tmpc.mask[idx - 1] or self.dwpc.mask[idx - 1]:
lb_p, ub_p = self.pres[idx], self.pres[idx + 1]
t_points = [ (tmpc, self.pres[idx]), (drag_prof[idx + 1], self.pres[idx + 1]) ]
elif idx == self.pres.shape[0] - 1:
lb_p, ub_p = self.pres[idx - 1], self.pres[idx]
t_points = [ (drag_prof[idx - 1], self.pres[idx - 1]), (tmpc, self.pres[idx]) ]
else:
lb_p, ub_p = self.pres[idx - 1], self.pres[idx + 1]
t_points = [ (drag_prof[idx - 1], self.pres[idx - 1]), (tmpc, self.pres[idx]), (drag_prof[idx + 1], self.pres[idx + 1]) ]
x_points = [ self.tmpc_to_pix(*pt) for pt in t_points ]
lb_x = self.originx + min(x_points) / self.scale
ub_x = self.originx + max(x_points) / self.scale
lb_y = self.originy + self.pres_to_pix(ub_p) / self.scale
ub_y = self.originy + self.pres_to_pix(lb_p) / self.scale
qp = QtGui.QPainter()
qp.begin(self.plotBitMap)
# If we have something saved, restore it
if self.saveBitMap is not None:
origin, size, bmap = self.saveBitMap
qp.drawPixmap(origin, bmap, QRect(QPoint(0, 0), size))
# Capture the new portion of the image to save
origin = QPoint(max(lb_x - self.drag_buffer, 0), max(lb_y - self.drag_buffer, 0))
size = QSize(ub_x - lb_x + 2 * self.drag_buffer, ub_y - lb_y + 2 * self.drag_buffer)
bmap = self.plotBitMap.copy(QRect(origin, size))
self.saveBitMap = (origin, size, bmap)
# Draw lines
if prof_name == 'dwpc':
color = QtGui.QColor("#019B06")
else:
color = QtGui.QColor("#9F0101")
pen = QtGui.QPen(color, 1, QtCore.Qt.SolidLine)
qp.setPen(pen)
if idx != 0 and not self.pres.mask[idx - 1] and not self.tmpc.mask[idx - 1] and not self.dwpc.mask[idx - 1]:
x1 = self.originx + self.tmpc_to_pix(drag_prof[idx - 1], self.pres[idx - 1]) / self.scale
x2 = self.originx + self.tmpc_to_pix(tmpc, self.pres[idx]) / self.scale
y1 = self.originy + self.pres_to_pix(self.pres[idx - 1]) / self.scale
y2 = self.originy + self.pres_to_pix(self.pres[idx]) / self.scale
qp.drawLine(x1, y1, x2, y2)
if idx != self.pres.shape[0] - 1:
x1 = self.originx + self.tmpc_to_pix(tmpc, self.pres[idx]) / self.scale
x2 = self.originx + self.tmpc_to_pix(drag_prof[idx + 1], self.pres[idx + 1]) / self.scale
y1 = self.originy + self.pres_to_pix(self.pres[idx]) / self.scale
y2 = self.originy + self.pres_to_pix(self.pres[idx + 1]) / self.scale
qp.drawLine(x1, y1, x2, y2)
qp.end()
self.update()
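Snapping the mouse position to the nearest pressure level uses the classic nearest-neighbour idiom np.argmin(np.abs(target - levels)). In isolation (the levels are invented):

import numpy as np

levels = np.array([1000., 925., 850., 700., 500.])  # pressure levels, hPa
target = 860.
idx = np.argmin(np.abs(target - levels))
print(idx, levels[idx])  # 2 850.0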
0
Example 28
Project: QuantEcon.py Source File: lss.py
def stationary_distributions(self, max_iter=200, tol=1e-5):
"""
Compute the moments of the stationary distributions of x_t and
y_t if possible. Computation is by iteration, starting from the
initial conditions self.mu_0 and self.Sigma_0
Parameters
----------
max_iter : scalar(int), optional(default=200)
The maximum number of iterations allowed
tol : scalar(float), optional(default=1e-5)
The tolerance level that one wishes to achieve
Returns
-------
mu_x_star : array_like(float)
An n x 1 array representing the stationary mean of x_t
mu_y_star : array_like(float)
A k x 1 array representing the stationary mean of y_t
Sigma_x_star : array_like(float)
An n x n array representing the stationary var-cov matrix
of x_t
Sigma_y_star : array_like(float)
A k x k array representing the stationary var-cov matrix
of y_t
"""
# == Initialize iteration == #
m = self.moment_sequence()
mu_x, mu_y, Sigma_x, Sigma_y = next(m)
i = 0
error = tol + 1
# == Loop until convergence or failure == #
while error > tol:
if i > max_iter:
fail_message = 'Convergence failed after {} iterations'
raise ValueError(fail_message.format(max_iter))
else:
i += 1
mu_x1, mu_y1, Sigma_x1, Sigma_y1 = next(m)
error_mu = np.max(np.abs(mu_x1 - mu_x))
error_Sigma = np.max(np.abs(Sigma_x1 - Sigma_x))
error = max(error_mu, error_Sigma)
mu_x, Sigma_x = mu_x1, Sigma_x1
# == Prepare return values == #
mu_x_star, Sigma_x_star = mu_x, Sigma_x
mu_y_star, Sigma_y_star = mu_y1, Sigma_y1
return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star
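The stopping rule is a sup-norm test on successive iterates, np.max(np.abs(new - old)). A toy fixed-point iteration showing the same loop structure (matrix, vector, and tolerance are illustrative):

import numpy as np

A = np.array([[0.9, 0.0], [0.1, 0.8]])
b = np.array([1.0, 2.0])
x = np.zeros(2)
error, i, tol = 1.0, 0, 1e-10
while error > tol and i < 1000:
    x_new = A @ x + b
    error = np.max(np.abs(x_new - x))  # sup-norm of the update
    x, i = x_new, i + 1
print(i, x)  # converges to (10, 15)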
0
Example 29
def l1_min_c(X, y, loss='squared_hinge', fit_intercept=True,
intercept_scaling=1.0):
"""
Return the lowest bound for C such that for C in (l1_min_C, infinity)
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as LinearSVC with penalty='l1' and
linear_model.LogisticRegression with penalty='l1'.
This value is valid if class_weight parameter in fit() is not set.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
loss : {'squared_hinge', 'log'}, default 'squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
'l2' is accepted as an alias for 'squared_hinge', for backward
compatibility reasons, but should not be used in new code.
fit_intercept : bool, default: True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default: 1
when fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c: float
minimum value for C
"""
if loss not in ('squared_hinge', 'log'):
raise ValueError('loss type not in ("squared_hinge", "log", "l2")')
X = check_array(X, accept_sparse='csc')
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = intercept_scaling * np.ones((np.size(y), 1))
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError('Ill-posed l1_min_c calculation: l1 will always '
'select zero coefficients for this data')
if loss == 'squared_hinge':
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
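The denominator is the largest absolute label/feature correlation, np.max(np.abs(Y @ X)). On toy dense data (no sparse handling, no intercept):

import numpy as np

Y = np.array([[1., -1., 1., -1.]])  # one row of +/-1 labels
X = np.random.default_rng(2).normal(size=(4, 3))
den = np.max(np.abs(Y @ X))
print(0.5 / den)  # l1_min_c for the squared hinge loss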
0
Example 30
def set_center(data, center, crop='maintain_size', verbose=False):
""" Move image center to mid-point of image
Parameters
----------
data : 2D np.array
The image data
center : tuple
image pixel coordinate center (row, col)
crop : str
This determines how the image should be cropped. The options are:
``maintain_size``
return image of the same size. Some of the original
image may be lost
and some regions may be filled with zeros.
``valid_region``
return the largest image that can be created without padding.
All of the returned image will correspond to the original image.
However, portions of the original image will be lost.
If you can tolerate clipping the edges of the image, this is probably
the method to choose.
``maintain_data``
the image will be padded with zeros such that none of the original
image will be cropped.
verbose: boolean
True: print diagnostics
"""
c0, c1 = center
old_shape = data.shape
old_center = data.shape[0]/2.0, data.shape[1]/2.0
delta0 = old_center[0] - center[0]
delta1 = old_center[1] - center[1]
if crop == 'maintain_data':
# pad the image so that the center can be moved without losing any of the original data
# we need to pad the image with zeros before using the shift() function
shift0, shift1 = (0, 0)  # integers, since they enter the shape arithmetic below
if delta0 != 0:
shift0 = 1 + int(np.abs(delta0))
if delta1 != 0:
shift1 = 1 + int(np.abs(delta1))
container = np.zeros((data.shape[0]+shift0, data.shape[1]+shift1),
dtype = data.dtype)
area = container[:,:]
if shift0:
if delta0 > 0:
area = area[:-shift0,:]
else:
area = area[shift0:,:]
if shift1:
if delta1 > 0:
area = area[:,:-shift1]
else:
area = area[:,shift1:]
area[:,:] = data[:,:]
data = container
delta0 += np.sign(delta0)*shift0/2.0
delta1 += np.sign(delta1)*shift1/2.0
if verbose:
print("delta = ({0}, {1})".format(delta0, delta1))
centered_data = shift(data, (delta0, delta1))
if crop == 'maintain_data':
# pad the image so that the center can be moved
# without losing any of the original data
return centered_data
elif crop == 'maintain_size':
return centered_data
elif crop == 'valid_region':
# crop to region containing data
shift0, shift1 = (None, None)
if delta0 != 0:
shift0 = 1 + int(np.abs(delta0))
if delta1 != 0:
shift1 = 1 + int(np.abs(delta1))
return centered_data[shift0:-shift0, shift1:-shift1]
else:
raise ValueError("Invalid crop method!!")
0
Example 31
Project: theano_pyglm Source File: parallel_coord_descent.py
def parallel_coord_descent(client,
N,
x0=None,
maxiter=50,
atol=1e-5,
use_hessian=True,
use_rop=False):
"""
Compute the maximum a posterior parameter estimate using Theano to compute
gradients of the log probability.
"""
dview = client[:]
master = client[client.ids[0]]
# Import req'd functions on engines
initialize_imports(dview)
# Parameter checking
# We only use Rops if use_hessian is False
use_rop = use_rop and not use_hessian
# Make sure the network is a complete adjacency matrix because we
# do not do integer programming
# if not isinstance(network.graph, CompleteGraphModel):
# print "WARNING: MAP inference via coordinate descent can only be performed "\
# "with the complete graph model."
# Draw initial state from prior if not given
print "Initializing parameters"
if x0 is None:
master.execute('x0 = popn.sample()', block=True)
master.execute('initialize_with_data(popn, data, x0)')
x0 = master['x0']
# else:
# master['x0'] = x0
# Also initialize with intelligent parameters from the data
# dview['x0d'] = x0
# @interactive
# def _initialize_with_data(n):
# initialize_with_data(popn, data, x0d, Ns=n)
# x0s = dview.map_async(_initialize_with_data,
# range(N))
# x0s = x0s.get()
# Extract the initial parameters for each GLM
#x0['glms'] = [x0s['glms'][n] for n in np.arange(N)]
print "Preparing Theano functions for inference"
# Compute log prob, gradient, and hessian wrt network parameters
dview.execute('net_inf_prms = prep_network_inference(popn,'
'use_hessian=%s,'
'use_rop=%s)' % (str(use_hessian), str(use_rop)),
block=True)
# Compute gradients of the log prob wrt the GLM parameters
dview.execute('glm_inf_prms = prep_glm_inference(popn,'
'use_hessian=%s,'
'use_rop=%s)' % (str(use_hessian), str(use_rop)),
block=True)
# Parallel function to fit GLMs
@interactive
def _parallel_fit_glm(n, x, use_hessian=False, use_rop=False):
nvars = popn.extract_vars(x, n)
fit_glm(nvars, n, glm_inf_prms, use_hessian, use_rop)
return nvars['glm']
# Alternate fitting the network and fitting the GLMs
x = x0
lp_prev = parallel_compute_log_p(dview, master, x, N)
converged = False
iter = 0
while not converged and iter < maxiter:
iter += 1
print "Coordinate descent iteration %d." % iter
# TODO Fit the network on the first engine
# fit_network(x, net_inf_prms, use_hessian, use_rop)
# Fit the GLMs in parallel
x_glms = dview.map_async(_parallel_fit_glm,
range(N),
[x]*N)
# Print progress report ever interval seconds
interval = 15.0
# Timeout after specified number of seconds (-1 = Inf?)
#timeout = -1
wait_watching_stdout(x_glms, interval=interval)
x['glms'] = x_glms.get()
x_glms.display_outputs()
# Check for convergence
lp = parallel_compute_log_p(dview, master, x, N)
print "Iteration %d: LP=%.2f. Change in LP: %.2f" % (iter, lp, lp-lp_prev)
converged = np.abs(lp-lp_prev) < atol
lp_prev = lp
return x
0
Example 32
Project: qutip Source File: metrics.py
def dnorm(A, B=None, solver="CVXOPT", verbose=False, force_solve=False):
"""
Calculates the diamond norm of the quantum map q_oper, using
the simplified semidefinite program of [Wat12]_.
The diamond norm SDP is solved by using CVXPY_.
Parameters
----------
A : Qobj
Quantum map to take the diamond norm of.
B : Qobj or None
If provided, the diamond norm of :math:`A - B` is
taken instead.
solver : str
Solver to use with CVXPY. One of "CVXOPT" (default)
or "SCS". The latter tends to be significantly faster,
but somewhat less accurate.
verbose : bool
If True, prints additional information about the
solution.
force_solve : bool
If True, forces dnorm to solve the associated SDP, even if a special
case is known for the argument.
Returns
-------
dn : float
Diamond norm of q_oper.
Raises
------
ImportError
If CVXPY cannot be imported.
.. _cvxpy: http://www.cvxpy.org/en/latest/
"""
if cvxpy is None: # pragma: no cover
raise ImportError("dnorm() requires CVXPY to be installed.")
# We follow the strategy of using Watrous' simpler semidefinite
# program in its primal form. This is the same strategy used,
# for instance, by both pyGSTi and SchattenNorms.jl. (By contrast,
# QETLAB uses the dual problem.)
# Check if A and B are both unitaries. If so, then we can without
# loss of generality choose B to be the identity by using the
# unitary invariance of the diamond norm,
# || A - B ||_♢ = || A B⁺ - I ||_♢.
# Then, using the technique mentioned by each of Johnston and
# da Silva,
# || A B⁺ - I ||_♢ = max_{i, j} | \lambda_i(A B⁺) - \lambda_j(A B⁺) |,
# where \lambda_i(U) is the ith eigenvalue of U.
if (
# There's a lot of conditions to check for this path.
not force_solve and B is not None and
# Only check if they aren't superoperators.
A.type == "oper" and B.type == "oper" and
# The difference of unitaries optimization is currently
# only implemented for d == 2. Much of the code below is more general,
# though, in anticipation of generalizing the optimization.
A.shape[0] == 2
):
# Make an identity the same size as A and B to
# compare against.
I = qeye(A.dims[0])
# Compare to B first, so that an error is raised
# as soon as possible.
Bd = B.dag()
if (
(B * Bd - I).norm() < 1e-6 and
(A * A.dag() - I).norm() < 1e-6
):
# Now we are on the fast path, so let's compute the
# eigenvalues, then find the diameter of the smallest circle
# containing all of them.
#
# For now, this is only implemented for dim = 2, such that
# generalizing here will allow for generalizing the optimization.
# A reasonable approach would probably be to use Welzl's algorithm
# (https://en.wikipedia.org/wiki/Smallest-circle_problem).
U = A * B.dag()
eigs = U.eigenenergies()
eig_distances = np.abs(eigs[:, None] - eigs[None, :])
return np.max(eig_distances)
# Force the input superoperator to be a Choi matrix.
J = to_choi(A)
if B is not None:
J -= to_choi(B)
# Watrous 2012 also points out that the diamond norm of Lambda
# is the same as the completely-bounded operator-norm (∞-norm)
# of the dual map of Lambda. We can evaluate that norm much more
# easily if Lambda is completely positive, since then the largest
# eigenvalue is the same as the largest singular value.
if not force_solve and J.iscp:
S_dual = to_super(J.dual_chan())
vec_eye = operator_to_vector(qeye(S_dual.dims[1][1]))
op = vector_to_operator(S_dual * vec_eye)
# The 2-norm was not implemented for sparse matrices as of the time
# of this writing. Thus, we must yet again go dense.
return la.norm(op.data.todense(), 2)
# If we're still here, we need to actually solve the problem.
# Assume square...
dim = np.prod(J.dims[0][0])
# The constraints only depend on the dimension, so
# we can cache them efficiently.
problem, Jr, Ji, X, rho0, rho1 = dnorm_problem(dim)
# Load the parameters with the Choi matrix passed in.
J_dat = J.data
Jr.value, Ji.value = J_dat.real, J_dat.imag
# Finally, set up and run the problem.
problem.solve(solver=solver, verbose=verbose)
return problem.value
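On the unitary fast path the diamond norm comes from the most distant pair of eigenvalues, obtained by broadcasting: np.abs(eigs[:, None] - eigs[None, :]). In isolation, with made-up unimodular eigenvalues:

import numpy as np

eigs = np.exp(1j * np.array([0.0, 0.4, 2.5]))
eig_distances = np.abs(eigs[:, None] - eigs[None, :])  # all pairwise |l_i - l_j|
print(np.max(eig_distances))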
0
Example 33
Project: theano_pyglm Source File: fit_latent_network.py
def fit_latent_network_to_mle():
""" Run a test with synthetic data and MCMC inference
"""
options, popn, data, popn_true, x_true = initialize_test_harness()
import pdb; pdb.set_trace()
# Load MLE parameters from command line
mle_x = None
if options.x0_file is not None:
with open(options.x0_file, 'r') as f:
print "Initializing with state from: %s" % options.x0_file
mle_x = cPickle.load(f)
mle_model = make_model('standard_glm', N=data['N'])
mle_popn = Population(mle_model)
mle_popn.set_data(data)
# Create a location sampler
print "Initializing latent location sampler"
loc_sampler = LatentLocationUpdate()
loc_sampler.preprocess(popn)
# Convert the mle results into a weighted adjacency matrix
x_aw = popn.sample(None)
x_aw = convert_model(mle_popn, mle_model, mle_x, popn, popn.model, x_aw)
# Get rid of unnecessary keys
del x_aw['glms']
# Fit the latent distance network to a thresholded adjacency matrix
ws = np.sort(np.abs(x_aw['net']['weights']['W']))
wperm = np.argsort(np.abs(x_aw['net']['weights']['W']))
nthrsh = 20
threshs = np.arange(ws.size, step=ws.size/nthrsh)
res = []
N = popn.N
for th in threshs:
print "Fitting network for threshold: %.3f" % th
A = np.zeros_like(ws, dtype=np.int8)
A[wperm[th:]] = 1
A = A.reshape((N,N))
# A = (np.abs(x_aw['net']['weights']['W']) >= th).astype(np.int8).reshape((N,N))
# Make sure the diag is still all 1s
A[np.diag_indices(N)] = 1
x = copy.deepcopy(x_aw)
x['net']['graph']['A'] = A
smpls = fit_latent_network_given_A(x, loc_sampler)
# Index the results by the overall sparsity of A
key = (np.sum(A)-N) / (np.float(np.size(A))-N)
res.append((key, smpls))
# Save results
results_file = os.path.join(options.resultsDir, 'fit_latent_network_results.pkl')
print "Saving results to %s" % results_file
with open(results_file, 'w') as f:
cPickle.dump(res, f)
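The thresholding step sorts connections by |weight| and keeps only the strongest ones. A small reproduction of that adjacency construction (random weights, 4x4 network):

import numpy as np

W = np.random.default_rng(3).normal(size=(4, 4)).ravel()
order = np.argsort(np.abs(W))   # weakest to strongest
A = np.zeros_like(W, dtype=np.int8)
A[order[8:]] = 1                # keep the 8 strongest connections
print(A.reshape(4, 4))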
0
Example 34
def find_onsets(signal=None, sampling_rate=1000., size=0.05, threshold=None):
"""Determine onsets of EMG pulses.
Skips corrupted signal parts.
Parameters
----------
signal : array
Input filtered EMG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
size : float, optional
Detection window size (seconds).
threshold : float, optional
Detection threshold.
Returns
-------
onsets : array
Indices of EMG pulse onsets.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# full-wave rectification
fwlo = np.abs(signal)
# smooth
size = int(sampling_rate * size)
mvgav, _ = st.smoother(signal=fwlo,
kernel='boxzen',
size=size,
mirror=True)
# threshold
if threshold is None:
aux = np.abs(mvgav)
threshold = 1.2 * np.mean(aux) + 2.0 * np.std(aux, ddof=1)
# find onsets
length = len(signal)
start = np.nonzero(mvgav > threshold)[0]
stop = np.nonzero(mvgav <= threshold)[0]
onsets = np.union1d(np.intersect1d(start - 1, stop),
np.intersect1d(start + 1, stop))
if np.any(onsets):
if onsets[-1] >= length:
onsets[-1] = length - 1
return utils.ReturnTuple((onsets,), ('onsets',))
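Full-wave rectification is just np.abs(signal); the threshold is then derived from the smoothed envelope. A simplified serial version with a plain moving-average smoother standing in for the 'boxzen' kernel (the signal is synthetic):

import numpy as np

t = np.linspace(0, 1, 1000)
signal = np.sin(2 * np.pi * 50 * t) * ((t > 0.6) & (t < 0.7))  # one burst

fwlo = np.abs(signal)                               # full-wave rectification
envelope = np.convolve(fwlo, np.ones(50) / 50., mode="same")
threshold = 1.2 * np.mean(envelope) + 2.0 * np.std(envelope, ddof=1)
active = np.nonzero(envelope > threshold)[0]
print(threshold, active.size)  # samples inside the detected burst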
0
Example 35
Project: qutip Source File: steadystate.py
def build_preconditioner(A, c_op_list=[], **kwargs):
"""Constructs a iLU preconditioner necessary for solving for
the steady state density matrix using the iterative linear solvers
in the 'steadystate' function.
Parameters
----------
A : qobj
A Hamiltonian or Liouvillian operator.
c_op_list : list
A list of collapse operators.
return_info : bool, optional, default = False
Return a dictionary of solver-specific information about the
solution and how it was obtained.
use_rcm : bool, optional, default = False
Use reverse Cuthill-Mckee reordering to minimize fill-in in the
LU factorization of the Liouvillian.
use_wbm : bool, optional, default = False
Use Weighted Bipartite Matching reordering to make the Liouvillian
diagonally dominant. This is useful for iterative preconditioners
only, and is set to ``True`` by default when finding a preconditioner.
weight : float, optional
Sets the size of the elements used for adding the unity trace condition
to the linear solvers. This is set to the average abs value of the
Liouvillian elements if not specified by the user.
method : str, default = 'iterative'
Tells the preconditioner what type of Liouvillian to build for
iLU factorization. For direct iterative methods use 'iterative'.
For power iterative methods use 'power'.
permc_spec : str, optional, default='COLAMD'
Column ordering used internally by superLU for the
'direct' LU decomposition method. Options include 'COLAMD' and
'NATURAL'. If using RCM then this is set to 'NATURAL' automatically
unless explicitly specified.
fill_factor : float, optional, default = 100
Specifies the fill ratio upper bound (>=1) of the iLU
preconditioner. Lower values save memory at the cost of longer
execution times and a possible singular factorization.
drop_tol : float, optional, default = 1e-4
Sets the threshold for the magnitude of preconditioner
elements that should be dropped. Can be reduced for a coarser
factorization at the cost of an increased number of iterations, and a
possible singular factorization.
diag_pivot_thresh : float, optional, default = None
Sets the threshold between [0,1] for which diagonal
elements are considered acceptable pivot points when using a
preconditioner. A value of zero forces the pivot to be the diagonal
element.
ILU_MILU : str, optional, default = 'smilu_2'
Selects the incomplete LU decomposition algorithm used in
creating the preconditioner. Should only be used by advanced users.
Returns
-------
lu : object
Returns a SuperLU object representing iLU preconditioner.
info : dict, optional
Dictionary containing solver-specific information.
"""
ss_args = _default_steadystate_args()
ss_args['method'] = 'iterative'
for key in kwargs.keys():
if key in ss_args.keys():
ss_args[key] = kwargs[key]
else:
raise Exception("Invalid keyword argument '" + key +
"' passed to steadystate.")
# Set column perm to NATURAL if using RCM and not specified by user
if ss_args['use_rcm'] and ('permc_spec' not in kwargs.keys()):
ss_args['permc_spec'] = 'NATURAL'
L = _steadystate_setup(A, c_op_list)
# Set weight parameter to avg abs val in L if not set explicitly
if 'weight' not in kwargs.keys():
ss_args['weight'] = np.mean(np.abs(L.data.data.max()))
ss_args['info']['weight'] = ss_args['weight']
n = int(np.sqrt(L.shape[0]))
if ss_args['method'] == 'iterative':
L, perm, perm2, rev_perm, ss_args = _steadystate_LU_liouvillian(L, ss_args)
elif ss_args['method'] == 'power':
L, perm, perm2, rev_perm, ss_args = _steadystate_power_liouvillian(L, ss_args)
else:
raise Exception("Invalid preconditioning method.")
M, ss_args = _iterative_precondition(L, n, ss_args)
if ss_args['return_info']:
return M, ss_args['info']
else:
return M
0
Example 36
Project: mpop Source File: aapp1b.py
def load_avhrr(satscene, options):
"""Read avhrr data from file and load it into *satscene*.
"""
if "filename" not in options:
raise IOError("No filename given, cannot load.")
loaded = set([chn.name for chn in satscene.loaded_channels()])
chns = (satscene.channels_to_load &
(set(AVHRR_CHANNEL_NAMES) - loaded))
LOGGER.info("Loading channels %s", str(sorted(list(chns))))
if len(chns) == 0:
return
values = {"orbit": satscene.orbit,
"satname": satscene.satname,
"number": satscene.number,
"instrument": satscene.instrument_name,
"satellite": satscene.fullname
}
done_reading = False
if "full_filename" in options:
filename = options["full_filename"]
LOGGER.debug("Loading from %s", filename)
scene = AAPP1b(filename)
try:
scene.read()
done_reading = True
except ValueError:
LOGGER.info("Can't read %s", filename)
if not done_reading:
filename = \
os.path.join(satscene.time_slot.strftime(options["dir"]) % values,
satscene.time_slot.strftime(
options["filename"])
% values)
file_list = glob.glob(filename)
if len(file_list) > 1:
LOGGER.info("More than one l1b file found: %s", str(file_list))
# hrpt_noaa18_20150110_1658_49685.l1b
candidate = ('hrpt_' +
str(satscene.satname) + str(satscene.number) +
satscene.time_slot.strftime('_%Y%m%d_%H%M_') +
str(satscene.orbit) + '.l1b')
LOGGER.debug("Suggested filename = %s", str(candidate))
candidate_found = False
for fname in file_list:
l1bname = os.path.basename(fname)
if l1bname == candidate:
filename = fname
candidate_found = True
LOGGER.info('The l1b file chosen is this: %s',
str(filename))
break
if not candidate_found:
LOGGER.info("More than one l1b file found: %s", str(file_list))
LOGGER.warning("Couldn't decide which one to take. "
"Try take the first one: %s", str(filename))
filename = file_list[0]
elif len(file_list) == 0:
raise IOError("No l1b file matching!: %s", filename)
else:
filename = file_list[0]
LOGGER.debug("Loading from %s", filename)
scene = AAPP1b(filename)
try:
scene.read()
except ValueError:
LOGGER.info("Can't read %s, exiting.", filename)
return
calib_coeffs = None
if options["use_extern_calib"]:
import h5py
LOGGER.info("Reading external calibration coefficients.")
try:
fid = h5py.File(os.path.join(CONFIG_PATH, satscene.satname +
'_calibration_data.h5'), 'r')
calib_coeffs = {}
for key in fid.keys():
date_diffs = []
for dat in fid[key]['datetime']:
date_diffs.append(np.abs(satscene.time_slot -
datetime.datetime(dat[0],
dat[1],
dat[2])))
idx = date_diffs.index(min(date_diffs))
date_diff = satscene.time_slot - \
datetime.datetime(fid[key]['datetime'][idx][0],
fid[key]['datetime'][idx][1],
fid[key]['datetime'][idx][2])
if date_diff.days < 0:
older_or_newer = "newer"
else:
older_or_newer = "older"
LOGGER.info("External calibration for %s is %d "
"days %s than data.",
key, date_diffs[idx].days, older_or_newer)
calib_coeffs[key] = (fid[key]['slope1'][idx],
fid[key]['intercept1'][idx],
fid[key]['slope2'][idx],
fid[key]['intercept2'][idx])
fid.close()
if 'ch1' not in calib_coeffs:
calib_coeffs['ch1'] = None
if 'ch2' not in calib_coeffs:
calib_coeffs['ch2'] = None
if 'ch3a' not in calib_coeffs:
calib_coeffs['ch3a'] = None
except IOError:
LOGGER.info("No external calibration data found.")
scene.calibrate(chns, calibrate=options.get('calibrate', 1),
pre_launch_coeffs=options["pre_launch_coeffs"],
calib_coeffs=calib_coeffs)
if satscene.area is None:
scene.navigate()
try:
from pyresample import geometry
except ImportError as ex_:
LOGGER.debug("Could not load pyresample: %s", str(ex_))
satscene.lat = scene.lats
satscene.lon = scene.lons
else:
satscene.area = geometry.SwathDefinition(lons=scene.lons,
lats=scene.lats)
area_name = ("swath_" + satscene.fullname + "_" +
str(satscene.time_slot) + "_"
+ str(scene.lats.shape))
satscene.area.area_id = area_name
satscene.area.name = "Satellite projection"
satscene.area_id = area_name
for chn in chns:
if (chn in scene.channels) and np.ma.count(scene.channels[chn]) > 0:
satscene[chn].data = scene.channels[chn]
satscene[chn].info['units'] = scene.units[chn]
satscene[chn].area = satscene.area
else:
del satscene[chn]
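Picking the calibration entry closest in time relies on np.abs applied to date differences. The same search expressed with numpy's datetime64 arithmetic (the dates are invented):

import numpy as np

target = np.datetime64("2015-01-10")
dates = np.array(["2014-12-01", "2015-01-08", "2015-02-20"], dtype="datetime64[D]")
idx = np.argmin(np.abs(dates - target))  # nearest calibration date
print(dates[idx])  # 2015-01-08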
0
Example 37
Project: scipy Source File: waveforms.py
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by `chirp` to generate its output.
See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
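In the hyperbolic branch, np.abs keeps the logarithm's argument positive on either side of the singular point. Evaluating that phase formula directly (parameters chosen arbitrarily):

import numpy as np

f0, f1, t1 = 100.0, 10.0, 1.0
t = np.linspace(0, t1, 5)
sing = -f1 * t1 / (f0 - f1)  # where the instantaneous frequency blows up
phase = 2 * np.pi * (-sing * f0) * np.log(np.abs(1 - t / sing))
print(phase)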
0
Example 38
Project: spectralDNS Source File: integrators.py
def adaptiveRK(A, b, bhat, err_order, fY_hat, u0_new, sc, err, fsal, offset,
aTOL, rTOL, adaptive, errnorm, rhs, u0, solver, dt, tstep,
context, additional_callback, params, predictivecontroller=False):
"""
Take a step using any Runge-Kutta method.
Parameters
----------
A, b, bhat : arrays
Runge-Kutta coefficients
err_order : int
Order of embedded method
fY_hat, u0_new, sc, err : work arrays
fsal : boolean
Whether method is first-same-as-last
offset : length-1 array of int
Where to find the previous RHS evaluation (for FSAL methods). This can probably be eliminated.
aTOL, rTOL : float
Error tolerances
adaptive : boolean
If true, adapt the step size
errnorm : str
Which norm to use in computing the error estimate. One of {"2", "inf"}.
rhs : array
RHS evaluation
u0 : array
solution value (returned)
solver : calling module
contains method ComputeRHS for computing RHS of evolution equation
dt : float
time step size
tstep : int
Number of steps taken so far
predictivecontroller : boolean
If True use PI controller
"""
s = A.shape[0]
#Some parameters for adaptive time-stepping. See p167, Hairer, Norsett and Wanner. "Solving Ordinary Differential Equations 1"
#for details.
facmax_default = 2
facmax = facmax_default
fac = 0.8
facmin = 0.01
FFT = context.FFT
#We may need to repeat the time-step until a small enough value is used.
while True:
dt_prev = dt
if fsal:
offset[0] = (offset[0] - 1) % s
for i in range(0, s):
if not fsal or (tstep == 0 or i != 0 ):
fY_hat[(i + offset[0]) % s] = u0
for j in range(0,i):
fY_hat[(i+offset[0]) % s] += dt*A[i,j]*fY_hat[(j+offset[0]) % s]
#Compute F(Y)
rhs = solver.ComputeRHS(rhs, fY_hat[(i+offset[0]) % s], solver, **context)
fY_hat[(i+offset[0]) % s] = rhs
if i == 0:
context.fu0 = fY_hat[(0+offset[0]) % s]
additional_callback(context)
#Calculate the new value
u0_new[:] = u0
u0_new[:] += dt*b[0]*fY_hat[(0+offset[0]) % s]
err[:] = dt*(b[0] - bhat[0])*fY_hat[(0+offset[0]) % s]
for j in range(1,s):
u0_new[:] += dt*b[j]*fY_hat[(j+offset[0])%s]
err[:] += dt*(b[j] - bhat[j])*fY_hat[(j+offset[0])%s]
est = 0.0
sc[:] = aTOL + np.maximum(np.abs(u0),np.abs(u0_new))*rTOL
if errnorm == "2":
est_to_bcast = None
nsquared = np.zeros(u0.shape[0])
for k in range(u0.shape[0]):
nsquared[k] = FFT.comm.reduce(np.sum(np.power(np.abs(err[k]/sc[k]), 2)))
if FFT.comm.rank == 0:
est_to_bcast = np.zeros(1)
est = np.max(np.sqrt(nsquared))
est /= np.sqrt(np.array(FFT.global_complex_shape()).prod())
est_to_bcast[0] = est
est_to_bcast = FFT.comm.bcast(est_to_bcast,root=0)
est = est_to_bcast[0]
elif errnorm == "inf":
raise AssertionError("Don't use this, not sure if it works")
#TODO: Test this error norm
sc[:] = aTOL + np.maximum(np.abs(u0),np.abs(u0_new))*rTOL
err[:] = err[:] / sc[:]
err = np.abs(err, out=err)
asdf = np.max(err)
x = np.zeros(asdf.shape)
FFT.comm.Allreduce(asdf, x, op=MPI.MAX)
est = np.abs(np.max(x))
est /= np.sqrt(np.array(FFT.global_complex_shape()).prod())
else:
assert False, "Wrong error norm"
#Check error estimate
exponent = 1.0 / (err_order + 1)
if not predictivecontroller:
factor = min(facmax, max(facmin, fac*pow((1/est), exponent)))
else:
if not "last_dt" in vars(params):
params.last_dt = dt
if not "last_est" in vars(params):
params.last_est = est
last_dt = params.last_dt
last_est = params.last_est
factor = min(facmax, max(facmin, fac*pow((1/est), exponent)*dt/last_dt*pow(last_est/est, exponent)))
if adaptive:
dt = dt*factor
if est > 1.0:
facmax = 1
context.is_step_rejected_callback=True
context.dt_rejected = dt_prev
additional_callback(context)
#The offset gets decreased in the next step, which is something we do not want.
if fsal:
offset[0] += 1
continue
#if predictivecontroller:
#context.time_integrator["last_dt"] = dt_prev
#context.time_integrator["last_est"] = est
break
#Update u0 and U
u0[:] = u0_new
return u0, dt, dt_prev
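The step-size control hinges on the componentwise scale sc = aTOL + max(|u0|, |u0_new|) * rTOL and a norm of err/sc. A single-process sketch of that scaled error estimate (tolerances and vectors invented; est > 1 would mean "reject the step"):

import numpy as np

aTOL, rTOL = 1e-8, 1e-5
u0 = np.array([1.0, -2.0, 0.5])
u0_new = u0 + np.array([1e-6, -2e-6, 5e-7])  # pretend embedded-method error
err = u0_new - u0

sc = aTOL + np.maximum(np.abs(u0), np.abs(u0_new)) * rTOL
est = np.sqrt(np.mean((err / sc) ** 2))  # scaled RMS error estimate
print(est)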
0
Example 39
Project: pyspace Source File: SOR.py
def reduced_descent(self, current_dual, M, relevant_indices):
""" Basic iteration step over a set of indices, possibly subset of all
The main principle is to make a descent step with just one index,
while fixing the other dual_solutions.
The main formula comes from *M&M*:
.. math::
d = \\alpha_i - \\frac{\\omega}{M[i][i]}(M[i]\\alpha-1)
\\text{with } M[i][j] = y_i y_j(<x_i,x_j>+1)
\\text{and final projection: }\\alpha_i = \\max(0,\\min(d,c_i)).
Here we use c for the weights for each sample in the loss term,
which is normally complexity times corresponding class weight.
y is used for the labels, which have to be 1 or -1.
In the *sample* version only the diagonal of M is used.
The sum with the alpha is tracked by using the classification vector w
and the offset b.
.. math::
o = \\alpha_i
d = \\alpha_i - \\frac{\\omega}{M[i][i]}(y_i(<w,x_i>+b)-1)
\\text{with projection: }\\alpha_i = \\max(0,\\min(d,c_i)),
b=b+(\\alpha_i-o)y_i
w=w+(\\alpha_i-o)y_i x_i
"""
self.irrelevant_indices = []
self.difference = 0
for i in relevant_indices:
old_dual = current_dual[i]
### Main Function ###
### elemental update step of SOR algorithm ###
if self.version == "matrix":
# this step is kernel independent
x = old_dual - self.omega / (
M[i][i] + self.squ_factor/(2 * self.ci[i])) * \
(dot(M[i], current_dual) - 1)
elif self.version == "samples":
xi = self.samples[i]
bi = self.bi[i]
x = old_dual - self.omega * (M[i]) * \
(bi * (dot(xi.T, self.w) + self.b) - 1 +
self.squ_factor * old_dual / (2 * self.ci[i]))
# map dual solution to the interval [0,C]
if x <= 0:
self.irrelevant_indices.append(i)
current_dual[i] = 0
elif not self.squ_factor:
current_dual[i] = min(x, self.ci[i])
else:
current_dual[i] = x
if self.version == "matrix":
delta = (current_dual[i] - old_dual)
# update w and b in samples case
if self.version == "samples":
delta = (current_dual[i] - old_dual) * bi
# update classification function parameter w and b
# self.update_classification_function(delta=delta, index=i)
self.b = self.b + self.offset_factor * delta
self.w = self.w + delta * xi
current_difference = numpy.abs(delta)
if current_difference > self.difference:
self.difference = current_difference
self.sub_iterations += 1
self.iterations += 1
if not (self.sub_iterations < self.max_sub_iterations
and self.iterations < self.max_iterations):
break
if self.reduce_non_zeros:
for index in self.irrelevant_indices:
try:
relevant_indices.remove(index)
except ValueError:
# special mapping for RMM case
if index < self.num_samples:
relevant_indices.remove(index+self.num_samples)
else:
relevant_indices.remove(index-self.num_samples)
if self.random:
random.shuffle(relevant_indices)
0
Example 40
Project: statsmodels Source File: correlation_tools.py
def _spg_optim(func, grad, start, project, maxiter=1e4, M=10,
ctol=1e-3, maxiter_nmls=200, lam_min=1e-30,
lam_max=1e30, sig1=0.1, sig2=0.9, gam=1e-4):
"""
Implements the spectral projected gradient method for minimizing a
differentiable function on a convex domain.
Parameters
----------
func : real valued function
The objective function to be minimized.
grad : real array-valued function
The gradient of the objective function
start : array_like
The starting point
project : function
In-place projection of the argument to the domain
of func.
... See notes regarding additional arguments
Returns
-------
rslt : Bunch
rslt.params is the final iterate, other fields describe
convergence status.
Notes
-----
This can be an effective heuristic algorithm for problems where no
guaranteed algorithm for computing a global minimizer is known.
There are a number of tuning parameters, but these generally
should not be changed except for `maxiter` (positive integer) and
`ctol` (small positive real). See the Birgin et al reference for
more information about the tuning parameters.
Reference
---------
E. Birgin, J.M. Martinez, and M. Raydan. Spectral projected
gradient methods: Review and perspectives. Journal of Statistical
Software (preprint). Available at:
http://www.ime.usp.br/~egbirgin/publications/bmr5.pdf
"""
lam = min(10*lam_min, lam_max)
params = start.copy()
gval = grad(params)
obj_hist = [func(params),]
for itr in range(int(maxiter)):
# Check convergence
df = params - gval
project(df)
df -= params
if np.max(np.abs(df)) < ctol:
return Bunch(**{"Converged": True, "params": params,
"objective_values": obj_hist,
"Message": "Converged successfully"})
# The line search direction
d = params - lam*gval
project(d)
d -= params
# Carry out the nonmonotone line search
alpha, params1, fval, gval1 = _nmono_linesearch(func, grad, params, d,
obj_hist, M=M,
sig1=sig1,
sig2=sig2,
gam=gam,
maxiter=maxiter_nmls)
if alpha is None:
return Bunch(**{"Converged": False, "params": params,
"objective_values": obj_hist,
"Message": "Failed in nmono_linesearch"})
obj_hist.append(fval)
s = params1 - params
y = gval1 - gval
sy = (s*y).sum()
if sy <= 0:
lam = lam_max
else:
ss = (s*s).sum()
lam = max(lam_min, min(ss/sy, lam_max))
params = params1
gval = gval1
return Bunch(**{"Converged": False, "params": params,
"objective_values": obj_hist,
"Message": "spg_optim did not converge"})
0
Example 41
def normalize(self, initial_viewbox=None, symmetric=False):
"""Normalize data given the initial view box.
This function also defines the four following methods:
* `(un)normalize_[x|y]`: normalize or un-normalize in the x or y
dimension. Un-normalization can be useful for e.g. retrieving the
original coordinates of a point in the window.
Arguments:
* initial_viewbox=None: the initial view box is a 4-tuple
(x0, y0, x1, y1) describing the initial view of the data and
defining the normalization. By default, it is the bounding box of the
data (min/max x/y). x0 and/or y0 can be None, meaning no
normalization for that dimension.
Returns:
* normalized_data: the normalized data.
"""
if not initial_viewbox:
initial_viewbox = (None, None, None, None)
dx0, dy0, dx1, dy1 = initial_viewbox
if self.data is not None:
x, y = self.data[:,0], self.data[:,1]
# default: replace None by min/max
if self.data.size == 0:
dx0 = dy0 = dx1 = dy1 = 0.
else:
if dx0 is None:
dx0 = x.min()
if dy0 is None:
dy0 = y.min()
if dx1 is None:
dx1 = x.max()
if dy1 is None:
dy1 = y.max()
else:
if dx0 is None:
dx0 = -1.
if dy0 is None:
dy0 = -1.
if dx1 is None:
dx1 = 1.
if dy1 is None:
dy1 = 1.
if dx0 == dx1:
dx0 -= .5
dx1 += .5
if dy0 == dy1:
dy0 -= .5
dy1 += .5
# force a symmetric viewbox
if symmetric:
vx = max(np.abs(dx0), np.abs(dx1))
vy = max(np.abs(dy0), np.abs(dy1))
dx0, dx1 = -vx, vx
dy0, dy1 = -vy, vy
if dx0 is None:
self.normalize_x = self.unnormalize_x = lambda X: X
else:
self.normalize_x = lambda X: -1+2*(X-dx0)/(dx1-dx0)
self.unnormalize_x = lambda X: dx0 + (dx1 - dx0) * (1+X)/2.
if dy0 is None:
self.normalize_y = self.unnormalize_y = lambda Y: Y
else:
self.normalize_y = lambda Y: -1+2*(Y-dy0)/(dy1-dy0)
self.unnormalize_y = lambda Y: dy0 + (dy1 - dy0) * (1+Y)/2.
if self.data is not None:
data_normalized = np.empty(self.data.shape, dtype=self.data.dtype)
data_normalized[:,0] = self.normalize_x(x)
data_normalized[:,1] = self.normalize_y(y)
return data_normalized
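The normalization lambdas above are plain affine maps of the view box onto [-1, 1]; a small self-contained check of the forward and inverse formulas (the view box and data here are made up):
import numpy as np
dx0, dx1 = 2.0, 10.0                               # hypothetical view box in x
X = np.array([2.0, 6.0, 10.0])
normalized = -1 + 2 * (X - dx0) / (dx1 - dx0)      # -> [-1, 0, 1]
recovered = dx0 + (dx1 - dx0) * (1 + normalized) / 2.
assert np.allclose(recovered, X)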
0
Example 42
Project: pyNastran Source File: patran_rpt.py
def csv_simplify(csv_filename, x0, ix, iname, tol=0.05):
A = loadtxt(csv_filename, delimiter=',')
f = open(csv_filename + '2', 'wb')
isort = argsort(A[:, ix])
X = A[isort, ix]
a_response = A[isort, iname]
f.write('#z,response\n')
update_flag = True
tol = 0.001  # note: this overrides the tol argument above
response = None
for i, x in enumerate(X):
# reset the max RSS per subcase
if update_flag:
x_orig = X[i]
update_flag = False
# if the points are the same in the direction of interest, find the max
print("x=%s x_orig=%s" % (x, x_orig))
if allclose(x, x_orig, atol=tol):
if response is None:
response = a_response[i]
else:
response = vstack((response, a_response[i]))
update_flag = False
else:
print("response = %s" % response)
if len(response.shape) == 2:
# max
response = response[0:].max(axis=0)
# min
#response = response[0].max(axis=0)
# abs(max), abs(min) + sign
if 0:
values2 = array([response.max(),
response.min()])
# we figure out the absolute max/min
abs_vals = abs(values2)
abs_val = abs_vals.max()
# find the location of the absolute max value
# 1. we take the first value (the where[0]) to chop the return value
# since there is no else conditional
# 2. we take the first value (the where[0][0]) to only get the max
# value if 2+ values are returned
j = where(abs_val == abs_vals)[0][0]
# get the raw value from the absoluted value, so:
# value = abs(raw_value)
response = response[j]
#print response1, response2
#f.write("%g,%g,%g,%g\n" % (x, response1, response2, response1/1e6, response2/1e6))
f.write("%g,%g,%g\n" % (x, response, response / 1e6))
#f.write("-------------------\n")
x_orig = X[i]
response = a_response[i]
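The `if 0:` block above selects the raw value whose absolute value is largest while preserving its sign; the same effect in two lines with numpy.abs and argmax (data here is made up):
import numpy as np
values = np.array([3.5, -7.2, 6.9])
signed_extreme = values[np.argmax(np.abs(values))]
print(signed_extreme)  # -7.2: largest magnitude, original sign kept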
0
Example 43
def optimize(self, nr_samples=1):
self.x_initial_red = reduce_coordinates(self.x_initial,self.frozen_atoms,self.ndim)
self.optimizer = ModifiedFireCPP(self.x_initial_red.copy(), self.potential, tol = self.tol, maxstep = self.maxstep)
self.optimizer_ = LBFGS_CPP(self.x_initial_red.copy(), self.potential)
self.optimizer_cells = ModifiedFireCPP(self.x_initial_red.copy(), self.potential_cells, tol = self.tol, maxstep = self.maxstep)
self.optimizer_cells_ = LBFGS_CPP(self.x_initial_red.copy(), self.potential_cells)
t0 = time.time()
print "self.optimizer.run(self.nstepsmax)", self.nstepsmax
self.optimizer.run(self.nstepsmax)
self.res_x_final = self.optimizer.get_result()
t1 = time.time()
self.optimizer_cells.run(self.nstepsmax)
self.res_x_final_cells = self.optimizer_cells.get_result()
t2 = time.time()
self.x_final = self.res_x_final.coords
self.x_final_cells = self.res_x_final_cells.coords
print "fire final E, x:", self.optimizer.get_result().energy
print "fire final E, x_cells:", self.optimizer_cells.get_result().energy
print "fire final E, plain: ", self.potential.getEnergy(self.x_final)
print "fire final E, cell: ", self.potential_cells.getEnergy(self.x_final_cells)
print "fire number of particles:", self.N
print "fire time no cell lists:", t1 - t0, "sec"
print "fire time cell lists:", t2 - t1, "sec"
print "fire ratio:", (t1 - t0) / (t2 - t1)
if not self.res_x_final.success or not self.res_x_final_cells.success:
print "-------------"
print "res_x_final.rms:", self.res_x_final.rms
print "res_x_final.nfev:", self.res_x_final.nfev
print "res_x_final_cells.rms:", self.res_x_final_cells.rms
print "res_x_final_cells.nfev:", self.res_x_final_cells.nfev
print "self.res_x_final.success", self.res_x_final.success
print "self.res_x_final_cells.success", self.res_x_final_cells.success
print "-------------"
plot_disks(self.x_initial, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final_cells, self.radii, self.boxvec, sca=self.sca)
self.optimizer_.run(self.nstepsmax)
self.res_x_final_ = self.optimizer_.get_result()
t3 = time.time()
self.optimizer_cells_.run(self.nstepsmax)
self.res_x_final_cells_ = self.optimizer_cells_.get_result()
t4 = time.time()
self.x_final_ = self.res_x_final_.coords
self.x_final_cells_ = self.res_x_final_cells_.coords
print "lbfgs final E, x:", self.optimizer_.get_result().energy
print "lbfgs final E, x_cells:", self.optimizer_cells_.get_result().energy
print "lbfgs final E, plain: ", self.potential.getEnergy(self.x_final_)
print "lbfgs final E, cell: ", self.potential_cells.getEnergy(self.x_final_cells_)
print "lbfgs number of particles:", self.N
print "lbfgs time no cell lists:", t3 - t2, "sec"
print "lbfgs time cell lists:", t4 - t3, "sec"
print "lbfgs ratio:", (t3 - t2) / (t4 - t3)
if not self.res_x_final_.success or not self.res_x_final_cells_.success or not self.res_x_final.success or not self.res_x_final_cells.success:
print "-------------"
print "res_x_final_.rms:", self.res_x_final_.rms
print "res_x_final_.nfev:", self.res_x_final_.nfev
print "res_x_final_cells_.rms:", self.res_x_final_cells_.rms
print "res_x_final_cells_.nfev:", self.res_x_final_cells_.nfev
print "self.res_x_final_.success", self.res_x_final.success
print "self.res_x_final_cells_.success", self.res_x_final_cells.success
print "-------------"
plot_disks(self.x_initial, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final_, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final_cells_, self.radii, self.boxvec, sca=self.sca)
assert(self.res_x_final.success)
assert(self.res_x_final_cells.success)
assert(self.res_x_final_.success)
assert(self.res_x_final_cells_.success)
for (xci, xi) in zip(self.x_final_cells, self.x_final):
passed = bool(np.abs(xci - xi) < 1e-10)
if not passed:
print "xci", xci
print "xi", xi
assert(passed)
print "energy no cell lists:", self.res_x_final.energy
print "energy cell lists:", self.res_x_final_cells.energy
self.t_ratio = (t1 - t0) / (t2 - t1)
self.t_ratio_lbfgs = (t3 - t2) / (t4 - t3)
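The final per-coordinate loop compares the two optimization results element by element; a vectorized equivalent of that tolerance check, with placeholder arrays standing in for the real coordinates:
import numpy as np
x_final = np.array([0.11, 0.52, 0.93])   # stand-in for the plain result
x_final_cells = x_final + 1e-12          # stand-in for the cell-list result
max_dev = np.max(np.abs(x_final_cells - x_final))
assert max_dev < 1e-10, "cell-list and plain results diverged"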
0
Example 44
Project: PYPOWER Source File: qps_gurobi.py
def qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt):
"""Quadratic Program Solver based on GUROBI.
A wrapper function providing a PYPOWER standardized interface for using
gurobipy to solve the following QP (quadratic programming)
problem:
min 1/2 x'*H*x + c'*x
x
subject to
l <= A*x <= u (linear constraints)
xmin <= x <= xmax (variable bounds)
Inputs (all optional except H, c, A and l):
H : matrix (possibly sparse) of quadratic cost coefficients
c : vector of linear cost coefficients
A, l, u : define the optional linear constraints. Default
values for the elements of l and u are -Inf and Inf,
respectively.
xmin, xmax : optional lower and upper bounds on the
C{x} variables, defaults are -Inf and Inf, respectively.
x0 : optional starting value of optimization vector C{x}
opt : optional options structure with the following fields,
all of which are also optional (default values shown in
parentheses)
verbose (0) - controls level of progress output displayed
0 = no progress output
1 = some progress output
2 = verbose progress output
grb_opt - options dict for Gurobi, value in
verbose overrides these options
problem : The inputs can alternatively be supplied in a single
PROBLEM dict with fields corresponding to the input arguments
described above: H, c, A, l, u, xmin, xmax, x0, opt
Outputs:
x : solution vector
f : final objective function value
exitflag : gurobipy exit flag
1 = converged
0 or negative values = negative of GUROBI_MEX exit flag
(see gurobipy documentation for details)
output : gurobipy output dict
(see gurobipy documentation for details)
lmbda : dict containing the Lagrange and Kuhn-Tucker
multipliers on the constraints, with fields:
mu_l - lower (left-hand) limit on linear constraints
mu_u - upper (right-hand) limit on linear constraints
lower - lower bound on optimization variables
upper - upper bound on optimization variables
Note the calling syntax is almost identical to that of QUADPROG
from MathWorks' Optimization Toolbox. The main difference is that
the linear constraints are specified with A, l, u instead of
A, b, Aeq, beq.
Calling syntax options:
x, f, exitflag, output, lmbda = ...
qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt)
r = qps_gurobi(H, c, A, l, u)
r = qps_gurobi(H, c, A, l, u, xmin, xmax)
r = qps_gurobi(H, c, A, l, u, xmin, xmax, x0)
r = qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt)
r = qps_gurobi(problem), where problem is a dict with fields:
H, c, A, l, u, xmin, xmax, x0, opt
all fields except 'c', 'A' and 'l' or 'u' are optional
Example: (problem from http://www.jmu.edu/docs/sasdoc/sashtml/iml/chap8/sect12.htm)
H = [ 1003.1 4.3 6.3 5.9;
4.3 2.2 2.1 3.9;
6.3 2.1 3.5 4.8;
5.9 3.9 4.8 10 ]
c = zeros((4, 1))
A = [ [1 1 1 1]
[0.17 0.11 0.10 0.18] ]
l = [1; 0.10]
u = [1; Inf]
xmin = zeros((4, 1))
x0 = [1; 0; 0; 1]
opt = {'verbose': 2}
x, f, s, out, lmbda = qps_gurobi(H, c, A, l, u, xmin, [], x0, opt)
@see: L{gurobipy}.
"""
import gurobipy
##----- input argument handling -----
## gather inputs
if isinstance(H, dict): ## problem struct
p = H
if 'opt' in p: opt = p['opt']
if 'x0' in p: x0 = p['x0']
if 'xmax' in p: xmax = p['xmax']
if 'xmin' in p: xmin = p['xmin']
if 'u' in p: u = p['u']
if 'l' in p: l = p['l']
if 'A' in p: A = p['A']
if 'c' in p: c = p['c']
if 'H' in p: H = p['H']
else: ## individual args
assert H is not None
assert c is not None
assert A is not None
assert l is not None
if opt is None:
opt = {}
# if x0 is None:
# x0 = array([])
# if xmax is None:
# xmax = array([])
# if xmin is None:
# xmin = array([])
## define nx, set default values for missing optional inputs
if len(H) == 0 or not any(any(H)):
if len(A) == 0 and len(xmin) == 0 and len(xmax) == 0:
stderr.write('qps_gurobi: LP problem must include constraints or variable bounds\n')
else:
if len(A) > 0:
nx = shape(A)[1]
elif len(xmin) > 0:
nx = len(xmin)
else: # if len(xmax) > 0
nx = len(xmax)
H = sparse((nx, nx))
else:
nx = shape(H)[0]
if len(c) == 0:
c = zeros(nx)
if len(A) > 0 and (len(l) == 0 or all(l == -Inf)) and \
(len(u) == 0 or all(u == Inf)):
A = None ## no limits => no linear constraints
nA = shape(A)[0] ## number of original linear constraints
if nA:
if len(u) == 0: ## By default, linear inequalities are ...
u = Inf * ones(nA) ## ... unbounded above and ...
if len(l) == 0:
l = -Inf * ones(nA) ## ... unbounded below.
if len(x0) == 0:
x0 = zeros(nx)
## default options
if 'verbose' in opt:
verbose = opt['verbose']
else:
verbose = 0
# if 'max_it' in opt:
# max_it = opt['max_it']
# else:
# max_it = 0
## set up options struct for Gurobi
if 'grb_opt' in opt:
g_opt = gurobi_options(opt['grb_opt'])
else:
g_opt = gurobi_options()
g_opt['Display'] = min(verbose, 3)
if verbose:
g_opt['DisplayInterval'] = 1
else:
g_opt['DisplayInterval'] = Inf
if not issparse(A):
A = sparse(A)
## split up linear constraints
ieq = find( abs(u - l) <= EPS )  ## equality
igt = find( (u >= 1e10) & (l > -1e10) )  ## greater than, unbounded above
ilt = find( (l <= -1e10) & (u < 1e10) )  ## less than, unbounded below
ibx = find( (abs(u - l) > EPS) & (u < 1e10) & (l > -1e10) )
## grab some dimensions
nlt = len(ilt) ## number of upper bounded linear inequalities
ngt = len(igt) ## number of lower bounded linear inequalities
nbx = len(ibx) ## number of doubly bounded linear inequalities
neq = len(ieq) ## number of equalities
niq = nlt + ngt + 2 * nbx ## number of inequalities
AA = [ A[ieq, :], A[ilt, :], -A[igt, :], A[ibx, :], -A[ibx, :] ]
bb = [ u[ieq], u[ilt], -l[igt], u[ibx], -l[ibx] ]
contypes = '=' * neq + '<' * niq
## call the solver
if len(H) == 0 or not any(any(H)):
lpqp = 'LP'
else:
lpqp = 'QP'
rr, cc, vv = find(H)
g_opt['QP']['qrow'] = (rr - 1).astype(int)
g_opt['QP']['qcol'] = (cc - 1).astype(int)
g_opt['QP']['qval'] = 0.5 * vv
if verbose:
methods = [
'primal simplex',
'dual simplex',
'interior point',
'concurrent',
'deterministic concurrent'
]
print('Gurobi Version %s -- %s %s solver\n'
% ('<unknown>', methods[g_opt['Method'] + 1], lpqp))
x, f, eflag, output, lmbda = \
gurobipy(c.T, 1, AA, bb, contypes, xmin, xmax, 'C', g_opt)
pi = lmbda['Pi']
rc = lmbda['RC']
output['flag'] = eflag
if eflag == 2:
eflag = 1 ## optimal solution found
else:
eflag = -eflag ## failed somehow
## check for empty results (in case optimization failed)
lam = {}
if len(x) == 0:
x = NaN * ones(nx)
lam['lower'] = NaN * ones(nx)
lam['upper'] = NaN * ones(nx)
else:
lam['lower'] = zeros(nx)
lam['upper'] = zeros(nx)
if len(f) == 0:
f = NaN
if len(pi) == 0:
pi = NaN * ones(len(bb))
kl = find(rc > 0)  ## lower bound binding
ku = find(rc < 0)  ## upper bound binding
lam['lower'][kl] = rc[kl]
lam['upper'][ku] = -rc[ku]
lam['eqlin'] = pi[:neq]
lam['ineqlin'] = pi[neq:neq + niq]
mu_l = zeros(nA)
mu_u = zeros(nA)
## repackage lmbdas
kl = find(lam['eqlin'] > 0) ## lower bound binding
ku = find(lam['eqlin'] < 0) ## upper bound binding
mu_l[ieq[kl]] = lam['eqlin'][kl]
mu_l[igt] = -lam['ineqlin'][nlt:nlt + ngt]
mu_l[ibx] = -lam['ineqlin'][nlt + ngt + nbx:nlt + ngt + 2 * nbx]
mu_u[ieq[ku]] = -lam['eqlin'][ku]
mu_u[ilt] = -lam['ineqlin'][:nlt]
mu_u[ibx] = -lam['ineqlin'][nlt + ngt:nlt + ngt + nbx]
lmbda = {
'mu_l': mu_l,
'mu_u': mu_u,
'lower': lam['lower'],
'upper': lam['upper']
}
return x, f, eflag, output, lmbda
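The constraint-splitting step above partitions l <= A*x <= u into equality, one-sided, and boxed rows by testing numpy.abs(u - l) against EPS; a standalone sketch of that partition, assuming find behaves like numpy's flatnonzero (as in PYPOWER's util module) and using an illustrative EPS:
import numpy as np
EPS = 2.2e-16  # illustrative machine-epsilon threshold
l = np.array([1.0, 0.10, -np.inf, -5.0])
u = np.array([1.0, np.inf, 3.0, 5.0])
ieq = np.flatnonzero(np.abs(u - l) <= EPS)             # row 0: equality
igt = np.flatnonzero((u >= 1e10) & (l > -1e10))        # row 1: lower bounded only
ilt = np.flatnonzero((l <= -1e10) & (u < 1e10))        # row 2: upper bounded only
ibx = np.flatnonzero((np.abs(u - l) > EPS) & (u < 1e10) & (l > -1e10))  # row 3: boxed
print(ieq, igt, ilt, ibx)  # [0] [1] [2] [3]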
0
Example 45
Project: sfepy Source File: utils.py
def extend_cell_data(data, domain, rname, val=None, is_surface=False,
average_surface=True):
"""
Extend cell data defined in a region to the whole domain.
Parameters
----------
data : array
The data defined in the region.
domain : FEDomain instance
The FE domain.
rname : str
The region name.
val : float, optional
The value for filling cells not covered by the region. If not given,
the smallest value in data is used.
is_surface : bool
If True, the data are defined on a surface region. In that case the
values are averaged or summed into the cells containing the region
surface faces (a cell can have several faces of the surface), see
`average_surface`.
average_surface : bool
If True, the data defined on a surface region are averaged, otherwise
the data are summed.
Returns
-------
edata : array
The data extended to all domain elements.
"""
n_el = domain.shape.n_el
if data.shape[0] == n_el: return data
if val is None:
if data.shape[2] > 1: # Vector.
val = nm.amin(nm.abs(data))
else: # Scalar.
val = nm.amin(data)
edata = nm.empty((n_el,) + data.shape[1:], dtype = nm.float64)
edata.fill(val)
region = domain.regions[rname]
if not is_surface:
edata[region.get_cells()] = data
else:
cells = region.get_cells(true_cells_only=False)
ucells = nm.unique(cells)
dii = region.facets
if len(cells) != len(dii):
raise ValueError('region %s has an inner face!'
% region.name)
if average_surface:
avg = nm.bincount(cells, minlength=n_el)[ucells]
else:
avg = 1.0
for ic in range(data.shape[2]):
evals = nm.bincount(cells, weights=data[dii, 0, ic, 0],
minlength=n_el)[ucells]
edata[ucells, 0, ic, 0] = evals / avg
return edata
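The surface branch above spreads face values into their parent cells with nm.bincount, dividing by the per-cell face count when averaging; the same pattern in isolation, with made-up cells and values:
import numpy as np
cells = np.array([0, 0, 2])            # two faces in cell 0, one in cell 2
values = np.array([1.0, 3.0, 5.0])
n_el = 4
counts = np.bincount(cells, minlength=n_el)
sums = np.bincount(cells, weights=values, minlength=n_el)
ucells = np.unique(cells)
print(sums[ucells] / counts[ucells])   # [2. 5.]: per-cell averages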
0
Example 46
def optimize(self, nr_samples = 1):
self.optimizer = ModifiedFireCPP(self.x_initial.copy(), self.potential,
dtmax=1, maxstep=self.maxstep,
tol=self.tol, nsteps=1e8, verbosity=-1, iprint=-1)
self.optimizer_ = LBFGS_CPP(self.x_initial.copy(), self.potential_)
self.optimizer_cells = ModifiedFireCPP(self.x_initial.copy(), self.potential_cells,
dtmax=1, maxstep=self.maxstep,
tol=self.tol, nsteps=1e8, verbosity=-1, iprint=-1)
self.optimizer_cells_ = LBFGS_CPP(self.x_initial.copy(), self.potential_cells_)
print "initial E, x:", self.potential.getEnergy(self.x_initial.copy())
print "initial E, x_:", self.potential_cells.getEnergy(self.x_initial.copy())
t0 = time.time()
print "self.optimizer.run(self.nstepsmax)", self.nstepsmax
self.optimizer.run(self.nstepsmax)
self.res_x_final = self.optimizer.get_result()
t1 = time.time()
self.optimizer_cells.run(self.nstepsmax)
self.res_x_final_cells = self.optimizer_cells.get_result()
t2 = time.time()
self.x_final = self.res_x_final.coords
self.x_final_cells = self.res_x_final_cells.coords
print "fire final E, x:", self.optimizer.get_result().energy
print "fire final E, x_cells:", self.optimizer_cells.get_result().energy
print "fire final E, plain: ", self.potential.getEnergy(self.x_final)
print "fire final E, cell: ", self.potential_cells.getEnergy(self.x_final_cells)
print "fire number of particles:", self.N
print "fire time no cell lists:", t1 - t0, "sec"
print "fire time cell lists:", t2 - t1, "sec"
print "fire ratio:", (t1 - t0) / (t2 - t1)
if not self.res_x_final.success or not self.res_x_final_cells.success:
print "-------------"
print "res_x_final.rms:", self.res_x_final.rms
print "res_x_final.nfev:", self.res_x_final.nfev
print "res_x_final_cells.rms:", self.res_x_final_cells.rms
print "res_x_final_cells.nfev:", self.res_x_final_cells.nfev
print "self.res_x_final.success", self.res_x_final.success
print "self.res_x_final_cells.success", self.res_x_final_cells.success
print "-------------"
plot_disks(self.x_initial, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final_cells, self.radii, self.boxvec, sca=self.sca)
self.optimizer_.run(self.nstepsmax)
self.res_x_final_ = self.optimizer_.get_result()
t3 = time.time()
self.optimizer_cells_.run(self.nstepsmax)
self.res_x_final_cells_ = self.optimizer_cells_.get_result()
t4 = time.time()
self.x_final_ = self.res_x_final_.coords
self.x_final_cells_ = self.res_x_final_cells_.coords
print "lbfgs final E, x:", self.optimizer_.get_result().energy
print "lbfgs final E, x_cells:", self.optimizer_cells_.get_result().energy
print "lbfgs final E, plain: ", self.potential_.getEnergy(self.x_final_)
print "lbfgs final E, cell: ", self.potential_cells_.getEnergy(self.x_final_cells_)
print "lbfgs number of particles:", self.N
print "lbfgs time no cell lists:", t3 - t2, "sec"
print "lbfgs time cell lists:", t4 - t3, "sec"
print "lbfgs ratio:", (t3 - t2) / (t4 - t3)
if not self.res_x_final_.success or not self.res_x_final_cells_.success or not self.res_x_final.success or not self.res_x_final_cells.success:
print "-------------"
print "res_x_final_.rms:", self.res_x_final_.rms
print "res_x_final_.nfev:", self.res_x_final_.nfev
print "res_x_final_cells_.rms:", self.res_x_final_cells_.rms
print "res_x_final_cells_.nfev:", self.res_x_final_cells_.nfev
print "self.res_x_final_.success", self.res_x_final.success
print "self.res_x_final_cells_.success", self.res_x_final_cells.success
print "-------------"
plot_disks(self.x_initial, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final_, self.radii, self.boxvec, sca=self.sca)
plot_disks(self.x_final_cells_, self.radii, self.boxvec, sca=self.sca)
assert(self.res_x_final.success)
assert(self.res_x_final_cells.success)
assert(self.res_x_final_.success)
assert(self.res_x_final_cells_.success)
for (xci, xi) in zip(self.x_final_cells, self.x_final):
passed = bool(np.abs(xci - xi) < 1e-10)
if not passed:
print "xci", xci
print "xi", xi
assert(passed)
print "energy no cell lists:", self.res_x_final.energy
print "energy cell lists:", self.res_x_final_cells.energy
self.t_ratio = (t1 - t0) / (t2 - t1)
self.t_ratio_lbfgs = (t3 - t2) / (t4 - t3)
0
Example 47
Project: magic_init Source File: magic_init.py
def calibrateGradientRatio(net, NIT=1):
import numpy as np
# When was a blob last used
last_used = {}
# Find the last layer to use
last_layer = 0
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if l.type not in STRIP_LAYER:
last_layer = i
for b in net.bottom_names[n]:
last_used[b] = i
# Figure out which tops are involved
last_tops = net.top_names[net._layer_names[last_layer]]
for t in last_tops:
last_used[t] = len(net.layers)
# Call forward and store the data of all data layers
active_data, input_data, bottom_scale = {}, {}, {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if i > last_layer: break
# Compute the input scale for parameter layers
if len(l.blobs) > 0:
bottom_scale[n] = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
# Run the network forward
new_data = forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n])
if l.type in INPUT_LAYERS:
input_data.update(new_data)
active_data.update(new_data)
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
for it in range(10):
# Reset the diffs
for l in net.layers:
for b in l.blobs:
b.diff[...] = 0
# Set the top diffs
for t in last_tops:
net.blobs[t].diff[...] = np.random.normal(0, 1, net.blobs[t].shape)
# Compute all gradients
net._backward(last_layer, 0)
# Compute the gradient ratio
ratio={}
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if len(l.blobs) > 0:
assert l.type in PARAMETER_LAYERS, "Parameter layer '%s' currently not supported"%l.type
b = l.blobs[0]
ratio[n] = np.sqrt(np.mean(b.diff**2) / np.mean(b.data**2))
# If all layers are homogeneous, then the target ratio is the geometric mean of all ratios
# (assuming we want the same output)
# To deal with non-homogeneous layers we scale by output_std in the hope of correcting the
# estimation over time.
# NOTE: for non feed-forward networks the geometric mean might not be the right scaling factor
target_ratio = np.exp(np.mean(np.log(np.array(list(ratio.values()))))) * (output_std)**(1. / len(ratio))
# Terminate if the relative change is less than 1% for all values
log_ratio = np.log( np.array(list(ratio.values())) )
if np.all( np.abs(log_ratio/np.log(target_ratio) - 1) < 0.01 ):
break
# Update all the weights and biases
active_data = {}
# Read all the input data
for i, (n, l) in enumerate(zip(net._layer_names, net.layers)):
if i > last_layer: break
# Use the stored input
if l.type in INPUT_LAYERS:
active_data.update({b: input_data[b] for b in net.top_names[n]})
else:
if len(l.blobs) > 0:
# Add the scaling from the bottom to the biases
current_scale = np.mean([np.mean(np.abs(active_data[b])) for b in net.bottom_names[n]])
adj = current_scale / bottom_scale[n]
for b in list(l.blobs)[1:]:
b.data[...] *= adj
bottom_scale[n] = current_scale
# Scale to obtain the target ratio
scale = np.sqrt(ratio[n] / target_ratio)
for b in l.blobs:
b.data[...] *= scale
active_data.update(forward(net, i, NIT, {b: active_data[b] for b in net.bottom_names[n]}, net.top_names[n]))
# Delete all unused data
for k in list(active_data):
if k not in last_used or last_used[k] == i:
del active_data[k]
new_output_std = np.mean(np.std(flattenData(active_data[last_tops[0]]), axis=0))
if np.abs(np.log(output_std) - np.log(new_output_std)) > 0.25:
# If we diverge by a factor of exp(0.25) = ~1.3, then we should check if the network is really
# homogeneous
print( "WARNING: It looks like one or more layers are not homogeneous! Trying to correct for this..." )
print( " Output std = %f" % new_output_std )
output_std = new_output_std
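The target ratio above is the geometric mean of the per-layer gradient ratios, computed as exp(mean(log)); a short check of that identity together with the 1% log-ratio convergence test, on made-up numbers:
import numpy as np
ratios = np.array([0.5, 2.0, 1.0])
geo_mean = np.exp(np.mean(np.log(ratios)))   # geometric mean via exp(mean(log))
assert np.isclose(geo_mean, np.prod(ratios) ** (1.0 / len(ratios)))
target_ratio = 1.05
log_ratio = np.log(ratios)
converged = np.all(np.abs(log_ratio / np.log(target_ratio) - 1) < 0.01)
print(geo_mean, converged)  # 1.0 False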
0
Example 48
Project: HPOlib Source File: plotParam.py
def plot_params(value_list, result_list, name, save="", title="", jitter=0):
color = 'k'
marker = 'o'
size = 1
# Get handles
ratio = 5
gs = GridSpec(ratio, 4)
fig = figure(1, dpi=100)
fig.suptitle(title, fontsize=16)
ax = subplot(gs[:, :])
ax.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
# Define xlims
min_y = min(result_list)
max_y = max(result_list)
y_offset = np.abs(max_y - min_y) * 0.1
min_y -= y_offset
max_y += y_offset
min_x = min(value_list)
max_x = max(value_list)
x_offset = np.abs(max_x - min_x) * 0.1
min_x -= x_offset
max_x += x_offset
# Maybe jitter data
# But before save best values
best_value = value_list[np.argmin(result_list)]
best_result = min(result_list)
for idx in range(len(result_list)):
# noinspection PyArgumentList
result_list[idx] += float((np.random.rand(1) - 0.5) * jitter)
# noinspection PyArgumentList
value_list[idx] += float((np.random.rand(1) - 0.5) * jitter)
# Plot
ax.scatter(value_list, result_list, facecolor=color, edgecolor=color, marker=marker, s=size*50)
ax.scatter(best_value, best_result, facecolor='r',
edgecolor='r', s=size*150, marker='o', alpha=0.5,
label="f[%5.3f]=%5.3f" % (best_value, best_result))
# Describe and label the stuff
ax.set_xlabel("Value of %s, jitter=%f" % (name, jitter))
ax.set_ylabel("Minfunction value, jitter=%f" % jitter)
leg = ax.legend(loc='best', fancybox=True, scatterpoints=1)
leg.get_frame().set_alpha(0.5)
# Apply the xlims
ax.set_xlim([min_x, max_x])
ax.set_ylim([min_y, max_y])
# Save Plot
tight_layout()
subplots_adjust(top=0.85)
if save != "":
savefig(save, dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
show()
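The axis limits above are padded by 10% of the data range on each side via np.abs; the computation in isolation, with made-up results:
import numpy as np
result_list = [0.3, 0.9, 0.1, 0.7]
min_y, max_y = min(result_list), max(result_list)
y_offset = np.abs(max_y - min_y) * 0.1       # 10% of the range
print(min_y - y_offset, max_y + y_offset)    # ~0.02 ~0.98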
0
Example 49
Project: pyorbital Source File: orbital.py
def _get_time_at_horizon(self, utc_time, obslon, obslat, **kwargs):
"""Get the time closest in time to *utc_time* when the
satellite is at the horizon relative to the position of an observer on
ground (altitude = 0)
Note: This is considered deprecated and its functionality is currently
replaced by 'get_next_passes'.
"""
warnings.warn("_get_time_at_horizon is replaced with get_next_passes",
DeprecationWarning)
if "precision" in kwargs:
precision = kwargs['precision']
else:
precision = timedelta(seconds=0.001)
if "max_iterations" in kwargs:
nmax_iter = kwargs["max_iterations"]
else:
nmax_iter = 100
sec_step = 0.5
t_step = timedelta(seconds=sec_step / 2.0)
# Local derivative:
def fprime(timex):
el0 = self.get_observer_look(timex - t_step,
obslon, obslat, 0.0)[1]
el1 = self.get_observer_look(timex + t_step,
obslon, obslat, 0.0)[1]
return el0, (abs(el1) - abs(el0)) / sec_step
tx0 = utc_time - timedelta(seconds=1.0)
tx1 = utc_time
idx = 0
#eps = 500.
eps = 100.
while abs(tx1 - tx0) > precision and idx < nmax_iter:
tx0 = tx1
fpr = fprime(tx0)
# When the elevation is high the scale is high, and when
# the elevation is low the scale is low
#var_scale = np.abs(np.sin(fpr[0] * np.pi/180.))
#var_scale = np.sqrt(var_scale)
var_scale = np.abs(fpr[0])
tx1 = tx0 - timedelta(seconds=(eps * var_scale * fpr[1]))
idx = idx + 1
# print idx, tx0, tx1, var_scale, fpr
if abs(tx1 - utc_time) < precision and idx < 2:
tx1 = tx1 + timedelta(seconds=1.0)
if abs(tx1 - tx0) <= precision and idx < nmax_iter:
return tx1
else:
return None
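The update inside the loop above scales the time step by the magnitude of the current elevation, so the search decelerates as the satellite nears the horizon; one such step with made-up elevations:
import numpy as np
eps = 100.0
sec_step = 0.5
el0, el1 = 5.0, 4.8                      # elevations half a step apart, in degrees
fpr = (abs(el1) - abs(el0)) / sec_step   # finite-difference slope of |elevation|
step_seconds = eps * np.abs(el0) * fpr   # -200.0; tx1 = tx0 - this many seconds
print(step_seconds)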
0
Example 50
Project: sfepy Source File: test_mesh_generators.py
def test_gen_mesh_from_geom(self):
from distutils.spawn import find_executable
from sfepy.mesh.geom_tools import geometry
from sfepy.mesh.mesh_generators import gen_mesh_from_geom
nx, ny, nz = 1, 2, 3
ok = True
# 2D
if find_executable('triangle'):
geo = geometry(2)
geo.addpoint(0, [0, 0])
geo.addpoint(1, [nx, 0])
geo.addpoint(2, [0, ny])
geo.addline(3, [0, 1])
geo.addline(4, [1, 2])
geo.addline(5, [2, 0])
geo.addsurface(6, [3, 4, 5])
geo.addphysicalsurface(1, [6])
mesh = gen_mesh_from_geom(geo, a=0.01, refine=True)
filename = op.join(self.options.out_dir, 'gen_triangle.mesh')
mesh.write(filename)
self.report('mesh generated by "triangle" generator')
orign = 98
actn = mesh.coors.shape[0]
ok = ok and nm.abs((actn - orign) / float(orign)) < 0.05
# 3D
if find_executable('tetgen'):
geo = geometry(3)
geo.addpoint(0, [0, 0, 0])
geo.addpoint(1, [nx, 0, 0])
geo.addpoint(2, [0, ny, 0])
geo.addpoint(3, [0, 0, nz])
geo.addline(4, [0, 1])
geo.addline(5, [1, 2])
geo.addline(6, [2, 0])
geo.addline(7, [0, 3])
geo.addline(8, [1, 3])
geo.addline(9, [2, 3])
geo.addsurface(10, [4, 8, -7])
geo.addsurface(11, [5, 9, -8])
geo.addsurface(12, [6, 7, -9])
geo.addsurface(13, [4, 5, 6])
#Volume
geo.addvolume(14, [10, 11, 12, 13])
geo.addphysicalvolume(1, [14])
mesh = gen_mesh_from_geom(geo, a=0.001, refine=True)
filename = op.join(self.options.out_dir, 'gen_tetgen.mesh')
mesh.write(filename)
self.report('mesh generated by "tetgen" generator')
orign = 898
actn = mesh.coors.shape[0]
ok = ok and nm.abs((actn - orign) / float(orign)) < 0.05
return ok
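Both branches accept a generated mesh when its node count is within 5% of the reference count, using a relative error built from nm.abs; the check in isolation, with the 2D reference count from above and a hypothetical actual count:
import numpy as np
orign, actn = 98, 101                    # reference vs. hypothetical actual node count
ok = np.abs((actn - orign) / float(orign)) < 0.05
print(ok)  # True: relative error ~0.031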