Here are examples of the Python API numpy.r_, taken from open source projects. Each example shows numpy.r_ as it is used in real code.
200 Examples
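For context, numpy.r_ is an index-trick object rather than a function: indexing it concatenates its arguments along the first axis, and slice notation inside the brackets is expanded into a range. A minimal standalone sketch of the two patterns that recur throughout the examples below:

import numpy as np

np.r_[1., 2, 3]                       # array([1., 2., 3.]) -- concatenate scalars/arrays
np.r_[np.zeros(2), 5, np.ones(2)]     # array([0., 0., 5., 1., 1.])
np.r_[0:5]                            # array([0, 1, 2, 3, 4]) -- slice works like arange
np.r_[0:1:5j]                         # imaginary "step" = number of points, like linspace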
Example 101
View licensedef plotSlice(self, v, vType='CC', normal='Z', ind=None, grid=False, view='real', ax=None, clim=None, showIt=False, pcolorOpts=None, streamOpts=None, gridOpts=None ): """ Plots a slice of a 3D mesh. .. plot:: from SimPEG import * hx = [(5,2,-1.3),(2,4),(5,2,1.3)] hy = [(2,2,-1.3),(2,6),(2,2,1.3)] hz = [(2,2,-1.3),(2,6),(2,2,1.3)] M = Mesh.TensorMesh([hx,hy,hz]) q = np.zeros(M.vnC) q[[4,4],[4,4],[2,6]]=[-1,1] q = Utils.mkvc(q) A = M.faceDiv*M.cellGrad b = Solver(A) * (q) M.plotSlice(M.cellGrad*b, 'F', view='vec', grid=True, showIt=True, pcolorOpts={'alpha':0.8}) """ if pcolorOpts is None: pcolorOpts = {} if streamOpts is None: streamOpts = {'color':'k'} if gridOpts is None: gridOpts = {'color':'k', 'alpha':0.5} if type(vType) in [list, tuple]: assert ax is None, "cannot specify an axis to plot on with this function." fig, axs = plt.subplots(1,len(vType)) out = [] for vTypeI, ax in zip(vType, axs): out += [self.plotSlice(v,vType=vTypeI, normal=normal, ind=ind, grid=grid, view=view, ax=ax, clim=clim, showIt=False, pcolorOpts=pcolorOpts, streamOpts=streamOpts, gridOpts=gridOpts)] return out viewOpts = ['real','imag','abs','vec'] normalOpts = ['X', 'Y', 'Z'] vTypeOpts = ['CC', 'CCv','N','F','E','Fx','Fy','Fz','E','Ex','Ey','Ez'] # Some user error checking assert vType in vTypeOpts, "vType must be in ['{0!s}']".format("','".join(vTypeOpts)) assert self.dim == 3, 'Must be a 3D mesh. Use plotImage.' assert view in viewOpts, "view must be in ['{0!s}']".format("','".join(viewOpts)) assert normal in normalOpts, "normal must be in ['{0!s}']".format("','".join(normalOpts)) assert type(grid) is bool, 'grid must be a boolean' szSliceDim = getattr(self, 'nC'+normal.lower()) #: Size of the sliced dimension if ind is None: ind = int(szSliceDim/2) assert type(ind) in integer_types, 'ind must be an integer' assert not (v.dtype == complex and view == 'vec'), 'Can not plot a complex vector.' # The slicing and plotting code!! def getIndSlice(v): if normal == 'X': v = v[ind,:,:] elif normal == 'Y': v = v[:,ind,:] elif normal == 'Z': v = v[:,:,ind] return v def doSlice(v): if vType == 'CC': return getIndSlice(self.r(v,'CC','CC','M')) elif vType == 'CCv': assert view == 'vec', 'Other types for CCv not supported' else: # Now just deal with 'F' and 'E' (x,y,z, maybe...) 
aveOp = 'ave' + vType + ('2CCV' if view == 'vec' else '2CC') Av = getattr(self,aveOp) if v.size == Av.shape[1]: v = Av * v else: v = self.r(v,vType[0],vType) # get specific component v = Av * v # we should now be averaged to cell centers (might be a vector) v = self.r(v.reshape((self.nC,-1),order='F'),'CC','CC','M') if view == 'vec': outSlice = [] if 'X' not in normal: outSlice.append(getIndSlice(v[0])) if 'Y' not in normal: outSlice.append(getIndSlice(v[1])) if 'Z' not in normal: outSlice.append(getIndSlice(v[2])) return np.r_[mkvc(outSlice[0]), mkvc(outSlice[1])] else: return getIndSlice(self.r(v,'CC','CC','M')) h2d = [] x2d = [] if 'X' not in normal: h2d.append(self.hx) x2d.append(self.x0[0]) if 'Y' not in normal: h2d.append(self.hy) x2d.append(self.x0[1]) if 'Z' not in normal: h2d.append(self.hz) x2d.append(self.x0[2]) tM = self.__class__(h2d, x2d) #: Temp Mesh v2d = doSlice(v) if ax is None: fig = plt.figure() ax = plt.subplot(111) else: assert isinstance(ax, matplotlib.axes.Axes), "ax must be an matplotlib.axes.Axes" fig = ax.figure out = tM._plotImage2D(v2d, vType=('CCv' if view == 'vec' else 'CC'), grid=grid, view=view, ax=ax, clim=clim, showIt=showIt, pcolorOpts=pcolorOpts, streamOpts=streamOpts, gridOpts=gridOpts) ax.set_xlabel('y' if normal == 'X' else 'x') ax.set_ylabel('y' if normal == 'Z' else 'z') ax.set_title('Slice {0:.0f}'.format(ind)) return out
Example 102
def test_setup(self):
    expMap = Maps.ExpMap(Mesh.TensorMesh((3,)))
    assert expMap.nP == 3

    PM1 = MyPropMap(expMap)
    PM2 = MyPropMap([('sigma', expMap)])
    PM3 = MyPropMap({'maps':[('sigma', expMap)], 'slices':{'sigma':slice(0,3)}})

    for PM in [PM1, PM2, PM3]:
        assert PM.defaultInvProp == 'sigma'
        assert PM.sigmaMap is not None
        assert PM.sigmaMap is expMap
        assert PM.sigmaIndex == slice(0,3)
        assert getattr(PM, 'sigma', None) is None

        assert PM.muMap is None
        assert PM.muIndex is None

        assert 'sigma' in PM
        assert 'mu' not in PM
        assert 'mui' not in PM

        m = PM(np.r_[1., 2, 3])

        assert 'sigma' in m
        assert 'mu' not in m
        assert 'mui' not in m

        assert m.mu == mu_0
        assert m.muModel is None
        assert m.muMap is None
        assert m.muDeriv is None

        assert np.all(m.sigmaModel == np.r_[1., 2, 3])
        assert m.sigmaMap is expMap
        assert np.all(m.sigma == np.exp(np.r_[1., 2, 3]))
        assert m.sigmaDeriv is not None

        assert m.nP == 3
Example 103
View licensedef test_Links(self): m = Mesh.TensorMesh((3,)) expMap = Maps.ExpMap(m) iMap = Maps.IdentityMap(m) PM = MyReciprocalPropMap([('sigma', iMap)]) pm = PM(np.r_[1,2.,3]) # print(pm.sigma) # print(pm.sigmaMap) assert np.all(pm.sigma == [1,2,3]) assert np.all(pm.rho == 1./np.r_[1,2,3]) assert pm.sigmaMap is iMap assert pm.rhoMap is None assert pm.sigmaDeriv is not None assert pm.rhoDeriv is not None assert 'sigma' in PM assert 'rho' not in PM assert 'mu' not in PM assert 'mui' not in PM assert 'sigma' in pm assert 'rho' not in pm assert 'mu' not in pm assert 'mui' not in pm assert pm.mu == mu_0 assert pm.mui == 1.0/mu_0 assert pm.muMap is None assert pm.muDeriv is None assert pm.muiMap is None assert pm.muiDeriv is None PM = MyReciprocalPropMap([('rho', iMap)]) pm = PM(np.r_[1,2.,3]) # print(pm.sigma) # print(pm.sigmaMap) assert np.all(pm.sigma == 1./np.r_[1,2,3]) assert np.all(pm.rho == [1,2,3]) assert pm.sigmaMap is None assert pm.rhoMap is iMap assert pm.sigmaDeriv is not None assert pm.rhoDeriv is not None assert 'sigma' not in PM assert 'rho' in PM assert 'mu' not in PM assert 'mui' not in PM assert 'sigma' not in pm assert 'rho' in pm assert 'mu' not in pm assert 'mui' not in pm self.assertRaises(AssertionError, MyReciprocalPropMap, [('rho', iMap), ('sigma', iMap)]) self.assertRaises(AssertionError, MyReciprocalPropMap, [('sigma', iMap), ('rho', iMap)]) MyReciprocalPropMap([('sigma', iMap), ('mu', iMap)]) # This should be fine
Example 104
View licensedef getxBCyBC_CC(mesh, alpha, beta, gamma): """ This is a subfunction generating mixed-boundary condition: .. math:: \nabla \cdot \vec{j} = -\nabla \cdot \vec{j}_s = q \rho \vec{j} = -\nabla \phi \phi \alpha \phi + \beta \frac{\partial \phi}{\partial r} = \gamma \ at \ r = \partial \Omega xBC = f_1(\alpha, \beta, \gamma) yBC = f(\alpha, \beta, \gamma) Computes xBC and yBC for cell-centered discretizations """ if mesh.dim == 1: # 1D if (len(alpha) != 2 or len(beta) != 2 or len(gamma) != 2): raise Exception("Lenght of list, alpha should be 2") fCCxm, fCCxp = mesh.cellBoundaryInd nBC = fCCxm.sum()+fCCxp.sum() h_xm, h_xp = mesh.gridCC[fCCxm], mesh.gridCC[fCCxp] alpha_xm, beta_xm, gamma_xm = alpha[0], beta[0], gamma[0] alpha_xp, beta_xp, gamma_xp = alpha[1], beta[1], gamma[1] # h_xm, h_xp = mesh.gridCC[fCCxm], mesh.gridCC[fCCxp] h_xm, h_xp = mesh.hx[0], mesh.hx[-1] a_xm = gamma_xm/(0.5*alpha_xm-beta_xm/h_xm) b_xm = (0.5*alpha_xm+beta_xm/h_xm)/(0.5*alpha_xm-beta_xm/h_xm) a_xp = gamma_xp/(0.5*alpha_xp-beta_xp/h_xp) b_xp = (0.5*alpha_xp+beta_xp/h_xp)/(0.5*alpha_xp-beta_xp/h_xp) xBC_xm = 0.5*a_xm xBC_xp = 0.5*a_xp/b_xp yBC_xm = 0.5*(1.-b_xm) yBC_xp = 0.5*(1.-1./b_xp) xBC = np.r_[xBC_xm, xBC_xp] yBC = np.r_[yBC_xm, yBC_xp] elif mesh.dim == 2: # 2D if (len(alpha) != 4 or len(beta) != 4 or len(gamma) != 4): raise Exception("Lenght of list, alpha should be 4") fxm, fxp, fym, fyp = mesh.faceBoundaryInd nBC = fxm.sum()+fxp.sum()+fxm.sum()+fxp.sum() alpha_xm, beta_xm, gamma_xm = alpha[0], beta[0], gamma[0] alpha_xp, beta_xp, gamma_xp = alpha[1], beta[1], gamma[1] alpha_ym, beta_ym, gamma_ym = alpha[2], beta[2], gamma[2] alpha_yp, beta_yp, gamma_yp = alpha[3], beta[3], gamma[3] # h_xm, h_xp = mesh.gridCC[fCCxm,0], mesh.gridCC[fCCxp,0] # h_ym, h_yp = mesh.gridCC[fCCym,1], mesh.gridCC[fCCyp,1] h_xm = mesh.hx[0]*np.ones_like(alpha_xm) h_xp = mesh.hx[-1]*np.ones_like(alpha_xp) h_ym = mesh.hy[0]*np.ones_like(alpha_ym) h_yp = mesh.hy[-1]*np.ones_like(alpha_yp) a_xm = gamma_xm/(0.5*alpha_xm-beta_xm/h_xm) b_xm = (0.5*alpha_xm+beta_xm/h_xm)/(0.5*alpha_xm-beta_xm/h_xm) a_xp = gamma_xp/(0.5*alpha_xp-beta_xp/h_xp) b_xp = (0.5*alpha_xp+beta_xp/h_xp)/(0.5*alpha_xp-beta_xp/h_xp) a_ym = gamma_ym/(0.5*alpha_ym-beta_ym/h_ym) b_ym = (0.5*alpha_ym+beta_ym/h_ym)/(0.5*alpha_ym-beta_ym/h_ym) a_yp = gamma_yp/(0.5*alpha_yp-beta_yp/h_yp) b_yp = (0.5*alpha_yp+beta_yp/h_yp)/(0.5*alpha_yp-beta_yp/h_yp) xBC_xm = 0.5*a_xm xBC_xp = 0.5*a_xp/b_xp yBC_xm = 0.5*(1.-b_xm) yBC_xp = 0.5*(1.-1./b_xp) xBC_ym = 0.5*a_ym xBC_yp = 0.5*a_yp/b_yp yBC_ym = 0.5*(1.-b_ym) yBC_yp = 0.5*(1.-1./b_yp) sortindsfx = np.argsort(np.r_[np.arange(mesh.nFx)[fxm], np.arange(mesh.nFx)[fxp]]) sortindsfy = np.argsort(np.r_[np.arange(mesh.nFy)[fym], np.arange(mesh.nFy)[fyp]]) xBC_x = np.r_[xBC_xm, xBC_xp][sortindsfx] xBC_y = np.r_[xBC_ym, xBC_yp][sortindsfy] yBC_x = np.r_[yBC_xm, yBC_xp][sortindsfx] yBC_y = np.r_[yBC_ym, yBC_yp][sortindsfy] xBC = np.r_[xBC_x, xBC_y] yBC = np.r_[yBC_x, yBC_y] elif mesh.dim == 3: # 3D if (len(alpha) != 6 or len(beta) != 6 or len(gamma) != 6): raise Exception("Lenght of list, alpha should be 6") # fCCxm,fCCxp,fCCym,fCCyp,fCCzm,fCCzp = mesh.cellBoundaryInd fxm, fxp, fym, fyp, fzm, fzp = mesh.faceBoundaryInd nBC = fxm.sum()+fxp.sum()+fxm.sum()+fxp.sum() alpha_xm, beta_xm, gamma_xm = alpha[0], beta[0], gamma[0] alpha_xp, beta_xp, gamma_xp = alpha[1], beta[1], gamma[1] alpha_ym, beta_ym, gamma_ym = alpha[2], beta[2], gamma[2] alpha_yp, beta_yp, gamma_yp = alpha[3], beta[3], gamma[3] alpha_zm, beta_zm, gamma_zm = alpha[4], beta[4], 
gamma[4] alpha_zp, beta_zp, gamma_zp = alpha[5], beta[5], gamma[5] # h_xm, h_xp = mesh.gridCC[fCCxm,0], mesh.gridCC[fCCxp,0] # h_ym, h_yp = mesh.gridCC[fCCym,1], mesh.gridCC[fCCyp,1] # h_zm, h_zp = mesh.gridCC[fCCzm,2], mesh.gridCC[fCCzp,2] h_xm = mesh.hx[0]*np.ones_like(alpha_xm) h_xp = mesh.hx[-1]*np.ones_like(alpha_xp) h_ym = mesh.hy[0]*np.ones_like(alpha_ym) h_yp = mesh.hy[-1]*np.ones_like(alpha_yp) h_zm = mesh.hz[0]*np.ones_like(alpha_zm) h_zp = mesh.hz[-1]*np.ones_like(alpha_zp) a_xm = gamma_xm/(0.5*alpha_xm-beta_xm/h_xm) b_xm = (0.5*alpha_xm+beta_xm/h_xm)/(0.5*alpha_xm-beta_xm/h_xm) a_xp = gamma_xp/(0.5*alpha_xp-beta_xp/h_xp) b_xp = (0.5*alpha_xp+beta_xp/h_xp)/(0.5*alpha_xp-beta_xp/h_xp) a_ym = gamma_ym/(0.5*alpha_ym-beta_ym/h_ym) b_ym = (0.5*alpha_ym+beta_ym/h_ym)/(0.5*alpha_ym-beta_ym/h_ym) a_yp = gamma_yp/(0.5*alpha_yp-beta_yp/h_yp) b_yp = (0.5*alpha_yp+beta_yp/h_yp)/(0.5*alpha_yp-beta_yp/h_yp) a_zm = gamma_zm/(0.5*alpha_zm-beta_zm/h_zm) b_zm = (0.5*alpha_zm+beta_zm/h_zm)/(0.5*alpha_zm-beta_zm/h_zm) a_zp = gamma_zp/(0.5*alpha_zp-beta_zp/h_zp) b_zp = (0.5*alpha_zp+beta_zp/h_zp)/(0.5*alpha_zp-beta_zp/h_zp) xBC_xm = 0.5*a_xm xBC_xp = 0.5*a_xp/b_xp yBC_xm = 0.5*(1.-b_xm) yBC_xp = 0.5*(1.-1./b_xp) xBC_ym = 0.5*a_ym xBC_yp = 0.5*a_yp/b_yp yBC_ym = 0.5*(1.-b_ym) yBC_yp = 0.5*(1.-1./b_yp) xBC_zm = 0.5*a_zm xBC_zp = 0.5*a_zp/b_zp yBC_zm = 0.5*(1.-b_zm) yBC_zp = 0.5*(1.-1./b_zp) sortindsfx = np.argsort(np.r_[np.arange(mesh.nFx)[fxm], np.arange(mesh.nFx)[fxp]]) sortindsfy = np.argsort(np.r_[np.arange(mesh.nFy)[fym], np.arange(mesh.nFy)[fyp]]) sortindsfz = np.argsort(np.r_[np.arange(mesh.nFz)[fzm], np.arange(mesh.nFz)[fzp]]) xBC_x = np.r_[xBC_xm, xBC_xp][sortindsfx] xBC_y = np.r_[xBC_ym, xBC_yp][sortindsfy] xBC_z = np.r_[xBC_zm, xBC_zp][sortindsfz] yBC_x = np.r_[yBC_xm, yBC_xp][sortindsfx] yBC_y = np.r_[yBC_ym, yBC_yp][sortindsfy] yBC_z = np.r_[yBC_zm, yBC_zp][sortindsfz] xBC = np.r_[xBC_x, xBC_y, xBC_z] yBC = np.r_[yBC_x, yBC_y, yBC_z] return xBC, yBC
Example 105
def _raveled_index_for_transformed(self, param):
    """
    get the raveled index for a param for the transformed parameter array
    (optimizer array).

    that is an int array, containing the indexes for the flattened
    param inside this parameterized logic.

    !Warning! be sure to call this method on the highest parent of a hierarchy,
    as it uses the fixes to do its work. If you do not know
    what you are doing, do not use this method, it will have
    unexpected returns!
    """
    ravi = self._raveled_index_for(param)
    if self._has_fixes():
        fixes = self._fixes_
        ### Transformed indices, handling the offsets of previous fixes
        transformed = (np.r_[:self.size] - (~fixes).cumsum())
        return transformed[ravi[fixes[ravi]]]
    else:
        return ravi
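The np.r_[:self.size] - (~fixes).cumsum() line builds a remapping from original indices to their positions once fixed entries are dropped. A small standalone sketch of the same idea, with made-up data:

import numpy as np

fixes = np.array([True, False, True, True, False, True])  # False marks fixed (removed) entries
transformed = np.r_[:fixes.size] - (~fixes).cumsum()
# the free entries 0, 2, 3, 5 land at positions 0, 1, 2, 3 in the compressed array
print(transformed[fixes])  # [0 1 2 3]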
Example 106
View licensedef test_index_view(self): #======================================================================= # 0 1 2 3 4 5 6 7 8 9 10 # one one # two two # three three three three # view: [0 1 2 3 4 5 ] #======================================================================= self.view = ParameterIndexOperationsView(self.param_index, 2, 6) self.assertSetEqual(set(self.view.properties()), set([one, two, three])) for v,p in zip(self.view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])): self.assertEqual(v, p) self.assertSetEqual(set(self.view[two]), set([3])) self.assertSetEqual(set(self.param_index[two]), set([0, 5])) self.view.add(two, np.array([0])) self.assertSetEqual(set(self.view[two]), set([0,3])) self.assertSetEqual(set(self.param_index[two]), set([0, 2, 5])) self.view.clear() for v,p in zip(self.view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])): self.assertEqual(v, p) self.assertEqual(v, []) param_index = ParameterIndexOperations() param_index.add(one, [3,9]) param_index.add(two, [0,5]) param_index.add(three, [2,4,7,10]) view2 = ParameterIndexOperationsView(param_index, 2, 8) self.view.update(view2) for [i,v],[i2,v2] in zip(sorted(param_index.items()), sorted(self.param_index.items())): self.assertEqual(i, i2) np.testing.assert_equal(v, v2)
Example 107
def test_indexview_remove(self):
    removed = self.view.remove(two, [3])
    self.assertListEqual(removed.tolist(), [3])
    removed = self.view.remove(three, np.r_[:5])
    self.assertListEqual(removed.tolist(), [0, 2])
Example 108
def test_incidence():
    # Check estimates in R:
    # ftime = c(1, 1, 2, 4, 4, 4, 6, 6, 7, 8, 9, 9, 9, 1, 2, 2, 4, 4)
    # fstat = c(1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    # cuminc(ftime, fstat)
    #
    # The standard errors agree with Stata, not with R (cmprisk
    # package), which uses a different SE formula from Aalen (1978)

    ftime = np.r_[1, 1, 2, 4, 4, 4, 6, 6, 7, 8, 9, 9, 9, 1, 2, 2, 4, 4]
    fstat = np.r_[1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    ci = CumIncidenceRight(ftime, fstat)

    cinc = [np.array([0.11111111, 0.17037037, 0.17037037, 0.17037037,
                      0.17037037, 0.17037037, 0.17037037]),
            np.array([0., 0., 0.20740741, 0.20740741,
                      0.20740741, 0.20740741, 0.20740741]),
            np.array([0., 0., 0., 0.17777778,
                      0.26666667, 0.26666667, 0.26666667])]
    assert_allclose(cinc[0], ci.cinc[0])
    assert_allclose(cinc[1], ci.cinc[1])
    assert_allclose(cinc[2], ci.cinc[2])

    cinc_se = [np.array([0.07407407, 0.08976251, 0.08976251, 0.08976251,
                         0.08976251, 0.08976251, 0.08976251]),
               np.array([0., 0., 0.10610391, 0.10610391,
                         0.10610391, 0.10610391, 0.10610391]),
               np.array([0., 0., 0., 0.11196147,
                         0.12787781, 0.12787781, 0.12787781])]
    assert_allclose(cinc_se[0], ci.cinc_se[0])
    assert_allclose(cinc_se[1], ci.cinc_se[1])
    assert_allclose(cinc_se[2], ci.cinc_se[2])

    # Simple check for frequency weights
    weights = np.ones(len(ftime))
    ciw = CumIncidenceRight(ftime, fstat, freq_weights=weights)
    assert_allclose(ci.cinc[0], ciw.cinc[0])
    assert_allclose(ci.cinc[1], ciw.cinc[1])
    assert_allclose(ci.cinc[2], ciw.cinc[2])
Example 109
def generate_nominal():
    ## Regression coefficients
    beta1 = np.r_[0.5, 0.5]
    beta2 = np.r_[-1, -0.5]
    p = len(beta1)

    rz = 0.5

    OUT = open("gee_nominal_1.csv", "w")

    for i in range(200):
        n = np.random.randint(3, 6)  # Cluster size
        x = np.random.normal(size=(n, p))
        x[:, 0] = 1
        for j in range(1, x.shape[1]):
            x[:, j] += np.random.normal()
        pr1 = np.exp(np.dot(x, beta1))[:, None]
        pr2 = np.exp(np.dot(x, beta2))[:, None]
        den = 1 + pr1 + pr2
        pr = np.hstack((pr1/den, pr2/den, 1/den))
        cpr = np.cumsum(pr, 1)

        z = rz*np.random.normal() +\
            np.sqrt(1-rz**2)*np.random.normal(size=n)
        u = norm.cdf(z)

        y = (u[:, None] > cpr).sum(1)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")

    OUT.close()
Example 110
View licensedef ar2arma(ar_des, p, q, n=20, mse='ar', start=None): '''find arma approximation to ar process This finds the ARMA(p,q) coefficients that minimize the integrated squared difference between the impulse_response functions (MA representation) of the AR and the ARMA process. This does currently not check whether the MA lagpolynomial of the ARMA process is invertible, neither does it check the roots of the AR lagpolynomial. Parameters ---------- ar_des : array_like coefficients of original AR lag polynomial, including lag zero p, q : int length of desired ARMA lag polynomials n : int number of terms of the impuls_response function to include in the objective function for the approximation mse : string, 'ar' not used yet, Returns ------- ar_app, ma_app : arrays coefficients of the AR and MA lag polynomials of the approximation res : tuple result of optimize.leastsq Notes ----- Extension is possible if we want to match autocovariance instead of impulse response function. TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring roots outside the unit intervall to ones that are inside. How do we do this? ''' #p,q = pq def msear_err(arma, ar_des): ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]] ar_approx = arma_impulse_response(ma, ar, n) ## print(ar,ma) ## print(ar_des.shape, ar_approx.shape) ## print(ar_des) ## print(ar_approx) return (ar_des - ar_approx) # ((ar - ar_approx)**2).sum() if start is None: arma0 = np.r_[-0.9 * np.ones(p-1), np.zeros(q-1)] else: arma0 = start res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000) #print(res) arma_app = np.atleast_1d(res[0]) ar_app = np.r_[1, arma_app[:p-1]], ma_app = np.r_[1, arma_app[p-1:]] return ar_app, ma_app, res
Example 111
View licensedef _simulate(self, nsimulations, measurement_shocks, state_shocks, initial_state): if self._compatibility_mode: return super(SimulationSmoother, self)._simulate( nsimulations, measurement_shocks, state_shocks, initial_state) prefix = self.prefix # Create the simulator if necessary if (prefix not in self._simulators or nsimulations > self._simulators[prefix].nobs): # Make sure we have the required Statespace representation prefix, dtype, create_statespace = ( self._initialize_representation()) # Initialize the state self._initialize_state(prefix=self.prefix) simulation_output = 0 # Kalman smoother parameters smoother_output = -1 # Kalman filter parameters filter_method = self.filter_method inversion_method = self.inversion_method stability_method = self.stability_method conserve_memory = self.conserve_memory filter_timing = self.filter_timing loglikelihood_burn = self.loglikelihood_burn tolerance = self.tolerance # Create a new simulation smoother object cls = self.prefix_simulation_smoother_map[prefix] self._simulators[prefix] = cls( self._statespaces[prefix], filter_method, inversion_method, stability_method, conserve_memory, filter_timing, tolerance, loglikelihood_burn, smoother_output, simulation_output, nsimulations, True ) simulator = self._simulators[prefix] # Set the disturbance variates disturbance_variates = np.array( np.r_[measurement_shocks.ravel(), state_shocks.ravel()], dtype=self.dtype ).squeeze() simulator.set_disturbance_variates(disturbance_variates) # Set the intial state vector initial_state_variates = np.array( initial_state, dtype=self.dtype ).squeeze() simulator.set_initial_state_variates(initial_state_variates) # Perform simulation smoothing # Note: simulation_output=-1 corresponds to whatever was setup when # the simulation smoother was constructed simulator.simulate(-1) simulated_obs = np.array(simulator.generated_obs, copy=True) simulated_state = np.array(simulator.generated_state, copy=True) return ( simulated_obs[:, :nsimulations].T, simulated_state[:, :nsimulations].T )
Example 112
View licensedef test_score_analytic_ar1(): # Test the score against the analytic score for an AR(1) model with 2 # observations # Let endog = [1, 0.5], params=[0, 1] mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0)) def partial_phi(phi, sigma2): return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2)) def partial_sigma2(phi, sigma2): return -0.5 * (2*sigma2 + phi - 1.25) / (sigma2**2) params = np.r_[0., 2] # Compute the analytic score analytic_score = np.r_[ partial_phi(params[0], params[1]), partial_sigma2(params[0], params[1])] # Check each of the approximations, transformed parameters approx_cs = mod.score(params, transformed=True, approx_complex_step=True) assert_allclose(approx_cs, analytic_score) approx_fd = mod.score(params, transformed=True, approx_complex_step=False) assert_allclose(approx_fd, analytic_score, atol=1e-5) approx_fd_centered = ( mod.score(params, transformed=True, approx_complex_step=False, approx_centered=True)) assert_allclose(approx_fd, analytic_score, atol=1e-5) harvey_cs = mod.score(params, transformed=True, method='harvey', approx_complex_step=True) assert_allclose(harvey_cs, analytic_score) harvey_fd = mod.score(params, transformed=True, method='harvey', approx_complex_step=False) assert_allclose(harvey_fd, analytic_score, atol=1e-5) harvey_fd_centered = mod.score(params, transformed=True, method='harvey', approx_complex_step=False, approx_centered=True) assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5) # Check the approximations for untransformed parameters. The analytic # check now comes from chain rule with the analytic derivative of the # transformation # if L* is the likelihood evaluated at untransformed parameters and # L is the likelihood evaluated at transformed parameters, then we have: # L*(u) = L(t(u)) # and then # L'*(u) = L'(t(u)) * t'(u) def partial_transform_phi(phi): return -1. / (1 + phi**2)**(3./2) def partial_transform_sigma2(sigma2): return 2. * sigma2 uparams = mod.untransform_params(params) analytic_score = np.dot( np.diag(np.r_[partial_transform_phi(uparams[0]), partial_transform_sigma2(uparams[1])]), np.r_[partial_phi(params[0], params[1]), partial_sigma2(params[0], params[1])]) approx_cs = mod.score(uparams, transformed=False, approx_complex_step=True) assert_allclose(approx_cs, analytic_score) approx_fd = mod.score(uparams, transformed=False, approx_complex_step=False) assert_allclose(approx_fd, analytic_score, atol=1e-5) approx_fd_centered = ( mod.score(uparams, transformed=False, approx_complex_step=False, approx_centered=True)) assert_allclose(approx_fd, analytic_score, atol=1e-5) harvey_cs = mod.score(uparams, transformed=False, method='harvey', approx_complex_step=True) assert_allclose(harvey_cs, analytic_score) harvey_fd = mod.score(uparams, transformed=False, method='harvey', approx_complex_step=False) assert_allclose(harvey_fd, analytic_score, atol=1e-5) harvey_fd_centered = mod.score(uparams, transformed=False, method='harvey', approx_complex_step=False, approx_centered=True) assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5) # Check the Hessian: these approximations are not very good, particularly # when phi is close to 0 params = np.r_[0.5, 1.] 
def hessian(phi, sigma2): hessian = np.zeros((2,2)) hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2 hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2) hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3 return hessian analytic_hessian = hessian(params[0], params[1]) with warnings.catch_warnings(): warnings.simplefilter("ignore") assert_allclose(mod._hessian_complex_step(params) * 2, analytic_hessian, atol=1e-1) assert_allclose(mod._hessian_finite_difference(params) * 2, analytic_hessian, atol=1e-1)
Example 113
View licensedef test_pandas_endog(): # Test various types of pandas endog inputs (e.g. TimeSeries, etc.) # Example (failure): pandas.Series, no dates endog = pd.Series([1., 2.]) # raises error due to no dates warnings.simplefilter('always') assert_raises(ValueError, check_endog, endog, **kwargs) # Example : pandas.Series dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS') endog = pd.Series([1., 2.], index=dates) mod = check_endog(endog, **kwargs) mod.filter([]) # Example : pandas.Series, string datatype endog = pd.Series(['a'], index=dates) # raises error due to direct type casting check in Statsmodels base classes assert_raises(ValueError, check_endog, endog, **kwargs) # Example : pandas.Series endog = pd.Series([1., 2.], index=dates) mod = check_endog(endog, **kwargs) mod.filter([]) # Example : pandas.DataFrame with 1 column endog = pd.DataFrame({'a': [1., 2.]}, index=dates) mod = check_endog(endog, **kwargs) mod.filter([]) # Example (failure): pandas.DataFrame with 2 columns endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates) # raises error because 2-columns means k_endog=2, but the design matrix # set in **kwargs is shaped (1,1) assert_raises(ValueError, check_endog, endog, **kwargs) # Check behavior of the link maintained between passed `endog` and # `mod.endog` arrays endog = pd.DataFrame({'a': [1., 2.]}, index=dates) mod = check_endog(endog, **kwargs) assert_equal(mod.endog.base is not mod.data.orig_endog, True) assert_equal(mod.endog.base is not endog, True) assert_equal(mod.data.orig_endog.values.base is not endog, True) endog.iloc[0, 0] = 2 # there is no link to mod.endog assert_equal(mod.endog, np.r_[1, 2].reshape(2,1)) # there remains a link to mod.data.orig_endog assert_allclose(mod.data.orig_endog, endog) # Example : pandas.DataFrame with 2 columns # Update kwargs for k_endog=2 kwargs2 = { 'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]], 'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]], 'initialization': 'approximate_diffuse' } endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates) mod = check_endog(endog, k_endog=2, **kwargs2) mod.filter([])
Example 114
View license@classmethod def setup_class(cls, alternate_timing=False, *args, **kwargs): # Dataset / Stata comparison path = current_path + os.sep + 'results/results_wpi1_ar3_stata.csv' cls.stata = pd.read_csv(path) cls.stata.index = pd.date_range(start='1960-01-01', periods=124, freq='QS') # Matlab comparison path = current_path + os.sep+'results/results_wpi1_ar3_matlab_ssm.csv' matlab_names = [ 'a1', 'a2', 'a3', 'detP', 'alphahat1', 'alphahat2', 'alphahat3', 'detV', 'eps', 'epsvar', 'eta', 'etavar' ] cls.matlab_ssm = pd.read_csv(path, header=None, names=matlab_names) cls.model = sarimax.SARIMAX( cls.stata['wpi'], order=(3, 1, 0), simple_differencing=True, hamilton_representation=True, *args, **kwargs ) if alternate_timing: cls.model.ssm.timing_init_filtered = True # Parameters from from Stata's sspace MLE estimation params = np.r_[.5270715, .0952613, .2580355, .5307459] cls.results = cls.model.smooth(params, cov_type='none') # Calculate the determinant of the covariance matrices (for easy # comparison to other languages without having to store 2-dim arrays) cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs)) cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs)) for i in range(cls.model.nobs): cls.results.det_predicted_state_cov[0, i] = np.linalg.det( cls.results.filter_results.predicted_state_cov[:, :, i]) cls.results.det_smoothed_state_cov[0, i] = np.linalg.det( cls.results.smoother_results.smoothed_state_cov[:, :, i]) if not compatibility_mode: # Perform simulation smoothing n_disturbance_variates = ( (cls.model.k_endog + cls.model.ssm.k_posdef) * cls.model.nobs ) cls.sim = cls.model.simulation_smoother(filter_timing=0) cls.sim.simulate( disturbance_variates=np.zeros(n_disturbance_variates), initial_state_variates=np.zeros(cls.model.k_states) )
Example 115
def unintegrate(x, levels):
    """
    After taking n-differences of a series, return the original series

    Parameters
    ----------
    x : array-like
        The n-th differenced series
    levels : list
        A list of the first-value in each differenced series, for
        [first-difference, second-difference, ..., n-th difference]

    Returns
    -------
    y : array-like
        The original series de-differenced

    Examples
    --------
    >>> x = np.array([1, 3, 9., 19, 8.])
    >>> levels = unintegrate_levels(x, 2)
    >>> levels
    array([ 1.,  2.])
    >>> unintegrate(np.diff(x, 2), levels)
    array([  1.,   3.,   9.,  19.,   8.])
    """
    levels = list(levels)[:]  # copy
    if len(levels) > 1:
        x0 = levels.pop(-1)
        return unintegrate(np.cumsum(np.r_[x0, x]), levels)
    x0 = levels[0]
    return np.cumsum(np.r_[x0, x])
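The recursion undoes differencing by prepending the stored first value and cumulatively summing; np.r_[x0, x] is what stitches the level back onto the differenced series. A quick standalone check of one level of that operation, with made-up data:

import numpy as np

x = np.array([1., 3., 9., 19., 8.])
d1 = np.diff(x)                        # first differences: [ 2.,  6., 10., -11.]
recovered = np.cumsum(np.r_[x[0], d1])
print(np.allclose(recovered, x))       # True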
Example 116
def smooth(x, window_len=11, window='hanning'):
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    s = numpy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        w = eval('numpy.' + window + '(window_len)')
    y = numpy.convolve(w / w.sum(), s, mode='valid')
    return y
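The numpy.r_ expression here pads the signal with reversed copies of its two ends before convolving, which reduces edge transients. In isolation, with a short toy signal:

import numpy as np

x = np.array([1., 2., 3., 4., 5.])
window_len = 3
s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
print(s)  # [3. 2. 1. 2. 3. 4. 5. 5. 4.] -- reversed ends prepended and appended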
Example 117
@transactional
def launch(self, data_file, dataset_name, structure_path='', transpose=False, slice=None,
           sampling_rate=1000, start_time=0, tstype=None, tstype_parameters=None):
    try:
        data = read_nested_mat_file(data_file, dataset_name, structure_path)

        if transpose:
            data = data.T
        if slice:
            data = data[parse_slice(slice)]

        ts = self.ts_builder[tstype](self, data, **tstype_parameters)

        ts.start_time = start_time
        ts.sample_period = 1.0 / sampling_rate
        ts.sample_period_unit = 's'

        ts.write_time_slice(numpy.r_[:data.shape[0]] * ts.sample_period)
        # we expect empirical data shape to be time, channel.
        # But tvb expects time, state, channel, mode. Introduce those dimensions
        ts.write_data_slice(data[:, numpy.newaxis, :, numpy.newaxis])
        ts.close_file()
        return ts
    except ParseException as ex:
        self.log.exception(ex)
        raise LaunchException(ex)
Example 118
View licensedef plot_trajectory(self, x, y): """ Plot a sample trajectory, starting at the position x,y in the phase-plane. This method is called as a result of a mouse click on the phase-plane. """ svx_ind = self.model.state_variables.index(self.svx) svy_ind = self.model.state_variables.index(self.svy) #Calculate an example trajectory state = self.default_sv.copy() self.integrator.clamped_state_variable_indices = numpy.setdiff1d( numpy.r_[:len(self.model.state_variables)], numpy.r_[svx_ind, svy_ind]) self.integrator.clamped_state_variable_values = self.default_sv[self.integrator.clamped_state_variable_indices] state[svx_ind] = x state[svy_ind] = y scheme = self.integrator.scheme traj = numpy.zeros((TRAJ_STEPS+1, self.model.nvar, 1, self.model.number_of_modes)) traj[0, :] = state for step in range(TRAJ_STEPS): #import pdb; pdb.set_trace() state = scheme(state, self.model.dfun, self.no_coupling, 0.0, 0.0) traj[step+1, :] = state self.pp_ax.scatter(x, y, s=42, c='g', marker='o', edgecolor=None) self.pp_ax.plot(traj[:, svx_ind, 0, self.mode], traj[:, svy_ind, 0, self.mode]) #Plot the selected state variable trajectories as a function of time self.pp_splt.plot(numpy.arange(TRAJ_STEPS+1) * self.integrator.dt, traj[:, :, 0, self.mode]) pylab.draw()
Example 119
def preconfigure(self):
    "Configure just the basic fields, so that memory can be estimated."
    self.connectivity.configure()
    if self.surface:
        self.surface.configure()
    if self.stimulus:
        self.stimulus.configure()
    self.coupling.configure()
    self.model.configure()
    self.integrator.configure()
    # monitors needs to be a list or tuple, even if there is only one...
    if not isinstance(self.monitors, (list, tuple)):
        self.monitors = [self.monitors]
    # Configure monitors
    for monitor in self.monitors:
        monitor.configure()
    # "Nodes" refers to either regions or vertices + non-cortical regions.
    if self.surface is None:
        self.number_of_nodes = self.connectivity.number_of_regions
        LOG.info('Region simulation with %d ROI nodes', self.number_of_nodes)
    else:
        rm = self.surface.region_mapping
        unmapped = self.connectivity.unmapped_indices(rm)
        self._regmap = numpy.r_[rm, unmapped]
        self.number_of_nodes = self._regmap.shape[0]
        LOG.info('Surface simulation with %d vertices + %d non-cortical, %d total nodes',
                 rm.size, unmapped.size, self.number_of_nodes)
    self._guesstimate_memory_requirement()
Example 120
def test_shape(self):
    # try to avoid introspector picking up this model
    Gen2D = copy.deepcopy(models.Generic2dOscillator)

    class CouplingShapeTestModel(Gen2D):
        def __init__(self, test_case=None, n_node=None, **kwds):
            super(CouplingShapeTestModel, self).__init__(**kwds)
            self.cvar = numpy.r_[0, 1]
            self.n_node = n_node
            self.test_case = test_case

        def dfun(self, state, coupling, local_coupling):
            if self.test_case is not None:
                self.test_case.assertEqual(
                    (2, self.n_node, 1),
                    coupling.shape
                )
            return state

    surf = cortex.Cortex(load_default=True)
    sim = simulator.Simulator(
        model=CouplingShapeTestModel(self, surf.vertices.shape[0]),
        connectivity=connectivity.Connectivity(load_default=True),
        surface=surf)
    sim.configure()

    for _ in sim(simulation_length=sim.integrator.dt * 2):
        pass
Example 121
def union(H1, H2):
    """
    Join two matrices by stacking them on top of each other.

    Parameters
    ----------
    H1 : 2d-array
        The matrix that goes on top of the new matrix
    H2 : 2d-array
        The matrix that goes on bottom of the new matrix

    Returns
    -------
    mat : 2d-array
        The new matrix that contains the rows of ``H1`` on top of the rows of
        ``H2``.

    Example
    -------
    ::

        >>> union(np.eye(2), -np.eye(2))
        array([[ 1.,  0.],
               [ 0.,  1.],
               [-1.,  0.],
               [ 0., -1.]])

    """
    H = np.r_[H1, H2]
    return H
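With 2-D inputs, np.r_ concatenates along the first axis, so this union is equivalent to numpy.vstack for matrices with matching column counts. A quick check with toy matrices:

import numpy as np

H1 = np.eye(2)
H2 = -np.eye(2)
print(np.array_equal(np.r_[H1, H2], np.vstack([H1, H2])))  # True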
Example 122
def naive_peaks(vec, k=33):
    """A naive method for finding peaks of a signal.

    1. Smooth vector
    2. Find peaks (local maxima)
    3. Find local max from original signal, pre-smoothing
    4. Return (sorted, descending) peaks
    """
    a = smooth_hanning(vec, k)
    k2 = (k - 1) / 2
    peaks = np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True]
    p = np.array(np.where(peaks)[0])

    maxidx = np.zeros(np.shape(p))
    maxvals = np.zeros(np.shape(p))
    for i, pk in enumerate(p):
        maxidx[i] = np.argmax(vec[pk - k2:pk + k2]) + pk - k2
        maxvals[i] = np.max(vec[pk - k2:pk + k2])
    out = np.array([maxidx, maxvals]).T

    return out[(-out[:, 1]).argsort()]
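The peaks line pads two shifted comparisons with True at opposite ends so the mask keeps the original length; an element is flagged when it is greater than both neighbours. A small standalone check with a made-up vector:

import numpy as np

a = np.array([0., 2., 1., 3., 0.])
peaks = np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True]
print(np.where(peaks)[0])  # [1 3]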
Example 123
def smooth_data(x, window_len=13, window='flat'):
    """smooth the data using a window with requested size.
    courtesy of the scipy cookbook.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.
    Output length equals input length here
    """
    if x.ndim != 1:
        raise ValueError, "smooth only accepts 1 dimension arrays."
    if x.size < window_len:
        raise ValueError, "Input vector needs to be bigger than window size."
    if window_len < 3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        w = eval('np.' + window + '(window_len)')
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[(window_len/2):-(window_len/2)]
Example 124
def smooth_data(x, window_len=13, window='flat'):
    """smooth the data using a window with requested size.
    courtesy of the scipy cookbook.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.
    Output length equals input length here
    """
    if x.ndim != 1:
        raise ValueError, "smooth only accepts 1 dimension arrays."
    if x.size < window_len:
        raise ValueError, "Input vector needs to be bigger than window size."
    if window_len < 3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        w = eval('np.' + window + '(window_len)')
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[(window_len/2):-(window_len/2)]
Example 125
def concat(x, y, train='on'):
    xdim = 1
    xp = Deel.xp
    if Deel.gpu >= 0:
        x = cuda.to_cpu(x)
        y = cuda.to_cpu(y)
    x = x.copy()
    y = y.copy()
    for n in x.shape:
        xdim *= n
    if len(x.shape) > 1:
        x = x.reshape((xdim,))
    ydim = 1
    for n in y.shape:
        ydim *= n
    if len(y.shape) > 1:
        y = y.reshape((ydim,))
    z = np.r_[x, y]
    z = xp.asarray(z, dtype=xp.float32)
    return z
Example 126
View licensedef smooth(x, window_len=7, window='hanning'): """smooth the data using a window with requested size. This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. input: x: the input signal window_len: the dimension of the smoothing window; should be an odd integer window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing. output: the smoothed signal example: t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. """ if x.ndim != 1: raise ValueError, "smooth only accepts 1 dimension arrays." if x.size < window_len: raise ValueError, "Input vector needs to be bigger than window size." if window_len<3: return x if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]] #print(len(s)) if window == 'flat': #moving average w=np.ones(window_len,'d') else: w=eval('np.'+window+'(window_len)') y=np.convolve(w/w.sum(),s,mode='valid') y = y[(window_len/2 - 1):-(window_len/2 + 1)] return y
Example 127
View licensedef setUp(self): """ Prepare an assembly with two subassemblies: one assembly representing a spherical lens behind a flat screen, and one asssembly representing a perfect mirror. The mirror will be placed at the two subassemblies' focus, so a paraxial ray will come back on the other side of the optical axis. Reference: In [1], the lensmaker equation """ # focal length = 1, thickness = 1/6 R = 1./6. back_surf = Surface(HemisphereGM(R), opt.RefractiveHomogenous(1., 1.5), location=N.r_[0., 0., -R/2.]) front_surf = Surface(HemisphereGM(R), opt.RefractiveHomogenous(1., 1.5), location=N.r_[0., 0., R/2.], rotation=rotx(N.pi/2.)[:3,:3]) front_lens = AssembledObject(surfs=[back_surf, front_surf]) back_surf = Surface(FlatGeometryManager(), opt.RefractiveHomogenous(1., 1.5), location=N.r_[0., 0., -0.01]) front_surf = Surface(FlatGeometryManager(), opt.RefractiveHomogenous(1., 1.5), location=N.r_[0., 0., 0.01]) glass_screen = AssembledObject(surfs=[back_surf, front_surf], transform=translate(0., 0., 0.5)) lens_assembly = Assembly(objects=[glass_screen, front_lens]) lens_assembly.set_transform(translate(0., 0., 1.)) full_assembly = Assembly(objects=[rect_one_sided_mirror(1., 1., 0.)], subassemblies = [lens_assembly]) self.engine = TracerEngine(full_assembly)
Example 128
View licensedef ray_tracer(self, bundle, reps, min_energy, tree=True): """ Creates a ray bundle or uses a reflected ray bundle, and intersects it with all objects, uses intersect_ray(). Based on the intersections, generates an outgoing ray in accordance with way the incoming ray reflects or refracts off any surfaces. Arguments: bundle - the initial incoming bundle reps - stop iteration after this many ray bundles were generated (i.e. after the original rays intersected some surface this many times). min_energy - the minimum energy the rays have to have continue tracking them; rays with a lower energy are discarded. A float. tree - if True, register each bundle in self.tree, otherwise only register the last bundle. Returns: A tuple containing an array of vertices and an array of the the direcitons of the last outgoing raybundle (note that the vertices of the new bundle are the intersection points of the previous incoming bundle) NB: the order of the rays within the arrays may change, but they are tracked by the ray tree """ self.tree = RayTree() bund = bundle if tree is True: self.tree.append(bund) # A list of surfaces and their matching objects: surfaces = self._asm.get_surfaces() objects = self._asm.get_objects() num_surfs = len(surfaces) surfs_per_obj = [len(obj.get_surfaces()) for obj in objects] surfs_until_obj = N.hstack((N.r_[0], N.add.accumulate(surfs_per_obj))) surf_ownership = N.repeat(N.arange(len(objects)), surfs_per_obj) ray_ownership = -1*N.ones(bund.get_num_rays()) surfs_relevancy = N.ones((num_surfs, bund.get_num_rays()), dtype=N.bool) for i in xrange(reps): front_surf, owned_rays = self.intersect_ray(bund, surfaces, objects, \ surf_ownership, ray_ownership, surfs_relevancy) outg = [] record = [] out_ray_own = [] new_surfs_relevancy = [] weak_ray_pos = [] for surf_idx in xrange(num_surfs): inters = front_surf[surf_idx, owned_rays[surf_idx]] if not any(inters): surfaces[surf_idx].done() continue surfaces[surf_idx].select_rays(N.nonzero(inters)[0]) new_outg = surfaces[surf_idx].get_outgoing() new_record = new_outg # Fix parent indexing to refer to the full original bundle: parents = N.nonzero(owned_rays[surf_idx])[0][new_outg.get_parents()] new_outg.set_parents(parents) # Delete rays with negligible energies delete = new_outg.get_energy() <= min_energy weak_ray_pos.append(delete) if delete.any(): new_outg = new_outg.delete_rays(N.nonzero(delete)[0]) surfaces[surf_idx].done() # Aggregate outgoing bundles from all the objects outg.append(new_outg) record.append(new_record) # Add new ray-ownership information to the total list: obj_idx = surf_ownership[surf_idx] surf_rel_idx = surf_idx - surfs_until_obj[obj_idx] object_owns_outg = objects[obj_idx].own_rays(new_outg, surf_rel_idx) out_ray_own.append(N.where(object_owns_outg, obj_idx, -1)) # Add new surface-relevancy information, saying which surfaces # of the full list of surfaces must be checked next. This is # somewhat memory-intensize and requires optimization. 
surf_relev = N.ones((num_surfs, new_outg.get_num_rays()), dtype=N.bool) surf_relev[surf_ownership == obj_idx] = \ objects[obj_idx].surfaces_for_next_iteration(new_outg, surf_rel_idx) new_surfs_relevancy.append(surf_relev) bund = concatenate_rays(outg) if tree: # stores parent branch for purposes of ray tracking record = concatenate_rays(record) if record.get_num_rays() != 0: weak_ray_pos = N.hstack(weak_ray_pos) record = bund + record.inherit(N.nonzero(weak_ray_pos)[0]) self.tree.append(record) if bund.get_num_rays() == 0: # All rays escaping break ray_ownership = N.hstack(out_ray_own) surfs_relevancy = N.hstack(new_surfs_relevancy) if not tree: # Save only the last bundle. Don't bother moving weak rays to end. record = concatenate_rays(record) self.tree.append(record) return bund.get_vertices(), bund.get_directions()
Example 129
View license@experimental(as_of="0.4.0") def rda(y, x, scale_Y=False, scaling=1): r"""Compute redundancy analysis, a type of canonical analysis. It is related to PCA and multiple regression because the explained variables `y` are fitted to the explanatory variables `x` and PCA is then performed on the fitted values. A similar process is performed on the residuals. RDA should be chosen if the studied gradient is small, and CCA when it's large, so that the contingency table is sparse. Parameters ---------- y : pd.DataFrame :math:`n \times p` response matrix, where :math:`n` is the number of samples and :math:`p` is the number of features. Its columns need be dimensionally homogeneous (or you can set `scale_Y=True`). This matrix is also referred to as the community matrix that commonly stores information about species abundances x : pd.DataFrame :math:`n \times m, n \geq m` matrix of explanatory variables, where :math:`n` is the number of samples and :math:`m` is the number of metadata variables. Its columns need not be standardized, but doing so turns regression coefficients into standard regression coefficients. scale_Y : bool, optional Controls whether the response matrix columns are scaled to have unit standard deviation. Defaults to `False`. scaling : int Scaling type 1 produces a distance biplot. It focuses on the ordination of rows (samples) because their transformed distances approximate their original euclidean distances. Especially interesting when most explanatory variables are binary. Scaling type 2 produces a correlation biplot. It focuses on the relationships among explained variables (`y`). It is interpreted like scaling type 1, but taking into account that distances between objects don't approximate their euclidean distances. See more details about distance and correlation biplots in [1]_, \S 9.1.4. Returns ------- OrdinationResults Object that stores the computed eigenvalues, the proportion explained by each of them (per unit), transformed coordinates for feature and samples, biplot scores, sample constraints, etc. See Also -------- ca cca OrdinationResults Notes ----- The algorithm is based on [1]_, \S 11.1, and is expected to give the same results as ``rda(y, x)`` in R's package vegan. The eigenvalues reported in vegan are re-normalized to :math:`\sqrt{\frac{s}{n-1}}` `n` is the number of samples, and `s` is the original eigenvalues. Here we will only return the original eigenvalues, as recommended in [1]_. References ---------- .. [1] Legendre P. and Legendre L. 1998. Numerical Ecology. Elsevier, Amsterdam. """ Y = y.as_matrix() X = x.as_matrix() n, p = y.shape n_, m = x.shape if n != n_: raise ValueError( "Both data matrices must have the same number of rows.") if n < m: # Mmm actually vegan is able to do this case, too raise ValueError( "Explanatory variables cannot have less rows than columns.") sample_ids = y.index feature_ids = y.columns # Centre response variables (they must be dimensionally # homogeneous) Y = scale(Y, with_std=scale_Y) # Centre explanatory variables X = scale(X, with_std=False) # Distribution of variables should be examined and transformed # if necessary (see paragraph 4 in p. 580 L&L 1998) # Compute Y_hat (fitted values by multivariate linear # regression, that is, linear least squares). Formula 11.6 in # L&L 1998 involves solving the normal equations, but that fails # when cond(X) ~ eps**(-0.5). 
A more expensive but much more # stable solution (fails when cond(X) ~ eps**-1) is computed # using the QR decomposition of X = QR: # (11.6) Y_hat = X [X' X]^{-1} X' Y # = QR [R'Q' QR]^{-1} R'Q' Y # = QR [R' R]^{-1} R'Q' Y # = QR R^{-1} R'^{-1} R' Q' Y # = Q Q' Y # and B (matrix of regression coefficients) # (11.4) B = [X' X]^{-1} X' Y # = R^{-1} R'^{-1} R' Q' Y # = R^{-1} Q' # Q, R = np.linalg.qr(X) # Y_hat = Q.dot(Q.T).dot(Y) # B = scipy.linalg.solve_triangular(R, Q.T.dot(Y)) # This works provided X has full rank. When not, you can still # fix it using R's pseudoinverse or partitioning R. To avoid any # issues, like the numerical instability when trying to # reproduce an example in L&L where X was rank-deficient, we'll # just use `np.linalg.lstsq`, which uses the SVD decomposition # under the hood and so it's also more expensive. B, _, rank_X, _ = lstsq(X, Y) Y_hat = X.dot(B) # Now let's perform PCA on the fitted values from the multiple # regression u, s, vt = svd(Y_hat, full_matrices=False) # vt are the right eigenvectors, which is what we need to # perform PCA. That is, we're changing points in Y_hat from the # canonical basis to the orthonormal basis given by the right # eigenvectors of Y_hat (or equivalently, the eigenvectors of # the covariance matrix Y_hat.T.dot(Y_hat)) # See 3) in p. 583 in L&L 1998 rank = svd_rank(Y_hat.shape, s) # Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues U = vt[:rank].T # U as in Fig. 11.2 # Ordination in the space of response variables. Its columns are # sample scores. (Eq. 11.12) F = Y.dot(U) # Ordination in the space of explanatory variables. Its columns # are fitted sample scores. (Eq. 11.13) Z = Y_hat.dot(U) # Canonical coefficients (formula 11.14) # C = B.dot(U) # Not used Y_res = Y - Y_hat # PCA on the residuals u_res, s_res, vt_res = svd(Y_res, full_matrices=False) # See 9) in p. 587 in L&L 1998 rank_res = svd_rank(Y_res.shape, s_res) # Theoretically, there're at most min(p, n - 1) non-zero eigenvalues as U_res = vt_res[:rank_res].T F_res = Y_res.dot(U_res) # Ordination in the space of residuals eigenvalues = np.r_[s[:rank], s_res[:rank_res]] # Compute scores if scaling not in {1, 2}: raise NotImplementedError("Only scalings 1, 2 available for RDA.") # According to the vegan-FAQ.pdf, the scaling factor for scores # is (notice that L&L 1998 says in p. 586 that such scaling # doesn't affect the interpretation of a biplot): pc_ids = ['RDA%d' % (i+1) for i in range(len(eigenvalues))] eigvals = pd.Series(eigenvalues, index=pc_ids) const = np.sum(eigenvalues**2)**0.25 if scaling == 1: scaling_factor = const elif scaling == 2: scaling_factor = eigenvalues / const feature_scores = np.hstack((U, U_res)) * scaling_factor sample_scores = np.hstack((F, F_res)) / scaling_factor feature_scores = pd.DataFrame(feature_scores, index=feature_ids, columns=pc_ids) sample_scores = pd.DataFrame(sample_scores, index=sample_ids, columns=pc_ids) # TODO not yet used/displayed sample_constraints = pd.DataFrame(np.hstack((Z, F_res)) / scaling_factor, index=sample_ids, columns=pc_ids) # Vegan seems to compute them as corr(X[:, :rank_X], # u) but I don't think that's a good idea. 
In fact, if # you take the example shown in Figure 11.3 in L&L 1998 you # can see that there's an arrow for each of the 4 # environmental variables (depth, coral, sand, other) even if # other = not(coral or sand) biplot_scores = corr(X, u) biplot_scores = pd.DataFrame(biplot_scores, index=x.columns, columns=pc_ids[:biplot_scores.shape[1]]) # The "Correlations of environmental variables with sample # scores" from table 11.4 are quite similar to vegan's biplot # scores, but they're computed like this: # corr(X, F)) p_explained = pd.Series(eigenvalues / eigenvalues.sum(), index=pc_ids) return OrdinationResults('RDA', 'Redundancy Analysis', eigvals=eigvals, proportion_explained=p_explained, features=feature_scores, samples=sample_scores, biplot_scores=biplot_scores, sample_constraints=sample_constraints)
Example 130
def _join_batches(self, main_batch, sub_batch):
    main_batch['data'] = n.r_[main_batch['data'], sub_batch['data']]
Example 131
@borg.annotations(
    out_path = ("results output path"),
    bundle = ("path to pre-recorded runs", "positional", None, os.path.abspath),
    workers = ("submit jobs?", "option", "w", int),
    local = ("workers are local?", "flag"),
    )
def main(out_path, bundle, workers = 0, local = False):
    """Evaluate the pure multinomial model over a range of smoothing values."""
    def yield_jobs():
        run_data = borg.storage.RunData.from_bundle(bundle)
        validation = sklearn.cross_validation.KFold(len(run_data), 10, indices = False)

        for (train_mask, test_mask) in validation:
            split = uuid.uuid4()
            alphas = numpy.r_[1e-8:1e-1:64j]

            for alpha in alphas:
                yield (evaluate_split, [run_data, alpha, split, train_mask, test_mask])

    with open(out_path, "w") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["alpha", "instances", "split", "mean_log_probability"])

        for (_, row) in condor.do(yield_jobs(), workers, local):
            writer.writerow(row)
            out_file.flush()
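numpy.r_[1e-8:1e-1:64j] uses an imaginary step, which r_ interprets as a point count rather than a stride: 64 evenly spaced values between the endpoints, inclusive, just like numpy.linspace. In isolation:

import numpy as np

alphas = np.r_[1e-8:1e-1:64j]
print(alphas.shape)                                       # (64,)
print(np.allclose(alphas, np.linspace(1e-8, 1e-1, 64)))   # True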
Example 132
View license@borg.annotations( out_path = ("results CSV output path"), runs = ("path to JSON runs specification", "positional", None, borg.util.load_json), repeats = ("number of times to repeat each run", "option", None, int), workers = ("submit jobs?", "option", "w"), local = ("workers are local?", "flag"), ) def main(out_path, runs, repeats = 128, workers = 0, local = False): """Simulate portfolio and solver behavior.""" logger.info("simulating %i runs", len(runs)) get_run_data = borg.util.memoize(borg.storage.RunData.from_bundle) def yield_jobs(): for run in runs: all_data = get_run_data(run["bundle"]) validation = sklearn.cross_validation.ShuffleSplit(len(all_data), repeats, test_fraction = 0.2, indices = False) if run["portfolio_name"] == "-": makers = map(borg.experiments.simulate_runs.SolverMaker, all_data.solver_names) else: makers = [borg.experiments.simulate_runs.PortfolioMaker(run["portfolio_name"])] max_instances = len(all_data) * 0.8 for (train_mask, test_mask) in validation: for instances in map(int, map(round, numpy.r_[10.0:max_instances:32j])): for maker in makers: yield ( simulate_run, [ run, maker, all_data, train_mask, test_mask, instances, run["independent"], run["mixture"], ], ) with borg.util.openz(out_path, "wb") as out_file: writer = csv.writer(out_file) writer.writerow(["description", "solver", "instances", "successes", "mean_time", "median_time"]) for (_, row) in condor.do(yield_jobs(), workers, local): writer.writerow(row) out_file.flush()
Example 133
def reshape_to_2d(data, axis):
    """Reshape data to 2D with specified axis as the 2nd dimension."""
    # get the shape, rank, and the length of the chosen axis
    dshape = data.shape
    rnk = len(dshape)
    n = dshape[axis]

    # convert negative axis to positive axis
    if axis < 0:
        axis = axis + rnk

    # determine the new order of the axes
    newdims = np.r_[0:axis, axis+1:rnk, axis]

    # reshape and transpose the data
    newdata = np.reshape(np.transpose(data, tuple(newdims)),
                         (np.prod(dshape, axis=0)/n, n))

    # make sure we have a copy
    #newdata = newdata.copy()

    return newdata
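np.r_[0:axis, axis+1:rnk, axis] mixes several slices and a scalar in one expression to build the permutation that moves the chosen axis to the end. For instance, with hypothetical values rnk=4 and axis=1:

import numpy as np

rnk, axis = 4, 1
newdims = np.r_[0:axis, axis+1:rnk, axis]
print(newdims)  # [0 2 3 1]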
Example 134
def reshape_from_2d(data, axis, dshape):
    """Reshape data from 2D back to specified dshape."""
    # set the rank of the array
    rnk = len(dshape)

    # fix negative axis to be positive
    if axis < 0:
        axis = axis + rnk

    # determine the dims from reshape_to_2d call
    newdims = np.r_[0:axis, axis+1:rnk, axis]

    # determine the transposed shape and reshape it back
    tdshape = np.take(dshape, newdims, 0)
    ret = np.reshape(data, tuple(tdshape))

    # figure out how to retranspose the matrix
    vals = range(rnk)
    olddims = vals[:axis] + [rnk-1] + vals[axis:rnk-1]
    ret = np.transpose(ret, tuple(olddims))

    # make sure we have a copy
    #ret = ret.copy()

    return ret
Example 135
View licensedef permutation_test(X, Y=None, parametric=True, iterations=1000): """ Perform a permutation test on paired or non-paired data. Observations must be on the first axis. """ # see if paired or not and concat data if Y is None: paired = True data = X nX = len(X) else: paired = False data = np.r_[X,Y] nX = len(X) nY = len(Y) # currently no non-parametric if not parametric: raise NotImplementedError("Currently only parametric stats are supported.") # perform stats z_boot = [] if paired: # paired stat raise NotImplementedError("Currently only non-paired stats are supported.") # first on actual data #t,p = ttest_1samp(data) else: # non-paired # first on actual data z = ttest_ind_z_one_sided(data[:nX],data[nX:]) # now on random shuffles sys.stdout.write('%d: '%iterations) for i in xrange(iterations): # shuffle it sys.stdout.write('%d '%i) sys.stdout.flush() np.random.shuffle(data) z_boot.append(ttest_ind_z_one_sided(data[:nX],data[nX:])) sys.stdout.write('\n') sys.stdout.flush() # convert z_boot to array z_boot = np.asarray(z_boot) # return those z values return z, z_boot
Example 136
@pm.deterministic
def adj_weights(weights=weights):
    return np.sort(np.r_[weights, 1 - weights.sum()])
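Here np.r_ appends the implicit last weight so the full vector sums to one, a common trick when only the first k-1 weights are free parameters. A standalone sketch with made-up weights:

import numpy as np

weights = np.array([0.2, 0.5])
full = np.sort(np.r_[weights, 1 - weights.sum()])
print(full)                          # [0.2 0.3 0.5]
print(np.isclose(full.sum(), 1.0))   # True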
Example 137
View licensedef smooth(signal, winlen=11, window='hanning', lencorrect=True): """Smooth a trace, based on: http://wiki.scipy.org/Cookbook/SignalSmooth arguments signal -- a vector (i.e. a NumPy array) containing a single trace of your signal keyword arguments winlen -- integer indicating window length (default = 11) window -- smoothing type, should be one of the following: 'flat', 'hanning', 'hamming', 'bartlett', or 'blackman' (default = 'hanning') lencorrect -- Boolean indicating if the output (the smoothed signal) should have the same length as the input (the raw signal); this is not necessarily so because of data convolution (default = True) returns signal -- smoothed signal """ # # # # # # input errors # really small window if winlen < 3: return signal # non-integer window length if type(winlen) != int: try: winlen = int(winlen) except: raise Exception("Error in pyenalysis.smooth: provided window length ('%s') is not compatible; please provide an integer window length" % winlen) # wrong type of window if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise Exception("Error in pyenalysis.smooth: windowtype '%s' is not supported; please use one of the following: 'flat', 'hanning', 'hamming', 'bartlett', or 'blackman'" % window) # wrong signal dimension if signal.ndim != 1: raise Exception("Error in pyenalysis.smooth: input is not a single signal trace, but has %d dimensions; please provide a 1-dimension array" % signal.ndim) # too small a trace if signal.size < winlen: raise Exception("Error in pyenalysis.smooth: input signal has too few datapoints (%d) for provided window length (%d)" % (signal.size,winlen)) # # # # # # smoothing # slice to concatenation s = numpy.r_[signal[winlen-1:0:-1],signal,signal[-1:-winlen:-1]] # this is equivalent to: # p1 = signal[winlen-1:0:-1].tolist() # first part of signal reversed # p2 = signal.tolist() # p3 = signal[-1:-winlen:-1].tolist() # last part of signal reversed # s = p1 + p2 + p3 # moving average if window == 'flat': w = numpy.ones(winlen, 'd') # bit more sophisticated smoothing algorithms else: w = eval("numpy.%s(%d)" % (window,winlen)) # convolve signal, according to chosen smoothing type smoothed = numpy.convolve(w/w.sum(), s, mode='valid') # correct length if necessary if lencorrect: smoothed = smoothed[(winlen/2-1):(-winlen/2)] try: smoothed = smoothed[:len(signal)] except: raise Exception("Error in pyenalysis.smooth: output array is too short (len(output)=%d, len(signal)=%d)" % (len(smoothed),len(signal))) return smoothed
0
Example 138
View license
def __getitem__(self, args):
    raw_array = _np.r_[args]
    return wrap_if_nodes_inside(raw_array, slow_op_name="r_")
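Forwarding __getitem__ to np.r_ gives the wrapping object numpy's r_ slice-and-scalar syntax. A minimal sketch of the same idiom with a hypothetical stand-in class (wrap_if_nodes_inside is specific to the project above and is omitted here):

import numpy as np

class RBuilder(object):
    def __getitem__(self, args):
        return np.r_[args]        # args may be a slice, a scalar, or a tuple

r = RBuilder()
print(r[0:5])       # [0 1 2 3 4]
print(r[1, 2, 3])   # [1 2 3]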
0
Example 139
View license
def vasicek_czcb_values(self, r0, R, ratio, T, sigma, kappa, theta,
                        M, prob=1e-6, max_policy_iter=10,
                        grid_struct_const=0.25, rs=None):
    r_min, dr, N, dtau = \
        self.vasicek_params(r0, M, sigma, kappa, theta,
                            T, prob, grid_struct_const, rs)
    r = np.r_[0:N]*dr + r_min
    v_mplus1 = np.ones(N)

    for i in range(1, M+1):
        K = self.exercise_call_price(R, ratio, i*dtau)
        eex = np.ones(N)*K
        subdiagonal, diagonal, superdiagonal = \
            self.vasicek_diagonals(sigma, kappa, theta,
                                   r_min, dr, N, dtau)
        v_mplus1, iterations = \
            self.iterate(subdiagonal, diagonal, superdiagonal,
                         v_mplus1, eex, max_policy_iter)

    return r, v_mplus1
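np.r_[0:N] is simply np.arange(N); scaling by dr and shifting by r_min turns it into the short-rate grid. A quick check with illustrative values (N, dr, and r_min below are assumptions, not taken from the project):

import numpy as np

N, dr, r_min = 5, 0.01, 0.02
r = np.r_[0:N]*dr + r_min
assert np.allclose(r, np.arange(N)*dr + r_min)
print(r)    # [0.02 0.03 0.04 0.05 0.06]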
0
Example 140
View license
def vasicek_diagonals(self, sigma, kappa, theta, r_min, dr, N, dtau):
    rn = np.r_[0:N]*dr + r_min
    subdiagonals = kappa*(theta-rn)*dtau/(2*dr) - \
        0.5*(sigma**2)*dtau/(dr**2)
    diagonals = 1 + rn*dtau + sigma**2*dtau/(dr**2)
    superdiagonals = -kappa*(theta-rn)*dtau/(2*dr) - \
        0.5*(sigma**2)*dtau/(dr**2)

    # Implement boundary conditions.
    if N > 0:
        v_subd0 = subdiagonals[0]
        superdiagonals[0] = superdiagonals[0] - subdiagonals[0]
        diagonals[0] += 2*v_subd0
        subdiagonals[0] = 0

    if N > 1:
        v_superd_last = superdiagonals[-1]
        subdiagonals[-1] = subdiagonals[-1] - superdiagonals[-1]
        diagonals[-1] += 2*v_superd_last
        superdiagonals[-1] = 0

    return subdiagonals, diagonals, superdiagonals
0
Example 142
View license
def remove_duplicate_rows(mat):
    diffs = mat[1:] - mat[:-1]
    return mat[np.r_[True, (abs(diffs) >= 1e-5).any(axis=1)]]
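np.r_[True, ...] prepends True so the row-difference mask has one entry per row and the first row is always kept. A minimal sketch with a toy matrix:

import numpy as np

mat = np.array([[0., 0.], [0., 0.], [1., 2.], [1., 2.], [3., 4.]])
diffs = mat[1:] - mat[:-1]
keep = np.r_[True, (abs(diffs) >= 1e-5).any(axis=1)]
print(keep)         # [ True False  True False  True]
print(mat[keep])    # rows 0, 2 and 4 survive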
0
Example 143
View license
def remove_duplicate_rows(mat):
    diffs = mat[1:] - mat[:-1]
    return mat[np.r_[True, (abs(diffs) >= 1e-6).any(axis=1)]]
0
Example 144
View license
@property
def B_(self):
    r''':math:`\underline{B}=\langle V_{n+1},M_lAM_rU\rangle`.

    This property is obtained from :math:`C` if the operator is
    self-adjoint. Otherwise, the inner products have to be formed
    explicitly.'''
    (n_, n) = self.H.shape
    ls = self.linear_system
    if self._B_ is None or self._B_.shape[1] < n_:
        # compute B_
        if ls.self_adjoint:
            self._B_ = self.C.T.conj()
            if n_ > n:
                self._B_ = numpy.r_[self._B_,
                                    utils.inner(self.V[:, [-1]],
                                                self.projection.AU,
                                                ip_B=ls.ip_B)]
        else:
            self._B_ = utils.inner(self.V, self.projection.AU,
                                   ip_B=ls.ip_B)
    return self._B_
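For 2-D inputs, numpy.r_ concatenates along the first axis, so the call above appends one extra row of inner products to the existing matrix. A minimal shape-level sketch (the values are placeholders, not the Krylov quantities):

import numpy as np

B = np.arange(6.0).reshape(3, 2)       # existing block with n_ rows
extra_row = np.array([[10.0, 11.0]])   # one new row, shape (1, 2)
B_ = np.r_[B, extra_row]
print(B_.shape)                         # (4, 2)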
0
Example 145
View license
def seed_elements(self, lon, lat, radius=0, number=1, time=None,
                  objectType=1):
    """Seed particles in a cone-shaped area over a time period."""
    # All particles carry their own objectType (number),
    # but so far we only use one for each sim
    # objtype = np.ones(number)*objectType

    # Probability of jibing (4 % per hour)
    pjibe = 0.04

    if time is None:
        # Use first time of first reader if time is not given for seeding
        try:
            for reader in self.readers.items():
                if reader[1].start_time is not None:
                    firstReader = reader[1]
                    break
        except:
            raise ValueError('Time must be specified when no '
                             'reader is added')
        logging.info('Using start time (%s) of reader %s' %
                     (firstReader.start_time, firstReader.name))
        self.start_time = firstReader.start_time
    else:
        self.start_time = time

    # Drift orientation of particles. 0 is right of downwind,
    # 1 is left of downwind
    # orientation = 1*(np.random.rand(number)>=0.5)
    # Odd numbered particles are left-drifting, even are right of downwind.
    orientation = np.r_[:number] % 2
    ones = np.ones_like(orientation)

    # Downwind leeway properties.
    # Generate normal, N(0,1), random perturbations for leeway coeffs.
    # Negative downwind slope must be avoided as
    # particles should drift downwind.
    # The problem arises because of high error variances (see e.g. PIW-1).
    downwindSlope = ones*self.leewayprop[objectType]['DWSLOPE']
    downwindOffset = ones*self.leewayprop[objectType]['DWOFFSET']
    dwstd = self.leewayprop[objectType]['DWSTD']
    rdw = np.zeros(number)
    epsdw = np.zeros(number)
    for i in xrange(number):
        rdw[i] = np.random.randn(1)
        epsdw[i] = rdw[i]*dwstd
        # Avoid negative downwind slopes
        while downwindSlope[i] + epsdw[i]/20.0 < 0.0:
            rdw[i] = np.random.randn(1)
            epsdw[i] = rdw[i]*dwstd
    downwindEps = epsdw
    # NB
    # downwindEps = np.zeros(number)

    # Crosswind leeway properties
    rcw = np.random.randn(number)
    crosswindSlope = np.zeros(number)
    crosswindOffset = np.zeros(number)
    crosswindEps = np.zeros(number)
    crosswindSlope[orientation == RIGHT] = \
        self.leewayprop[objectType]['CWRSLOPE']
    crosswindSlope[orientation == LEFT] = \
        self.leewayprop[objectType]['CWLSLOPE']
    crosswindOffset[orientation == RIGHT] = \
        self.leewayprop[objectType]['CWROFFSET']
    crosswindOffset[orientation == LEFT] = \
        self.leewayprop[objectType]['CWLOFFSET']
    crosswindEps[orientation == RIGHT] = \
        rcw[orientation == RIGHT] * \
        self.leewayprop[objectType]['CWRSTD']
    crosswindEps[orientation == LEFT] = \
        rcw[orientation == LEFT] * \
        self.leewayprop[objectType]['CWLSTD']
    # NB
    # crosswindEps = np.zeros(number)

    # Jibe probability
    jibeProbability = ones*pjibe

    # Call general seed_elements function of OpenDriftSimulation class
    # with the specific values calculated
    super(Leeway, self).seed_elements(
        lon=lon, lat=lat, radius=radius, number=number,
        time=time, cone=True, orientation=orientation,
        objectType=objectType, downwindSlope=downwindSlope,
        crosswindSlope=crosswindSlope, downwindOffset=downwindOffset,
        crosswindOffset=crosswindOffset, downwindEps=downwindEps,
        crosswindEps=crosswindEps, jibeProbability=jibeProbability)
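np.r_[:number] is equivalent to np.arange(number), so taking it modulo 2 alternates the drift orientation of consecutive particles. A minimal sketch:

import numpy as np

number = 6
orientation = np.r_[:number] % 2
print(orientation)    # [0 1 0 1 0 1]: even indices right of downwind, odd left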
0
Example 146
View license
def seed_elements(self, lon, lat, radius=0, number=1, time=None,
                  objectType=1):
    """Seed particles in a cone-shaped area over a time period."""
    # All particles carry their own objectType (number),
    # but so far we only use one for each sim
    # objtype = np.ones(number)*objectType

    # Probability of jibing (4 % per hour)
    pjibe = 0.04

    if time is None:
        # Use first time of first reader if time is not given for seeding
        try:
            for reader in self.readers.items():
                if reader[1].start_time is not None:
                    firstReader = reader[1]
                    break
        except:
            raise ValueError('Time must be specified when no '
                             'reader is added')
        logging.info('Using start time (%s) of reader %s' %
                     (firstReader.start_time, firstReader.name))
        self.start_time = firstReader.start_time
    else:
        self.start_time = time

    # Drift orientation of particles. 0 is right of downwind,
    # 1 is left of downwind
    # orientation = 1*(np.random.rand(number)>=0.5)
    # Odd numbered particles are left-drifting, even are right of downwind.
    orientation = np.r_[:number] % 2
    ones = np.ones_like(orientation)

    # Downwind leeway properties.
    # Generate normal, N(0,1), random perturbations for leeway coeffs.
    # Negative downwind slope must be avoided as
    # particles should drift downwind.
    # The problem arises because of high error variances (see e.g. PIW-1).
    downwindSlope = ones*self.leewayprop[objectType]['DWSLOPE']
    downwindOffset = ones*self.leewayprop[objectType]['DWOFFSET']
    dwstd = self.leewayprop[objectType]['DWSTD']
    rdw = np.zeros(number)
    epsdw = np.zeros(number)
    for i in xrange(number):
        rdw[i] = np.random.randn(1)
        epsdw[i] = rdw[i]*dwstd
        # Avoid negative downwind slopes
        while downwindSlope[i] + epsdw[i]/20.0 < 0.0:
            rdw[i] = np.random.randn(1)
            epsdw[i] = rdw[i]*dwstd
    downwindEps = epsdw
    # NB
    # downwindEps = np.zeros(number)

    # Crosswind leeway properties
    rcw = np.random.randn(number)
    crosswindSlope = np.zeros(number)
    crosswindOffset = np.zeros(number)
    crosswindEps = np.zeros(number)
    crosswindSlope[orientation == RIGHT] = \
        self.leewayprop[objectType]['CWRSLOPE']
    crosswindSlope[orientation == LEFT] = \
        self.leewayprop[objectType]['CWLSLOPE']
    crosswindOffset[orientation == RIGHT] = \
        self.leewayprop[objectType]['CWROFFSET']
    crosswindOffset[orientation == LEFT] = \
        self.leewayprop[objectType]['CWLOFFSET']
    crosswindEps[orientation == RIGHT] = \
        rcw[orientation == RIGHT] * \
        self.leewayprop[objectType]['CWRSTD']
    crosswindEps[orientation == LEFT] = \
        rcw[orientation == LEFT] * \
        self.leewayprop[objectType]['CWLSTD']
    # NB
    # crosswindEps = np.zeros(number)

    # Jibe probability
    jibeProbability = ones*pjibe

    # Call general seed_elements function of OpenDriftSimulation class
    # with the specific values calculated
    super(Leeway, self).seed_elements(
        lon=lon, lat=lat, radius=radius, number=number,
        time=time, cone=True, orientation=orientation,
        objectType=objectType, downwindSlope=downwindSlope,
        crosswindSlope=crosswindSlope, downwindOffset=downwindOffset,
        crosswindOffset=crosswindOffset, downwindEps=downwindEps,
        crosswindEps=crosswindEps, jibeProbability=jibeProbability)
0
Example 147
View license
def smooth(x, window_len=200):
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    w = np.hanning(window_len)
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[int(window_len/2):-int(window_len/2)+1]
0
Example 148
View license
def smooth(x, window_len=200):
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    w = np.hanning(window_len)
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[int(window_len/2):-int(window_len/2)+1]
0
Example 149
View license
@property
def hyperparameter_settings(self):
    return (dict(r_discrete_distn=np.r_[0., 0, 0, 1, 1, 1],
                 alpha_0=5, beta_0=5),)
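With plain scalars, np.r_ acts like np.array; the leading 0. promotes the whole prior vector to float. A minimal sketch:

import numpy as np

r_discrete_distn = np.r_[0., 0, 0, 1, 1, 1]
print(r_discrete_distn.dtype)    # float64
print(r_discrete_distn)          # [0. 0. 0. 1. 1. 1.]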
0
Example 150
View license
@property
def hyperparameter_settings(self):
    return (dict(r_discrete_distn=np.r_[0., 0, 0, 1, 1, 1],
                 alpha_0=5, beta_0=5),)