numpy.c_

Here are examples of the Python API numpy.c_, taken from open source projects.
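
As a quick orientation before the project examples: numpy.c_ translates slice syntax into concatenation along the second axis, so 1-D inputs become columns of a 2-D result and 2-D inputs are stacked side by side. A minimal sketch:

>>> import numpy as np
>>> np.c_[np.array([1, 2, 3]), np.array([4, 5, 6])]
array([[1, 4],
       [2, 5],
       [3, 6]])
>>> np.c_[np.array([[1, 2]]), np.array([[3, 4]]), np.array([[5, 6]])]
array([[1, 2, 3, 4, 5, 6]])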

151 Examples

Example 101

Project: simpeg
Source File: test_TreeOperators.py
    def getError(self):

        z = 5  # Because 5 is just such a great number.

        call = lambda fun, xy: fun(xy[:, 0], xy[:, 1])

        ex = lambda x, y: x**2+y*z
        ey = lambda x, y: (z**2)*x+y*z

        sigma1 = lambda x, y: x*y+1
        sigma2 = lambda x, y: x*z+2
        sigma3 = lambda x, y: 3+z*y

        Gc = self.M.gridCC
        if self.sigmaTest == 1:
            sigma = np.c_[call(sigma1, Gc)]
            analytic = 144877./360  # Found using sympy. z=5
        elif self.sigmaTest == 2:
            sigma = np.c_[call(sigma1, Gc), call(sigma2, Gc)]
            analytic = 189959./120  # Found using sympy. z=5
        elif self.sigmaTest == 3:
            sigma = np.r_[call(sigma1, Gc), call(sigma2, Gc), call(sigma3, Gc)]
            analytic = 781427./360  # Found using sympy. z=5

        if self.location == 'edges':
            cart = lambda g: np.c_[call(ex, g), call(ey, g)]
            Ec = np.vstack((cart(self.M.gridEx),
                            cart(self.M.gridEy)))
            E = self.M.projectEdgeVector(Ec)
            if self.invProp:
                A = self.M.getEdgeInnerProduct(Utils.invPropertyTensor(self.M, sigma), invProp=True)
            else:
                A = self.M.getEdgeInnerProduct(sigma)
            numeric = E.T.dot(A.dot(E))
        elif self.location == 'faces':
            cart = lambda g: np.c_[call(ex, g), call(ey, g)]
            Fc = np.vstack((cart(self.M.gridFx),
                            cart(self.M.gridFy)))
            F = self.M.projectFaceVector(Fc)

            if self.invProp:
                A = self.M.getFaceInnerProduct(Utils.invPropertyTensor(self.M, sigma), invProp=True)
            else:
                A = self.M.getFaceInnerProduct(sigma)
            numeric = F.T.dot(A.dot(F))

        err = np.abs(numeric - analytic)
        return err
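
The sigma branches above mix np.c_ and np.r_; for 1-D inputs np.c_ lays them out as columns while np.r_ concatenates end to end. A quick sketch:

>>> import numpy as np
>>> a = np.arange(3.)
>>> np.c_[a, a].shape     # columns side by side
(3, 2)
>>> np.r_[a, a].shape     # end-to-end concatenation
(6,)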

Example 102

Project: statsmodels
Source File: onewaygls.py
    def fitjoint(self):
        '''fit a joint fixed effects model to all observations

        The regression results are attached as `lsjoint`.

        The contrasts for overall and pairwise tests for equality of coefficients are
        attached as a dictionary `contrasts`. This also includes the contrasts for the test
        that the coefficients of a level are zero. ::

        >>> res.contrasts.keys()
        [(0, 1), 1, 'all', 3, (1, 2), 2, (1, 3), (2, 3), (0, 3), (0, 2)]

        The keys are based on the original names or labels of the groups.

        TODO: keys can be numpy scalars and then the keys cannot be sorted



        '''
        if not hasattr(self, 'weights'):
            self.fitbygroups()
        groupdummy = (self.groupsint[:,None] == self.uniqueint).astype(int)
        #order of dummy variables by variable - not used
        #dummyexog = self.exog[:,:,None]*groupdummy[:,None,1:]
        #order of dummy variables by groups - used
        dummyexog = self.exog[:,None,:]*groupdummy[:,1:,None]
        exog = np.c_[self.exog, dummyexog.reshape(self.exog.shape[0],-1)] #self.nobs ??
        #Notes: I changed to drop first group from dummy
        #instead I want one full set dummies
        if self.het:
            weights = self.weights
            res = WLS(self.endog, exog, weights=weights).fit()
        else:
            res = OLS(self.endog, exog).fit()
        self.lsjoint = res
        contrasts = {}
        nvars = self.exog.shape[1]
        nparams = exog.shape[1]
        ndummies = nparams - nvars
        contrasts['all'] = np.c_[np.zeros((ndummies, nvars)), np.eye(ndummies)]
        for groupind, group in enumerate(self.unique[1:]):  #need enumerate if groups != groupsint
            groupind = groupind + 1
            contr = np.zeros((nvars, nparams))
            contr[:,nvars*groupind:nvars*(groupind+1)] = np.eye(nvars)
            contrasts[group] = contr
            #save also for pairs, see next
            contrasts[(self.unique[0], group)] = contr

        #Note: I'm keeping some duplication for testing
        pairs = np.triu_indices(len(self.unique),1)
        for ind1,ind2 in zip(*pairs):  #replace with group1, group2 in sorted(keys)
            if ind1 == 0:
                continue # need comparison with benchmark/normalization group separate
            g1 = self.unique[ind1]
            g2 = self.unique[ind2]
            group = (g1, g2)
            contr = np.zeros((nvars, nparams))
            contr[:,nvars*ind1:nvars*(ind1+1)] = np.eye(nvars)
            contr[:,nvars*ind2:nvars*(ind2+1)] = -np.eye(nvars)
            contrasts[group] = contr


        self.contrasts = contrasts
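
A minimal sketch of the broadcasting step above that builds the group-interaction regressors, on hypothetical toy data (4 observations, 2 regressors, 2 groups):

import numpy as np

exog = np.arange(8.).reshape(4, 2)                        # (nobs, nvars)
groupdummy = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])   # (nobs, ngroups)
# drop the first group's dummy and broadcast it against the regressors
dummyexog = exog[:, None, :] * groupdummy[:, 1:, None]    # (4, 1, 2)
full_exog = np.c_[exog, dummyexog.reshape(exog.shape[0], -1)]
print(full_exog.shape)  # (4, 4): base regressors plus one interacted block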

Example 103

Project: statsmodels
Source File: test_diagnostic.py
def notyet_atst():
    d = macrodata.load().data

    realinv = d['realinv']
    realgdp = d['realgdp']
    realint = d['realint']
    endog = realinv
    exog = add_constant(np.c_[realgdp, realint])
    res_ols1 = OLS(endog, exog).fit()

    #growth rates
    gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
    gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
    lint = d['realint'][:-1]
    tbilrate = d['tbilrate'][:-1]

    endogg = gs_l_realinv
    exogg = add_constant(np.c_[gs_l_realgdp, lint])
    exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])

    res_ols = OLS(endogg, exogg).fit()
    res_ols2 = OLS(endogg, exogg2).fit()

    #the following were done accidentally with res_ols1 in R,
    #with original Greene data

    params = np.array([-272.3986041341653, 0.1779455206941112,
                       0.2149432424658157])
    cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612,
                37.01280466875694, -0.2318836566017614, 4.602339488102263e-05,
                -0.0104687835998635, 37.012804668757, -0.0104687835998635,
                21.16037144168061]).reshape(3,3, order='F')
    cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015,
        54.81079621448568, -0.350751446329901, 6.953380432635583e-05,
        -0.01268990195095196, 54.81079621448564, -0.01268990195095195,
        22.92512402151113]).reshape(3,3, order='F')

    #goldfeld-quandt
    het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98,
                          pvalue=1.246141976112324e-30, distr='f')
    het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.)
    het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98,
                          pvalue=1.246141976112324e-30, distr='f')

    #goldfeld-quandt, fraction = 0.5
    het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47,
                          pvalue=2.154956842194898e-33, distr='f')

    gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5)
    compare_t_est(gq, het_gq_greater, decimal=(13, 14))
    assert_equal(gq[-1], 'increasing')


    harvey_collier = dict(stat=2.28042114041313, df=199,
                          pvalue=0.02364236161988260, distr='t')
    #hc = harvtest(fm, order.by=ggdp , data = list())
    harvey_collier_2 = dict(stat=0.7516918462158783, df=199,
                          pvalue=0.4531244858006127, distr='t')

Example 104

    @property
    def start_params(self):
        """
        (array) Starting parameters for maximum likelihood estimation.
        """
        # Inherited parameters
        params = markov_switching.MarkovSwitching.start_params.fget(self)

        # OLS for starting parameters
        endog = self.endog.copy()
        if self._k_exog > 0 and self.order > 0:
            exog = np.c_[self.exog, self.exog_ar]
        elif self._k_exog > 0:
            exog = self.exog
        elif self.order > 0:
            exog = self.exog_ar

        if self._k_exog > 0 or self.order > 0:
            beta = np.dot(np.linalg.pinv(exog), endog)
            variance = np.var(endog - np.dot(exog, beta))
        else:
            variance = np.var(endog)

        # Regression coefficients
        if self._k_exog > 0:
            if np.any(self.switching_coeffs):
                for i in range(self.k_regimes):
                    params[self.parameters[i, 'exog']] = (
                        beta[:self._k_exog] * (i / self.k_regimes))
            else:
                params[self.parameters['exog']] = beta[:self._k_exog]

        # Autoregressive
        if self.order > 0:
            if np.any(self.switching_ar):
                for i in range(self.k_regimes):
                    params[self.parameters[i, 'autoregressive']] = (
                        beta[self._k_exog:] * (i / self.k_regimes))
            else:
                params[self.parameters['autoregressive']] = beta[self._k_exog:]

        # Variance
        if self.switching_variance:
            params[self.parameters['variance']] = (
                np.linspace(variance / 10., variance, num=self.k_regimes))
        else:
            params[self.parameters['variance']] = variance

        return params
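
The beta computation above is ordinary least squares via the pseudoinverse; a minimal sketch on hypothetical data:

>>> import numpy as np
>>> exog = np.c_[np.ones(4), np.arange(4.)]
>>> endog = np.array([1., 3., 5., 7.])   # intercept 1, slope 2
>>> beta = np.dot(np.linalg.pinv(exog), endog)
>>> np.allclose(beta, [1., 2.])
True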

Example 105

Project: statsmodels
Source File: test_tools.py
    def test_cases(self):
        # Basic cases
        for series, diff, seasonal_diff, k_seasons, result in self.cases:
            
            # Test numpy array
            x = tools.diff(series, diff, seasonal_diff, k_seasons)
            assert_almost_equal(x, result)

            # Test as Pandas Series
            series = pd.Series(series)

            # Rewrite to test as n-dimensional array
            series = np.c_[series, series]
            result = np.c_[result, result]

            # Test Numpy array
            x = tools.diff(series, diff, seasonal_diff, k_seasons)
            assert_almost_equal(x, result)

            # Test as Pandas Dataframe
            series = pd.DataFrame(series)
            x = tools.diff(series, diff, seasonal_diff, k_seasons)
            assert_almost_equal(x, result)

Example 106

Project: pyDOE
Source File: doe_box_behnken.py
def bbdesign(n, center=None):
    """
    Create a Box-Behnken design
    
    Parameters
    ----------
    n : int
        The number of factors in the design
    
    Optional
    --------
    center : int
        The number of center points to include (default = 1).
    
    Returns
    -------
    mat : 2d-array
        The design matrix
    
    Example
    -------
    ::
    
        >>> bbdesign(3)
        array([[-1., -1.,  0.],
               [ 1., -1.,  0.],
               [-1.,  1.,  0.],
               [ 1.,  1.,  0.],
               [-1.,  0., -1.],
               [ 1.,  0., -1.],
               [-1.,  0.,  1.],
               [ 1.,  0.,  1.],
               [ 0., -1., -1.],
               [ 0.,  1., -1.],
               [ 0., -1.,  1.],
               [ 0.,  1.,  1.],
               [ 0.,  0.,  0.],
               [ 0.,  0.,  0.],
               [ 0.,  0.,  0.]])
        
    """
    assert n>=3, 'Number of variables must be at least 3'
    
    # First, compute a factorial DOE with 2 parameters
    H_fact = ff2n(2)
    # Now we populate the real DOE with this DOE
    
    # We make a factorial design on each pair of dimensions:
    # - we already created a two-factor factorial design above
    # - now loop over all pairs of factors
    Index = 0
    nb_lines = (n*(n-1)//2)*H_fact.shape[0]  # integer division keeps this an int
    H = repeat_center(n, nb_lines)
    
    for i in range(n - 1):
        for j in range(i + 1, n):
            Index = Index + 1
            H[max([0, (Index - 1)*H_fact.shape[0]]):Index*H_fact.shape[0], i] = H_fact[:, 0]
            H[max([0, (Index - 1)*H_fact.shape[0]]):Index*H_fact.shape[0], j] = H_fact[:, 1]

    if center is None:
        if n<=16:
            points= [0, 0, 0, 3, 3, 6, 6, 6, 8, 9, 10, 12, 12, 13, 14, 15, 16]
            center = points[n]
        else:
            center = n
        
    H = np.c_[H.T, repeat_center(n, center).T].T
    
    return H
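
The final line above appends the center points as rows by transposing twice; for 2-D arrays this np.c_ idiom is equivalent to np.r_ (or np.vstack). A quick check:

>>> import numpy as np
>>> H = np.zeros((2, 3))
>>> centers = np.ones((2, 3))
>>> np.array_equal(np.c_[H.T, centers.T].T, np.r_[H, centers])
True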

Example 107

Project: tracer
Source File: gui.py
    @t_api.on_trait_change('source_y, source_z')
    def bundle_move(self):
        position = N.tile(N.c_[[0, self.source_y, self.source_z]], (1, 2))
        self.bund.set_vertices(position)
        self.plot_ray_trace()
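
N.c_[[0, self.source_y, self.source_z]] works because np.c_ turns each 1-D list into a column; the tracer examples below build per-ray position and direction matrices the same way. A minimal sketch:

>>> import numpy as np
>>> np.c_[[0., 7., 7.]]             # a single list becomes a column vector
array([[0.],
       [7.],
       [7.]])
>>> np.c_[[1, 1, -1], [-1, 1, -1]]  # one column per list
array([[ 1, -1],
       [ 1,  1],
       [-1, -1]])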

Example 108

Project: tracer
Source File: test_case.py
def test_case(focus, num_rays=100, h_depth=0.7, side=0.4):
    # Case parameters (to be moved out):
    D = 5.
    
    center = N.c_[[0, 7., 7.]]
    x = -1/(math.sqrt(2))
    direction = N.array([0,x,x])
    radius_sun = 2.5 
    ang_range = 0.005
    
    iterate = 100
    min_energy = 1e-6
    
    # Model:
    assembly = MiniDish(D, focus, 0.9, focus + h_depth, side, h_depth, 0.9)
    assembly.set_transform(rotx(-N.pi/4))
    
    # Rays:
    sun = solar_disk_bundle(num_rays, center, direction, radius_sun, ang_range,
        flux=1000.)
    
    # Do the tracing:
    engine = TracerEngine(assembly)
    engine.ray_tracer(sun, iterate, min_energy)
    
    # Plot, scale in suns:
    f = plot_hits(assembly.histogram_hits()[0]/(side/50)**2/1000., (-side/2., side/2., -side/2., side/2.))
    f.show()

Example 109

Project: tracer
Source File: test_opt_callable.py
    def test_all_refracted(self):
        dir = N.c_[[1, 1, -1], [-1, 1, -1], [-1, -1, -1], [1, -1, -1]] / N.sqrt(3)
        position = N.c_[[0,0,1], [1,-1,1], [1,1,1], [-1,1,1]]
        en = N.r_[100, 200, 300, 400]
        bund = RayBundle(position, dir, energy=en, ref_index=N.ones(4))
        
        gm = FlatGeometryManager()
        prm = gm.find_intersections(N.eye(4), bund)
        refractive = optics_callables.RefractiveHomogenous(1,1.5)
        selector = N.array([0, 1, 3])
        gm.select_rays(selector)
        outg = refractive(gm, bund, selector)
        
        correct_pts = N.zeros((3,4))
        correct_pts[:2,0] = 1
        correct_pts = N.hstack((correct_pts[:,selector], correct_pts[:,selector]))
        N.testing.assert_array_equal(outg.get_vertices(), correct_pts)
        
        norm = N.c_[gm.get_normals()[:,0]]
        correct_refl_cos = -(dir*norm).sum(axis=0)[selector]
        correct_refr_cos = -N.sqrt(1 - (1./1.5)**2*(1 - correct_refl_cos**2))
        outg_cos = (outg.get_directions()*norm).sum(axis=0)
        N.testing.assert_array_equal(outg_cos, N.r_[correct_refl_cos, correct_refr_cos])
        
        N.testing.assert_array_equal(outg.get_energy().reshape(2,-1).sum(axis=0), \
            N.r_[100, 200, 400]) # reflection and refraction sum to 100%
        N.testing.assert_array_equal(outg.get_parents(), N.tile(selector, 2))

Example 110

Project: tracer
Source File: test_opt_callable.py
    def test_up_down(self):
        """Rays coming from below are absorbed, from above reflected"""
        going_down = N.c_[[1, 1, -1], [-1, 1, -1], [-1, -1, -1], [1, -1, -1]] / N.sqrt(3)
        going_up = going_down.copy()
        going_up[2] = 1 / N.sqrt(3)
        
        pos_up = N.c_[[0,0,1], [1,-1,1], [1,1,1], [-1,1,1]]
        pos_down = pos_up.copy()
        pos_down[2] = -1

        bund = RayBundle()
        bund.set_directions(N.hstack((going_down, going_up)))
        bund.set_vertices(N.hstack((pos_up, pos_down)))
        bund.set_energy(N.tile(100, 8))
        bund.set_ref_index(N.tile(1, 8))
        
        gm = FlatGeometryManager()
        prm = gm.find_intersections(N.eye(4), bund)
        absref = optics_callables.AbsorberReflector(0.)
        selector = N.arange(8)
        gm.select_rays(selector)
        outg = absref(gm, bund, selector)
        
        e = outg.get_energy()
        N.testing.assert_array_equal(e[:4], 100)
        N.testing.assert_array_equal(e[4:], 0)

Example 111

Project: tracer
Source File: test_tracer_engine.py
    def test_ray_tracer1(self):
        params = self.engine.ray_tracer(self._bund, 1,.05)[0]
        correct_params = N.c_[[0,.5,.5],[0,1,1]]

        N.testing.assert_array_almost_equal(params,correct_params)

Example 112

Project: tracer
Source File: test_tracer_engine.py
    def test_ray_tracer1(self):
        params = self.engine.ray_tracer(self._bund, 1,.05)[0]
        correct_params = N.c_[[0,1.5,1.5],[0,2,0]]
        
        N.testing.assert_array_almost_equal(params,correct_params)

Example 113

Project: tracer
Source File: test_tracer_engine.py
    def test_ray_tracer2(self):
        params = self.engine.ray_tracer(self._bund, 2,.05)[0]
        correct_params = N.c_[[0,2,2],[0,3,0]]
        
        N.testing.assert_array_almost_equal(params,correct_params)

Example 114

Project: tracer
Source File: test_tracer_engine.py
    def test_ray_tracer1(self):
        params = self.engine.ray_tracer(self._bund, 1, .05)[0]
        correct_params = N.c_[[0,-1,0],[0,1,0],[0,1,0]]
         
        N.testing.assert_array_almost_equal(params,correct_params, decimal=3)

Example 115

Project: tracer
Source File: test_tracer_engine.py
    def test_ray_tracers1(self):
        params = self.engine.ray_tracer(self._bund, 1, .05)[0]
        correct_params = N.c_[[0,2,0]]

        N.testing.assert_array_almost_equal(params,correct_params)

Example 116

Project: orange3-text
Source File: test_corpus.py
    def test_titles(self):
        c = Corpus.from_file('bookexcerpts')

        # no title feature set
        titles = c.titles
        self.assertEqual(len(titles), len(c))
        for title in titles:
            self.assertIn('Document ', title)

        # inferred title from heuristics
        expected = list(map(str, range(len(c))))
        c2 = Corpus(Domain([], [], (StringVariable('heading'),)),
                    None, None, np.c_[expected])
        titles = c2.titles
        self.assertEqual(titles, expected)

        # title feature set
        c.domain[0].attributes['title'] = True
        titles = c.titles
        self.assertEqual(len(titles), len(c))
        for title in titles:
            self.assertIn(title, c.domain.class_var.values)

Example 117

Project: mayavi
Source File: sources.py
    def reset(self, **traits):
        """Creates the dataset afresh or resets existing data source."""

        # First set the attributes without really doing anything since
        # the notification handlers are not called.
        self.set(trait_change_notify=False, **traits)

        points = self.points
        scalars = self.scalars
        x, y, z = self.x, self.y, self.z

        if 'points' in traits:
            x = points[:, 0].ravel()
            y = points[:, 1].ravel()
            z = points[:, 2].ravel()
            self.set(x=x, y=y, z=z, trait_change_notify=False)

        else:
            points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
            points.shape = (len(x), 3)
            self.set(points=points, trait_change_notify=False)

        # Create the dataset.
        n_pts = len(points) - 1
        lines = np.zeros((n_pts, 2), 'l')
        lines[:, 0] = np.arange(0, n_pts - 0.5, 1, 'l')
        lines[:, 1] = np.arange(1, n_pts + 0.5, 1, 'l')
        if self.dataset is None:
            pd = tvtk.PolyData()
        else:
            pd = self.dataset
        # Avoid lines referring to non-existing points: first set the
        # lines to None, then set the points, then set the lines
        # referring to the new points.
        pd.set(lines=None)
        pd.set(points=points)
        pd.set(lines=lines)

        if scalars is not None and len(scalars) > 0:
            assert len(x) == len(scalars)
            pd.point_data.scalars = np.ravel(scalars)
            pd.point_data.scalars.name = 'scalars'

        self.dataset = pd

Example 118

Project: mayavi
Source File: sources.py
    def reset(self, **traits):
        """Creates the dataset afresh or resets existing data source."""

        # First set the attributes without really doing anything since
        # the notification handlers are not called.
        self.set(trait_change_notify=False, **traits)

        points = self.points
        scalars = self.scalars

        x, y, z = self.x, self.y, self.z
        points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
        points.shape = (-1, 3)
        self.set(points=points, trait_change_notify=False)

        triangles = self.triangles
        assert triangles.shape[1] == 3, \
            "The shape of the triangles array must be (X, 3)"
        assert triangles.max() < len(points), \
            "The triangles indices must be smaller than the number of points"
        assert triangles.min() >= 0, \
            "The triangles indices must be non-negative"

        if self.dataset is None:
            pd = tvtk.PolyData()
        else:
            pd = self.dataset
        # Set the points first, and the triangles after, so that the
        # polygons can refer to the right points in the polydata.
        pd.set(points=points)
        pd.set(polys=triangles)

        if (not 'scalars' in traits
                    and scalars is not None
                    and scalars.shape != x.shape):
            # The scalars were probably set automatically to z by the
            # factory. We need to reset them, as the size has changed.
            scalars = z

        if scalars is not None and len(scalars) > 0:
            if not scalars.flags.contiguous:
                scalars = scalars.copy()
                self.set(scalars=scalars, trait_change_notify=False)
            assert x.shape == scalars.shape
            pd.point_data.scalars = scalars.ravel()
            pd.point_data.scalars.name = 'scalars'

        self.dataset = pd

Example 119

Project: bci-challenge-ner-2015
Source File: classif.py
    def transform(self,X):
        if self.meta is not None:
            return numpy.c_[X,self.meta]
        else:
            return X

Example 120

    def predict_proba(self, X):
        """Get predictions."""
        return np.c_[[self.calcMean(X[:, col::6], self.best_params)
                      for col in range(6)]].transpose()

Example 121

Project: autograd
Source File: numpy_wrapper.py
    def __getitem__(self, args):
        raw_array = _np.c_[args]
        return wrap_if_nodes_inside(raw_array, slow_op_name = "c_")

Example 122

Project: harold
Source File: _discrete_funcs.py
def __discretize(T, dt, method, PrewarpAt, q):
    """
    Actually, I think that presenting this topic as a numerical
    integration problem confuses more than it explains. Most
    items here can be presented as conformal mappings, and nobody
    needs to be limited to Riemann sums of a particular shape. As
    I found, the scipy version of this adopts the Zhang SICON 2007
    parametrization, which is surprisingly fresh!

    Here I "generalized" to any rational function representation
    if you are into that mathematician lingo (see the 'fancy'
    ASCII art below). I used LFTs instead, for all real rational
    approx. mappings (for whoever wants to follow that rabbit).
    """

    m, n = T.shape[1], T.NumberOfStates

    if method == 'zoh':
        """
        Zero-order hold is not very useful for linear systems and
        in fact it should be discouraged, since control problems
        don't have boundary conditions as in strongly nonlinear
        FEM simulations, CFD, and so on. Most importantly it is not
        stability-invariant, which defeats its purpose. But whatever



        This conversion is usually done via the expm() identity

            [A | B]   [ exp(A) | int(exp(A))*B ]   [ Ad | Bd ]
        expm[- - -] = [------------------------] = [---------]
            [0 | 0]   [   0    |       I       ]   [ C  | D  ]

        TODO: I really want to display a warning here against 'zoh' use
        """

        M = np.r_[np.c_[T.a, T.b], np.zeros((m, m+n))]
        eM = expm(M*dt)
        Ad, Bd, Cd, Dd = eM[:n, :n], eM[:n, n:], T.c, T.d

    elif method == 'lft':
        """
        Here we form the following star product
                                      _
                       ---------       |
                       |  1    |       |
                    ---| --- I |<--    |
                    |  |  z    |  |    |
                    |  ---------  |    |
                    |             |    |> this is the lft of (1/s)*I
                    |   -------   |    |
                    --->|     |----    |
                        |  Q  |        |
                    --->|     |----    |
                    |   -------   |   _|
                    |             |
                    |   -------   |
                    ----|     |<---
                        |  T  |
                    <---|     |<---
                        -------

        Here Q is whatever rational mapping links s to z, in the
        following sense:

         1         1
        --- = F_u(---,Q)
         s         z

        where F_u denotes the upper linear fractional representation.
        For example, in the usual cases of Tustin, Euler etc. the map is

                  [     I     |  sqrt(T)*I ]
              Q = [-----------|------------]
                  [ sqrt(T)*I |    T*x*I   ]

        with x defined as in Zhang 2007 SICON.
        x = 0   --> backward diff, (backward euler)
        x = 0.5 --> Tustin,
        x = 1   --> forward difference (forward euler)

        """

        # TODO: Check if interconnection is well-posed !!!!

        if q is None:
            raise ValueError('\"lft\" method requires an interconnection '
                             'matrix. Consider providing a matrix \"q". '
                             )

        # Copy n times for n integrators
        q11, q12, q21, q22 = (kron(np.eye(n), x) for x in matrix_slice(
                              q, (-1, -1)))

        # Compute the star product
        ZAinv = solve(np.eye(n) - q22.dot(T.a), q21)
        AZinv = solve(np.eye(n) - T.a.dot(q22), T.b)

        Ad = q11 + q12.dot(T.a.dot(ZAinv))
        Bd = q12.dot(AZinv)
        Cd = T.c.dot(ZAinv)
        Dd = T.d + T.c.dot(q22.dot(AZinv))

    elif method in ('bilinear', 'tustin', 'trapezoidal'):
        if not PrewarpAt == 0.:
            if 1/(2*dt) < PrewarpAt:
                raise ValueError('Prewarping Frequency is beyond '
                                 'the Nyquist rate.\nIt has to '
                                 'satisfy 0 < w < 1/(2*dt) and dt '
                                 'being the sampling\nperiod in '
                                 'seconds (dt={0} is provided, '
                                 'hence the max\nallowed is '
                                 '{1} Hz).'.format(dt, 1/(2*dt))
                                 )

            PrewarpAt *= 2*np.pi
            TwoTanw_Over_w = 2*np.tan(PrewarpAt*dt/2)/PrewarpAt
            q = np.array([[1, np.sqrt(TwoTanw_Over_w)],
                          [np.sqrt(TwoTanw_Over_w), TwoTanw_Over_w]])
        else:
            q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), dt/2]])

        return __discretize(T, dt, "lft", 0., q)

    elif method in ('forward euler', 'forward difference',
                    'forward rectangular', '>>'):
        return __discretize(T, dt, "lft", 0, q=np.array([[1, np.sqrt(dt)],
                                                         [np.sqrt(dt), 0]]))

    elif method in ('backward euler', 'backward difference',
                    'backward rectangular', '<<'):
        return __discretize(T, dt, "lft", 0, q=np.array([[1, np.sqrt(dt)],
                                                        [np.sqrt(dt), dt]]))

    else:
        raise ValueError('I don\'t know that discretization method. But '
                         'I know {0} methods.'
                         ''.format(_KnownDiscretizationMethods)
                         )

    return Ad, Bd, Cd, Dd, dt
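
The expm identity in the 'zoh' branch above is assembled with the np.r_[np.c_[...], ...] block idiom; a minimal sketch with hypothetical 2-state, single-input matrices:

>>> import numpy as np
>>> A = np.array([[0., 1.], [-1., 0.]])   # (n, n)
>>> B = np.array([[0.], [1.]])            # (n, m)
>>> np.r_[np.c_[A, B], np.zeros((1, 3))]
array([[ 0.,  1.,  0.],
       [-1.,  0.,  1.],
       [ 0.,  0.,  0.]])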

Example 123

Project: harold
Source File: _discrete_funcs.py
def __undiscretize(G, method_to_use):

    if isinstance(G, Transfer):
        T = transfer_to_state(G)
    else:
        T = G

    m, n = T.NumberOfInputs, T.NumberOfStates
    dt = G.SamplingPeriod

    missing_method = False
    if method_to_use is None:
        if 'with it' in G.DiscretizedWith:  # Check if warning comes back
            missing_method = True
        else:
            method_to_use = G.DiscretizedWith

    if method_to_use == 'zoh':
        M = np.r_[
                   np.c_[T.a, T.b],
                   np.c_[np.zeros((m, n)), np.eye(m)]
                  ]
        eM = logm(M)*(1/T.SamplingPeriod)
        Ac, Bc, Cc, Dc = eM[:n, :n], eM[:n, n:], T.c, T.d

    elif (method_to_use in ('bilinear', 'tustin',
                            'trapezoidal') or missing_method):  # Manually
        X = np.eye(n) + T.a
        if 1/np.linalg.cond(X) < 1e-8:  # TODO: Totally psychological limit
            raise ValueError('The discrete A matrix has eigenvalue(s) '
                             'very close to -1 (rcond of I+Ad is {0})'
                             ''.format(1/np.linalg.cond(X)))

        iX = dt/2*(np.eye(n) + T.a)
        Ac = solve(-iX, (np.eye(n)-T.a))
        Bc = solve(iX, T.b)
        Cc = T.c.dot(np.eye(n) + 0.5*dt*solve(iX, (np.eye(n) - T.a)))
        Dc = T.d - 0.5*dt*T.c.dot(solve(iX, T.b))

    elif (method_to_use in ('forward euler', 'forward difference',
                            'forward rectangular', '>>')):
        Ac = -1/dt * (np.eye(n)-T.a)
        Bc = 1/dt * (T.b)
        Cc = T.c
        Dc = T.d

    elif (method_to_use in ('backward euler', 'backward difference',
                            'backward rectangular', '<<')):
        X = T.a
        if 1/np.linalg.cond(X) < 1e-8:  # TODO: Totally psychological limit
            raise ValueError('The discrete A matrix has eigenvalue(s) '
                             'very close to 0 (rcond of Ad is {0})'
                             ''.format(1/np.linalg.cond(X)))

        iX = dt*T.a
        Ac = solve(-iX, (np.eye(n) - T.a))
        Bc = solve(iX, T.b)
        Cc = T.c.dot(np.eye(n) + dt*solve(iX, (np.eye(n) - T.a)))
        Dc = T.d - dt*T.c.dot(solve(iX, T.b))

    return Ac, Bc, Cc, Dc

Example 124

Project: harold
Source File: _system_funcs.py
def cancellation_distance(F,G):
    """
    Given matrices :math:`F,G`, computes the upper and lower bounds of
    the perturbation needed to render the pencil :math:`\\left[
    \\begin{array}{c|c}F-pI & G\\end{array}\\right]` rank deficient. It is
    used for assessing the controllability/observability degenerate distance
    and hence for minimality assessment.

    Implements the algorithm given in D.Boley SIMAX vol.11(4) 1990.

    Parameters
    ----------

    F,G : 2D arrays
        Pencil matrices to be checked for rank deficiency distance

    Returns
    -------

    upper2 : float
        Upper bound on the norm of the perturbation
        :math:`\\left[\\begin{array}{c|c}dF & dG\\end{array}\\right]` such
        that :math:`\\left[\\begin{array}{c|c}F+dF-pI & G+dG \\end{array}
        \\right]` is rank deficient.
    upper1 : float
        A theoretically softer upper bound than the upper2 for the
        same quantity.
    lower0 : float
        Lower bound on the same quantity given in upper2
    e_f    : complex
        Indicates the eigenvalue that renders [F + dF - pI | G + dG ]
        rank deficient i.e. equals to the p value at the closest rank
        deficiency.
    radius : float
        The perturbation with the norm bound "upper2" is located within
        a disk in the complex plane whose center is on "e_f" and whose
        radius is bounded by this output.

    """
    A = np.c_[F, G].T
    n, m = A.shape
    B = e_i(n, np.s_[:m])
    D = e_i(n, np.s_[m:])
    C = qr(2*np.random.rand(n, n-m) - 1, mode='economic')[0]
    evals, V = eig(np.c_[A, C])
    K = cond(V)
    X = V[:m, :]
    Y = V[m:, :]

    upp0 = [0]*n
    for x in range(n):
        upp0[x] = norm((C-evals[x]*D).dot(Y[:, x])) / norm(X[:, x])

    f = np.argsort(upp0)[0]
    e_f = evals[f]
    upper1 = upp0[f]
    upper2 = svdvals(A - e_f*B)[-1]
    lower0 = upper2/(K+1)
    radius = upper2*K

    return upper2, upper1, lower0, e_f, radius
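
np.c_[F, G] above places the two pencil matrices side by side; for 2-D inputs np.c_ behaves like np.hstack. A quick check:

>>> import numpy as np
>>> F, G = np.eye(2), np.ones((2, 1))
>>> np.array_equal(np.c_[F, G], np.hstack((F, G)))
True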

Example 125

Project: krypy
Source File: test_deflation.py
def check_Ritz(solver, ls):
    (n_, n) = solver.H.shape
    m = solver.projection.U.shape[1]

    # compute Ritz pairs 'by hand'
    Z = numpy.c_[solver.V[:, :n], solver.projection.U]
    MMlAMrZ = ls.M * (ls.MlAMr * Z)

    inner_left = krypy.utils.inner(Z, MMlAMrZ, ip_B=ls.get_ip_Minv_B())
    inner_right = krypy.utils.inner(Z, Z, ip_B=ls.get_ip_Minv_B())

    if ls.self_adjoint:
        assert_array_almost_equal(inner_left, inner_left.T.conj())

    # inner_right should be identity if full orthogonalization is performed
    if isinstance(solver, krypy.linsys.Gmres) and 0 < n+m <= ls.N:
        assert_array_almost_equal(inner_right, numpy.eye(n+m), decimal=4)

    if 0 < n+m <= ls.N:
        if numpy.linalg.norm(inner_right - numpy.eye(n+m), 2) < 1e-8:
            # use eigh for self-adjoint problems
            eig = scipy.linalg.eigh if ls.self_adjoint else scipy.linalg.eig
            eig = scipy.linalg.eig

            cmp_values, cmp_coeffs = eig(inner_left, inner_right)
            cmp_sort = numpy.argsort(numpy.abs(cmp_values))
            cmp_values = cmp_values[cmp_sort]
            cmp_coeffs = cmp_coeffs[:, cmp_sort]
            # normalize coeffs
            for i in range(n+m):
                cmp_coeffs[:, [i]] /= numpy.linalg.norm(cmp_coeffs[:, [i]], 2)
            cmp_vectors = Z.dot(cmp_coeffs)
            cmp_res = ls.M*(ls.MlAMr*cmp_vectors) - cmp_vectors*cmp_values
            cmp_resnorms = [krypy.utils.norm(cmp_res[:, [i]],
                                             ip_B=ls.get_ip_Minv_B())
                            for i in range(n+m)]

            # get Ritz pairs via Ritz()
            ritz = krypy.deflation.Ritz(solver, mode='ritz')
            sort = numpy.argsort(numpy.abs(ritz.values))

            # check values
            assert_array_almost_equal(ritz.values[sort], cmp_values)

            # check vectors via inner products
            assert_array_almost_equal(numpy.diag(numpy.abs(
                krypy.utils.inner(ritz.get_vectors()[:, sort], cmp_vectors,
                                  ip_B=ls.get_ip_Minv_B()))),
                numpy.ones(m+n))

Example 126

    def get_variables(self, requestedVariables, time=None,
                      x=None, y=None, z=None, block=False):

        if isinstance(requestedVariables, str):
            requestedVariables = [requestedVariables]

        self.check_arguments(requestedVariables, time, x, y, z)

        # Apparently it is necessary to first convert from x,y to lon,lat
        # using proj library, and back to x,y using Basemap instance
        # Perhaps a bug in Basemap related to x_0/y_0 offsets?
        # Nevertheless, seems not to affect performance
        lon, lat = self.xy2lonlat(x, y)
        x, y = self.map(lon, lat, inverse=False)
        points = np.c_[x, y]

        insidePoly = np.array([False]*len(x))
        if has_nxutils is True:
            for polygon in self.polygons:
                insidePoly[nx.points_inside_poly(points, polygon)] = True
        else:
            for polygon in self.polygons:
                insidePoly += np.array(polygon.contains_points(points))

        variables = {}
        variables['land_binary_mask'] = insidePoly

        return variables

Example 127

Project: phy
Source File: visuals.py
    @staticmethod
    def validate(x=None,
                 y=None,
                 pos=None,
                 color=None,
                 size=None,
                 depth=None,
                 data_bounds='auto',
                 ):
        if pos is None:
            x, y = _get_pos(x, y)
            pos = np.c_[x, y]
        pos = np.asarray(pos)
        assert pos.ndim == 2
        assert pos.shape[1] == 2
        n = pos.shape[0]

        # Validate the data.
        color = _get_array(color, (n, 4),
                           ScatterVisual._default_color,
                           dtype=np.float32)
        size = _get_array(size, (n, 1), ScatterVisual._default_marker_size)
        depth = _get_array(depth, (n, 1), 0)
        if data_bounds is not None:
            data_bounds = _get_data_bounds(data_bounds, pos)
            assert data_bounds.shape[0] == n

        return Bunch(pos=pos, color=color, size=size,
                     depth=depth, data_bounds=data_bounds)

Example 128

Project: phy
Source File: visuals.py
    @staticmethod
    def validate(x=None,
                 y=None,
                 pos=None,
                 masks=None,
                 data_bounds='auto',
                 ):
        if pos is None:
            x, y = _get_pos(x, y)
            pos = np.c_[x, y]
        pos = np.asarray(pos)
        assert pos.ndim == 2
        assert pos.shape[1] == 2
        n = pos.shape[0]

        masks = _get_array(masks, (n, 1), 1., np.float32)
        assert masks.shape == (n, 1)

        # Validate the data.
        if data_bounds is not None:
            data_bounds = _get_data_bounds(data_bounds, pos)
            assert data_bounds.shape[0] == n

        return Bunch(pos=pos,
                     masks=masks,
                     data_bounds=data_bounds,
                     )

Example 129

def plot_simple_demo_lda():
    pylab.clf()
    fig = pylab.figure(num=None, figsize=(10, 4))
    pylab.subplot(121)

    title = "Original feature space"
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")

    good = x1 > x2
    bad = ~good

    x1g = x1[good]
    x2g = x2[good]
    pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")

    x1b = x1[bad]
    x2b = x2[bad]
    pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")

    pylab.grid(True)

    pylab.subplot(122)

    X = np.c_[(x1, x2)]

    lda_inst = lda.LDA(n_components=1)
    Xtrans = lda_inst.fit_transform(X, good)

    Xg = Xtrans[good]
    Xb = Xtrans[bad]

    pylab.scatter(
        Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
    pylab.scatter(
        Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
    title = "Transformed feature space"
    pylab.title(title)
    pylab.xlabel("$X'$")
    fig.axes[1].get_yaxis().set_visible(False)

    pylab.grid(True)

    pylab.autoscale(tight=True)
    filename = "lda_demo.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")

Example 130

Project: mne-python
Source File: _lcmv.py
def _prepare_beamformer_input(info, forward, label, picks, pick_ori):
    """Input preparation common for all beamformer functions.

    Check input values, prepare channel list and gain matrix. For documentation
    of parameters, please refer to _apply_lcmv.
    """
    is_free_ori = forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI

    if pick_ori in ['normal', 'max-power'] and not is_free_ori:
        raise ValueError('Normal or max-power orientation can only be picked '
                         'when a forward operator with free orientation is '
                         'used.')
    if pick_ori == 'normal' and not forward['surf_ori']:
        raise ValueError('Normal orientation can only be picked when a '
                         'forward operator oriented in surface coordinates is '
                         'used.')
    if pick_ori == 'normal' and not forward['src'][0]['type'] == 'surf':
        raise ValueError('Normal orientation can only be picked when a '
                         'forward operator with a surface-based source space '
                         'is used.')

    # Restrict forward solution to selected channels
    info_ch_names = [c['ch_name'] for c in info['chs']]
    ch_names = [info_ch_names[k] for k in picks]
    fwd_ch_names = forward['sol']['row_names']
    # Keep channels in forward present in info:
    fwd_ch_names = [c for c in fwd_ch_names if c in info_ch_names]
    forward = pick_channels_forward(forward, fwd_ch_names)
    picks_forward = [fwd_ch_names.index(c) for c in ch_names]

    # Get gain matrix (forward operator)
    if label is not None:
        vertno, src_sel = label_src_vertno_sel(label, forward['src'])

        if is_free_ori:
            src_sel = 3 * src_sel
            src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
            src_sel = src_sel.ravel()

        G = forward['sol']['data'][:, src_sel]
    else:
        vertno = _get_vertno(forward['src'])
        G = forward['sol']['data']

    # Apply SSPs
    proj, ncomp, _ = make_projector(info['projs'], fwd_ch_names)

    if info['projs']:
        G = np.dot(proj, G)

    # Pick after applying the projections
    G = G[picks_forward]
    proj = proj[np.ix_(picks_forward, picks_forward)]

    return is_free_ori, ch_names, proj, vertno, G
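
The src_sel manipulation above expands each free-orientation source into three consecutive column indices; a minimal sketch:

>>> import numpy as np
>>> src_sel = 3 * np.array([0, 2])
>>> np.c_[src_sel, src_sel + 1, src_sel + 2].ravel()
array([0, 1, 2, 6, 7, 8])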

Example 131

Project: mne-python
Source File: test_source_space.py
@testing.requires_testing_data
@requires_mne
def test_discrete_source_space():
    """Test setting up (and reading/writing) discrete source spaces
    """
    tempdir = _TempDir()
    src = read_source_spaces(fname)
    v = src[0]['vertno']

    # let's make a discrete version with the C code, and with ours
    temp_name = op.join(tempdir, 'temp-src.fif')
    try:
        # save
        temp_pos = op.join(tempdir, 'temp-pos.txt')
        np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
        # let's try the spherical one (no bem or surf supplied)
        run_subprocess(['mne_volume_source_space', '--meters',
                        '--pos', temp_pos, '--src', temp_name])
        src_c = read_source_spaces(temp_name)
        pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
        src_new = setup_volume_source_space('sample', None,
                                            pos=pos_dict,
                                            subjects_dir=subjects_dir)
        _compare_source_spaces(src_c, src_new, mode='approx')
        assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
                        rtol=1e-3, atol=1e-6)
        assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
                        rtol=1e-3, atol=1e-6)

        # now do writing
        write_source_spaces(temp_name, src_c)
        src_c2 = read_source_spaces(temp_name)
        _compare_source_spaces(src_c, src_c2)

        # now do MRI
        assert_raises(ValueError, setup_volume_source_space, 'sample',
                      pos=pos_dict, mri=fname_mri)
        assert_equal(repr(src_new), repr(src_c))
        assert_equal(src_new.kind, 'discrete')
    finally:
        if op.isfile(temp_name):
            os.remove(temp_name)

Example 132

Project: mne-python
Source File: test_source_space.py
def test_accumulate_normals():
    """Test efficient normal accumulation for surfaces"""
    # set up comparison
    n_pts = int(1.6e5)  # approx number in sample source space
    n_tris = int(3.2e5)
    # use all positive to make a worst-case for cumulative summation
    # (real "nn" vectors will have both positive and negative values)
    tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
    tris = np.c_[tris, tris + 1, tris + 2]
    tri_nn = rng.rand(n_tris, 3)
    this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)

    # cut-and-paste from original code in surface.py:
    #    Find neighboring triangles and accumulate vertex normals
    this['nn'] = np.zeros((this['np'], 3))
    for p in range(this['ntri']):
        # vertex normals
        verts = this['tris'][p]
        this['nn'][verts, :] += this['tri_nn'][p, :]
    nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])

    # the moment of truth (or reckoning)
    assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)

Example 133

Project: mne-python
Source File: transforms.py
def _sph_to_cart_partials(az, pol, g_rad, g_az, g_pol):
    """Convert spherical partial derivatives to cartesian coords.

    Note: Because we are dealing with partial derivatives, this calculation is
    not a static transformation. The transformation matrix itself is dependent
    on azimuth and polar coord.

    See the 'Spherical coordinate system' section here:
    wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates

    Parameters
    ----------
    az : ndarray, shape (n_points,)
        Array containing spherical coordinate points (azimuth).
    pol : ndarray, shape (n_points,)
        Array containing spherical coordinate points (polar).
    g_rad, g_az, g_pol : ndarray, shape (n_points,)
        Partial derivatives at each spherical coordinate
        (radius, azimuth, polar).

    Returns
    -------
    cart_grads : ndarray, shape (n_points, 3)
        Array containing partial derivatives in Cartesian coordinates (x, y, z).
    """
    sph_grads = np.c_[g_rad, g_az, g_pol]
    cart_grads = np.zeros_like(sph_grads)
    c_as, s_as = np.cos(az), np.sin(az)
    c_ps, s_ps = np.cos(pol), np.sin(pol)
    trans = np.array([[c_as * s_ps, -s_as, c_as * c_ps],
                      [s_as * s_ps, c_as, c_ps * s_as],
                      [c_ps, np.zeros_like(c_as), -s_ps]])
    cart_grads = np.einsum('ijk,kj->ki', trans, sph_grads)
    return cart_grads
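
For a single point, the columns of trans are the orthonormal spherical basis vectors (physics convention, with pol measured from the +z axis), so the function returns g_rad * r_hat + g_az * phi_hat + g_pol * theta_hat at each point. A standalone sanity sketch, not taken from mne-python:

import numpy as np

az, pol = 0.3, 1.1  # arbitrary test point
c_a, s_a = np.cos(az), np.sin(az)
c_p, s_p = np.cos(pol), np.sin(pol)
r_hat = np.array([c_a * s_p, s_a * s_p, c_p])       # radial direction
phi_hat = np.array([-s_a, c_a, 0.])                 # azimuthal direction
theta_hat = np.array([c_a * c_p, s_a * c_p, -s_p])  # polar direction
trans = np.c_[r_hat, phi_hat, theta_hat]            # same matrix as above
g = np.array([0.5, -1.0, 2.0])                      # (g_rad, g_az, g_pol)
print(trans.dot(g))  # the Cartesian gradient at this point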

Example 143

View license
def test_rfe_deprecation_estimator_params():
    deprecation_message = ("The parameter 'estimator_params' is deprecated as "
                           "of version 0.16 and will be removed in 0.18. The "
                           "parameter is no longer necessary because the "
                           "value is set via the estimator initialisation or "
                           "set_params method.")
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                             estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
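
The replacement the message points to is plain estimator configuration. A minimal sketch of the non-deprecated spelling:

from sklearn.feature_selection import RFE
from sklearn.svm import SVC

# set the kernel on the estimator itself (or via set_params) rather than
# passing estimator_params to RFE
clf = SVC(kernel='linear')  # equivalently: SVC().set_params(kernel='linear')
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)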

    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFECV(estimator=SVC(), step=1, cv=5,
                               estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)

Example 144

View license
def test_rfe():
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target

    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)

    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])

    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
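
The np.c_ call at the top of this test is the usage this page catalogs: it column-stacks the four informative iris features with six Gaussian noise columns, giving a (150, 10) design matrix whose noise columns RFE should eliminate. In isolation:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.utils import check_random_state

iris = load_iris()
generator = check_random_state(0)
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
print(iris.data.shape, X.shape)  # (150, 4) (150, 10)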

Example 145

View license
def test_rfecv():
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)

    # All the noisy variables were filtered out
    assert_array_equal(X_r, iris.data)

    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)

    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test fix on grid_scores
    def test_scorer(estimator, X, y):
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))

    # Same as the first two tests, but with step=2: eliminating 2 of the 10
    # features per step scores 10, 8, 6, 4, 2 and 1 features (6 grid values)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)

Example 149

Project: seaborn
Source File: distributions.py
View license
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
    """Compute a bivariate kde using scipy."""
    data = np.c_[x, y]
    kde = stats.gaussian_kde(data.T)
    data_std = data.std(axis=0, ddof=1)
    if isinstance(bw, string_types):
        bw = "scotts" if bw == "scott" else bw
        bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
        bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
    elif np.isscalar(bw):
        bw_x, bw_y = bw, bw
    else:
        msg = ("Cannot specify a different bandwidth for each dimension "
               "with the scipy backend. You should install statsmodels.")
        raise ValueError(msg)
    x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
    y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
    xx, yy = np.meshgrid(x_support, y_support)
    z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return xx, yy, z
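
The np.c_ line pairs the two 1-D samples into an (n, 2) array; gaussian_kde expects variables in rows, hence the transpose. The core of the function with seaborn's bandwidth handling stripped out (a sketch, with made-up data and grid limits):

import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
x, y = rng.randn(2, 200)

data = np.c_[x, y]                # shape (200, 2)
kde = stats.gaussian_kde(data.T)  # gaussian_kde wants shape (2, 200)

xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
print(z.shape)  # (50, 50)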
