numpy.kron

Here are examples of the Python API numpy.kron, collected from open source projects. numpy.kron(a, b) returns the Kronecker product of two arrays: a composite array made of blocks of the second array scaled by the elements of the first.

31 Examples
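
Before the project examples, here is a minimal, self-contained illustration of what numpy.kron computes: each element of the first array scales a full copy of the second, laid out block-wise.

import numpy as np

a = np.array([[1, 2],
              [3, 4]])
b = np.eye(2)

# kron(a, b) has shape (2*2, 2*2): each a[i, j] scales a full copy of b
print(np.kron(a, b))
# [[1. 0. 2. 0.]
#  [0. 1. 0. 2.]
#  [3. 0. 4. 0.]
#  [0. 3. 0. 4.]]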

Example 1

Project: simpeg Source File: CylMesh.py
Function: area
    @property
    def area(self):
        """Face areas"""
        if getattr(self, '_area', None) is None:
            if self.nCy > 1:
                raise NotImplementedError('area not yet implemented for 3D '
                                          'cyl mesh')
            areaR = np.kron(self.hz, 2*pi*self.vectorNx)
            areaZ = np.kron(np.ones_like(self.vectorNz), pi*(self.vectorNx**2 -
                            np.r_[0, self.vectorNx[:-1]]**2))
            self._area = np.r_[areaR, areaZ]
        return self._area
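
For 1-D inputs like hz and vectorNx above, np.kron is simply the flattened outer product, which is what makes it convenient for building tensor-product mesh quantities. A quick check of that equivalence (plain NumPy; the names are illustrative, not simpeg's):

import numpy as np

hz = np.array([1.0, 2.0])          # cell widths in z
r = np.array([0.5, 1.0, 1.5])      # nodal radii

# kron of two 1-D arrays == outer product, flattened row by row
assert np.array_equal(np.kron(hz, r), np.outer(hz, r).ravel())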

Example 2

Project: simpeg Source File: CylMesh.py
Function: vol
    @property
    def vol(self):
        """Volume of each cell"""
        if getattr(self, '_vol', None) is None:
            if self.nCy > 1:
                raise NotImplementedError('vol not yet implemented for 3D '
                                          'cyl mesh')
            az = pi*(self.vectorNx**2 - np.r_[0, self.vectorNx[:-1]]**2)
            self._vol = np.kron(self.hz, az)
        return self._vol

Example 3

Project: statsmodels Source File: test_tools.py
    def solve_dicrete_lyapunov_direct(self, a, q, complex_step=False):
        # This is the discrete Lyapunov solver as "real function of real
        # variables":  the difference between this and the usual, complex,
        # version is that in the Kronecker product the second argument is
        # *not* conjugated here.
        if not complex_step:
            lhs = np.kron(a, a.conj())
            lhs = np.eye(lhs.shape[0]) - lhs
            x = np.linalg.solve(lhs, q.flatten())
        else:
            lhs = np.kron(a, a)
            lhs = np.eye(lhs.shape[0]) - lhs
            x = np.linalg.solve(lhs, q.flatten())

        return np.reshape(x, q.shape)
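
The solver above relies on the row-major vec identity vec(A X A^H) = (A kron conj(A)) vec(X). A small self-contained check against SciPy's reference solver, assuming a stable A so the linear system is well posed:

import numpy as np
from scipy.linalg import solve_discrete_lyapunov

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
a = 0.5 * a / np.abs(np.linalg.eigvals(a)).max()   # force spectral radius 0.5
q = rng.standard_normal((4, 4))
q = q @ q.T                                        # symmetric right-hand side

# X - A X A^H = Q  becomes  (I - kron(A, conj(A))) vec(X) = vec(Q)
lhs = np.eye(16) - np.kron(a, a.conj())
x = np.linalg.solve(lhs, q.flatten()).reshape(4, 4)

assert np.allclose(a @ x @ a.conj().T - x + q, 0)
assert np.allclose(x, solve_discrete_lyapunov(a, q))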

Example 4

Project: APGL Source File: KroneckerGenerator.py
    def generate(self):
        """
        Generate a Kronecker graph using the adjacency matrix of the input graph.

        :returns: The generated graph as a SparseGraph object.
        """
        W = self.initialGraph.adjacencyMatrix()
        Wi = W

        for i in range(1, self.k):
            Wi = numpy.kron(Wi, W)

        vList = VertexList(Wi.shape[0], 0)
        graph = SparseGraph(vList, self.initialGraph.isUndirected())
        graph.setWeightMatrix(Wi)

        return graph

Example 5

Project: APGL Source File: StochasticKroneckerGenerator.py
    def generateGraph(self):
        """
        Generate a Kronecker graph
        """
        W = self.initialGraph.getWeightMatrix()
        Wi = W

        for i in range(1, self.k):
            Wi = numpy.kron(Wi, W)

        P = numpy.random.rand(Wi.shape[0], Wi.shape[0])
        Wi = numpy.array(P < Wi, numpy.float64)

        vList = VertexList(Wi.shape[0], 0)
        graph = SparseGraph(vList, self.initialGraph.isUndirected())
        graph.setWeightMatrix(Wi)

        return graph
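
A standalone sketch of the same iteration, with no APGL dependency and an arbitrarily chosen seed matrix, showing how the graph grows under repeated Kronecker products:

import numpy as np

# 3-vertex seed adjacency matrix with self-loops
W = np.array([[1, 1, 0],
              [1, 1, 1],
              [0, 1, 1]], dtype=float)

k = 3
Wi = W
for _ in range(1, k):
    Wi = np.kron(Wi, W)

print(Wi.shape)            # (27, 27): the vertex count grows as 3**k

# stochastic variant, as in generateGraph: entries of Wi act as edge
# probabilities (a fractional seed gives a genuinely random graph)
P = np.random.rand(*Wi.shape)
A = (P < Wi).astype(np.float64)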

Example 6

Project: shapelets Source File: conv.py
def generate2dClmnTensor(a,b,c,lmax,mmax,nmax):
    """Generate the 2-dimensional C tensor [Refregier and Bacon 2003 eq. 6]
    a,b,c: scale factors (float)
    lmax,mmax,nmax: maximum values for l,m,n (2 element integer lists)
    """
    C1=generate1dClmnTensor(a,b,c,lmax[0],mmax[0],nmax[0])
    C2=generate1dClmnTensor(a,b,c,lmax[1],mmax[1],nmax[1])
    C=np.kron(C1,C2)
    return np.reshape(C,(lmax[0],lmax[1],mmax[0],mmax[1],nmax[0],nmax[1]))

Example 7

Project: chumpy Source File: linalg.py
Function: compute_dr_wrt
    def compute_dr_wrt(self, wrt):
        if wrt is not self.a:
            return None
    
        Ainv = self.r

        if Ainv.ndim <= 2:
            return -np.kron(Ainv, Ainv.T)
        else:
            Ainv = np.reshape(Ainv,  (-1, Ainv.shape[-2], Ainv.shape[-1]))
            AinvT = np.rollaxis(Ainv, -1, -2)
            AinvT = np.reshape(AinvT, (-1, AinvT.shape[-2], AinvT.shape[-1]))
            result = np.dstack([-np.kron(Ainv[i], AinvT[i]).T for i in range(Ainv.shape[0])]).T
            result = sp.block_diag(result)

        return result

Example 8

Project: rescal.py Source File: rescal.py
def _updateR(X, A, lmbdaR):
    _log.debug('Updating R (SVD) lambda R: %s' % str(lmbdaR))
    rank = A.shape[1]
    U, S, Vt = svd(A, full_matrices=False)
    Shat = kron(S, S)
    Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank)
    R = []
    for i in range(len(X)):
        Rn = Shat * dot(U.T, X[i].dot(U))
        Rn = dot(Vt.T, dot(Rn, Vt))
        R.append(Rn)
    return R

Example 9

Project: scikit-tensor Source File: core.py
Function: center
def center(X, n):
    Xn = unfold(X, n)
    N = Xn.shape[0]
    m = Xn.sum(axis=0) / N
    m = kron(m, ones((N, 1)))
    Xn = Xn - m
    return fold(Xn, n)
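
The kron(m, ones((N, 1))) idiom above tiles the row of column means down N rows; it is equivalent to np.tile, as this minimal check shows:

import numpy as np

m = np.array([1.0, 2.0, 3.0])        # e.g. column means of an unfolding
N = 4
tiled = np.kron(m, np.ones((N, 1)))  # shape (4, 3): m repeated down the rows
assert np.array_equal(tiled, np.tile(m, (N, 1)))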

Example 10

Project: scikit-tensor Source File: rescal.py
def _updateR(X, A, lmbdaR):
    rank = A.shape[1]
    U, S, Vt = svd(A, full_matrices=False)
    Shat = kron(S, S)
    Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank)
    R = []
    for i in range(len(X)):
        Rn = Shat * dot(U.T, X[i].dot(U))
        Rn = dot(Vt.T, dot(Rn, Vt))
        R.append(Rn)
    return R

Example 11

Project: RLScore Source File: test_kronecker_rls.py
    def test_kron_rls(self):
        
        regparam = 0.001
        
        K_train1, K_train2, Y_train, K_test1, K_test2, Y_test, X_train1, X_train2, X_test1, X_test2 = self.generate_xortask()
        Y_train = Y_train.ravel(order = 'F')
        Y_test = Y_test.ravel(order = 'F')
        train_rows, train_columns = K_train1.shape[0], K_train2.shape[0]
        test_rows, test_columns = K_test1.shape[0], K_test2.shape[0]
        trainlabelcount = train_rows * train_columns
        
        #Train linear Kronecker RLS with data-matrices
        params = {}
        params["regparam"] = regparam
        params["X1"] = X_train1
        params["X2"] = X_train2
        params["Y"] = Y_train
        linear_kron_learner = KronRLS(**params)
        linear_kron_testpred = linear_kron_learner.predict(X_test1, X_test2).reshape((test_rows, test_columns), order = 'F')
        
        #Train kernel Kronecker RLS with pre-computed kernel matrices
        params = {}
        params["regparam"] = regparam
        params["K1"] = K_train1
        params["K2"] = K_train2
        params["Y"] = Y_train
        kernel_kron_learner = KronRLS(**params)
        kernel_kron_testpred = kernel_kron_learner.predict(K_test1, K_test2).reshape((test_rows, test_columns), order = 'F')
        
        #Train an ordinary RLS regressor for reference
        K_Kron_train_x = np.kron(K_train2, K_train1)
        params = {}
        params["X"] = K_Kron_train_x
        params["kernel"] = "PrecomputedKernel"
        params["Y"] = Y_train.reshape(trainlabelcount, 1, order = 'F')
        ordrls_learner = RLS(**params)
        ordrls_learner.solve(regparam)
        K_Kron_test_x = np.kron(K_test2, K_test1)
        ordrls_testpred = ordrls_learner.predict(K_Kron_test_x)
        ordrls_testpred = ordrls_testpred.reshape((test_rows, test_columns), order = 'F')
        
        print('')
        print('Prediction: linear KronRLS, kernel KronRLS, ordinary RLS')
        print('[0, 0] ' + str(linear_kron_testpred[0, 0]) + ' ' + str(kernel_kron_testpred[0, 0]) + ' ' + str(ordrls_testpred[0, 0]))
        print('[0, 1] ' + str(linear_kron_testpred[0, 1]) + ' ' + str(kernel_kron_testpred[0, 1]) + ' ' + str(ordrls_testpred[0, 1]))
        print('[1, 0] ' + str(linear_kron_testpred[1, 0]) + ' ' + str(kernel_kron_testpred[1, 0]) + ' ' + str(ordrls_testpred[1, 0]))
        print('Meanabsdiff: linear KronRLS - ordinary RLS, kernel KronRLS - ordinary RLS')
        print(str(np.mean(np.abs(linear_kron_testpred - ordrls_testpred))) + ' ' + str(np.mean(np.abs(kernel_kron_testpred - ordrls_testpred))))
        np.testing.assert_almost_equal(linear_kron_testpred, ordrls_testpred)
        np.testing.assert_almost_equal(kernel_kron_testpred, ordrls_testpred)
        
        print('')
        ordrls_loopred = ordrls_learner.leave_one_out().reshape((train_rows, train_columns), order = 'F')
        linear_kron_loopred = linear_kron_learner.in_sample_loo().reshape((train_rows, train_columns), order = 'F')
        kernel_kron_loopred = kernel_kron_learner.in_sample_loo().reshape((train_rows, train_columns), order = 'F')
        print('In-sample LOO: linear KronRLS, kernel KronRLS, ordinary RLS')
        print('[0, 0] ' + str(linear_kron_loopred[0, 0]) + ' ' + str(kernel_kron_loopred[0, 0]) + ' ' + str(ordrls_loopred[0, 0]))
        print('[0, 1] ' + str(linear_kron_loopred[0, 1]) + ' ' + str(kernel_kron_loopred[0, 1]) + ' ' + str(ordrls_loopred[0, 1]))
        print('[1, 0] ' + str(linear_kron_loopred[1, 0]) + ' ' + str(kernel_kron_loopred[1, 0]) + ' ' + str(ordrls_loopred[1, 0]))
        print('Meanabsdiff: linear KronRLS - ordinary RLS, kernel KronRLS - ordinary RLS')
        print(str(np.mean(np.abs(linear_kron_loopred - ordrls_loopred))) + ' ' + str(np.mean(np.abs(kernel_kron_loopred - ordrls_loopred))))
        np.testing.assert_almost_equal(linear_kron_loopred, ordrls_loopred)
        np.testing.assert_almost_equal(kernel_kron_loopred, ordrls_loopred)

Example 12

Project: RLScore Source File: test_kronecker_rls.py
    def test_conditional_ranking(self):
        
        regparam = 0.001
        
        K_train1, K_train2, Y_train, K_test1, K_test2, Y_test, X_train1, X_train2, X_test1, X_test2 = self.generate_xortask()
        train_rows, train_columns = Y_train.shape
        trainlabelcount = train_rows * train_columns
        
        K_Kron_train_x = np.kron(K_train2, K_train1)
        
        
        #Train linear Conditional Ranking Kronecker RLS
        params = {}
        params["X1"] = X_train1
        params["X2"] = X_train2
        params["Y"] = Y_train
        params["regparam"] = regparam
        linear_kron_condrank_learner = KronRLS(**params)
        linear_kron_condrank_learner.solve_linear_conditional_ranking(regparam)
        
        #Train an ordinary RankRLS for reference
        params = {}
        params["X"] = K_Kron_train_x
        params["kernel"] = "PrecomputedKernel"
        params["Y"] = Y_train.reshape((trainlabelcount, 1), order = 'F')
        qids = []
        for j in range(Y_train.shape[1]):
            for i in range(Y_train.shape[0]):
                qids.append(i)
        params["qids"] = qids
        rankrls_learner = QueryRankRLS(**params)
        rankrls_learner.solve(regparam)
        K_test_x = np.kron(K_test2, K_test1)
        ordrankrls_testpred = rankrls_learner.predict(K_test_x)
        condrank_testpred = linear_kron_condrank_learner.predict(X_test1, X_test2)
        #print('')
        print('\n\nMeanabsdiff: conditional ranking vs rankrls ' + str(np.mean(np.abs(condrank_testpred - ordrankrls_testpred))) + '\n')
        np.testing.assert_almost_equal(condrank_testpred, ordrankrls_testpred)

Example 13

Project: RLScore Source File: test_two_step_rls.py
    def test_two_step_rls(self):
        
        regparam1 = 0.001
        regparam2 = 10
        #regparam1 = 1
        #regparam2 = 1
        
        K_train1, K_train2, Y_train, K_test1, K_test2, Y_test, X_train1, X_train2, X_test1, X_test2 \
            = self.generate_xortask()
        Y_train = Y_train.ravel(order = 'F')
        Y_test = Y_test.ravel(order = 'F')
        train_rows, train_columns = K_train1.shape[0], K_train2.shape[0]
        #print K_train1.shape, K_train2.shape, K_test1.shape, K_test2.shape, train_rows, train_columns
        trainlabelcount = train_rows * train_columns
        
        #Train linear two-step RLS with data-matrices
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["X1"] = X_train1
        params["X2"] = X_train2
        params["Y"] = Y_train
        linear_two_step_learner = TwoStepRLS(**params)
        linear_twostepoutofsampleloo = linear_two_step_learner.out_of_sample_loo().reshape((train_rows, train_columns), order = 'F')
        linear_lro = linear_two_step_learner.leave_x1_out()
        linear_lco = linear_two_step_learner.leave_x2_out()
                                                              
        #Train kernel two-step RLS with pre-computed kernel matrices
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1
        params["K2"] = K_train2
        params["Y"] = Y_train
        kernel_two_step_learner = TwoStepRLS(**params)
        kernel_twostepoutofsampleloo = kernel_two_step_learner.out_of_sample_loo().reshape((train_rows, train_columns), order = 'F')
        kernel_lro = kernel_two_step_learner.leave_x1_out()
        kernel_lco = kernel_two_step_learner.leave_x2_out()
        tspred = kernel_two_step_learner.predict(K_train1, K_train2)
        
        #Train ordinary linear RLS in two steps for a reference
        params = {}
        params["regparam"] = regparam2
        params["X"] = X_train2
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F').T
        params['bias'] = 0
        ordinary_linear_rls_first_step = RLS(**params)
        firststeploo = ordinary_linear_rls_first_step.leave_one_out().T
        params = {}
        params["regparam"] = regparam1
        params["X"] = X_train1
        params["Y"] = firststeploo.reshape((train_rows, train_columns), order = 'F')
        params['bias'] = 0
        ordinary_linear_rls_second_step = RLS(**params)
        secondsteploo_linear_rls = ordinary_linear_rls_second_step.leave_one_out()
        
        #Train ordinary kernel RLS in two steps for a reference
        params = {}
        params["regparam"] = regparam2
        params["X"] = K_train2
        params['kernel'] = 'PrecomputedKernel'
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F').T
        ordinary_kernel_rls_first_step = RLS(**params)
        firststeploo = ordinary_kernel_rls_first_step.leave_one_out().T
        params = {}
        params["regparam"] = regparam1
        params["X"] = K_train1
        params["kernel"] = "PrecomputedKernel"
        params["Y"] = firststeploo.reshape((train_rows, train_columns), order = 'F')
        ordinary_kernel_rls_second_step = RLS(**params)
        secondsteploo_kernel_rls = ordinary_kernel_rls_second_step.leave_one_out()
        
        #Train ordinary kernel RLS in one step with the crazy kernel for a reference
        params = {}
        params["regparam"] = 1.
        crazykernel = la.inv(regparam1 * regparam2 * np.kron(la.inv(K_train2), la.inv(K_train1))
                       + regparam1 * np.kron(np.eye(K_train2.shape[0]), la.inv(K_train1))
                       + regparam2 * np.kron(la.inv(K_train2), np.eye(K_train1.shape[0])))
        params["X"] = crazykernel
        params['kernel'] = 'PrecomputedKernel'
        params["Y"] = Y_train
        ordinary_one_step_kernel_rls_with_crazy_kernel = RLS(**params)
        fooloo = ordinary_one_step_kernel_rls_with_crazy_kernel.leave_one_out()[0]
        allinds = np.arange(trainlabelcount)
        allinds_fortran_shaped = allinds.reshape((train_rows, train_columns), order = 'F')
        hoinds = sorted(allinds_fortran_shaped[0].tolist() + allinds_fortran_shaped[1:, 0].tolist())
        hocompl = sorted(list(set(allinds)-set(hoinds)))
        fooholdout = ordinary_one_step_kernel_rls_with_crazy_kernel.holdout(hoinds)[0]
        params = {}
        params["regparam"] = 1.
        params["X"] = crazykernel[np.ix_(hocompl, hocompl)]
        params['kernel'] = 'PrecomputedKernel'
        params["Y"] = Y_train[hocompl]
        ordinary_one_step_kernel_rls_with_crazy_kernel = RLS(**params)
        barholdout = ordinary_one_step_kernel_rls_with_crazy_kernel.predict(crazykernel[np.ix_([0], hocompl)])
        params = {}
        params["regparam"] = 1.
        K_train1_cut = K_train1[np.ix_(range(1, K_train1.shape[0]), range(1, K_train1.shape[1]))]
        K_train2_cut = K_train2[np.ix_(range(1, K_train2.shape[0]), range(1, K_train2.shape[1]))]
        crazykernel_cut = la.inv(regparam1 * regparam2 * np.kron(la.inv(K_train2_cut), la.inv(K_train1_cut))
                       + regparam1 * np.kron(np.eye(K_train2_cut.shape[0]), la.inv(K_train1_cut))
                       + regparam2 * np.kron(la.inv(K_train2_cut), np.eye(K_train1_cut.shape[0])))
        params["X"] = crazykernel_cut
        params['kernel'] = 'PrecomputedKernel'
        #params["Y"] = Y_train[hocompl]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[np.ix_(range(1, train_rows), range(1, train_columns))].ravel(order = 'F')
        ordinary_one_step_kernel_rls_with_crazy_kernel = RLS(**params)
        bazholdout = ordinary_one_step_kernel_rls_with_crazy_kernel.predict(
            np.dot(np.dot(np.kron(K_train2[np.ix_([0], range(1, K_train2.shape[1]))], K_train1[np.ix_([0], range(1, K_train1.shape[1]))]),
                           la.inv(np.kron(K_train2_cut, K_train1_cut))),
                           crazykernel_cut))
        #print fooholdout, 'fooholdout', barholdout, bazholdout
        
        #Train linear two-step RLS without out-of-sample rows or columns for [0,0]
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["X1"] = X_train1[range(1, X_train1.shape[0])]
        params["X2"] = X_train2[range(1, X_train2.shape[0])]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[np.ix_(range(1, train_rows), range(1, train_columns))].ravel(order = 'F')
        linear_two_step_learner_00 = TwoStepRLS(**params)
        linear_two_step_testpred_00 = linear_two_step_learner_00.predict(X_train1[0], X_train2[0])
        
        #Train linear two-step RLS without out-of-sample rows or columns for [2,4]
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["X1"] = X_train1[[0, 1] + range(3, K_train1.shape[0])]
        params["X2"] = X_train2[[0, 1, 2, 3] + range(5, K_train2.shape[0])]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[np.ix_([0, 1] + range(3, train_rows), [0, 1, 2, 3] + range(5, train_columns))].ravel(order = 'F')
        linear_two_step_learner_24 = TwoStepRLS(**params)
        linear_two_step_testpred_24 = linear_two_step_learner_24.predict(X_train1[2], X_train2[4])
        
        #Train kernel two-step RLS without out-of-sample rows or columns for [0,0]
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1[np.ix_(range(1, K_train1.shape[0]), range(1, K_train1.shape[1]))]
        params["K2"] = K_train2[np.ix_(range(1, K_train2.shape[0]), range(1, K_train2.shape[1]))]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[np.ix_(range(1, train_rows), range(1, train_columns))].ravel(order = 'F')
        kernel_two_step_learner_00 = TwoStepRLS(**params)
        kernel_two_step_testpred_00 = kernel_two_step_learner_00.predict(K_train1[range(1, K_train1.shape[0]), 0], K_train2[0, range(1, K_train2.shape[0])])
        
        #Train kernel two-step RLS without out-of-sample rows or columns for [2,4]
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1[np.ix_([0, 1] + range(3, K_train1.shape[0]), [0, 1] + range(3, K_train1.shape[0]))]
        params["K2"] = K_train2[np.ix_([0, 1, 2, 3] + range(5, K_train2.shape[0]), [0, 1, 2, 3] + range(5, K_train2.shape[0]))]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[np.ix_([0, 1] + range(3, train_rows), [0, 1, 2, 3] + range(5, train_columns))].ravel(order = 'F')
        kernel_two_step_learner_24 = TwoStepRLS(**params)
        kernel_two_step_testpred_24 = kernel_two_step_learner_24.predict(K_train1[[0, 1] + range(3, K_train1.shape[0]), 2], K_train2[4, [0, 1, 2, 3] + range(5, K_train2.shape[0])])
        
        #Train kernel two-step RLS without out-of-sample row 0
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1[np.ix_(range(1, K_train1.shape[0]), range(1, K_train1.shape[1]))]
        params["K2"] = K_train2
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[range(1, train_rows)].ravel(order = 'F')
        kernel_two_step_learner_lro_0 = TwoStepRLS(**params)
        kernel_two_step_testpred_lro_0 = kernel_two_step_learner_lro_0.predict(K_train1[range(1, K_train1.shape[0]), 0], K_train2)
        print('')
        print('Leave-row-out with linear two-step RLS:')
        print(linear_lro.reshape((train_rows, train_columns), order = 'F')[0])
        print('Leave-row-out with kernel two-step RLS:')
        print(kernel_lro.reshape((train_rows, train_columns), order = 'F')[0])
        print('Two-step RLS trained without the held-out row predictions for the row:')
        print(kernel_two_step_testpred_lro_0)
        np.testing.assert_almost_equal(linear_lro.reshape((train_rows, train_columns), order = 'F')[0], kernel_two_step_testpred_lro_0)
        np.testing.assert_almost_equal(kernel_lro.reshape((train_rows, train_columns), order = 'F')[0], kernel_two_step_testpred_lro_0)
        
        
        #Train kernel two-step RLS without out-of-sample column 0
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1
        params["K2"] = K_train2[np.ix_(range(1, K_train2.shape[0]), range(1, K_train2.shape[1]))]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[:, range(1, train_columns)].ravel(order = 'F')
        kernel_two_step_learner_lco_0 = TwoStepRLS(**params)
        kernel_two_step_testpred_lco_0 = kernel_two_step_learner_lco_0.predict(K_train1, K_train2[range(1, K_train2.shape[0]), 0])
        print('')
        print('Leave-column-out with linear two-step RLS:')
        print(linear_lco[range(train_rows)])
        print('Leave-column-out with kernel two-step RLS:')
        print(kernel_lco[range(train_rows)])
        print('Two-step RLS trained without the held-out column predictions for the column:')
        print(kernel_two_step_testpred_lco_0)
        np.testing.assert_almost_equal(linear_lco[range(train_rows)], kernel_two_step_testpred_lco_0)
        np.testing.assert_almost_equal(kernel_lco[range(train_rows)], kernel_two_step_testpred_lco_0)
        
        print('')
        print('Out-of-sample LOO: Stacked ordinary linear RLS LOO, Stacked ordinary kernel RLS LOO, linear two-step RLS OOSLOO, kernel two-step RLS OOSLOO, linear two-step RLS OOS-pred, kernel two-step RLS OOS-pred')
        print('[0, 0]: ' + str(secondsteploo_linear_rls[0, 0])
                         + ' ' + str(secondsteploo_kernel_rls[0, 0])
                         + ' ' + str(linear_two_step_testpred_00)
                         + ' ' + str(kernel_two_step_testpred_00)
                         + ' ' + str(linear_twostepoutofsampleloo[0, 0])
                         + ' ' + str(kernel_twostepoutofsampleloo[0, 0]))
        print('[2, 4]: ' + str(secondsteploo_linear_rls[2, 4])
                         + ' ' + str(secondsteploo_kernel_rls[2, 4])
                         + ' ' + str(linear_two_step_testpred_24)
                         + ' ' + str(kernel_two_step_testpred_24)
                         + ' ' + str(linear_twostepoutofsampleloo[2, 4])
                         + ' ' + str(kernel_twostepoutofsampleloo[2, 4]))
        np.testing.assert_almost_equal(secondsteploo_linear_rls, secondsteploo_kernel_rls)
        np.testing.assert_almost_equal(secondsteploo_linear_rls, linear_twostepoutofsampleloo)
        np.testing.assert_almost_equal(secondsteploo_linear_rls, kernel_twostepoutofsampleloo)
        np.testing.assert_almost_equal(secondsteploo_linear_rls[0, 0], linear_two_step_testpred_00)
        np.testing.assert_almost_equal(secondsteploo_linear_rls[0, 0], kernel_two_step_testpred_00)
        
        
        #Train kernel two-step RLS with pre-computed kernel matrices and with output at position [2, 4] changed
        Y_24 = Y_train.copy()
        Y_24 = Y_24.reshape((train_rows, train_columns), order = 'F')
        Y_24[2, 4] = 55.
        Y_24 = Y_24.ravel(order = 'F')
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1
        params["K2"] = K_train2
        params["Y"] = Y_24
        kernel_two_step_learner_Y_24 = TwoStepRLS(**params)
        kernel_two_step_testpred_Y_24 = kernel_two_step_learner_Y_24.predict(K_test1, K_test2)
        
        
        kernel_two_step_learner_inSampleLOO_24a = kernel_two_step_learner.in_sample_loo().reshape((train_rows, train_columns), order = 'F')[2, 4]
        kernel_two_step_learner_inSampleLOO_24b = kernel_two_step_learner_Y_24.in_sample_loo().reshape((train_rows, train_columns), order = 'F')[2, 4]
        print('')
        print('In-sample LOO: Kernel two-step RLS ISLOO with original outputs, Kernel two-step RLS ISLOO with modified output at [2, 4]')
        print('[2, 4] ' + str(kernel_two_step_learner_inSampleLOO_24a) + ' ' + str(kernel_two_step_learner_inSampleLOO_24b))
        np.testing.assert_almost_equal(kernel_two_step_learner_inSampleLOO_24a, kernel_two_step_learner_inSampleLOO_24b)
        
        
        
        #Train kernel two-step RLS with pre-computed kernel matrices and with output at position [1, 1] changed
        Y_00 = Y_train.copy()
        Y_00 = Y_00.reshape((train_rows, train_columns), order = 'F')
        Y_00[0, 0] = 55.
        Y_00 = Y_00.ravel(order = 'F')
        params = {}
        params["regparam1"] = regparam1
        params["regparam2"] = regparam2
        params["K1"] = K_train1
        params["K2"] = K_train2
        params["Y"] = Y_00
        kernel_two_step_learner_Y_00 = TwoStepRLS(**params)
        kernel_two_step_testpred_Y_00 = kernel_two_step_learner_Y_00.predict(K_test1, K_test2)
        
        kernel_two_step_learner_inSampleLOO_00a = kernel_two_step_learner.in_sample_loo()[0]
        kernel_two_step_learner_inSampleLOO_00b = kernel_two_step_learner_Y_00.in_sample_loo()[0]
        print('')
        print('In-sample LOO: Kernel two-step RLS ISLOO with original outputs, Kernel two-step RLS ISLOO with modified output at [0, 0]')
        print('[0, 0] ' + str(kernel_two_step_learner_inSampleLOO_00a) + ' ' + str(kernel_two_step_learner_inSampleLOO_00b))
        np.testing.assert_almost_equal(kernel_two_step_learner_inSampleLOO_00a, kernel_two_step_learner_inSampleLOO_00b)
        
        
        #Create symmetric data
        K_train1, K_train2, Y_train, K_test1, K_test2, Y_test, X_train1, X_train2, X_test1, X_test2 \
            = self.generate_xortask(
            trainpos1 = 6,
            trainneg1 = 7,
            trainpos2 = 6,
            trainneg2 = 7,
            testpos1 = 26,
            testneg1 = 27,
            testpos2 = 25,
            testneg2 = 25
            )
        K_train1 = K_train2
        K_test1 = K_test2
        Y_train = 0.5 * (Y_train + Y_train.T)
        
        Y_train = Y_train.ravel(order = 'F')
        Y_test = Y_test.ravel(order = 'F')
        train_rows, train_columns = K_train1.shape[0], K_train2.shape[0]
        test_rows, test_columns = K_test1.shape[0], K_test2.shape[0]
        trainlabelcount = train_rows * train_columns
        
        #Train symmetric kernel two-step RLS with pre-computed kernel matrices
        params = {}
        params["regparam1"] = regparam2
        params["regparam2"] = regparam2
        params["K1"] = K_train1
        params["K2"] = K_train2
        params["Y"] = Y_train
        kernel_two_step_learner = TwoStepRLS(**params)
        kernel_two_step_testpred = kernel_two_step_learner.predict(K_test1, K_test2).reshape((test_rows, test_columns), order = 'F')
        
        #Train two-step RLS without out-of-sample rows or columns
        rowind, colind = 2, 4
        trainrowinds = list(range(K_train1.shape[0]))
        trainrowinds.remove(rowind)
        trainrowinds.remove(colind)
        traincolinds = list(range(K_train2.shape[0]))
        traincolinds.remove(rowind)
        traincolinds.remove(colind)
        
        params = {}
        params["regparam1"] = regparam2
        params["regparam2"] = regparam2
        params["K1"] = K_train1[np.ix_(trainrowinds, trainrowinds)]
        params["K2"] = K_train2[np.ix_(traincolinds, traincolinds)]
        params["Y"] = Y_train.reshape((train_rows, train_columns), order = 'F')[np.ix_(trainrowinds, traincolinds)].ravel(order = 'F')
        kernel_kron_learner = TwoStepRLS(**params)
        kernel_kron_testpred = kernel_kron_learner.predict(K_train1[np.ix_([rowind], trainrowinds)], K_train2[np.ix_([colind], traincolinds)]).reshape((1, 1), order = 'F')
        
        fcsho = kernel_two_step_learner.out_of_sample_loo_symmetric().reshape((train_rows, train_columns), order = 'F')
        
        print('')
        print('Symmetric double out-of-sample LOO: Test prediction, LOO')
        print('[2, 4]: ' + str(kernel_kron_testpred[0, 0]) + ' ' + str(fcsho[2, 4]))
        np.testing.assert_almost_equal(kernel_kron_testpred[0, 0], fcsho[2, 4])
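
The "crazy kernel" constructed above leans on the identity (A kron B)^-1 = A^-1 kron B^-1. A quick numerical confirmation of that identity, with arbitrary well-conditioned factors:

import numpy as np

rng = np.random.default_rng(3)
A = rng.standard_normal((3, 3)) + 3 * np.eye(3)   # shifted to keep A invertible
B = rng.standard_normal((2, 2)) + 2 * np.eye(2)

assert np.allclose(np.linalg.inv(np.kron(A, B)),
                   np.kron(np.linalg.inv(A), np.linalg.inv(B)))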

Example 14

Project: QuantEcon.py Source File: ce_util.py
def ckron(*arrays):
    """
    Repeatedly applies the np.kron function to an arbitrary number of
    input arrays

    Parameters
    ----------
    *arrays : tuple/list of np.ndarray

    Returns
    -------
    out : np.ndarray
        The result of repeated kronecker products

    Notes
    -----
    Based of original function `ckron` in CompEcon toolbox by Miranda
    and Fackler

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational
    Economics and Finance, MIT Press, 2002.

    """
    return reduce(np.kron, arrays)
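
Note that in Python 3 reduce lives in functools, so the snippet needs "from functools import reduce". In use, row and column counts multiply across the factors:

import numpy as np
from functools import reduce

def ckron(*arrays):
    return reduce(np.kron, arrays)

out = ckron(np.eye(2), np.ones((3, 1)), np.arange(4).reshape(2, 2))
print(out.shape)   # (12, 4): rows 2*3*2, columns 2*1*2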

Example 15

Project: scipy Source File: test_construct.py
Function: test_kron
    def test_kron(self):
        cases = []

        cases.append(array([[0]]))
        cases.append(array([[-1]]))
        cases.append(array([[4]]))
        cases.append(array([[10]]))
        cases.append(array([[0],[0]]))
        cases.append(array([[0,0]]))
        cases.append(array([[1,2],[3,4]]))
        cases.append(array([[0,2],[5,0]]))
        cases.append(array([[0,2,-6],[8,0,14]]))
        cases.append(array([[5,4],[0,0],[6,0]]))
        cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
        cases.append(array([[0,1,0,2,0,5,8]]))
        cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))

        for a in cases:
            for b in cases:
                result = construct.kron(csr_matrix(a),csr_matrix(b)).todense()
                expected = np.kron(a,b)
                assert_array_equal(result,expected)
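
Outside the test harness, the same agreement between scipy.sparse.kron and np.kron can be checked directly; the sparse version keeps the result sparse, which matters for large block matrices:

import numpy as np
from scipy.sparse import csr_matrix, kron

a = np.array([[1, 0], [2, 3]])
b = np.array([[0, 4], [5, 0]])

sparse_result = kron(csr_matrix(a), csr_matrix(b)).toarray()
assert np.array_equal(sparse_result, np.kron(a, b))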

Example 16

Project: scipy Source File: test_spfuncs.py
    def test_scale_rows_and_cols(self):
        D = matrix([[1,0,0,2,3],
                    [0,4,0,5,0],
                    [0,0,6,7,0]])

        #TODO expose through function
        S = csr_matrix(D)
        v = array([1,2,3])
        csr_scale_rows(3,5,S.indptr,S.indices,S.data,v)
        assert_equal(S.todense(), diag(v)*D)

        S = csr_matrix(D)
        v = array([1,2,3,4,5])
        csr_scale_columns(3,5,S.indptr,S.indices,S.data,v)
        assert_equal(S.todense(), D*diag(v))

        # blocks
        E = kron(D,[[1,2],[3,4]])
        S = bsr_matrix(E,blocksize=(2,2))
        v = array([1,2,3,4,5,6])
        bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v)
        assert_equal(S.todense(), diag(v)*E)

        S = bsr_matrix(E,blocksize=(2,2))
        v = array([1,2,3,4,5,6,7,8,9,10])
        bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v)
        assert_equal(S.todense(), E*diag(v))

        E = kron(D,[[1,2,3],[4,5,6]])
        S = bsr_matrix(E,blocksize=(2,3))
        v = array([1,2,3,4,5,6])
        bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v)
        assert_equal(S.todense(), diag(v)*E)

        S = bsr_matrix(E,blocksize=(2,3))
        v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
        bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v)
        assert_equal(S.todense(), E*diag(v))

Example 17

Project: scipy Source File: test_spfuncs.py
    def test_estimate_blocksize(self):
        mats = []
        mats.append([[0,1],[1,0]])
        mats.append([[1,1,0],[0,0,1],[1,0,1]])
        mats.append([[0],[0],[1]])
        mats = [array(x) for x in mats]

        blks = []
        blks.append([[1]])
        blks.append([[1,1],[1,1]])
        blks.append([[1,1],[0,1]])
        blks.append([[1,1,0],[1,0,1],[1,1,1]])
        blks = [array(x) for x in blks]

        for A in mats:
            for B in blks:
                X = kron(A,B)
                r,c = spfuncs.estimate_blocksize(X)
                assert_(r >= B.shape[0])
                assert_(c >= B.shape[1])

Example 18

Project: scipy Source File: test_spfuncs.py
    def test_count_blocks(self):
        def gold(A,bs):
            R,C = bs
            I,J = A.nonzero()
            return len(set(zip(I//R,J//C)))

        mats = []
        mats.append([[0]])
        mats.append([[1]])
        mats.append([[1,0]])
        mats.append([[1,1]])
        mats.append([[0,1],[1,0]])
        mats.append([[1,1,0],[0,0,1],[1,0,1]])
        mats.append([[0],[0],[1]])

        for A in mats:
            for B in mats:
                X = kron(A,B)
                Y = csr_matrix(X)
                for R in range(1,6):
                    for C in range(1,6):
                        assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C)))

        X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]])
        Y = csc_matrix(X)
        assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2)))
        assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2)))

Example 19

Project: GPy Source File: gp_kronecker_gaussian_regression.py
Function: predict
    def predict(self, X1new, X2new):
        """
        Return the predictive mean and variance at a series of new points X1new, X2new
        Only returns the diagonal of the predictive variance, for now.

        :param X1new: The points at which to make a prediction
        :type X1new: np.ndarray, Nnew x self.input_dim1
        :param X2new: The points at which to make a prediction
        :type X2new: np.ndarray, Nnew x self.input_dim2

        """
        k1xf = self.kern1.K(X1new, self.X1)
        k2xf = self.kern2.K(X2new, self.X2)
        A = k1xf.dot(self.U1)
        B = k2xf.dot(self.U2)
        mu = A.dot(self.Ytilde.reshape(self.num_data1, self.num_data2, order='F')).dot(B.T).flatten(order='F')
        k1xx = self.kern1.Kdiag(X1new)
        k2xx = self.kern2.Kdiag(X2new)
        BA = np.kron(B, A)
        var = np.kron(k2xx, k1xx) - np.sum(BA**2*self.Wi, 1) + self.likelihood.variance

        return mu[:, None], var[:, None]
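
The predictive mean above never materializes kron(B, A); it uses the column-major vec identity vec(A Y B^T) = (B kron A) vec(Y) instead. A small check with arbitrary shapes:

import numpy as np

rng = np.random.default_rng(2)
A = rng.standard_normal((5, 3))
B = rng.standard_normal((4, 2))
Y = rng.standard_normal((3, 2))

lhs = np.kron(B, A) @ Y.ravel(order='F')      # explicit Kronecker product
rhs = (A @ Y @ B.T).ravel(order='F')          # the cheap factored form
assert np.allclose(lhs, rhs)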

Example 20

Project: statsmodels Source File: kalmanfilter.py
def _init_diffuse(T,R):
    m = T.shape[1] # number of states
    r = R.shape[1] # should also be the number of states?
    Q_0 = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F'))
    return zeros((m,1)), Q_0.reshape(r,r,order='F')
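
This is the standard trick for the stationary state covariance: P = T P T' + R R' vectorizes (column-major) to (I - T kron T) vec(P) = vec(R R'). A self-contained check, assuming a stationary transition matrix:

import numpy as np

rng = np.random.default_rng(1)
m = 3
T = rng.standard_normal((m, m))
T = 0.5 * T / np.abs(np.linalg.eigvals(T)).max()   # force spectral radius 0.5
R = rng.standard_normal((m, m))

vecP = np.linalg.solve(np.eye(m * m) - np.kron(T, T),
                       (R @ R.T).ravel('F'))
P = vecP.reshape(m, m, order='F')
assert np.allclose(P, T @ P @ T.T + R @ R.T)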

Example 21

Project: statsmodels Source File: irf.py
Function: g
    @cache_readonly
    def G(self):
        # Gi matrices as defined on p. 111

        K = self.neqs

        # nlags = self.model.p
        # J = np.hstack((np.eye(K),) + (np.zeros((K, K)),) * (nlags - 1))

        def _make_g(i):
            # p. 111 Lutkepohl
            G = 0.
            for m in range(i):
                # be a bit cute to go faster
                idx = i - 1 - m
                if idx in self._g_memo:
                    apow = self._g_memo[idx]
                else:
                    apow = la.matrix_power(self._A.T, idx)
                    # apow = np.dot(J, apow)
                    apow = apow[:K]
                    self._g_memo[idx] = apow

                # take first K rows
                piece = np.kron(apow, self.irfs[m])
                G = G + piece

            return G

        return [_make_g(i) for i in range(1, self.periods + 1)]

Example 22

Project: tensorly Source File: _kronecker.py
def kronecker(matrices, skip_matrix=None, reverse=False):
    """Kronecker product of a list of matrices

        For more details, see [1]_

    Parameters
    ----------
    matrices : ndarray list

    skip_matrix : None or int, optional, default is None
        if not None, index of a matrix to skip

    reverse : bool, optional
        if True, the order of the matrices is reversed

    Returns
    -------
    kronecker_product: matrix of shape ``(prod(n_rows), prod(n_columns))``
        where ``prod(n_rows) = prod([m.shape[0] for m in matrices])``
        and ``prod(n_columns) = prod([m.shape[1] for m in matrices])``

    Notes
    -----
    Mathematically:

    .. math::
         \\text{If every matrix } U_k \\text{ is of size } (I_k \\times J_k),\\\\
         \\text{Then } \\left(U_1 \\otimes \\cdots \\otimes U_n \\right) \\text{ is of size } (\\prod_{k=1}^n I_k \\times \\prod_{k=1}^n J_k)

    References
    ----------
    .. [1] T.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications",
       SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009.
    """
    if skip_matrix is not None:
        matrices = [matrices[i] for i in range(len(matrices)) if i != skip_matrix]

    if reverse:
        order = -1
    else:
        order = 1

    for i, matrix in enumerate(matrices[::order]):
        if not i:
            res = matrix
        else:
            res = np.kron(res, matrix)
    return res
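
Usage, assuming the kronecker function above is in scope: factor row and column counts multiply, and skip_matrix and reverse behave as documented.

import numpy as np

U = [np.random.rand(2, 3), np.random.rand(4, 2), np.random.rand(3, 3)]

assert kronecker(U).shape == (2 * 4 * 3, 3 * 2 * 3)
assert np.allclose(kronecker(U, skip_matrix=1), np.kron(U[0], U[2]))
assert np.allclose(kronecker(U, reverse=True),
                   np.kron(np.kron(U[2], U[1]), U[0]))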

Example 23

Project: pyBAST Source File: distortion.py
def astrometry_cov(d2,scale=100.,amp=np.eye(2),var=None):
    """Evaluate covariance for gaussian process,
    given squared distance matrix and covariance parameters."""
    from numpy import exp, diag
    from scipy.linalg import block_diag
    
    S = exp( -d2 / scale ) # Correlation matrix
    C = np.kron(S, amp)    # Covariance matrix

    if var is not None:
        # If scalar measurement uncertainty ('nugget') is used
        if np.size(var) == 1: 
            C += diag(np.tile(var,C.shape[0]))

        # If measurement uncertainty is a vector
        elif np.size(var) == C.shape[0]: # Vector
            C += diag(var)

        # If measurement uncertainty is a vector of 2x2 matrices
        elif np.shape(var) == (C.shape[0] // 2, 2, 2):
            C += block_diag( *[v for v in var] )

    return C
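
The kron(S, amp) step expands each scalar correlation S[i, j] into a 2x2 block, yielding a per-coordinate covariance. In miniature, with made-up values:

import numpy as np

S = np.array([[1.0, 0.5],
              [0.5, 1.0]])        # correlation between two positions
amp = np.diag([1.0, 2.0])         # independent x/y amplitudes

C = np.kron(S, amp)               # 4x4 covariance: each S[i, j] scales amp
print(C)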

Example 24

Project: APGL Source File: KroneckerGeneratorTest.py
    def testGenerate(self):
        k = 2
        numVertices = 3
        numFeatures = 0

        vList = VertexList(numVertices, numFeatures)
        initialGraph = SparseGraph(vList)
        initialGraph.addEdge(0, 1)
        initialGraph.addEdge(1, 2)

        for i in range(numVertices):
            initialGraph.addEdge(i, i)

        d = initialGraph.diameter()
        degreeSequence = initialGraph.outDegreeSequence()
        generator = KroneckerGenerator(initialGraph, k)

        graph = generator.generate()
        d2 = graph.diameter()
        degreeSequence2 = graph.outDegreeSequence()

        self.assertTrue((numpy.kron(degreeSequence, degreeSequence) == degreeSequence2).all())
        self.assertTrue(graph.getNumVertices() == numVertices**k)
        self.assertTrue(graph.getNumDirEdges() == initialGraph.getNumDirEdges()**k)
        self.assertEquals(d, d2)

        #Try different k
        k = 3
        generator.setK(k)
        graph = generator.generate()
        d3 = graph.diameter()
        degreeSequence3 = graph.outDegreeSequence()

        self.assertTrue((numpy.kron(degreeSequence, degreeSequence2) == degreeSequence3).all())
        self.assertTrue(graph.getNumVertices() == numVertices**k)
        self.assertTrue(graph.getNumDirEdges() == initialGraph.getNumDirEdges()**k)
        self.assertEquals(d, d3)

        #Test the multinomial degree distribution
        logging.debug(degreeSequence)
        logging.debug(degreeSequence2)
        logging.debug(degreeSequence3)

Example 25

Project: APGL Source File: StochasticKroneckerGeneratorTest.py
    def testGenerateGraph(self):
        k = 2
        numVertices = 3
        numFeatures = 0

        vList = VertexList(numVertices, numFeatures)
        initialGraph = SparseGraph(vList)
        initialGraph.addEdge(0, 1)
        initialGraph.addEdge(1, 2)

        for i in range(numVertices):
            initialGraph.addEdge(i, i)

        d = initialGraph.diameter()
        degreeSequence = initialGraph.outDegreeSequence()
        generator = StochasticKroneckerGenerator(initialGraph, k)

        graph = generator.generateGraph()
        d2 = graph.diameter()
        degreeSequence2 = graph.outDegreeSequence()

        self.assertTrue((numpy.kron(degreeSequence, degreeSequence) == degreeSequence2).all())
        self.assertTrue(graph.getNumVertices() == numVertices**k)
        self.assertTrue(graph.getNumDirEdges() == initialGraph.getNumDirEdges()**k)
        self.assertEquals(d, d2)

        #Try different k
        k = 3
        generator.setK(k)
        graph = generator.generateGraph()
        d3 = graph.diameter()
        degreeSequence3 = graph.outDegreeSequence()

        self.assertTrue((numpy.kron(degreeSequence, degreeSequence2) == degreeSequence3).all())
        self.assertTrue(graph.getNumVertices() == numVertices**k)
        self.assertTrue(graph.getNumDirEdges() == initialGraph.getNumDirEdges()**k)
        self.assertEquals(d, d3)

        #Test the multinomial degree distribution
        logging.debug(degreeSequence)
        logging.debug(degreeSequence2)
        logging.debug(degreeSequence3)

Example 26

Project: cvxpy Source File: kron.py
    @AffAtom.numpy_numeric
    def numeric(self, values):
        """Kronecker product of the two values.
        """
        return np.kron(values[0], values[1])

Example 27

Project: dolo Source File: matrix_equations.py
def solve_sylvester(A,B,C,D,Ainv = None):
    # Solves the equation A X + B X [C,...,C] + D = 0,
    # where X is a multilinear function whose dimensions are determined by D.
    # The inverse of A can optionally be specified as an argument.

    import slycot

    n_d = D.ndim - 1
    n_v = C.shape[1]

    n_c = D.size//n_v**n_d


#    import dolo.config
#    opts = dolo.config.use_engine
#    if opts['sylvester']:
#        DD = D.flatten().reshape( n_c, n_v**n_d)
#        [err,XX] = dolo.config.engine.engine.feval(2,'gensylv',n_d,A,B,C,-DD)
#        X = XX.reshape( (n_c,)+(n_v,)*(n_d))

    DD = D.reshape( n_c, n_v**n_d )

    if n_d == 1:
        CC = C
    else:
        CC = np.kron(C,C)
    for i in range(n_d-2):
        CC = np.kron(CC,C)

    if Ainv is not None:
        Q = sdot(Ainv,B)
        S = sdot(Ainv,DD)
    else:
        Q = np.linalg.solve(A,B)
        S = np.linalg.solve(A,DD)

    n = n_c
    m = n_v**n_d

    XX = slycot.sb04qd(n,m,Q,CC,-S)

    X = XX.reshape( (n_c,)+(n_v,)*(n_d) )

    return X

Example 28

Project: C-PAC Source File: python_ncut_lib.py
import sys
from numpy import (abs, array, asarray, divide, kron, matrix, multiply, ones,
                   reshape, shape, sqrt, zeros)
from numpy.random import rand
from numpy.linalg import svd, LinAlgError
from scipy.sparse import csc_matrix


def discretisation(eigen_vec):
    eps = 2.2204e-16

    # normalize the eigenvectors
    [n, k] = shape(eigen_vec)
    vm = kron(ones((1, k)), sqrt(multiply(eigen_vec, eigen_vec).sum(1)))
    eigen_vec = divide(eigen_vec, vm)

    svd_restarts = 0
    exitLoop = 0

    # if there is an exception we try to randomize and rerun the SVD,
    # doing this at most 30 times
    while (svd_restarts < 30) and (exitLoop == 0):

        # initialize the algorithm with a random ordering of eigenvectors
        c = zeros((n, 1))
        R = matrix(zeros((k, k)))
        R[:, 0] = eigen_vec[int(rand(1) * n), :].transpose()

        for j in range(1, k):
            c = c + abs(eigen_vec * R[:, j - 1])
            R[:, j] = eigen_vec[c.argmin(), :].transpose()

        lastObjectiveValue = 0
        nbIterationsDiscretisation = 0
        nbIterationsDiscretisationMax = 20

        # iteratively rotate the discretised eigenvectors until they are
        # maximally similar to the input eigenvectors; this converges when
        # the difference between the current solution and the previous
        # solution is less than eps, or when the maximum number of
        # iterations has been reached
        while exitLoop == 0:
            nbIterationsDiscretisation = nbIterationsDiscretisation + 1

            # rotate the original eigenvectors
            tDiscrete = eigen_vec * R

            # discretise the result by setting the max of each row to 1 and
            # all other values to 0
            j = reshape(asarray(tDiscrete.argmax(1)), n)
            eigenvec_discrete = csc_matrix((ones(len(j)), (range(0, n), array(j))), shape=(n, k))

            # calculate a rotation to bring the discrete eigenvector cluster
            # to the original eigenvectors
            tSVD = eigenvec_discrete.transpose() * eigen_vec
            # catch an SVD convergence error and restart
            try:
                U, S, Vh = svd(tSVD)
                svd_restarts += 1
            except LinAlgError:
                # catch the exception and go back to the beginning of the loop
                print("SVD did not converge, randomizing and trying again", file=sys.stderr)
                break

            # test for convergence
            NcutValue = 2 * (n - S.sum())
            if ((abs(NcutValue - lastObjectiveValue) < eps) or
                    (nbIterationsDiscretisation > nbIterationsDiscretisationMax)):
                exitLoop = 1
            else:
                # otherwise calculate the rotation and continue
                lastObjectiveValue = NcutValue
                R = matrix(Vh).transpose() * matrix(U).transpose()

    if exitLoop == 0:
        raise SVDError("SVD did not converge after 30 retries")
    else:
        return eigenvec_discrete
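
The kron(ones((1, k)), ...) call at the top tiles the column of row norms across k columns to normalize each eigenvector row; it is equivalent to a broadcast division, as this compact check shows:

import numpy as np

v = np.array([[3.0, 4.0],
              [6.0, 8.0]])
norms = np.sqrt((v * v).sum(1, keepdims=True))   # column of row norms
vm = np.kron(np.ones((1, 2)), norms)             # norms tiled across columns
assert np.allclose(v / vm, v / norms)            # same as plain broadcasting
print(v / vm)                                    # rows rescaled to unit length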

Example 29

Project: Analyzing_Neural_Time_Series Source File: firls.py
from numpy import (append, arange, array, cos, kron, mat, ones, pi, sin,
                   zeros_like)
from numpy.linalg import inv
from scipy.linalg import toeplitz, hankel


def firls(m, bands, desired, weight=None):
    """
    FIR filter design using the least squares method.

    Inputs :
        m : order of the FIR filter

        bands : A monotonic sequence containing the band edges. All elements
                must be non-negative and no greater than 1, where 1
                corresponds to the Nyquist frequency (pi in radian units).
        desired : A sequence of the same size as bands containing the desired
                gain in each of the specified bands.
        weight : A relative weighting to give to each band region.

    Output :
        h : coefficients of a length m+1 FIR filter.

    Example :
        h = firls(50, [0,0.2,0.3,1.0], [1,1,0,0], [1,5.0])

        calculates the impulse response of a 51-tap lowpass filter designed
        by least squares, with passband == [0, 0.2*pi], stopband ==
        [0.3*pi, pi], weight 1 on the passband and weight 5 on the stopband.

    Note : This function is adapted from the signal package for Octave
    (http://octave.sourceforge.net/signal/index.html)
    """
    if weight is None:
        weight = ones(len(bands) // 2)

    bands, desired, weight = array(bands), array(desired), array(weight)

    M = m // 2
    w = kron(weight, [-1,1])
    omega = bands * pi
    i1 = arange(1,M+1)
    
    # generate the matrix q
    # as illustrated in the above-cited reference, the matrix can be
    # expressed as the sum of a hankel and toeplitz matrix. a factor of
    # 1/2 has been dropped and the final filter coefficients multiplied
    # by 2 to compensate.
    cos_ints = append(omega, sin(mat(arange(1,m+1)).T*mat(omega))).reshape((-1,omega.shape[0]))
    q = append(1, 1.0/arange(1.0,m+1)) * array(mat(cos_ints) * mat(w).T).T[0]
    q = toeplitz(q[:M+1]) + hankel(q[:M+1], q[M : ])
    
    # the vector b is derived from solving the integral:
    #
    #           _ w
    #          /   2
    #  b  =   /       w(w) d(w) cos(kw) dw
    #   k    /    w
    #       -      1
    #
    # since we assume that w(w) is constant over each band (if not, the
    # computation of q above would be considerably more complex), but
    # d(w) is allowed to be a linear function, in general the function
    # w(w) d(w) is linear. the computations below are derived from the
    # fact that:
    #     _
    #    /                          a              ax + b
    #   /   (ax + b) cos(nx) dx =  --- cos (nx) +  ------ sin(nx)
    #  /                             2                n
    # -                             n
    #

    
    enum = append(omega[::2]**2 - omega[1::2]**2, cos(mat(i1).T * mat(omega[1::2])) - cos(mat(i1).T * mat(omega[::2]))).flatten()
    deno = mat(append(2, i1)).T * mat(omega[1::2] - omega[::2])
    cos_ints2 = enum.reshape(deno.shape)/array(deno)
    
    d = zeros_like(desired)
    d[::2]  = -weight * desired[::2]
    d[1::2] =  weight * desired[1::2]
    
    b = append(1, 1.0/i1) * array(mat(kron(cos_ints2, [1, 1]) + cos_ints[:M+1,:]) * mat(d).T)[:,0]
    
    # having computed the components q and b of the matrix equation,
    # solve for the filter coefficients.
    a = (array(inv(q)*mat(b).T).T)[0]
    h = append( a[:0:-1], append(2*a[0],  a[1:]))
    return h
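
One detail worth noting above: kron(weight, [-1, 1]) interleaves signs, turning per-band weights into alternating signed weights at the band edges.

import numpy as np

weight = np.array([1.0, 5.0])
print(np.kron(weight, [-1, 1]))   # [-1.  1. -5.  5.]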

Example 30

Project: scikit-tensor Source File: dedicom.py
Function: update_r
def __updateR(X, A, D, R, nne):
    r = A.shape[1] ** 2
    T = zeros((r, r))
    t = zeros(r)
    for i in range(len(X)):
        AD = dot(A, diag(D[i, :]))
        ADt = AD.T
        tmp = dot(ADt, AD)
        T = T + kron(tmp, tmp)
        tmp = dot(ADt, X[i].dot(AD))
        t = t + tmp.flatten()
    r = A.shape[1]
    if nne > 0:
        Rflat = R.flatten()
        T = dot(T, Rflat) + nne
        R = (Rflat * t / T).reshape(r, r)
    else:
        # TODO check if this is correct
        R = solve(T, t).reshape(r, r)
        #R = (pinv(T + eye(r ** 2)).dot(t)).reshape(r, r)
    return R

Example 31

Project: ttpy Source File: tt_min.py
Function: mkron
def mkron(a, b):
    return np.kron(a, b)