numpy.linalg.svd

Here are examples of the Python API numpy.linalg.svd, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

144 Examples

Example 51

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_regression.py
    def test_svd_no_uv(self):
        # gh-4733
        for shape in (3, 4), (4, 4), (4, 3):
            for t in float, complex:
                a = np.ones(shape, dtype=t)
                w = linalg.svd(a, compute_uv=False)
                c = np.count_nonzero(np.absolute(w) > 0.5)
                assert_equal(c, 1)
                assert_equal(np.linalg.matrix_rank(a), 1)
                assert_array_less(1, np.linalg.norm(a, ord=2))

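A quick standalone sketch (not part of the test suite above) of why these assertions hold: a constant matrix has rank one, so exactly one singular value is nonzero.

import numpy as np

a = np.ones((3, 4))
w = np.linalg.svd(a, compute_uv=False)
print(w)                                       # [3.4641..., ~0, ~0]; only sqrt(3*4) survives
print(np.count_nonzero(np.absolute(w) > 0.5))  # 1
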
Example 52

Project: trimesh Source File: points.py
def major_axis(points):
    '''
    Returns an approximate vector representing the major axis of points
    '''
    u,s,v = np.linalg.svd(points)
    axis_guess = v[np.argmax(s)]
    return axis_guess 

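A minimal usage sketch (not from trimesh) with synthetic points elongated along x; the dominant right singular vector recovers that direction up to sign. Note the points are assumed to be roughly centered, since the SVD here is taken of the raw coordinates.

import numpy as np

points = np.random.randn(500, 3) * np.array([10.0, 1.0, 0.1])
u, s, v = np.linalg.svd(points)
axis_guess = v[np.argmax(s)]
print(axis_guess)                              # approximately [+-1, ~0, ~0]
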
Example 53

Project: trimesh Source File: points.py
Function: surface_normal
def surface_normal(points):
    '''
    Returns a normal estimate of a group of points using SVD

    Arguments
    ---------
    points: (n,d) set of points

    Returns
    ---------
    normal: (d) vector
    '''
    normal = np.linalg.svd(points)[2][-1]
    return normal

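A hedged sketch of the same idea (synthetic data, not from trimesh): for noisy, roughly centered samples of the z = 0 plane, the right singular vector with the smallest singular value approximates the plane normal.

import numpy as np

points = np.random.randn(500, 3) * np.array([1.0, 1.0, 0.01])
normal = np.linalg.svd(points)[2][-1]
print(normal)                                  # approximately [~0, ~0, +-1]
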
Example 54

Project: rescal.py Source File: rescal.py
Function: update_r
def _updateR(X, A, lmbdaR):
    _log.debug('Updating R (SVD) lambda R: %s' % str(lmbdaR))
    rank = A.shape[1]
    U, S, Vt = svd(A, full_matrices=False)
    Shat = kron(S, S)
    Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank)
    R = []
    for i in range(len(X)):
        Rn = Shat * dot(U.T, X[i].dot(U))
        Rn = dot(Vt.T, dot(Rn, Vt))
        R.append(Rn)
    return R

Example 55

Project: pylearn-parsimony Source File: test_svd.py
    def get_err_by_np_linalg_svd(self, computed_v, X):
        # svd from numpy array
        U, s_np, V = np.linalg.svd(X)
        np_v = V[[0], :].T

        sign = np.dot(computed_v.T, np_v)[0][0]
        np_v_new = np_v * sign
        err = np.linalg.norm(computed_v - np_v_new)

        return err

Example 56

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_linalg.py
Function: do
    def do(self, a, b):
        u, s, vt = linalg.svd(a, 0)
        assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
                                           np.asarray(vt)),
                        rtol=get_rtol(u.dtype))
        assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
        assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))

Example 57

Project: ttpy Source File: tt_min.py
def mysvd(a, full_matrices=False):
    try:
        return np.linalg.svd(a, full_matrices)
    except np.linalg.LinAlgError:
        # SVD occasionally fails to converge; retry on a slightly
        # perturbed copy of the matrix.
        return np.linalg.svd(a + np.max(np.abs(a).flatten()) * 1e-14 *
                             np.random.randn(a.shape[0], a.shape[1]), full_matrices)

Example 58

Project: nipy Source File: reml.py
Function: orth
def orth(X, tol=1.0e-07):
    """
    
    Compute orthonormal basis for the column span of X.
    
    Rank is determined by zeroing all singular values, u, less
    than or equal to tol*u.max().

    INPUTS:
        X  -- n-by-p matrix

    OUTPUTS:
        B  -- n-by-rank(X) matrix with orthonormal columns spanning
              the column rank of X
    """

    B, u, _ = npl.svd(X, full_matrices=False)
    nkeep = np.greater(u, tol*u.max()).astype(int).sum()
    return B[:,:nkeep]

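A standalone sketch (hypothetical data, not from nipy) exercising the same logic: a 4x3 matrix whose third column is the sum of the first two has a two-dimensional column span.

import numpy as np

X = np.random.randn(4, 2)
X = np.hstack([X, X.sum(axis=1, keepdims=True)])   # rank 2 by construction
B, u, _ = np.linalg.svd(X, full_matrices=False)
nkeep = np.greater(u, 1e-7 * u.max()).astype(int).sum()
print(B[:, :nkeep].shape)                          # (4, 2); columns are orthonormal
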
Example 59

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_linalg.py
Function: test_types
    def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            u, s, vh = linalg.svd(x)
            assert_equal(u.dtype, dtype)
            assert_equal(s.dtype, get_real_dtype(dtype))
            assert_equal(vh.dtype, dtype)
            s = linalg.svd(x, compute_uv=False)
            assert_equal(s.dtype, get_real_dtype(dtype))

        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype

Example 60

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_regression.py
Function: test_svd_build
    def test_svd_build(self, level=rlevel):
        # Ticket 627.
        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
        m, n = a.shape
        u, s, vh = linalg.svd(a)

        b = dot(transpose(u[:, n:]), a)

        assert_array_almost_equal(b, np.zeros((2, 2)))

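A sketch of what this regression test checks: with full_matrices=True (the default), the trailing m - n columns of u are orthogonal to the column space of a, so projecting a onto them gives zeros.

import numpy as np

a = np.array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
u, s, vh = np.linalg.svd(a)            # u is m x m here
print(np.allclose(u[:, n:].T @ a, 0))  # True
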
Example 61

Project: chumpy Source File: linalg.py
    @depends_on('x')
    def UDV(self):
        result = np.linalg.svd(self.x.r, full_matrices=False)
        result = [result[0], result[1], result[2].T]
        result[1][np.abs(result[1]) < np.spacing(1)] = 0.
        return result

Example 62

Project: nupic Source File: mvn.py
Function: init
    def __init__(self, mean1, mean2, Sigma11, Sigma12, Sigma22):
      Sigma11 = numpy.asmatrix(Sigma11)
      Sigma12 = numpy.asmatrix(Sigma12)
      Sigma22 = numpy.asmatrix(Sigma22)
      u22, s22, vt22 = svd(Sigma22, full_matrices=0, compute_uv=1)

      rank22 = getRank(Sigma22.shape[1], s22)
      s22i = numpy.zeros(len(s22))
      s22i[0:rank22] = 1.0 / s22[0:rank22]
      # Rest are zeroes.
      Sigma22i = u22 * numpy.asmatrix(numpy.diag(s22i)) * vt22

      self.mean1 = mean1
      self.mean2 = mean2
      self.Sigma11 = Sigma11
      self.Sigma12 = Sigma12
      self.Sigma22i = Sigma22i

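The Sigma22i construction above is a Moore-Penrose pseudo-inverse assembled from the SVD, with getRank (project-specific, not shown here) deciding how many singular values to invert. A standalone sketch of the same idea, using a simple relative tolerance in place of getRank:

import numpy as np

def pinv_via_svd(A, rtol=1e-12):
    u, s, vt = np.linalg.svd(A, full_matrices=False)
    s_inv = np.zeros_like(s)
    keep = s > rtol * s[0]
    s_inv[keep] = 1.0 / s[keep]        # invert only the significant values
    return (vt.T * s_inv) @ u.T

A = np.array([[1., 2.], [2., 4.]])                      # rank one
print(np.allclose(pinv_via_svd(A), np.linalg.pinv(A)))  # True
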
Example 63

Project: nupic Source File: mvn.py
Function: init
  def __init__(self, mean, varcov):
    if mean is not None: self.mean = numpy.asarray(mean)
    else: self.mean = None
    varcov = numpy.asmatrix(varcov)
    self.d = varcov.shape[1]
    self.varcov = varcov
    self.u, self.s, self.vt = svd(varcov, full_matrices=0, compute_uv=1)
    self.rank = getRank(self.d, self.s)

Example 64

Project: scikit-tensor Source File: rescal.py
Function: update_r
def _updateR(X, A, lmbdaR):
    rank = A.shape[1]
    U, S, Vt = svd(A, full_matrices=False)
    Shat = kron(S, S)
    Shat = (Shat / (Shat ** 2 + lmbdaR)).reshape(rank, rank)
    R = []
    for i in range(len(X)):
        Rn = Shat * dot(U.T, X[i].dot(U))
        Rn = dot(Vt.T, dot(Rn, Vt))
        R.append(Rn)
    return R

Example 65

Project: dipy Source File: shm.py
def hat(B):
    """Returns the hat matrix for the design matrix B
    """

    U, S, V = svd(B, False)
    H = dot(U, U.T)
    return H

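A hedged usage sketch (not from dipy): since U has orthonormal columns spanning the column space of B, H = U U' is the orthogonal projector onto that space, so H y reproduces the least-squares fit.

import numpy as np

B = np.random.randn(10, 3)
y = np.random.randn(10)
U, S, V = np.linalg.svd(B, full_matrices=False)
H = U @ U.T
coef, *_ = np.linalg.lstsq(B, y, rcond=None)
print(np.allclose(H @ y, B @ coef))    # True
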
Example 66

Project: apsg Source File: strain.py
    @property
    def eigenvects(self):
        U, _, _ = np.linalg.svd(self)
        return Group([Vec3(U.T[0]),
                      Vec3(U.T[1]),
                      Vec3(U.T[2])])

Example 67

Project: SciDB-Py Source File: test_basic.py
Function: test_svd
def test_svd():
    # chunk_size=32 currently required for svd
    A = sdb.random((6, 10), chunk_size=32)
    U, S, VT = sdb.svd(A)
    U2, S2, VT2 = np.linalg.svd(A.toarray(), full_matrices=False)

    assert_allclose(np.mat(U.toarray()) * np.diag(S.toarray()) * np.mat(VT.toarray()), np.mat(A.toarray()), rtol=RTOL)

    assert_allclose(np.absolute(U.toarray()), np.absolute(U2), rtol=RTOL)
    assert_allclose(np.absolute(S.toarray()), np.absolute(S2), rtol=RTOL)
    assert_allclose(np.absolute(VT.toarray()), np.absolute(VT2), rtol=RTOL)

Example 68

Project: twitch-troll-detection Source File: ComputePCA.py
Function: calculate_svd
    def calculateSVD(self):
        print "Calculating SVD..."
        U, s, Vh = numpy.linalg.svd(self.feature_matrix)
        print "Finished calculating SVD. Writing to files.."

Example 69

Project: QuantEcon.py Source File: rank_nullspace.py
def rank_est(A, atol=1e-13, rtol=0):
    """
    Estimate the rank (i.e. the dimension of the nullspace) of a matrix.

    The algorithm used by this function is based on the singular value
    decomposition of `A`.

    Parameters
    ----------
    A : array_like(float, ndim=1 or 2)
        A should be at most 2-D.  A 1-D array with length n will be
        treated as a 2-D with shape (1, n)
    atol : scalar(float), optional(default=1e-13)
        The absolute tolerance for a zero singular value.  Singular
        values smaller than `atol` are considered to be zero.
    rtol : scalar(float), optional(default=0)
        The relative tolerance.  Singular values less than rtol*smax are
        considered to be zero, where smax is the largest singular value.

    Returns
    -------
    r : scalar(int)
        The estimated rank of the matrix.

    Note: If both `atol` and `rtol` are positive, the combined tolerance
    is the maximum of the two; that is:

        tol = max(atol, rtol * smax)

    Note: Singular values smaller than `tol` are considered to be zero.

    See also
    --------
    numpy.linalg.matrix_rank
        matrix_rank is basically the same as this function, but it does
        not provide the option of the absolute tolerance.

    """

    A = np.atleast_2d(A)
    s = svd(A, compute_uv=False)
    tol = max(atol, rtol * s[0])
    rank = int((s >= tol).sum())

    return rank

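A minimal usage sketch with the default tolerances (atol=1e-13, rtol=0): stacking a matrix on a copy of itself changes the shape but not the rank.

import numpy as np

M = np.random.randn(3, 5)
A = np.vstack([M, M])                  # 6 x 5, but still rank 3
s = np.linalg.svd(np.atleast_2d(A), compute_uv=False)
tol = max(1e-13, 0 * s[0])
print(int((s >= tol).sum()))           # 3
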
Example 70

Project: scipy Source File: _solvers.py
def _are_validate_args(a, b, q, r, e, s, eq_type='care'):
    """
    A helper function to validate the arguments supplied to the
    Riccati equation solvers. Any discrepancy found in the input
    matrices leads to a ``ValueError`` exception.

    Essentially, it performs:

        - a check whether the input is free of NaN and Infs.
        - a pass for the data through ``numpy.atleast_2d()``
        - squareness check of the relevant arrays,
        - shape consistency check of the arrays,
        - singularity check of the relevant arrays,
        - symmetry check of the relevant matrices,
        - a check whether the regular or the generalized version is asked.

    This function is used by ``solve_continuous_are`` and
    ``solve_discrete_are``.

    Parameters
    ----------
    a, b, q, r, e, s : array_like
        Input data
    eq_type : str
        Accepted arguments are 'care' and 'dare'.

    Returns
    -------
    a, b, q, r, e, s : ndarray
        Regularized input data
    m, n : int
        shape of the problem
    r_or_c : type
        Data type of the problem, returns float or complex
    gen_or_not : bool
        Type of the equation, True for generalized and False for regular ARE.

    """

    if not eq_type.lower() in ('dare', 'care'):
        raise ValueError("Equation type unknown. "
                         "Only 'care' and 'dare' is understood")

    a = np.atleast_2d(_asarray_validated(a, check_finite=True))
    b = np.atleast_2d(_asarray_validated(b, check_finite=True))
    q = np.atleast_2d(_asarray_validated(q, check_finite=True))
    r = np.atleast_2d(_asarray_validated(r, check_finite=True))

    # Get the correct data types otherwise Numpy complains
    # about pushing complex numbers into real arrays.
    r_or_c = complex if np.iscomplexobj(b) else float

    for ind, mat in enumerate((a, q, r)):
        if np.iscomplexobj(mat):
            r_or_c = complex

        if not np.equal(*mat.shape):
            raise ValueError("Matrix {} should be square.".format("aqr"[ind]))

    # Shape consistency checks
    m, n = b.shape
    if m != a.shape[0]:
        raise ValueError("Matrix a and b should have the same number of rows.")
    if m != q.shape[0]:
        raise ValueError("Matrix a and q should have the same shape.")
    if n != r.shape[0]:
        raise ValueError("Matrix b and r should have the same number of cols.")

    # Check if the data matrices q, r are (sufficiently) hermitian
    for ind, mat in enumerate((q, r)):
        if norm(mat - mat.conj().T, 1) > np.spacing(norm(mat, 1))*100:
            raise ValueError("Matrix {} should be symmetric/hermitian."
                             "".format("qr"[ind]))

    # Continuous time ARE should have a nonsingular r matrix.
    if eq_type == 'care':
        min_sv = svd(r, compute_uv=False)[-1]
        if min_sv == 0. or min_sv < np.spacing(1.)*norm(r, 1):
            raise ValueError('Matrix r is numerically singular.')

    # Check if the generalized case is required with omitted arguments
    # perform late shape checking etc.
    generalized_case = e is not None or s is not None

    if generalized_case:
        if e is not None:
            e = np.atleast_2d(_asarray_validated(e, check_finite=True))
            if not np.equal(*e.shape):
                raise ValueError("Matrix e should be square.")
            if m != e.shape[0]:
                raise ValueError("Matrix a and e should have the same shape.")
            # numpy.linalg.cond doesn't check for exact zeros and
            # emits a runtime warning. Hence the following manual check.
            min_sv = svd(e, compute_uv=False)[-1]
            if min_sv == 0. or min_sv < np.spacing(1.) * norm(e, 1):
                raise ValueError('Matrix e is numerically singular.')
            if np.iscomplexobj(e):
                r_or_c = complex
        if s is not None:
            s = np.atleast_2d(_asarray_validated(s, check_finite=True))
            if s.shape != b.shape:
                raise ValueError("Matrix b and s should have the same shape.")
            if np.iscomplexobj(s):
                r_or_c = complex

    return a, b, q, r, e, s, m, n, r_or_c, generalized_case

Example 71

Project: smartlearner Source File: initializers.py
Function: generate_array
    def _generate_array(self, dim):
        max_dim = max(dim)
        uniform_initer = UniformInitializer()
        uniform_initer.rng = self.rng
        return np.asarray(np.linalg.svd(uniform_initer((max_dim, max_dim)))[2][:dim[0], :dim[1]], dtype=floatX)

Example 72

Project: RLScore Source File: space_efficient_greedy_rls.py
Function: solve_bu
    def solve_bu(self, regparam):
        
        X = self.X
        Y = self.Y
        
        tsize = self.size
        fsize = X.shape[0]
        assert X.shape[1] == tsize
        self.A = np.mat(np.zeros((fsize, Y.shape[1])))
        
        rp = regparam
        rpinv = 1. / rp
        
        desiredfcount = self.desiredfcount
        if not fsize >= desiredfcount:
            raise Exception('The overall number of features ' + str(fsize) + ' is smaller than the desired number ' + str(desiredfcount) + ' of features to be selected.')
        
        #Bias
        bias_slice = np.sqrt(self.bias)*np.mat(np.ones((1,X.shape[1]),dtype=np.float64))
        cv = np.sqrt(self.bias)*np.mat(np.ones((1, tsize)))
        ca = rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv)
        
        self.dualvec = rpinv * Y - cv.T * rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv * Y)
        
        diagG = []
        for i in range(tsize):
            diagGi = rpinv - cv.T[i, 0] * ca[0, i]
            diagG.append(diagGi)
        diagG = np.mat(diagG).T
        
        U, S, VT = la.svd(cv, full_matrices = False)
        U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
        Omega = 1. / (S * S + rp) - rpinv
        
        self.selected = []
        
        currentfcount = 0
        self.performances = []
        while currentfcount < desiredfcount:
            
            if not self.measure is None:
                bestlooperf = None
            else:
                bestlooperf = float('inf')
            
            self.looperf = []
            for ci in range(fsize):
                if ci in self.selected: continue
                cv = X[ci]
                GXT_ci = VT.T * np.multiply(Omega.T, (VT * cv.T)) + rpinv * cv.T #GXT[:, ci]
                ca = GXT_ci * (1. / (1. + cv * GXT_ci))
                updA = self.dualvec - ca * (cv * self.dualvec)
                invupddiagG = 1. / (diagG - np.multiply(ca, GXT_ci))
                
                if not self.measure is None:
                    loopred = Y - np.multiply(invupddiagG, updA)
                    looperf_i = self.measure.multiOutputPerformance(Y, loopred)
                    if bestlooperf is None:
                        bestlooperf = looperf_i
                        bestcind = ci
                    if self.measure.comparePerformances(looperf_i, bestlooperf) > 0:
                        bestcind = ci
                        bestlooperf = looperf_i
                else:
                    #This default squared performance is a bit faster to compute than the one loaded separately.
                    loodiff = np.multiply(invupddiagG, updA)
                    #looperf_i = (loodiff.T * loodiff)[0, 0]
                    looperf_i = np.mean(np.sum(np.multiply(loodiff, loodiff), axis = 0))
                    if looperf_i < bestlooperf:
                        bestcind = ci
                        bestlooperf = looperf_i
                self.looperf.append(looperf_i)
            self.looperf = np.mat(self.looperf)
            
            self.bestlooperf = bestlooperf
            self.performances.append(bestlooperf)
            #cv = listX[bestcind]
            cv = X[bestcind]
            #GXT_bci = GXT[:, bestcind]
            GXT_bci = VT.T * np.multiply(Omega.T, (VT * cv.T)) + rpinv * cv.T
            ca = GXT_bci * (1. / (1. + cv * GXT_bci))
            self.dualvec = self.dualvec - ca * (cv * self.dualvec)
            diagG = diagG - np.multiply(ca, GXT_bci)
            #GXT = GXT - ca * (cv * GXT)
            self.selected.append(bestcind)
            X_sel = X[self.selected]
            if isinstance(X_sel, sp.base.spmatrix):
                X_sel = X_sel.todense()
            U, S, VT = la.svd(np.vstack([X_sel, bias_slice]), full_matrices = False)
            U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
            Omega = 1. / (np.multiply(S, S) + rp) - rpinv
            #print self.selected
            #print self.performances
            currentfcount += 1
            
            #Linear predictor with bias
            self.A[self.selected] = X[self.selected] * self.dualvec
            self.b = bias_slice * self.dualvec
            
            if not self.callbackfun is None:
                self.callbackfun.callback(self)
        if not self.callbackfun is None:
            self.callbackfun.finished(self)
        self.A[self.selected] = X[self.selected] * self.dualvec
        self.b = bias_slice * self.dualvec
        self.results['selected_features'] = self.selected
        self.results['GreedyRLS_LOO_performances'] = self.performances

Example 73

Project: QuantEcon.py Source File: rank_nullspace.py
Function: null_space
def nullspace(A, atol=1e-13, rtol=0):
    """
    Compute an approximate basis for the nullspace of A.

    The algorithm used by this function is based on the singular value
    decomposition of `A`.

    Parameters
    ----------
    A : array_like(float, ndim=1 or 2)
        A should be at most 2-D.  A 1-D array with length k will be
        treated as a 2-D with shape (1, k)
    atol : scalar(float), optional(default=1e-13)
        The absolute tolerance for a zero singular value.  Singular
        values smaller than `atol` are considered to be zero.
    rtol : scalar(float), optional(default=0)
        The relative tolerance.  Singular values less than rtol*smax are
        considered to be zero, where smax is the largest singular value.

    Returns
    -------
    ns : array_like(float, ndim=2)
        If `A` is an array with shape (m, k), then `ns` will be an array
        with shape (k, n), where n is the estimated dimension of the
        nullspace of `A`.  The columns of `ns` are a basis for the
        nullspace; each element in numpy.dot(A, ns) will be
        approximately zero.

    Note: If both `atol` and `rtol` are positive, the combined tolerance
    is the maximum of the two; that is:

        tol = max(atol, rtol * smax)

    Note: Singular values smaller than `tol` are considered to be zero.

    """

    A = np.atleast_2d(A)
    u, s, vh = svd(A)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T

    return ns

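A quick sketch of the same computation on a rank-one matrix (hypothetical data): the two rows are proportional, so the nullspace of the 2x3 matrix is two-dimensional.

import numpy as np

A = np.array([[1., 2., 3.],
              [2., 4., 6.]])           # rank one
u, s, vh = np.linalg.svd(A)
nnz = (s >= max(1e-13, 0 * s[0])).sum()
ns = vh[nnz:].conj().T
print(ns.shape)                        # (3, 2)
print(np.allclose(A @ ns, 0))          # True
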
Example 74

Project: statsmodels Source File: regressionplots.py
def ceres_resids(results, focus_exog, frac=0.66, cond_means=None):
    """
    Calculate the CERES residuals (Conditional Expectation Partial
    Residuals) for a fitted model.

    Parameters
    ----------
    results : model results instance
        The fitted model for which the CERES residuals are calculated.
    focus_exog : int
        The column of results.model.exog used as the 'focus variable'.
    frac : float, optional
        Lowess smoothing parameter for estimating the conditional
        means.  Not used if `cond_means` is provided.
    cond_means : array-like, optional
        If provided, the columns of this array are the conditional
        means E[exog | focus exog], where exog ranges over some
        or all of the columns of exog other than focus exog.  If
        this is an empty nx0 array, the conditional means are
        treated as being zero.  If None, the conditional means are
        estimated.

    Returns
    -------
    An array containing the CERES residuals.

    Notes
    -----
    If `cond_means` is not provided, it is obtained by smoothing each
    column of exog (except the focus column) against the focus column.

    Currently only supports GLM, GEE, and OLS models.
    """

    model = results.model

    if not isinstance(model, (GLM, GEE, OLS)):
        raise ValueError("ceres residuals not available for %s" %
                         model.__class__.__name__)

    focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)

    # Indices of non-focus columns
    ix_nf = range(len(results.params))
    ix_nf = list(ix_nf)
    ix_nf.pop(focus_col)
    nnf = len(ix_nf)

    # Estimate the conditional means if not provided.
    if cond_means is None:

        # Below we calculate E[x | focus] where x is each column other
        # than the focus column.  We don't want the intercept when we do
        # this so we remove it here.
        pexog = model.exog[:, ix_nf]
        pexog -= pexog.mean(0)
        u, s, vt = np.linalg.svd(pexog, 0)
        ii = np.flatnonzero(s > 1e-6)
        pexog = u[:, ii]

        fcol = model.exog[:, focus_col]
        cond_means = np.empty((len(fcol), pexog.shape[1]))
        for j in range(pexog.shape[1]):

            # Get the fitted values for column i given the other
            # columns (skip the intercept).
            y0 = pexog[:, j]

            cf = lowess(y0, fcol, frac=frac, return_sorted=False)

            cond_means[:, j] = cf

    new_exog = np.concatenate((model.exog[:, ix_nf], cond_means), axis=1)

    # Refit the model using the adjusted exog values
    klass = model.__class__
    init_kwargs = model._get_init_kwds()
    new_model = klass(model.endog, new_exog, **init_kwargs)
    new_result = new_model.fit()

    # The partial residual, with respect to l(x2) (notation of Cook 1998)
    presid = model.endog - new_result.fittedvalues
    if isinstance(model, (GLM, GEE)):
        presid *= model.family.link.deriv(new_result.fittedvalues)
    if new_exog.shape[1] > nnf:
        presid += np.dot(new_exog[:, nnf:], new_result.params[nnf:])

    return presid

Example 75

Project: OpenNFB Source File: pyeeg.py
Function: svd_entropy
def svd_entropy(X, Tau, DE, W = None):
	"""Compute SVD Entropy from either two cases below:
	1. a time series X, with lag tau and embedding dimension dE (default)
	2. a list, W, of normalized singular values of a matrix (if W is provided,
	recommend to speed up.)

	If W is None, the function will do as follows to prepare singular spectrum:

		First, computer an embedding matrix from X, Tau and DE using pyeeg 
		function embed_seq(): 
					M = embed_seq(X, Tau, DE)

		Second, use scipy.linalg function svd to decompose the embedding matrix 
		M and obtain a list of singular values:
					W = svd(M, compute_uv=0)

		At last, normalize W:
					W /= sum(W)
	
	Notes
	-------------

	To speed up, it is recommended to compute W before calling this function 
	because W may also be used by other functions whereas computing	it here 
	again will slow down.
	"""

	if W is None:
		Y = embed_seq(X, Tau, DE)
		W = svd(Y, compute_uv = 0)
		W /= sum(W) # normalize singular values

	return -1*sum(W * log(W))

Example 76

Project: qutip Source File: steadystate.py
def _steadystate_svd_dense(L, ss_args):
    """
    Find the steady state(s) of an open quantum system by solving for the
    nullspace of the Liouvillian.
    """
    ss_args['info'].pop('weight', None)
    atol = 1e-12
    rtol = 1e-12
    if settings.debug:
        logger.debug('Starting SVD solver.')
    _svd_start = time.time()
    u, s, vh = svd(L.full(), full_matrices=False)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    _svd_end = time.time()
    ss_args['info']['solution_time'] = _svd_end-_svd_start
    if ss_args['all_states']:
        rhoss_list = []
        for n in range(ns.shape[1]):
            rhoss = Qobj(vec2mat(ns[:, n]), dims=L.dims[0])
            rhoss_list.append(rhoss / rhoss.tr())
        if ss_args['return_info']:
            return rhoss_list, ss_args['info']
        else:
            return rhoss_list
    else:
        rhoss = Qobj(vec2mat(ns[:, 0]), dims=L.dims[0])
        return rhoss / rhoss.tr()

Example 77

Project: OpenNFB Source File: pyeeg.py
Function: fisher_info
def fisher_info(X, Tau, DE, W = None):
	""" Compute Fisher information of a time series from either two cases below:
	1. X, a time series, with lag Tau and embedding dimension DE (default)
	2. W, a list of normalized singular values, i.e., singular spectrum (if W is
	   provided, recommended to speed up.)

	If W is None, the function will do as follows to prepare singular spectrum:

		First, computer an embedding matrix from X, Tau and DE using pyeeg 
		function embed_seq():
			M = embed_seq(X, Tau, DE)

		Second, use scipy.linalg function svd to decompose the embedding matrix 
		M and obtain a list of singular values:
			W = svd(M, compute_uv=0)

		At last, normalize W:
			W /= sum(W)
	
	Parameters
	----------

	X
		list

		a time series. X will be used to build embedding matrix and compute 
		singular values if W or M is not provided.
	Tau
		integer

		the lag or delay when building a embedding sequence. Tau will be used 
		to build embedding matrix and compute singular values if W or M is not
		provided.
	DE
		integer

		the embedding dimension to build an embedding matrix from a given 
		series. DE will be used to build embedding matrix and compute 
		singular values if W or M is not provided.
	W
		list or array

		the set of singular values, i.e., the singular spectrum

	Returns
	-------

	FI
		integer

		Fisher information

	Notes
	-----
	To speed up, it is recommended to compute W before calling this function 
	because W may also be used by other functions whereas computing	it here 
	again will slow down.

	See Also
	--------
	embed_seq : embed a time series into a matrix
	"""

	if W is None:
		M = embed_seq(X, Tau, DE)
		W = svd(M, compute_uv = 0)
		W /= sum(W)	
	
	FI = 0
	for i in xrange(0, len(W) - 1):	# from 1 to M
		FI += ((W[i +1] - W[i]) ** 2) / (W[i])
	
	return FI

Example 78

Project: GLRM Source File: glrm.py
    def _initialize_XY(self, B, k, missing_list):
        """ Scale by ration of non-missing, SVD, append col of ones, add noise. """
        A = hstack([bi for bi in B])
        m, n = A.shape

        # normalize entries that are missing
        if self.scale: stdev = A.std(0)
        else: stdev = ones(n)
        mu = A.mean(0)
        C = sqrt(1e-2/k) # XXX may need to be adjusted for larger problems
        A = (A-mu)/stdev + C*randn(m,n)

        # SVD to get initial point
        u, s, v = svd(A, full_matrices = False)
        u, s, v = u[:,:k], diag(sqrt(s[:k])), v[:k,:]
        X0, Y0 = asarray(u.dot(s)), asarray(s.dot(v))*asarray(stdev)

        # append col of ones to X, row of zeros to Y
        X0 = hstack((X0, ones((m,1)))) + C*randn(m,k+1)
        Y0 = vstack((Y0, mu)) + C*randn(k+1,n)

        # split Y0
        ns = cumsum([bj.shape[1] for bj in B])
        if len(ns) == 1: Y0 = [Y0]
        else: Y0 = split(Y0, ns, 1)
        
        return X0, Y0

Example 79

Project: scipy Source File: linalg.py
    def time_svd(self, size, contig, module):
        if module == 'numpy':
            nl.svd(self.a)
        else:
            sl.svd(self.a)

Example 80

Project: pyscf Source File: newton_ah.py
def _effective_svd(a, tol=1e-5):
    w = numpy.linalg.svd(a)[1]
    return w[(tol<w) & (w<1-tol)]

Example 81

Project: pymanopt Source File: grassmann.py
Function: dist
    def dist(self, X, Y):
        u, s, v = svd(multiprod(multitransp(X), Y))
        s[s > 1] = 1
        s = np.arccos(s)
        return np.linalg.norm(s)

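The singular values of X'Y are the cosines of the principal angles between the two subspaces, which is why the clamp to [0, 1] and arccos appear above. A standalone sketch (skipping pymanopt's multiprod/multitransp, which handle stacks of matrices) for two lines in the plane 45 degrees apart:

import numpy as np

X = np.array([[1.0], [0.0]])                 # span of e1
Y = np.array([[1.0], [1.0]]) / np.sqrt(2)    # span of (e1 + e2)
s = np.linalg.svd(X.T @ Y, compute_uv=False)
s[s > 1] = 1                                 # guard against rounding
print(np.linalg.norm(np.arccos(s)))          # pi/4 ~ 0.7854
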
Example 82

Project: RLScore Source File: space_efficient_greedy_rls.py
    def solve_tradeoff(self, regparam):
        """Trains RLS with the given value of the regularization parameter
        
        @param regparam: value of the regularization parameter
        @type regparam: float
        """
        
        self.regparam = regparam
        X = self.X
        Y = self.Y
        
        if not hasattr(self, "bias"):
            self.bias = 0.
        
        tsize = self.size
        fsize = X.shape[0]
        assert X.shape[1] == tsize
        self.A = np.mat(np.zeros((fsize, Y.shape[1])))
        
        rp = regparam
        rpinv = 1. / rp
        
        if not self.resource_pool.has_key('subsetsize'):
            raise Exception("Parameter 'subsetsize' must be given.")
        desiredfcount = int(self.resource_pool['subsetsize'])
        if not fsize >= desiredfcount:
            raise Exception('The overall number of features ' + str(fsize) + ' is smaller than the desired number ' + str(desiredfcount) + ' of features to be selected.')
        
        #Bias
        bias_slice = np.sqrt(self.bias)*np.mat(np.ones((1,X.shape[1]),dtype=np.float64))
        cv = bias_slice
        ca = rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv)
        
        self.dualvec = rpinv * Y - cv.T * rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv * Y)
        
        diagG = []
        for i in range(tsize):
            diagGi = rpinv - cv.T[i, 0] * ca[0, i]
            diagG.append(diagGi)
        diagG = np.mat(diagG).T
        
        #listX = []
        #for ci in range(fsize):
        #    listX.append(X[ci])
        
        U, S, VT = la.svd(cv, full_matrices = False)
        U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
        Omega = 1. / (S * S + rp) - rpinv
        
        self.selected = []
        
        blocksize = 1000
        blocks = []
        blockcount = 0
        while True:
            startind = blockcount * blocksize
            if (blockcount + 1) * blocksize < fsize:
                print blockcount, fsize, (blockcount + 1) * blocksize
                endind = (blockcount + 1) * blocksize
                blocks.append(range(startind, endind))
                blockcount += 1
            else:
                blocks.append(range(startind, fsize))
                blockcount += 1
                break
        
        
        currentfcount = 0
        self.performances = []
        while currentfcount < desiredfcount:
            
            if not self.measure is None:
                self.bestlooperf = None
            else:
                self.bestlooperf = float('inf')
            
            
            looperf = np.mat(np.zeros((1, fsize)))
            
            for blockind in range(blockcount):
                
                block = blocks[blockind]
                
                tempmatrix = np.mat(np.zeros((tsize, len(block))))
                temp2 = np.mat(np.zeros((tsize, len(block))))
                
                X_block = X[block]
                GXT_block = VT.T * np.multiply(Omega.T, (VT * X_block.T)) + rpinv * X_block.T
                
                np.multiply(X_block.T, GXT_block, tempmatrix)
                XGXTdiag = sum(tempmatrix, axis = 0)
                
                XGXTdiag = 1. / (1. + XGXTdiag)
                np.multiply(GXT_block, XGXTdiag, tempmatrix)
                
                tempvec1 = np.multiply((X_block * self.dualvec).T, XGXTdiag)
                np.multiply(GXT_block, tempvec1, temp2)
                np.subtract(self.dualvec, temp2, temp2)
                
                np.multiply(tempmatrix, GXT_block, tempmatrix)
                np.subtract(diagG, tempmatrix, tempmatrix)
                np.divide(1, tempmatrix, tempmatrix)
                np.multiply(tempmatrix, temp2, tempmatrix)
                
                
                if not self.measure is None:
                    np.subtract(Y, tempmatrix, tempmatrix)
                    np.multiply(temp2, 0, temp2)
                    np.add(temp2, Y, temp2)
                    looperf_block = self.measure.multiTaskPerformance(temp2, tempmatrix)
                    looperf_block = np.mat(looperf_block)
                else:
                    np.multiply(tempmatrix, tempmatrix, tempmatrix)
                    looperf_block = sum(tempmatrix, axis = 0)
                looperf[:, block] = looperf_block
                
            if not self.measure is None:
                if self.measure.isErrorMeasure():
                    looperf[0, self.selected] = float('inf')
                    bestcind = np.argmin(looperf)
                    self.bestlooperf = np.amin(looperf)
                else:
                    looperf[0, self.selected] = - float('inf')
                    bestcind = np.argmax(looperf)
                    self.bestlooperf = np.amax(looperf)
            else:
                looperf[0, self.selected] = float('inf')
                bestcind = np.argmin(looperf)
                self.bestlooperf = np.amin(looperf)
                
            self.looperf = looperf
            
            self.performances.append(self.bestlooperf)
            #cv = listX[bestcind]
            cv = X[bestcind]
            #GXT_bci = GXT[:, bestcind]
            GXT_bci = VT.T * np.multiply(Omega.T, (VT * cv.T)) + rpinv * cv.T
            ca = GXT_bci * (1. / (1. + cv * GXT_bci))
            self.dualvec = self.dualvec - ca * (cv * self.dualvec)
            diagG = diagG - np.multiply(ca, GXT_bci)
            #GXT = GXT - ca * (cv * GXT)
            self.selected.append(bestcind)
            X_sel = X[self.selected]
            if isinstance(X_sel, sp.base.spmatrix):
                X_sel = X_sel.todense()
            U, S, VT = la.svd(np.vstack([X_sel, bias_slice]), full_matrices = False)
            U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
            #print U.shape, S.shape, VT.shape
            Omega = 1. / (np.multiply(S, S) + rp) - rpinv
            #print self.selected
            #print self.performances
            currentfcount += 1
            
            #Linear predictor with bias
            self.A[self.selected] = X[self.selected] * self.dualvec
            self.b = bias_slice * self.dualvec
            
            self.callback()
            #print who(locals())
        self.finished()
        self.A[self.selected] = X[self.selected] * self.dualvec
        self.b = bias_slice * self.dualvec
        self.results['selected_features'] = self.selected
        self.results['GreedyRLS_LOO_performances'] = self.performances
        #self.results['predictor'] = self.getModel()
        self.predictor = predictor.LinearPredictor(self.A, self.b)

Example 83

Project: ray Source File: linalg.py
@ray.remote(num_return_vals=3)
def svd(a):
  return np.linalg.svd(a)

Example 84

Project: nesoni Source File: graph_layout.py
Function: iterate
    def iterate(self):
        # Improve positions
        max_ns = float(len(self.links)) / numpy.arange(1,len(self.links)+1)
        random.shuffle(max_ns)
        
        roots = numpy.arange(len(self.links))
        random.shuffle(roots)

        #Working space
        idents = numpy.empty(len(self.links), 'int')
        signs = numpy.empty(len(self.links), 'int')
        distances = numpy.empty(len(self.links), 'float64')
        travels = numpy.empty(len(self.links), 'int')

        for item in self.links:
            random.shuffle(item)

        for i in xrange(len(self.links)):
            self._improve_layout(roots[i], max_ns[i], self.update_amount,  idents,signs,distances,travels)
        
        
        # Lay out nicely
        
        components = [ ]
        used = numpy.zeros(len(self.links), 'bool')

        for i in xrange(len(self.links)):
            if used[i]: continue
            
            component = [ i ]
            used[i] = True
            
            i = 0
            while i < len(component):
                for ident, sign, travel in self.links[component[i]]:
                    if not used[ident]:
                       used[ident] = True
                       component.append(ident)
                i += 1
            
            components.append(numpy.array(component))
            
        components.sort(key=lambda x: len(x), reverse=True)
        
                
        max_y = numpy.sqrt(
                   numpy.maximum.reduce(self.positions[:,0])*
                   numpy.maximum.reduce(self.positions[:,1]) ) * 0.75
        cur_y = 0.0
        left_x = 0.0
        max_x = 0.0
        for component in components:
            cpos = self.positions[component]     
            cpos = cpos - numpy.average(cpos,0)[None,:]
        
            if len(component) > 1:
                U, s, Vh = linalg.svd(cpos, full_matrices=0)
                for i in xrange(len(s)):
                    if numpy.sum(Vh[i,i]) < 0.0: s[i] *= -1        
                cpos = U * s[None,:]
            
            cpos[:,0] -= numpy.minimum.reduce(cpos[:,0])
            cpos[:,1] -= numpy.minimum.reduce(cpos[:,1])
            width = numpy.maximum.reduce(cpos[:,0])
            height = numpy.maximum.reduce(cpos[:,1])
            pad = max(width,1.0) * 0.1
            cpos[:,0] += pad
            cpos[:,1] += pad
            width += pad * 2
            height += pad * 2
            
            if height > max_y: 
                max_y = height
                
            if cur_y+height > max_y:
                left_x = max_x
                cur_y = 0
                        
            cpos[:,0] += left_x
            cpos[:,1] += cur_y
            cur_y += height
            max_x = max(max_x, left_x+width)
            
            self.positions[component] = cpos
        
        self.update_amount *= 1.0 - 1.0/1000

Example 85

Project: socialsent Source File: embedding_transformer.py
Function: orthogonalize
def orthogonalize(Q):
    # np.linalg.svd already returns the right factor transposed (V here is
    # V'), so the polar factor of Q, its nearest orthogonal matrix, is
    # U.dot(V) rather than U.dot(V.T).
    U, S, V = np.linalg.svd(Q)
    return U.dot(V)

Example 86

Project: fast-rcnn Source File: compress_net.py
def compress_weights(W, l):
    """Compress the weight matrix W of an inner product (fully connected) layer
    using truncated SVD.

    Parameters:
    W: N x M weights matrix
    l: number of singular values to retain

    Returns:
    Ul, L: matrices such that W \approx Ul*L
    """

    # numpy doesn't seem to have a fast truncated SVD algorithm...
    # this could be faster
    U, s, V = np.linalg.svd(W, full_matrices=False)

    Ul = U[:, :l]
    sl = s[:l]
    Vl = V[:l, :]

    L = np.dot(np.diag(sl), Vl)
    return Ul, L

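A hedged usage sketch with made-up dimensions: the truncated factors multiply back to the best rank-l approximation of W in the Frobenius norm (Eckart-Young), which is the basis of this compression.

import numpy as np

W = np.random.randn(256, 128)
l = 16
U, s, V = np.linalg.svd(W, full_matrices=False)
Ul, L = U[:, :l], np.dot(np.diag(s[:l]), V[:l, :])
print(Ul.shape, L.shape)               # (256, 16) (16, 128)
err = np.linalg.norm(W - Ul @ L) / np.linalg.norm(W)
print(err < 1.0)                       # True, though random W compresses poorly
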
Example 87

Project: orange Source File: orngCA.py
Function: calculate_svd
    def __calculateSVD(self):
        """
            Computes generalized SVD...
            
            This function is used to calculate decomposition A = N * D_mi * M' , where N' * diag(rowSums) * N = I and
            M' * diag(colSums) * M = I. This decomposition is calculated in 4 steps:
            i) B = diag(rowSums)^1/2 * A * diag(colSums)^1/2
            ii) find the ordinary SVD of B: B = U * D * V'
            iii) N = diag(rowSums)^-1/2 * U
                M = diag(colSums)^-1/2 * V
                D_mi = D
            iv) A= N * D_mi * M'
            
            returns (N, D_mi, M)            
        """
        
        a = self.__corr - self.__rowSums * self.__colSums
        b = diag(sqrt((1. / self.__rowSums).reshape(1,-1).tolist()[0])) * a * diag(sqrt((1. / self.__colSums).tolist()[0]))
        u, d, v = numpy.linalg.svd(b, 0)
        N = diag(sqrt(self.__rowSums.reshape(1, -1).tolist()[0])) * u
        M = diag(sqrt(self.__colSums.tolist()[0])) * transpose(v)
        d = diag(d.tolist())
        
        return (N, d, M)       

Example 88

Project: orange Source File: orngProjectionPursuit.py
Function: sqrtm
def sqrtm(mat):
    """ Retruns the square root of the matrix mat """
    U, S, V = numpy.linalg.svd(mat)
    D = numpy.diag(numpy.sqrt(S))
    return numpy.dot(numpy.dot(U,D),V)

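A caveat worth noting: this recovers a genuine square root only when mat is symmetric positive semidefinite, where the left and right singular vectors coincide. A quick check under that assumption:

import numpy as np

B = np.random.randn(4, 4)
A = B @ B.T                            # symmetric positive semidefinite
U, S, V = np.linalg.svd(A)             # V here is already V', per numpy
R = np.dot(np.dot(U, np.diag(np.sqrt(S))), V)
print(np.allclose(R @ R, A))           # True
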
Example 89

Project: RLScore Source File: space_efficient_greedy_rls.py
    def solve_weak(self, regparam):
        
        X = self.X
        Y = self.Y
        
        tsize = self.size
        fsize = X.shape[0]
        assert X.shape[1] == tsize
        self.A = np.mat(np.zeros((fsize, Y.shape[1])))
        
        rp = regparam
        rpinv = 1. / rp
        
        desiredfcount = self.desiredfcount
        if not fsize >= desiredfcount:
            raise Exception('The overall number of features ' + str(fsize) + ' is smaller than the desired number ' + str(desiredfcount) + ' of features to be selected.')
        
        #Bias
        bias_slice = np.sqrt(self.bias)*np.mat(np.ones((1,X.shape[1]),dtype=np.float64))
        cv = np.sqrt(self.bias)*np.mat(np.ones((1, tsize)))
        ca = rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv)
        
        self.dualvec = rpinv * Y - cv.T * rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv * Y)
        
        diagG = []
        for i in range(tsize):
            diagGi = rpinv - cv.T[i, 0] * ca[0, i]
            diagG.append(diagGi)
        diagG = np.mat(diagG).T
        
        U, S, VT = la.svd(cv, full_matrices = False)
        U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
        Omega = 1. / (S * S + rp) - rpinv
        
        self.selected = []
        notselected = set(range(fsize))
        currentfcount = 0
        self.performances = []
        while currentfcount < desiredfcount:
            
            if not self.measure is None:
                bestlooperf = None
            else:
                bestlooperf = float('inf')
            
            X_s = X[self.selected]
            
            self.looperf = []
            #sample_60 = pyrandom.sample(notselected, len(notselected))
            #sample_60 = sorted(sample_60)
            #print sample_60
            sample_60 = pyrandom.sample(notselected, 60)
            for ci in sample_60:
                cv = X[ci]
                GXT_ci = VT.T * np.multiply(Omega.T, (VT * cv.T)) + rpinv * cv.T
                ca = GXT_ci * (1. / (1. + cv * GXT_ci))
                updA = self.dualvec - ca * (cv * self.dualvec)
                invupddiagG = 1. / (diagG - np.multiply(ca, GXT_ci))
                
                if not self.measure is None:
                    loopred = Y - np.multiply(invupddiagG, updA)
                    looperf_i = self.measure.multiOutputPerformance(Y, loopred)
                    if bestlooperf is None:
                        bestlooperf = looperf_i
                        bestcind = ci
                    if self.measure.comparePerformances(looperf_i, bestlooperf) > 0:
                        bestcind = ci
                        bestlooperf = looperf_i
                else:
                    #This default squared performance is a bit faster to compute than the one loaded separately.
                    loodiff = np.multiply(invupddiagG, updA)
                    looperf_i = np.mean(np.sum(np.multiply(loodiff, loodiff), axis = 0))
                    if looperf_i < bestlooperf:
                        bestcind = ci
                        bestlooperf = looperf_i
                self.looperf.append(looperf_i)
            self.looperf = np.mat(self.looperf)
            
            self.bestlooperf = bestlooperf
            print bestlooperf
            self.performances.append(bestlooperf)
            cv = X[bestcind]
            GXT_bci = VT.T * np.multiply(Omega.T, (VT * cv.T)) + rpinv * cv.T
            ca = GXT_bci * (1. / (1. + cv * GXT_bci))
            self.dualvec = self.dualvec - ca * (cv * self.dualvec)
            diagG = diagG - np.multiply(ca, GXT_bci)
            #GXT = GXT - ca * (cv * GXT)
            self.selected.append(bestcind)
            notselected.remove(bestcind)
            X_sel = X[self.selected]
            if isinstance(X_sel, sp.base.spmatrix):
                X_sel = X_sel.todense()
            U, S, VT = la.svd(np.vstack([X_sel, bias_slice]), full_matrices = False)
            U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
            Omega = 1. / (np.multiply(S, S) + rp) - rpinv
            currentfcount += 1
            
            #Linear predictor with bias
            self.A[self.selected] = X[self.selected] * self.dualvec
            self.b = bias_slice * self.dualvec
            
            
            if not self.callbackfun is None:
                self.callbackfun.callback(self)
        if not self.callbackfun is None:
            self.callbackfun.finished(self)
        self.A[self.selected] = X[self.selected] * self.dualvec
        self.b = bias_slice * self.dualvec
        self.results['selected_features'] = self.selected
        self.results['GreedyRLS_LOO_performances'] = self.performances
        self.predictor = predictor.LinearPredictor(self.A, self.b)

Example 90

Project: lfd Source File: tps.py
Function: solve_eqp1
def solve_eqp1(H, f, A):
    """solve equality-constrained qp
    min .5 tr(x'Hx) + tr(f'x)
    s.t. Ax = 0
    """    
    n_vars = H.shape[0]
    assert H.shape[1] == n_vars
    assert f.shape[0] == n_vars
    assert A.shape[1] == n_vars
    n_cnts = A.shape[0]
    
    _u,_s,_vh = np.linalg.svd(A.T)
    N = _u[:,n_cnts:]
    # columns of N span the null space
    
    # x = Nz
    # then problem becomes unconstrained minimization .5 z'N'HNz + z'N'f
    # N'HNz + N'f = 0
    L = N.T.dot(H.dot(N))
    R = -N.T.dot(f)
    z = np.linalg.solve(L, R)
    x = N.dot(z)
    
    return x

Example 91

Project: orange Source File: correspondence.py
Function: calculate_svd
    def __calculate_svd(self):
        """ Computes generalized SVD ...
            
        This function is used to calculate decomposition A = N * D_mi * M' , where N' * diag(row_sums) * N = I and
        M' * diag(col_sums) * M = I. This decomposition is calculated in 4 steps:
        
            i)   B = diag(row_sums)^1/2 * A * diag(col_sums)^1/2
            ii)  find the ordinary SVD of B: B = U * D * V'
            iii) N = diag(row_sums)^-1/2 * U
                 M = diag(col_sums)^-1/2 * V
                 D_mi = D
            iv)  A = N * D_mi * M'
        
        Returns (N, D_mi, M)
                    
        """
        
        a = self.__corr - self.__row_sums * self.__col_sums
        b = numpy.diag(numpy.sqrt((1. / self.__row_sums).reshape(1,-1).tolist()[0])) * a * numpy.diag(numpy.sqrt((1. / self.__col_sums).tolist()[0]))
        u, d, v = numpy.linalg.svd(b, 0)
        N = numpy.diag(numpy.sqrt(self.__row_sums.reshape(1, -1).tolist()[0])) * u
        M = numpy.diag(numpy.sqrt(self.__col_sums.tolist()[0])) * numpy.transpose(v)
        d = numpy.diag(d.tolist())
        
        return (N, d, M)       

Example 92

Project: orange Source File: mds.py
    def torgerson(self):
        """
        Run the Torgerson algorithm that computes an initial analytical
        solution of the problem.
        
        """
        # Torgerson's initial approximation
        O = numpy.array([m for m in self.distances])
        
##        #B = matrixmultiply(O,O)
##        # bug!? B = O**2
##        B = dot(O,O)
##        # double-center B
##        cavg = sum(B, axis=0)/(self.n+0.0)      # column sum
##        ravg = sum(B, axis=1)/(self.n+0.0)    # row sum
##        tavg = sum(cavg)/(self.n+0.0)   # total sum
##        # B[row][column]
##        for i in xrange(self.n):
##            for j in xrange(self.n):
##                B[i,j] += -cavg[j]-ravg[i]
##        B = -0.5*(B+tavg)

        # B = double-center O**2 !!!
        J = numpy.identity(self.n) - (1.0 / self.n)
        B = -0.5 * numpy.dot(numpy.dot(J, O**2), J)
        
        # SVD-solve B = ULU'
        #(U,L,V) = singular_value_decomposition(B)
        (U,L,V)=svd(B)
        # X = U(L^0.5)
        # # self.X = matrixmultiply(U,identity(self.n)*sqrt(L))
        # X is n-dimensional, we take the two dimensions with the largest singular values
        idx = numpy.argsort(L)[-self.dim:].tolist()
        idx.reverse()
        
        Lt = numpy.take(L,idx)   # take those singular values
        Ut = numpy.take(U,idx,axis=1) # take those columns that are enabled
        Dt = numpy.identity(self.dim)*numpy.sqrt(Lt)  # make a diagonal matrix with square-rooted values
        self.points = Orange.core.FloatListList(numpy.dot(Ut,Dt))
        self.freshD = 0

Example 93

Project: lfd Source File: tps.py
Function: solve_eqp1
def solve_eqp1(H, f, A, ret_factorization=False):
    """solve equality-constrained qp
    min .5 tr(x'Hx) + tr(f'x)
    s.t. Ax = 0
    """    
    n_vars = H.shape[0]
    assert H.shape[1] == n_vars
    assert f.shape[0] == n_vars
    assert A.shape[1] == n_vars
    n_cnts = A.shape[0]
    
    _u,_s,_vh = np.linalg.svd(A.T)
    N = _u[:,n_cnts:]
    # columns of N span the null space
    
    # x = Nz
    # then problem becomes unconstrained minimization .5 z'N'HNz + z'N'f
    # N'HNz + N'f = 0
    L = N.T.dot(H.dot(N))
    R = -N.T.dot(f)
    z = np.linalg.solve(L, R)
    x = N.dot(z)
    
    if ret_factorization:
        return x, (N, z)
    return x

Example 94

Project: cameo Source File: util.py
Function: null_space
def nullspace(A, atol=1e-13, rtol=0):
    """Compute an approximate basis for the nullspace of A.

    The algorithm used by this function is based on the singular value
    decomposition of `A`.

    Parameters
    ----------
    A : ndarray
        A should be at most 2-D.  A 1-D array with length k will be treated
        as a 2-D with shape (1, k)
    atol : float
        The absolute tolerance for a zero singular value.  Singular values
        smaller than `atol` are considered to be zero.
    rtol : float
        The relative tolerance.  Singular values less than rtol*smax are
        considered to be zero, where smax is the largest singular value.

    If both `atol` and `rtol` are positive, the combined tolerance is the
    maximum of the two; that is::
        tol = max(atol, rtol * smax)
    Singular values smaller than `tol` are considered to be zero.

    Return value
    ------------
    ns : ndarray
        If `A` is an array with shape (m, k), then `ns` will be an array
        with shape (k, n), where n is the estimated dimension of the
        nullspace of `A`.  The columns of `ns` are a basis for the
        nullspace; each element in numpy.dot(A, ns) will be approximately
        zero.
    """

    A = np.atleast_2d(A)
    u, s, vh = svd(A)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    return ns

Example 95

Project: pyvision Source File: Perspective.py
def PerspectiveFromPointsOld(source, dest, new_size):
    '''
    Python/SciPy implementation which finds a perspective
    transform between points.
    
    Most users should use PerspectiveFromPoints instead.  This method
    may be eliminated in the future.
    '''
    assert len(source) == len(dest)
    
    src_nrm = pv.AffineNormalizePoints(source)
    source = src_nrm.transformPoints(source)
    dst_nrm = pv.AffineNormalizePoints(dest)
    dest   = dst_nrm.transformPoints(dest)
    
    A = []
    for i in range(len(source)):
        src = source[i]
        dst = dest[i]
        
        # See Hartley and Zisserman Ch. 4.1, 4.1.1, 4.4.4
        row1 = [0.0,0.0,0.0,-dst.w*src.x,-dst.w*src.y,-dst.w*src.w,dst.y*src.x,dst.y*src.y,dst.y*src.w]
        row2 = [dst.w*src.x,dst.w*src.y,dst.w*src.w,0.0,0.0,0.0,-dst.x*src.x,-dst.x*src.y,-dst.x*src.w]
        #row3 = [-dst.y*src.x,-dst.y*src.y,-dst.y*src.w,dst.x*src.x,dst.x*src.y,dst.x*src.w,0.0,0.0,0.0]
        A.append(row1)
        A.append(row2)
        #A.append(row3)
    A = np.array(A)
    U,D,Vt = la.svd(A)
    H = Vt[8,:].reshape(3,3)

    matrix = np.dot(dst_nrm.inverse,np.dot(H,src_nrm.matrix))

    return PerspectiveTransform(matrix,new_size)

Example 96

Project: popupcad Source File: constraint_system.py
    def constrained_shift(self, items):
        vertexdict = self.vertex_dict()
        ini = self.ini(vertexdict)
        if self.generator.empty:
            for vertex, dxdy in items:
                vertex.shift(dxdy)
        else:
            variables = self.generator.variables
            j = self.generator.j
            vertexdict = self.generator.vertex_dict
    
            dx_dict = {}
            for vertex, dxdy in items:
                key_x, key_y = vertex.constraints_ref().p()[:2]
                dx_dict[key_x] = dxdy[0]
                dx_dict[key_y] = dxdy[1]
    
            dx = numpy.zeros(len(variables))
            for key in dx_dict:
                if key in variables:
                    dx[variables.index(key)] = dx_dict[key]
                else:
                    vertexdict[key].setsymbol(key, ini[key] + dx_dict[key])
    
            x0 = numpy.array(self.inilist(variables, ini))
            Jnum = j(x0)
            L, S, R = numpy.linalg.svd(Jnum)
            aS = abs(S)
            m = (aS > (aS[0] / 100)).sum()
    
            rnull = R[m:]
            lnull = ((rnull**2).sum(1))**.5
            comp = ((rnull * dx).sum(1)) / lnull
            x_motion = (comp * rnull.T).sum(1)
    
            x = x0 + x_motion
    
            for ii, variable in enumerate(variables):
                vertexdict[variable].setsymbol(variable, x[ii])

Example 97

Project: pymanopt Source File: psd.py
Function: dist
    def dist(self, U, V):
        S, _, D = la.svd(V.T.conj().dot(U))
        E = U - V.dot(S).dot(D)  # numpy's svd returns D.H
        return self.inner(None, E, E) / 2

Example 98

Project: lowrank-highwaynetwork Source File: highwaylrdiagdropoutbn_layer.py
    def setup_quasi_ortho_init(self):
    	ortho_init = OrthogonalInitializer()
    	ortho_init.rand = self.init.rand
    	
    	# Initialize low-rank decomposition matrices
    	w_h = ortho_init.sample((self.input_dim, self.input_dim))
    	w_t = ortho_init.sample((self.input_dim, self.input_dim))
    	w_h_u, w_h_sv, w_h_v = np.linalg.svd(w_h)
    	w_t_u, w_t_sv, w_t_v = np.linalg.svd(w_t)
    	h_sqsv_truncated = np.diag(np.sqrt(w_h_sv[:self.projection_dim]))
    	t_sqsv_truncated = np.diag(np.sqrt(w_t_sv[:self.projection_dim]))
    	w_h_u_truncated = w_h_u[:, :self.projection_dim]
    	w_t_u_truncated = w_t_u[:, :self.projection_dim]
    	w_h_v_truncated = w_h_v[:self.projection_dim, :]
    	w_t_v_truncated = w_t_v[:self.projection_dim, :]
    	w_hl = np.dot(w_h_u_truncated, h_sqsv_truncated)
    	w_tl = np.dot(w_t_u_truncated, t_sqsv_truncated)
    	w_hr = np.dot(h_sqsv_truncated, w_h_v_truncated)
    	w_tr = np.dot(t_sqsv_truncated, w_t_v_truncated)
    	
    	# Initialize diagonal matrix
    	test_vec = self.init.rand.normal(0.0, 1.0, (self.input_dim,))
    	err_h = np.dot(test_vec, w_h) - np.dot(test_vec, w_hl).dot(w_hr)
    	err_t = np.dot(test_vec, w_t) - np.dot(test_vec, w_tl).dot(w_tr)
    	d_h = err_h / (test_vec + self.epsilon)
    	d_t = err_t / (test_vec + self.epsilon)
    	
    	# Correct for dropout
    	w_hl /= (1.0 - self.d_p_0)
    	w_tl /= (1.0 - self.d_p_0)
    	d_h  /= (1.0 - self.d_p_0)
    	d_t  /= (1.0 - self.d_p_0)
    	w_hr /= (1.0 - self.d_p_1)
    	w_tr /= (1.0 - self.d_p_1)
    	
    	# Set values
    	self.W_hl.set_value(w_hl.astype(FLOATX))
    	self.W_tl.set_value(w_tl.astype(FLOATX))
    	self.W_hr.set_value(w_hr.astype(FLOATX))
    	self.W_tr.set_value(w_tr.astype(FLOATX))
    	self.D_h.set_value(d_h.astype(FLOATX))
    	self.D_t.set_value(d_t.astype(FLOATX))

Example 99

Project: lfd Source File: tps.py
Function: solve_eqp1
def solve_eqp1(H, f, A):
    """solve equality-constrained qp
    min .5 tr(x'Hx) + tr(f'x)
    s.t. Ax = 0
    """    
    # print f.shape
    n_vars = H.shape[0]
    assert H.shape[1] == n_vars
    assert f.shape[0] == n_vars
    assert A.shape[1] == n_vars
    n_cnts = A.shape[0]
    
    _u,_s,_vh = np.linalg.svd(A.T)
    N = _u[:,n_cnts:]
    # columns of N span the null space
    
    # x = Nz
    # then problem becomes unconstrained minimization .5 z'N'HNz + z'N'f
    # N'HNz + N'f = 0
    z = np.linalg.solve(N.T.dot(H.dot(N)), -N.T.dot(f))
    x = N.dot(z)
    
    return x

Example 100

Project: pyvision Source File: PCA.py
Function: train
    def train(self, drop_front=None, number=None, energy=None): 
        ''' Compute the PCA basis vectors using the SVD '''
        self.logger.info("Computing PCA basis vectors.")
        
        # One feature per row
        features = numpy.array(self.features)
        
        if self.center_points:
            self.center = features.mean(axis=0)
            for i in range(features.shape[0]):
                features[i,:] -= self.center
                #show(features[i,:])
        
        features = features.transpose()

        u,d,_ = svd(features,full_matrices=0)
        if drop_front:
            u = u[:,drop_front:]
            d = d[drop_front:]
            
        if number:
            u = u[:,:number]
            d = d[:number]
        if energy:
            # compute the sum of energy
            s = 0.0
            for each in d:
                s += each*each
            cutoff = energy * s
            
            # compute the cutoff
            s = 0.0
            for i in range(len(d)):
                each = d[i]
                s += each*each
                if s > cutoff:
                    break
            
            u = u[:,:i]
            d = d[:i]   

        self.basis = u.transpose()
        self.values = d
        
        # Remove features because they are no longer needed and 
        # they take up a lot of space.
        del self.features