numpy.tensordot

Here are examples of the Python API numpy.tensordot taken from open source projects.

34 Examples

Example 1

Project: PyAbel Source File: dasch.py
def dasch_transform(IM, D):
    """Inverse Abel transform using a given D-operator basis matrix.

    Parameters
    ----------
    IM : 2D numpy array
        image data
    D : 2D numpy array 
        D-operator basis shape (cols, cols) 

    Returns
    -------
    inv_IM : 2D numpy array
        inverse Abel transform according to basis operator D 
    """
    # one-line Abel transform - dot product of each row of IM with D
    return np.tensordot(IM, D, axes=(1, 1))
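
A quick way to read the axes=(1, 1) argument: it contracts axis 1 of IM with axis 1 of D, so every row of IM is dotted with every row of D. A minimal sketch with made-up shapes, not part of PyAbel:

import numpy as np

IM = np.random.randn(4, 5)   # hypothetical image: 4 rows, 5 columns
D = np.random.randn(5, 5)    # basis operator of shape (cols, cols)

out = np.tensordot(IM, D, axes=(1, 1))
# out[i, j] = sum_k IM[i, k] * D[j, k], i.e. the same as IM @ D.T
assert np.allclose(out, IM @ D.T)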

Example 2

Project: pymanopt Source File: testing.py
def rhess(cost, proj):
    """
    Generates the Riemannian Hessian of the cost. Specifically, rhess(cost,
    proj)(x, u) is the directional derivative of cost at point x on the
    manifold, in direction u.
    cost and proj must be defined using autograd.numpy.
    See http://sites.uclouvain.be/absil/2013-01/Weingarten_07PA_techrep.pdf
    for some discussion.
    Currently this is correct but not efficient, because of the Jacobian-
    vector product. Hopefully this can be fixed in the future.
    """
    return lambda x, u: proj(x, np.tensordot(jacobian(rgrad(cost, proj))(x), u,
                                             axes=u.ndim))

Example 3

Project: pymanopt Source File: testing.py
Function: ehess2rhess
def ehess2rhess(proj):
    """
    Generates an ehess2rhess function for a manifold which is a sub-manifold
    of Euclidean space.
    ehess2rhess(proj)(x, egrad, ehess, u) converts the Euclidean Hessian ehess
    at the point x to a Riemannian Hessian, that is, the directional
    derivative of the gradient in the direction u.
    proj must be defined using autograd.numpy.
    This will not be an efficient implementation because of missing support
    for efficient jacobian-vector products in autograd.
    """
    # Differentiate proj w.r.t. the first argument
    d_proj = jacobian(proj)
    return lambda x, egrad, ehess, u: proj(x, ehess +
                                           np.tensordot(d_proj(x, egrad), u,
                                                        axes=u.ndim))
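
In both of these pymanopt helpers, passing an integer to axes sums over the last u.ndim axes of the first argument and the first u.ndim axes of the second, which is exactly the Jacobian-vector product described in the docstrings. A minimal sketch of that contraction with hypothetical shapes (the autograd machinery is omitted):

import numpy as np

# Suppose the Jacobian of a map between (2, 3)-arrays, so shape (2, 3, 2, 3).
J = np.random.randn(2, 3, 2, 3)
u = np.random.randn(2, 3)

# axes=u.ndim contracts the trailing u.ndim axes of J against all of u.
jvp = np.tensordot(J, u, axes=u.ndim)
assert jvp.shape == (2, 3)
assert np.allclose(jvp, np.einsum('ijkl,kl->ij', J, u))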

Example 4

Project: pymanopt Source File: test_manifold_positive_definite.py
Function: test_inner
    def test_inner(self):
        man = self.man
        k = self.k
        n = self.n
        x = man.rand()
        a, b = rnd.randn(2, k, n, n)
        np.testing.assert_almost_equal(np.tensordot(a, b, axes=a.ndim),
                                       man.inner(x, multiprod(x, a),
                                                 multiprod(x, b)))
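
Here axes=a.ndim requests a full contraction: every axis of a is summed against the corresponding axis of b, yielding a scalar. A standalone check of that identity, independent of pymanopt:

import numpy as np

a = np.random.randn(2, 3, 3)
b = np.random.randn(2, 3, 3)
# Full contraction is the Frobenius-style inner product sum(a * b).
assert np.allclose(np.tensordot(a, b, axes=a.ndim), np.sum(a * b))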

Example 5

Project: pymanopt Source File: test_manifold_sphere.py
Function: test_dist
    def test_dist(self):
        s = self.man
        x = s.rand()
        y = s.rand()
        correct_dist = np.arccos(np.tensordot(x, y))
        np.testing.assert_almost_equal(correct_dist, s.dist(x, y))

Example 6

Project: pymanopt Source File: test_manifold_sphere.py
Function: test_randvec
    def test_randvec(self):
        # Just make sure that things generated are in the tangent space and
        # that if you generate two they are not equal.
        s = self.man
        x = s.rand()
        u = s.randvec(x)
        v = s.randvec(x)
        np_testing.assert_almost_equal(np.tensordot(x, u), 0)

        assert np.linalg.norm(u - v) > 1e-3

Example 7

Project: dolo Source File: tensor.py
Function: multi_dot
def multidot(ten,mats):
    '''
    Implements the tensor-times-matrices operation.
    If the last dimensions of ten represent a multilinear operator of the type
    [X1,...,Xk] -> B[X1,...,Xk], and mats contains matrices or vectors
    [A1,...,Ak], the function returns an array representing the operator
    [X1,...,Xk] -> B[A1 X1,...,Ak Xk].
    '''
    resp = ten
    n_d = ten.ndim
    n_m = len(mats)
    for i in range(n_m):
        #resp = np.tensordot( resp, mats[i], (n_d-n_m+i-1,0) )
        resp = np.tensordot( resp, mats[i], (n_d-n_m,0) )
    return resp
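
The fixed axis index n_d - n_m works because each tensordot removes the contracted axis and appends the new one at the end of the result. A hypothetical check of multidot against an explicit einsum contraction:

import numpy as np

B = np.random.randn(2, 3, 4)
A1 = np.random.randn(3, 5)
A2 = np.random.randn(4, 6)

out = multidot(B, [A1, A2])
assert out.shape == (2, 5, 6)
# out[i, m, n] = sum_{j, k} B[i, j, k] * A1[j, m] * A2[k, n]
assert np.allclose(out, np.einsum('ijk,jm,kn->imn', B, A1, A2))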

Example 8

Project: dolo Source File: tensor.py
def sdot( U, V ):
    '''
    Computes the tensor product reducing the last dimension of U with the
    first dimension of V. For matrices, it is equal to the regular matrix product.
    '''
    nu = U.ndim
    #nv = V.ndim
    return np.tensordot( U, V, axes=(nu-1,0) )
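
For 2D inputs this reduces to the ordinary matrix product, as the docstring says. A quick check with made-up shapes:

import numpy as np

U = np.random.randn(3, 4)
V = np.random.randn(4, 5)
assert np.allclose(sdot(U, V), U @ V)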

Example 9

Project: polara Source File: models.py
    def slice_recommendations(self, test_data, shape, start, end):
        test_tensor_unfolded, slice_idx = self.get_test_tensor(test_data, shape, start, end)
        num_users = end - start
        num_items = shape[1]
        num_fdbks = shape[2]
        v = self._items_factors
        w = self._feedback_factors

        # assume that w.shape[1] < v.shape[1] (allows for more efficient calculations)
        scores = test_tensor_unfolded.dot(w).reshape(num_users, num_items, w.shape[1])
        scores = np.tensordot(scores, v, axes=(1, 0))
        scores = np.tensordot(np.tensordot(scores, v, axes=(2, 1)), w, axes=(1, 1))
        scores = self.flatten_scores(scores, self.flattener)
        return scores, slice_idx

Example 10

Project: shapelets Source File: conv.py
def psfMatrix(gl,gamma,alpha,beta,lmax,mmax,nmax,mode='hermite'):
    """Compute the PSF matrix as defined in Refregier and Bacon 2003 Section 3.2
    gl: shapelet coefficients of the PSF
    gamma, alpha, beta: scale factors (float)
    nmax: number of coefficients used to represent the convolved image, 2 element list, if 1 element/integer assume it is square
    mmax: number of coefficients used to represent the unconvolved image, 2 element list, if 1 element/integer assume it is square
    lmax: number of coefficients used to represent the PSF, 2 element list, if 1 element/integer assume it is square
    mode: hermite or laguerre
    """
    if mode.startswith('herm'):
        if type(nmax)==int: nmax=[nmax,nmax]
        if type(mmax)==int: mmax=[mmax,mmax]
        if type(lmax)==int: lmax=[lmax,lmax]
        C=generate2dClmnTensor(gamma,alpha,beta,lmax,mmax,nmax) #compute convolution tensor [Refregier and Bacon 2003 eq. 6-11]
        return np.reshape( np.tensordot(C,gl,axes=[[0,1],[0,1]]), (C.shape[2]*C.shape[3], C.shape[4]*C.shape[5]) ) #return convolution tensor x g [Refregier and Bacon 2003 section 3.2]
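
The axes=[[0, 1], [0, 1]] form contracts several axis pairs at once: axes 0 and 1 of C against axes 0 and 1 of gl. A shape-only sketch with made-up dimensions (the real tensor comes from generate2dClmnTensor):

import numpy as np

C = np.random.randn(2, 3, 4, 5, 6, 7)   # stand-in for the 6D convolution tensor
gl = np.random.randn(2, 3)              # stand-in for the PSF coefficients

out = np.tensordot(C, gl, axes=[[0, 1], [0, 1]])
assert out.shape == (4, 5, 6, 7)        # the remaining axes, ready for the reshape above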

Example 11

Project: Kayak Source File: test_TensorMult.py
def check_tensormult(A_shape, B_shape, axes):

    np_A = npr.randn(*A_shape)
    np_B = npr.randn(*B_shape)
    A = kayak.Parameter(np_A)
    B = kayak.Parameter(np_B)
    C = kayak.TensorMult(A, B, axes)
    D = kayak.Parameter(npr.randn(*C.shape))
    L = kayak.MatSum(kayak.ElemMult(C, D))
    
    assert np.all(close_float(C.value, np.tensordot(np_A, np_B, axes)))
    assert kayak.util.checkgrad(A, L) < MAX_GRAD_DIFF
    assert kayak.util.checkgrad(B, L) < MAX_GRAD_DIFF

Example 12

Project: imusim Source File: transforms.py
Function: apply
    def apply(self,v):
        """
        Apply transform to an array of column vectors.
        """
        return np.tensordot(self._transform, v, axes=([1],[0])) \
                + self._translation

Example 13

Project: imusim Source File: transforms.py
Function: reverse
    def reverse(self,v):
        """
        Apply the inverse of this transform to an array of column vectors.
        """
        return np.tensordot(self._inverseTransform,
                v-self._translation,axes=([1],[0]))
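
In both apply and reverse, contracting axis 1 of the transform matrix with axis 0 of v is an ordinary matrix product that broadcasts over any number of column vectors. A short illustration outside imusim:

import numpy as np

M = np.random.randn(3, 3)
v = np.random.randn(3, 10)   # 10 column vectors
out = np.tensordot(M, v, axes=([1], [0]))
assert np.allclose(out, M @ v)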

Example 14

Project: pymanopt Source File: euclidean.py
Function: inner
    def inner(self, X, G, H):
        return float(np.tensordot(G, H, axes=G.ndim))

Example 15

Project: pymanopt Source File: fixed_rank.py
Function: inner
    def inner(self, X, G, H):
        return np.sum(np.tensordot(a, b) for (a, b) in zip(G, H))

Example 16

Project: pymanopt Source File: grassmann.py
Function: inner
    def inner(self, X, G, H):
        # Inner product (Riemannian metric) on the tangent space
        # For the Grassmann this is the Frobenius inner product.
        return np.tensordot(G, H, axes=G.ndim)

Example 17

Project: pymanopt Source File: oblique.py
Function: inner
    def inner(self, X, U, V):
        return float(np.tensordot(U, V))

Example 18

Project: pymanopt Source File: psd.py
Function: inner
    def inner(self, x, u, v):
        return np.tensordot(la.solve(x, u), la.solve(x, v), axes=x.ndim)

Example 19

Project: pymanopt Source File: psd.py
Function: inner
    def inner(self, Y, U, V):
        # Euclidean metric on the total space.
        return float(np.tensordot(U, V))

Example 20

Project: pymanopt Source File: psd.py
Function: inner
    def inner(self, Y, U, V):
        return float(np.tensordot(U, V))

Example 21

Project: pymanopt Source File: sphere.py
Function: inner
    def inner(self, X, U, V):
        return float(np.tensordot(U, V, axes=U.ndim))

Example 22

Project: pymanopt Source File: stiefel.py
Function: inner
    def inner(self, X, G, H):
        # Inner product (Riemannian metric) on the tangent space
        # For the stiefel this is the Frobenius inner product.
        return np.tensordot(G, H, axes=G.ndim)

Example 23

Project: python-qinfer Source File: bases.py
    def modelparams_to_state(self, modelparams):
        """
        Converts one or more vectors of model parameters into
        QuTiP-represented states.

        :param np.ndarray modelparams: Array of shape
            ``(basis.dim ** 2, )`` or
            ``(n_states, basis.dim ** 2)`` containing
            states represented as model parameter vectors in this
            basis.
        :rtype: :class:`~qutip.Qobj` or `list` of :class:`~qutip.Qobj`
            instances.
        :return: The given states represented as :class:`~qutip.Qobj`
            instances.
        """
        if modelparams.ndim == 1:
            qobj = qt.Qobj(
                np.tensordot(modelparams, self.data, 1),
                dims=[self.dims, self.dims]
            )
            if self.superrep is not None:
                qobj.superrep = self.superrep
            return qobj
        else:
            return list(map(self.modelparams_to_state, modelparams))
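
With axes=1, tensordot contracts the last axis of the first argument with the first axis of the second, so the call above forms the linear combination sum_i modelparams[i] * basis.data[i] of basis matrices. A standalone sketch with hypothetical shapes:

import numpy as np

coeffs = np.random.randn(4)        # hypothetical model parameter vector
basis = np.random.randn(4, 2, 2)   # hypothetical stack of basis matrices

rho = np.tensordot(coeffs, basis, 1)
assert rho.shape == (2, 2)
assert np.allclose(rho, sum(c * b for c, b in zip(coeffs, basis)))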

Example 24

Project: python-qinfer Source File: models.py
    def trunc_neg_eigs(self, particle):
        """
        Given a state represented as a model parameter vector,
        returns a model parameter vector representing the same
        state with any negative eigenvalues set to zero.

        :param np.ndarray particle: Vector of length ``(dim ** 2, )``
            representing a state.
        :return: The same state with any negative eigenvalues
            set to zero.
        """
        arr = np.tensordot(particle, self._basis.data.conj(), 1)
        w, v = np.linalg.eig(arr)
        if np.all(w >= 0):
            return particle
        else:
            w[w < 0] = 0
            new_arr = np.dot(v * w, v.conj().T)
            new_particle = np.real(np.dot(self._basis.flat(), new_arr.flatten()))
            assert new_particle[0] > 0
            return new_particle

Example 25

Project: pyhawkes Source File: impulses.py
    @property
    def impulses(self):
        basis = self.model.basis.basis
        return np.tensordot(basis, self.g, axes=[1,2])

Example 26

Project: kaggle-galaxies Source File: load_data.py
def im_rgb_to_yuv(img):
    return np.tensordot(img, rgb2yuv, [[2], [0]])

Example 27

Project: kaggle-galaxies Source File: load_data.py
def im_yuv_to_rgb(img):
    return np.tensordot(img, yuv2rgb, [[2], [0]])
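
In both conversions, contracting the trailing channel axis of the image with the first axis of the 3x3 conversion matrix applies the matrix to every pixel at once. A sketch with a placeholder matrix (rgb2yuv and yuv2rgb are module-level constants in the project; the matrix below is illustrative only):

import numpy as np

M = np.random.randn(3, 3)        # stand-in for the rgb2yuv matrix
img = np.random.rand(32, 32, 3)  # hypothetical HxWx3 image

out = np.tensordot(img, M, [[2], [0]])
assert out.shape == (32, 32, 3)
assert np.allclose(out, img @ M)  # same contraction over the trailing axis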

Example 28

Project: qiime Source File: estimate_observation_richness.py
    def __call__(self, size, confidence_level=0.95):
        if confidence_level <= 0 or confidence_level >= 1:
            raise ValueError("Invalid confidence level: %.4f. Must be between "
                             "zero and one (exclusive)." % confidence_level)

        # We'll use the variable names from Colwell 2012 for clarity and
        # brevity.
        m = size
        fk = self.getAbundanceFrequencyCounts()
        n = self.getTotalIndividualCount()
        s_obs = self.getObservationCount()
        s_est = self.estimateFullRichness()

        if m <= n:
            # Interpolation.

            # Equation 4 in Colwell 2012 for the estimate.
            estimate_acc = 0

            # Equation 5 in Colwell 2012 gives unconditional variance, but they
            # report the standard error (SE) (which is the same as the standard
            # deviation in this case) in their tables and use this to construct
            # confidence intervals. Thus, we compute SE as sqrt(variance).
            std_err_acc = 0

            for k in range(1, n + 1):
                alpha_km = self._calculate_alpha_km(n, k, m)
                estimate_acc += alpha_km * fk[k]
                std_err_acc += (((1 - alpha_km) ** 2) * fk[k])

            estimate = s_obs - estimate_acc

            # Convert variance to standard error.
            std_err = sqrt(std_err_acc - (estimate ** 2 / s_est))
        else:
            # Extrapolation.
            m_star = m - n
            f1 = fk[1]
            f2 = fk[2]
            f_hat = self.estimateUnobservedObservationCount()

            try:
                # Equation 9 in Colwell 2012.
                estimate = s_obs + f_hat * (1 -
                                            (1 - (f1 / (n * f_hat))) ** m_star)
            except ZeroDivisionError:
                # This can happen if we have exactly one singleton and no
                # doubletons, or no singletons and no doubletons.
                estimate = None
                std_err = None
            else:
                # Equation 10 in Colwell 2012. I used Wolfram Alpha to
                # calculate the analytic partial derivatives since they weren't
                # provided in the original paper. We have two partial
                # derivatives, wrt f1 and f2, that we really care about. All
                # other partial derivatives (e.g. wrt f3, f4, etc.) get a value
                # of 1.
                pd_f1 = self._partial_derivative_f1(f1, f2, m_star, n)
                pd_f2 = self._partial_derivative_f2(f1, f2, m_star, n)
                pd_f1f2 = pd_f1 * pd_f2

                # To do this efficiently, here's the algorithm:
                #
                # 1) Create nxn array filled with ones. Each element represents
                #    the multiplication of two partial derivatives.
                # 2) Fill in only what we need: the multiplication of partial
                #    derivatives wrt f1 and f2.
                # 3) Do an element-wise multiply between our partial derivative
                #    matrix and the covariance matrix. tensordot does this and
                #    also sums the result, which is exactly what we need. In
                #    the end, we've summed all n^2 elements, each of which are
                #    (pd_fi * pd_fj * cov_ij).
                self._pd_matrix[0, :] = pd_f1
                self._pd_matrix[1, :] = pd_f2
                self._pd_matrix[:, 0] = pd_f1
                self._pd_matrix[:, 1] = pd_f2

                self._pd_matrix[0, 0] = pd_f1 ** 2
                self._pd_matrix[0, 1] = pd_f1f2
                self._pd_matrix[1, 0] = pd_f1f2
                self._pd_matrix[1, 1] = pd_f2 ** 2

                std_err = sqrt(tensordot(self._pd_matrix, self._cov_matrix))

        # Compute CI based on std_err.
        ci_low = None
        ci_high = None
        if std_err is not None:
            # z_crit will be something like 1.96 for 95% CI.
            z_crit = abs(ndtri((1 - confidence_level) / 2))
            ci_bound = z_crit * std_err
            ci_low = estimate - ci_bound
            ci_high = estimate + ci_bound

        return estimate, std_err, ci_low, ci_high
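
The tensordot call in step 3 relies on the default axes=2, which for two matrices means "element-wise multiply, then sum all entries", exactly as the comment describes. A standalone check of that identity:

import numpy as np

A = np.random.randn(5, 5)
B = np.random.randn(5, 5)
assert np.allclose(np.tensordot(A, B), np.sum(A * B))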

Example 29

Project: healpy Source File: rotator.py
def rotateVector(rotmat,vec,vy=None,vz=None, do_rot=True):
    """Rotate a vector (or a list of vectors) using the rotation matrix
    given as first argument.
    
    Parameters
    ----------
    rotmat : float, array-like shape (3,3)
      The rotation matrix
    vec : float, scalar or array-like
      The vector to transform (shape (3,) or (3,N)),
      or x component (scalar or shape (N,)) if vy and vz are given
    vy : float, scalar or array-like, optional
      The y component of the vector (scalar or shape (N,))
    vz : float, scalar or array-like, optional
      The z component of the vector (scalar or shape (N,))
    do_rot : bool, optional
      if True, really perform the operation, if False do nothing.

    Returns
    -------
    vec : float, array
      The component of the rotated vector(s).

    See Also
    --------
    Rotator
    """
    if vy is None and vz is None:
       if do_rot: return np.tensordot(rotmat,vec,axes=(1,0))
       else: return vec
    elif vy is not None and vz is not None:
       if do_rot: return np.tensordot(rotmat,np.array([vec,vy,vz]),axes=(1,0))
       else: return vec,vy,vz
    else:
       raise TypeError("You must give either vec only or vec, vy "
                       "and vz parameters")

Example 30

Project: Kayak Source File: matrix_ops.py
Function: compute_value
    def _compute_value(self):
        A = self._parents[0].value
        B = self._parents[1].value
        return np.tensordot(A, B, self.axes)

Example 31

Project: evoMPS Source File: tdvp_common.py
def calc_C_mat_op_AA_tensordot(op, AA):
    return np.tensordot(op, AA, ((2, 3), (0, 1)))

Example 32

Project: evoMPS Source File: tdvp_common.py
def calc_C_3s_mat_op_AAA_tensordot(op, AAA):
    return np.tensordot(op, AAA, ((3, 4, 5), (0, 1, 2)))

Example 33

Project: evoMPS Source File: tdvp_common.py
def calc_C_conj_mat_op_AA_tensordot(op, AA):
    return np.tensordot(op.conj(), AA, ((0, 1), (0, 1)))
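
These helpers contract the "input" legs of a local operator with the physical legs of an MPS tensor pair or triple. A shape-only sketch for the two-site case, with made-up physical and bond dimensions (the leg conventions here are assumptions, not evoMPS documentation):

import numpy as np

q, D = 2, 4                       # hypothetical physical and bond dimensions
op = np.random.randn(q, q, q, q)  # two-site operator, legs assumed (out, out, in, in)
AA = np.random.randn(q, q, D, D)  # two-site tensor, legs assumed (phys, phys, left, right)

C = np.tensordot(op, AA, ((2, 3), (0, 1)))
assert C.shape == (q, q, D, D)    # uncontracted op legs first, then AA legs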

Example 34

Project: medpy Source File: noise.py
def immerkaer(input, mode="reflect", cval=0.0):
    r"""
    Estimate the global noise.
    
    The input image is assumed to have additive zero-mean Gaussian noise. Using a
    convolution with a Laplacian operator and subsequent averaging, the standard
    deviation sigma of this noise is estimated. This estimation is global, i.e. the
    noise is assumed to be globally homogeneous over the image.
    
    Implementation based on [1]_.
    
        
    Immerkaer suggested a Laplacian-based 2D kernel::
    
        [[ 1, -2,  1],
         [-2,  4, -2],
         [ 1, -2,  1]]

    which is separable and can therefore be applied by consecutive convolutions with
    the one-dimensional kernel [1, -2, 1].
    
    We generalize from this 1D-kernel to an ND-kernel by applying N consecutive
    convolutions with the 1D-kernel along all N dimensions.
    
    This is equivalent to convolving the image with an ND-kernel constructed by calling
    
    >>> kernel1d = numpy.asarray([1, -2, 1])
    >>> kernel = kernel1d.copy()
    >>> for _ in range(input.ndim - 1):
    ...     kernel = numpy.tensordot(kernel, kernel1d, 0)
    
    Parameters
    ----------
    input : array_like
        Array of which to estimate the noise.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The `mode` parameter determines how the array borders are
        handled, where `cval` is the value when mode is equal to
        'constant'. Default is 'reflect'
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0        
        
    Returns
    -------
    sigma : float
        The estimated standard deviation of the image's Gaussian noise.
        
    Notes
    -----
    Does not take the voxel spacing into account.
    Works well with medium to strong noise; tends to underestimate at low noise levels.
        
    See also
    --------
    immerkaer_local
    
    References
    ----------
    .. [1] John Immerkaer, "Fast Noise Variance Estimation", Computer Vision and Image
           Understanding, Volume 64, Issue 2, September 1996, Pages 300-302, ISSN 1077-3142
    """
    # build nd-kernel to acquire square root of sum of squared elements
    kernel = [1, -2, 1]
    for _ in range(input.ndim - 1):
        kernel = numpy.tensordot(kernel, [1, -2, 1], 0)
    divider = numpy.square(numpy.abs(kernel)).sum() # 6 for 1D, 36 for 2D, 216 for 3D, etc.
    
    # compute laplace of input and derive noise sigma
    laplace = separable_convolution(input, [1, -2, 1], None, mode, cval)
    factor = numpy.sqrt(numpy.pi / 2.) * 1. / ( numpy.sqrt(divider) * numpy.prod(laplace.shape) )
    sigma = factor * numpy.abs(laplace).sum()
    
    return sigma
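
The axes=0 argument to tensordot is an outer product, so each pass through the loop adds one dimension to the kernel. A quick standalone check of the divider values noted in the comment:

import numpy as np

kernel = np.asarray([1, -2, 1])
for _ in range(2):                     # build the 3D kernel from the 1D one
    kernel = np.tensordot(kernel, [1, -2, 1], 0)
assert kernel.shape == (3, 3, 3)
assert np.square(kernel).sum() == 216  # 6 for 1D, 36 for 2D, 216 for 3D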