numpy.linalg.eigvalsh

Here are examples of the Python API numpy.linalg.eigvalsh, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

76 Examples
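
Before the project examples, here is a minimal self-contained sketch of the API itself: numpy.linalg.eigvalsh takes a Hermitian (or real symmetric) matrix and returns its eigenvalues in ascending order, reading only one triangle of the input (lower by default, selectable with UPLO).

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])

w = np.linalg.eigvalsh(a)                   # ascending order: [1., 3.]
w_upper = np.linalg.eigvalsh(a, UPLO='U')   # same values from the upper triangle

assert np.allclose(w, [1.0, 3.0])
assert np.allclose(w, w_upper)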

3 Source : test_linalg.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def do(self, a, b, tags):
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev = linalg.eigvalsh(a, 'L')
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))

        ev2 = linalg.eigvalsh(a, 'U')
        assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))

    def test_types(self):
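
A standalone sketch of what this test verifies, with a random symmetric matrix standing in for the fixture's a (the rng seed and shape here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 4))
a = x + x.T                      # symmetrize so both routines apply

ev = np.linalg.eigvalsh(a)       # sorted ascending by contract
evalues, _ = np.linalg.eig(a)    # order not guaranteed
evalues.sort()

assert np.allclose(ev, evalues)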

3 Source : test_linalg.py
with MIT License
from alvarobartt

    def do(self, a, b, tags):
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev = linalg.eigvalsh(a, 'L')
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))

        ev2 = linalg.eigvalsh(a, 'U')
        assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))


class TestEigvalsh(object):

3 Source : linear_algebra.py
with GNU General Public License v3.0
from Artikash

def Heigenvalues(a, UPLO='L'):
    return linalg.eigvalsh(a, UPLO)

# Eigenvectors

def eigenvectors(A):
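
The UPLO argument matters because eigvalsh never reads the other triangle; if the input is not exactly symmetric, 'L' and 'U' give different answers. A small illustration:

import numpy as np
from numpy import linalg

a = np.array([[1.0, 99.0],    # triangles deliberately inconsistent
              [2.0,  1.0]])

print(linalg.eigvalsh(a, 'L'))   # treats a as [[1, 2], [2, 1]]   -> [-1.  3.]
print(linalg.eigvalsh(a, 'U'))   # treats a as [[1, 99], [99, 1]] -> [-98. 100.]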

3 Source : test_corrpsd.py
with MIT License
from birforce

    def test_nearest(self):
        x = self.x
        res_r = self.res
        y = corr_nearest(x, threshold=1e-7, n_fact=100)
        #print np.max(np.abs(x - y))
        assert_almost_equal(y, res_r.mat, decimal=3)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.0015)
        evals = np.linalg.eigvalsh(y)
        #print 'evals', evals / res_r.eigenvalues[::-1] - 1
        assert_allclose(evals, res_r.eigenvalues[::-1], rtol=0.003, atol=1e-7)
        #print evals[0] / 1e-7 - 1
        assert_allclose(evals[0], 1e-7, rtol=1e-6)


    def test_clipped(self):

3 Source : test_corrpsd.py
with MIT License
from birforce

    def test_clipped(self):
        x = self.x
        res_r = self.res
        y = corr_clipped(x, threshold=1e-7)
        #print np.max(np.abs(x - y)), np.max(np.abs((x - y) / y))
        assert_almost_equal(y, res_r.mat, decimal=1)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.15)

        evals = np.linalg.eigvalsh(y)
        assert_allclose(evals, res_r.eigenvalues[::-1], rtol=0.1, atol=1e-7)
        assert_allclose(evals[0], 1e-7, rtol=0.02)

    def test_cov_nearest(self):

3 Source : test_linalg.py
with Apache License 2.0
from dashanji

    def do(self, a, b, tags):
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev = linalg.eigvalsh(a, 'L')
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))

        ev2 = linalg.eigvalsh(a, 'U')
        assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))


class TestEigvalsh:

3 Source : hessians.py
with MIT License
from duartegroup

    def frequencies(self) -> List[Frequency]:
        """
        Calculate the normal mode frequencies from the eigenvalues of the
        Hessian matrix

        -----------------------------------------------------------------------
        Returns:
            (list(autode.values.Frequency)):

        Raises:
            (ValueError): Without atoms set
        """
        lambdas = np.linalg.eigvalsh(self._mass_weighted)
        freqs = self._eigenvalues_to_freqs(lambdas)

        return freqs

    @cached_property
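
_eigenvalues_to_freqs is internal to autode, but the underlying relation is that a vibrational frequency scales as the square root of a mass-weighted Hessian eigenvalue, with negative eigenvalues conventionally reported as imaginary modes. A hypothetical sketch of that conversion (unit handling omitted):

import numpy as np

def eigenvalues_to_freqs(lambdas):
    # Hypothetical stand-in for autode's _eigenvalues_to_freqs:
    # omega ~ sqrt(lambda); negative Hessian eigenvalues are mapped to
    # negative values here to mark imaginary modes. No unit conversion.
    return np.sign(lambdas) * np.sqrt(np.abs(lambdas))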

3 Source : math.py
with MIT License
from feihoo87

def entropy(rho: np.ndarray) -> float:
    '''von Neumann / Shannon entropy function'''
    if rho.ndim == 1:  # pure states have no entropy
        return 0
    v = np.linalg.eigvalsh(rho).real
    return -np.sum(v[v > 0] * np.log2(v[v > 0]))


def concurrence(rho: np.ndarray) -> float:
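
Reusing the entropy function above: the maximally mixed single-qubit state carries exactly one bit, while a pure state (passed as a 1-D vector) carries none:

import numpy as np

rho_mixed = np.eye(2) / 2           # maximally mixed qubit
psi = np.array([1.0, 0.0])          # pure state as a 1-D vector

print(entropy(rho_mixed))   # 1.0
print(entropy(psi))         # 0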

3 Source : pops.feature_selection.py
with GNU General Public License v3.0
from FinucaneLab

def compute_Ls(sigmas, args):
	Ls = []
	min_lambda = 0
	for sigma in sigmas:
		W = np.linalg.eigvalsh(sigma)
		min_lambda = min(min_lambda, min(W))
	Y = pd.read_table(args.gene_results+'.genes.out', delim_whitespace=True).ZSTAT.values
	ridge = abs(min(min_lambda, 0))+.05+.9*max(0, np.var(Y)-1)
	for sigma in sigmas:
		sigma = sigma+ridge*np.identity(sigma.shape[0])
		L = np.linalg.cholesky(np.linalg.inv(sigma))
		Ls.append(L)
	full_L = scipy.linalg.block_diag(*Ls)  # equivalent to listing Ls[0] through Ls[21] explicitly
	return full_L

def get_transformation_matrix(args):
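
The ridge shift is what lets np.linalg.cholesky succeed: adding ridge*I raises every eigenvalue of sigma by ridge, pushing an indefinite matrix into positive definite territory. A minimal illustration of that step:

import numpy as np

sigma = np.array([[1.0, 2.0],
                  [2.0, 1.0]])                  # indefinite: eigenvalues -1, 3

min_lambda = np.linalg.eigvalsh(sigma).min()    # -1.0
ridge = abs(min(min_lambda, 0)) + .05           # clears zero with a margin

shifted = sigma + ridge * np.identity(2)        # eigenvalues 0.05, 4.05
L = np.linalg.cholesky(np.linalg.inv(shifted))  # now succeeds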

3 Source : pops.predict_scores.py
with GNU General Public License v3.0
from FinucaneLab

def compute_Ls(sigmas, args):
	Ls = []
	min_lambda=0
	for sigma in sigmas:
		W = np.linalg.eigvalsh(sigma)
		min_lambda = min(min_lambda, min(W))
	Y = pd.read_table(args.gene_results+'.genes.out', delim_whitespace=True).ZSTAT.values
	ridge = abs(min_lambda)+.05+.9*max(0, np.var(Y)-1)
	for sigma in sigmas:
		sigma = sigma+ridge*np.identity(sigma.shape[0])
		L = np.linalg.cholesky(np.linalg.inv(sigma))
		Ls.append(L)
	return Ls

def munge_features(args):

3 Source : test_quantity_non_ufuncs.py
with BSD 3-Clause "New" or "Revised" License
from holzschu

    def test_eigvalsh(self):
        w = np.linalg.eigvalsh(self.q)
        wx = np.linalg.eigvalsh(self.q.value) << self.q.unit
        assert_array_equal(w, wx)


untested_functions = set()

3 Source : predicates.py
with Apache License 2.0
from indian-institute-of-science-qc

def is_positive_semidefinite_matrix(mat, rtol=RTOL_DEFAULT, atol=ATOL_DEFAULT):
    """Test if a matrix is positive semidefinite"""
    if atol is None:
        atol = ATOL_DEFAULT
    if rtol is None:
        rtol = RTOL_DEFAULT
    if not is_hermitian_matrix(mat, rtol=rtol, atol=atol):
        return False
    # Check eigenvalues are all positive
    vals = np.linalg.eigvalsh(mat)
    for v in vals:
        if v < -atol:
            return False
    return True


def is_identity_matrix(mat,
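
The same check in self-contained form (hermiticity test inlined, tolerance illustrative): a projector passes, an indefinite matrix fails.

import numpy as np

def is_psd(mat, atol=1e-8):
    if not np.allclose(mat, np.conj(mat).T, atol=atol):
        return False                  # not Hermitian
    return bool(np.all(np.linalg.eigvalsh(mat) >= -atol))

print(is_psd(np.array([[1.0, 0.0], [0.0, 0.0]])))   # True  (projector)
print(is_psd(np.array([[0.0, 1.0], [1.0, 0.0]])))   # False (eigenvalues -1, 1)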

3 Source : gap.py
with GNU General Public License v3.0
from joselado

def raw_gap(h,kpgen,sparse=True,nk=100):
  hk_gen = h.get_hk_gen() # get hamiltonian generator
  ks = np.linspace(0.,1.,nk)
  etot = [] # total eigenvalues
  for k in ks:
    kp = kpgen(k)
    hk = hk_gen(kp) # generate hamiltonian
    if sparse: 
      es,ew = lgs.eigsh(csc_matrix(hk),k=4,which="LM",sigma=0.0)
    else:
      es = lg.eigvalsh(hk) # get eigenvalues
    etot.append(es)
  etot = np.array(etot)
  return min(etot[etot>0.])


def gap2d(h,nk=40,k0=None,rmap=1.0,recursive=False,

3 Source : krylov.py
with MIT License
from jsikyoon

def tridiagonal_eigenvalues(alphas, betas):
    T = make_tridiagonal(alphas, betas)
    return np.linalg.eigvalsh(T)


def test_lanczos():
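
make_tridiagonal is the project's helper; a plausible sketch of it, assuming alphas are the Lanczos diagonal and betas the off-diagonal entries:

import numpy as np

def make_tridiagonal(alphas, betas):
    # Symmetric tridiagonal matrix: alphas on the main diagonal,
    # betas on the first sub- and super-diagonals.
    return (np.diag(alphas)
            + np.diag(betas, k=1)
            + np.diag(betas, k=-1))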

3 Source : test_linalg.py
with MIT License
from ktraunmueller

    def do(self, a, b):
        # note that eigenvalue arrays must be sorted since
        # their order isn't guaranteed.
        ev = linalg.eigvalsh(a, 'L')
        evalues, evectors = linalg.eig(a)
        ev.sort(axis=-1)
        evalues.sort(axis=-1)
        assert_allclose(ev, evalues,
                        rtol=get_rtol(ev.dtype))

        ev2 = linalg.eigvalsh(a, 'U')
        ev2.sort(axis=-1)
        assert_allclose(ev2, evalues,
                        rtol=get_rtol(ev.dtype))

    def test_types(self):

3 Source : helper.py
with Apache License 2.0
from malllabiisc

def compute_kls(adj, num_nodes):
	"""
	Computes the KLS kernel for a given adjacency matrix

	Parameters
	----------
	adj:		Adjacency matrix of the graph
	num_nodes:	Number of nodes in the graph
	
	Returns
	-------
	KLS kernel for the graph
		
	"""
	min_eig = np.min(np.linalg.eigvalsh(adj))
	return (adj/np.abs(min_eig)) + np.eye(num_nodes)

def lovasz_theta(G, long_return=True, complement=False):
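
The shift works because dividing the adjacency matrix by |lambda_min| bounds its spectrum below by -1, so adding the identity lifts it to >= 0. Reusing compute_kls with a 3-node path graph:

import numpy as np

adj = np.array([[0., 1., 0.],
                [1., 0., 1.],
                [0., 1., 0.]])            # path graph on 3 nodes

K = compute_kls(adj, num_nodes=3)
print(np.linalg.eigvalsh(K))              # ~[0. 1. 2.] -- positive semidefinite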

3 Source : Calculate.py
with MIT License
from phoebemrdevries

def MaximumShear(tensors):
        """ Calculate the maximum shear of a symmetric 3x3 tensor. This is just half the
        difference between the largest and smallest eigenvalues.
        tensors: A list of symmetric 3x3 arrays.
        Returns:
        ret: a list of maximum shear values.
        """
        ret = []
        for tensor in tensors:
            eigen_values = list(np.linalg.eigvalsh(tensor))
            ret.append((max(eigen_values) - min(eigen_values)) / 2.0)
        return np.array(ret)

def Eigenvalues(tensors):
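
A quick check of MaximumShear with a diagonal tensor, whose eigenvalues are simply its diagonal entries:

import numpy as np

tensor = np.diag([3.0, 1.0, -1.0])
print(MaximumShear([tensor]))    # [2.] -- (3 - (-1)) / 2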

3 Source : Calculate.py
with MIT License
from phoebemrdevries

def Eigenvalues(tensors):
        """ Calculate eigenvalues of matrix.
        """
        eigmax = []
        eigmed = []
        eigmin = []
        for tensor in tensors:
            eigen_values = list(np.linalg.eigvalsh(tensor))
            eigen_values.sort()
            eigmax.append(eigen_values[2])
            eigmed.append(eigen_values[1])
            eigmin.append(eigen_values[0])
        return np.array(eigmax), np.array(eigmed), np.array(eigmin)

def TensorInvariants(tensors):

3 Source : DensOp.py
with Apache License 2.0
from samn33

    def __von_neumann_entropy(self):  # von neumann entropy

        mat = self.get_elm()
        eigvals = np.linalg.eigvalsh(mat)
        diag = [-eigvals[i]*np.log2(eigvals[i])
                for i in range(len(eigvals)) if abs(eigvals[i]) > EPS]
        ent = np.sum(diag)
        return ent
    
    def entropy(self, qid=[]):

3 Source : do_find_Weyl.py
with GNU General Public License v3.0
from Sassafrass6

def gen_eigs ( HRaux, kq, R ):
  # Load balancing

  nawf,_,_,nspin = HRaux.shape
  E_kp = np.zeros((1,nawf,nspin), dtype=np.float64)

  kq=kq[:,None]

#  Hks_int  = np.zeros((nawf,nawf,1,nspin),dtype=complex,order='C') # final data arrays
  Hks_int = band_loop_H(0,1,HRaux,kq,R)

  for ispin in range(nspin):
    E_kp[:,:,ispin] =  LAN.eigvalsh(Hks_int[:,:,0,ispin],UPLO='U')

  return E_kp


def get_gap ( HR, kq, R, nelec ):

3 Source : convex_iteration.py
with MIT License
from utiasSTARS

def sparse_eigenvalue_sum(sdp_variable_map: dict, d: int):
    running_eigenvalue_sum = 0.
    for clique in sdp_variable_map:
        Z_clique = sdp_variable_map[clique].value
        running_eigenvalue_sum += np.sum(np.linalg.eigvalsh(Z_clique)[:-d])
    return running_eigenvalue_sum


def get_sparsity_pattern(G, canonical_point_order: list) -> set:
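
Because eigvalsh returns eigenvalues in ascending order, the [:-d] slice sums everything except the d largest, a common surrogate for a rank-d constraint. For a nearly rank-2 matrix:

import numpy as np

Z = np.diag([5.0, 4.0, 0.1, 0.05])           # nearly rank-2 PSD matrix
d = 2
print(np.sum(np.linalg.eigvalsh(Z)[:-d]))    # ~0.15, the mass below the top-2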

3 Source : linear_algebra.py
with GNU General Public License v3.0
from Valdes-Tresanco-MS

def Heigenvalues(a, UPLO='L'):
    return linalg.eigvalsh(a,UPLO)

# Eigenvectors

def eigenvectors(A):

3 Source : conics.py
with MIT License
from wdoppenberg

def ellipse_axes(A):
    if isinstance(A, torch.Tensor):
        lambdas = torch.linalg.eigvalsh(A[..., :2, :2]) / (-torch.det(A) / torch.det(A[..., :2, :2]))[..., None]
        axes = torch.sqrt(1 / lambdas)
    elif isinstance(A, np.ndarray):
        lambdas = LA.eigvalsh(A[..., :2, :2]) / (-LA.det(A) / LA.det(A[..., :2, :2]))[..., None]
        axes = np.sqrt(1 / lambdas)
    else:
        raise TypeError("Input conics must be of type torch.Tensor or np.ndarray.")
    return axes[..., 1], axes[..., 0]


def ellipse_angle(A):
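
Sanity check for ellipse_axes with a circle of radius 2, written as the conic matrix diag(1, 1, -4) for x^2 + y^2 - 4 = 0; both semi-axes come out as 2:

import numpy as np

A = np.diag([1.0, 1.0, -4.0])[None]   # conic x^2 + y^2 - 4 = 0 (batch of one)
a, b = ellipse_axes(A)
print(a, b)                           # [2.] [2.] -- a circle of radius 2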

0 Source : test_linalg.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float64)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float32)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))


class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):

0 Source : hermite.py
with GNU General Public License v3.0
from adityaprakash-bobby

def hermgauss(deg):
    """
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`H_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a non-negative integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1], dtype=np.float64)
    m = hermcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = _normed_hermite_n(x, ideg)
    df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(np.pi) / w.sum()

    return x, w


def hermweight(x):
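
Usage of hermgauss: with the weight exp(-x^2), a 20-point rule integrates x^2 over the real line to sqrt(pi)/2:

import numpy as np
from numpy.polynomial.hermite import hermgauss

x, w = hermgauss(20)
approx = np.sum(w * x**2)        # integral of x^2 * exp(-x^2) over the real line
assert np.isclose(approx, np.sqrt(np.pi) / 2)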

0 Source : hermite_e.py
with GNU General Public License v3.0
from adityaprakash-bobby

def hermegauss(deg):
    """
    Gauss-HermiteE quadrature.

    Computes the sample points and weights for Gauss-HermiteE quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x^2/2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`He_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a non-negative integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = hermecompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = _normed_hermite_e_n(x, ideg)
    df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_e_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite_e we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(2*np.pi) / w.sum()

    return x, w


def hermeweight(x):

0 Source : laguerre.py
with GNU General Public License v3.0
from adityaprakash-bobby

def laggauss(deg):
    """
    Gauss-Laguerre quadrature.

    Computes the sample points and weights for Gauss-Laguerre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a non-negative integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = lagcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = lagval(x, c)
    df = lagval(x, lagder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = lagval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # scale w to get the right value, 1 in this case
    w /= w.sum()

    return x, w


def lagweight(x):

0 Source : legendre.py
with GNU General Public License v3.0
from adityaprakash-bobby

def leggauss(deg):
    """
    Gauss-Legendre quadrature.

    Computes the sample points and weights for Gauss-Legendre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        raise ValueError("deg must be a non-negative integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = legcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = legval(x, c)
    df = legval(x, legder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = legval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # for Legendre we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= 2. / w.sum()

    return x, w


def legweight(x):
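
Usage of leggauss: a 5-point rule is exact for polynomials up to degree 9, so it recovers the integral of x^4 over [-1, 1] exactly:

import numpy as np
from numpy.polynomial.legendre import leggauss

x, w = leggauss(5)
approx = np.sum(w * x**4)        # integral of x^4 over [-1, 1]
assert np.isclose(approx, 2.0 / 5.0)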

0 Source : test_linalg.py
with MIT License
from alvarobartt

    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float64)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))

        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float32)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))


class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):

0 Source : theory.py
with MIT License
from benmaier

def get_effective_medium_eigenvalue_gap_from_matrix(N,k_over_2,beta):

    assert_parameters(N,k_over_2,beta)

    pS, pL = get_connection_probabilities(N,k_over_2,beta)

    N = int(N)
    k_over_2 = int(k_over_2)
    k = int(2*k_over_2)


    P = np.array([0.0] + [pS]*k_over_2 + [pL]*(N-1-k) + [pS]*k_over_2) / k
    P = circulant(P)


    omega = np.sort(np.linalg.eigvalsh(P))
    omega_N_m_1 = omega[-2]

    return 1 - omega_N_m_1

def get_effective_medium_eigenvalue_gap(N,k_over_2,beta):

0 Source : dist.py
with GNU General Public License v3.0
from caelan

def fixSigma(sigma, ridge=1e-20):
    # Can pass in ridge > 0 to ensure minimum eigenvalue is always >= ridge
    good = True
    for i in range(len(sigma)):
        for j in range(i):
            if sigma[i, j] != sigma[j, i]:
                if abs(sigma[i, j] - sigma[j, i]) > 1e-10:
                    print('found asymmetry mag:',
                             abs(sigma[i, j] - sigma[j, i]))
                good = False
    if not good:
        sigma = (sigma + sigma.T) / 2

    eigs = linalg.eigvalsh(sigma)
    if any([type(eig) in (complex, complex128) for eig in eigs]):
        print('Complex eigs', eigs)
        # Real symmetrix matrix should have real eigenvalues!
        raise Exception('Complex eigenvalues in COV')
    minEig = min(eigs)
    if minEig < ridge:
        if minEig < 0:
            print('Not positive definite', minEig)
        print('Added to matrix', (ridge - minEig))
        # if -minEig > 1e-5:
        #     raw_input('okay?')
        sigma = sigma + 2 * (ridge - minEig) * identity(len(sigma))
    return sigma


# Uses numpy matrices
# pose4 is a big hack to deal with a case when the last element is a rotation
class MultivariateGaussianDistribution(Distribution):

0 Source : KCI.py
with GNU General Public License v3.0
from cmu-phil

    def null_sample_spectral(self, Kxc, Kyc):
        """
        Simulate data from null distribution

        Parameters
        ----------
        Kxc: centralized kernel matrix for data_x (nxn)
        Kyc: centralized kernel matrix for data_y (nxn)

        Returns
        _________
        null_dstr: samples from the null distribution

        """
        T = Kxc.shape[0]
        if T > 1000:
            num_eig = int(np.floor(T / 2))  # np.int was removed from NumPy; builtin int is equivalent here
        else:
            num_eig = T
        lambdax = eigvalsh(Kxc)
        lambday = eigvalsh(Kyc)
        lambdax = -np.sort(-lambdax)
        lambday = -np.sort(-lambday)
        lambdax = lambdax[0:num_eig]
        lambday = lambday[0:num_eig]
        lambda_prod = np.dot(lambdax.reshape(num_eig, 1), lambday.reshape(1, num_eig)).reshape(
            (num_eig ** 2, 1))
        lambda_prod = lambda_prod[lambda_prod > lambda_prod.max() * self.thresh]
        f_rand = np.random.chisquare(1, (lambda_prod.shape[0], self.nullss))
        null_dstr = lambda_prod.T.dot(f_rand) / T
        return null_dstr

    def get_kappa(self, Kx, Ky):

0 Source : KCI.py
with GNU General Public License v3.0
from cmu-phil

    def null_sample_spectral(self, uu_prod, size_u, T):
        """
        Simulate data from null distribution

        Parameters
        ----------
        uu_prod: product of the eigenvectors of Kx and Ky
        size_u: number of producted eigenvectors
        T: sample size

        Returns
        _________
        null_dstr: samples from the null distribution

        """
        eig_uu = eigvalsh(uu_prod)
        eig_uu = -np.sort(-eig_uu)
        eig_uu = eig_uu[0:np.min((T, size_u))]
        eig_uu = eig_uu[eig_uu > np.max(eig_uu) * self.thresh]

        f_rand = np.random.chisquare(1, (eig_uu.shape[0], self.nullss))
        null_dstr = eig_uu.T.dot(f_rand)
        return null_dstr

    def get_kappa(self, uu_prod):

0 Source : quadratic.py
with Apache License 2.0
from cog-imperial

def _eigval_test(A):
    """Check convexity by computing eigenvalues.

    Parameters
    ----------
    A : matrix
        the (symmetric) coefficients matrix

    Returns
    -------
    Convexity if the expression is Convex or Concave, None otherwise.
    """
    eigv = np.linalg.eigvalsh(A)
    if np.all(eigv >= 0):
        return Convexity.Convex
    elif np.all(eigv <= 0):
        return Convexity.Concave
    return None


def _build_var_to_idx_map(expr):
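
Reusing _eigval_test (Convexity is the library's enum): the identity is convex, its negation concave, and an indefinite matrix yields None:

import numpy as np

I = np.eye(2)
print(_eigval_test(I))                        # Convexity.Convex
print(_eigval_test(-I))                       # Convexity.Concave
print(_eigval_test(np.diag([1.0, -1.0])))     # None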

0 Source : ctf_estimator.py
with GNU General Public License v3.0
from ComputationalCryoEM

    def pca(self, signal, pixel_size, g_min, g_max):
        """

        :param signal: Estimated power spectrum.
        :param pixel_size: Pixel size in \u212b (Angstrom).
        :param g_min: Inverse of minimun resolution for PSD.
        :param g_max: Inverse of maximum resolution for PSD.
        :return: ratio.
        """

        # RCOPT
        signal = signal.asnumpy()[0].T

        N = signal.shape[0]
        center = N // 2

        grid = grid_2d(N, normalized=True, indexing="yx", dtype=self.dtype)

        r_ctf = grid["r"] / 2 * (10 / pixel_size)

        grid = grid_2d(N, normalized=False, indexing="yx", dtype=self.dtype)
        X = grid["x"]
        Y = grid["y"]

        signal -= np.min(signal)

        rad_sq_min = N * pixel_size / g_min
        rad_sq_max = N * pixel_size / g_max

        min_limit = r_ctf[center, (center + np.floor(rad_sq_min)).astype(int)]
        signal[r_ctf < min_limit] = 0

        max_limit = r_ctf[center, (center + np.ceil(rad_sq_max)).astype(int)]
        signal = np.where(r_ctf > max_limit, 0, signal)

        moment_02 = Y**2 * signal
        moment_02 = np.sum(moment_02, axis=(0, 1))

        moment_11 = Y * X * signal
        moment_11 = np.sum(moment_11, axis=(0, 1))

        moment_20 = X**2 * signal
        moment_20 = np.sum(moment_20, axis=(0, 1))

        moment_mat = np.zeros((2, 2))
        moment_mat[0, 0] = moment_20
        moment_mat[1, 1] = moment_02
        moment_mat[0, 1] = moment_11
        moment_mat[1, 0] = moment_11

        moment_evals = npla.eigvalsh(moment_mat)
        ratio = moment_evals[0] / moment_evals[1]

        return ratio

    def gd(

0 Source : hermite.py
with Apache License 2.0
from dashanji

def hermgauss(deg):
    """
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`H_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg <= 0:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1], dtype=np.float64)
    m = hermcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = _normed_hermite_n(x, ideg)
    df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(np.pi) / w.sum()

    return x, w


def hermweight(x):

0 Source : hermite_e.py
with Apache License 2.0
from dashanji

def hermegauss(deg):
    """
    Gauss-HermiteE quadrature.

    Computes the sample points and weights for Gauss-HermiteE quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x^2/2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`He_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg <= 0:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = hermecompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = _normed_hermite_e_n(x, ideg)
    df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_e_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite_e we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(2*np.pi) / w.sum()

    return x, w


def hermeweight(x):

0 Source : laguerre.py
with Apache License 2.0
from dashanji

def laggauss(deg):
    """
    Gauss-Laguerre quadrature.

    Computes the sample points and weights for Gauss-Laguerre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\inf]`
    with the weight function :math:`f(x) = \\exp(-x)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg <= 0:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = lagcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = lagval(x, c)
    df = lagval(x, lagder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = lagval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # scale w to get the right value, 1 in this case
    w /= w.sum()

    return x, w


def lagweight(x):

0 Source : legendre.py
with Apache License 2.0
from dashanji

def leggauss(deg):
    """
    Gauss-Legendre quadrature.

    Computes the sample points and weights for Gauss-Legendre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----

    .. versionadded:: 1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.

    """
    ideg = pu._deprecate_as_int(deg, "deg")
    if ideg <= 0:
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = legcompanion(c)
    x = la.eigvalsh(m)

    # improve roots by one application of Newton
    dy = legval(x, c)
    df = legval(x, legder(c))
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = legval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # for Legendre we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= 2. / w.sum()

    return x, w


def legweight(x):

0 Source : utils.py
with BSD 3-Clause "New" or "Revised" License
from deepanshs

def get_Haeberlen_components(tensors):
    """Return zeta and eta parameters of the tensor using the Haeberlen convention.

    Args:
        ndarray tensors: A `N x 3 x 3` ndarray of `N` traceless symmetric second-rank
            Cartesian tensors.
    """
    n = tensors.shape[0]
    eig_val = np.linalg.eigvalsh(tensors)
    eig_val_sort_ = np.argsort(np.abs(eig_val), axis=1, kind="mergesort")
    eig_val_sort_ = (eig_val_sort_.T + 3 * np.arange(n)).T.ravel()
    eig_val_sorted = eig_val.ravel()[eig_val_sort_].reshape(n, 3)

    eig_val_sort_ = eig_val = None
    del eig_val_sort_, eig_val

    zeta = eig_val_sorted[:, -1]
    eta = (eig_val_sorted[:, 0] - eig_val_sorted[:, 1]) / zeta

    return zeta, eta


def x_y_from_zeta_eta(zeta, eta):

0 Source : hessians.py
with MIT License
from duartegroup

    def frequencies_proj(self) -> List[Frequency]:
        """
        Frequencies with rotation and translation projected out

        -----------------------------------------------------------------------
        Returns:
            (list(autode.values.Frequency)):

        Raises:
            (ValueError): Without atoms set
        """
        if self.atoms is None:
            raise ValueError('Could not calculate projected frequencies, must '
                             'have atoms set')

        n_tr = self.n_tr             # Number of translational+rotational modes
        lambdas = np.linalg.eigvalsh(self._proj_mass_weighted[n_tr:, n_tr:])

        trans_rot_freqs = [Frequency(0.0) for _ in range(n_tr)]
        vib_freqs = self._eigenvalues_to_freqs(lambdas)

        return trans_rot_freqs + vib_freqs

    def copy(self) -> 'Hessian':

0 Source : info.py
with Apache License 2.0
from gecrooks

def entropy(rho: Density, base: float = None) -> float:
    """
    Returns the von-Neumann entropy of a mixed quantum state.

    Args:
        rho:    A density matrix
        base:   Optional logarithm base. Default is base e, and entropy is
                measured in nats. For bits set base to 2.

    Returns:
        The von-Neumann entropy of rho
    """
    op = rho.asoperator()
    probs = np.linalg.eigvalsh(op)
    probs = np.maximum(probs, 0.0)  # Compensate for floating point errors
    return scipy.stats.entropy(probs, base=base)


# TESTME
def mutual_info(

0 Source : old_mbar.py
with MIT License
from GHeinzelmann

    def _pseudoinverse(self, A, tol=1.0e-10):
        """
        Compute the Moore-Penrose pseudoinverse.

        REQUIRED ARGUMENTS
          A (np KxK matrix) - the square matrix whose pseudoinverse is to be computed

        RETURN VALUES
          Ainv (np KxK matrix) - the pseudoinverse

        OPTIONAL VALUES
          tol - the tolerance (relative to the largest magnitude singular value) below which singular values are not included in forming the pseudoinverse (default: 1.0e-10)

        NOTES
          This implementation is provided because the 'pinv' function of np is broken in the version we were using.

        TODO
          Can we get rid of this and use np.linalg.pinv instead?

        """

        # DEBUG
        # TODO: Should we use pinv, or _pseudoinverse?
        # return np.linalg.pinv(A)

        # Get size
        [M, N] = A.shape
        if N != M:
            raise DataError("pseudoinverse can only be computed for square matrices: dimensions were %d x %d" % (
                M, N))

        # Make sure A contains no nan.
        if(np.any(np.isnan(A))):
            print("attempted to compute pseudoinverse of A =")
            print(A)
            raise ParameterError("A contains nan.")

        # DEBUG
        diagonal_loading = False
        if diagonal_loading:
            # Modify matrix by diagonal loading.
            eigs = linalg.eigvalsh(A)
            most_negative_eigenvalue = eigs.min()
            if (most_negative_eigenvalue < 0.0):
                print("most negative eigenvalue = %e" % most_negative_eigenvalue)
                # Choose loading value.
                gamma = -most_negative_eigenvalue * 1.05
                # Modify Theta by diagonal loading
                A += gamma * np.eye(A.shape[0])

        # Compute SVD of A.
        [U, S, Vt] = linalg.svd(A)

        # Compute pseudoinverse by taking square root of nonzero singular
        # values.
        Ainv = np.matrix(np.zeros([M, M], dtype=np.float64))
        for k in range(M):
            if (abs(S[k]) > tol * abs(S[0])):
                Ainv += (1.0/S[k]) * np.outer(U[:, k], Vt[k, :]).T

        return Ainv
    #=========================================================================

    def _zerosamestates(self, A):

0 Source : arnoldi_test.py
with Apache License 2.0
from google-research

  def _eigval_eigvecs_max_error(self, distilled: ArnoldiEigens, true_matrix,
                                top_k, hermitian: bool):
    """Computes max approximation error on top_k eigenvalues/vectors."""
    if hermitian:
      eigvals = np.linalg.eigvalsh(true_matrix)
    else:
      eigvals = np.linalg.eigvals(true_matrix)
    idx = np.argsort(np.abs(eigvals))
    eigvals = eigvals[idx]
    eigvals = eigvals[-top_k:]

    err_val = np.max(np.abs(eigvals - distilled.eigenval))
    # Test directly if distilled eigenvectors are eigenvectors for
    # the corresponding eigenvalues.
    err_vec = [
        true_matrix @ distilled.eigenvec[i] - eigvals[i] * distilled.eigenvec[i]
        for i in range(eigvals.shape[0])
    ]
    err_vec = [np.linalg.norm(x, ord=np.inf) for x in err_vec]
    err_vec = np.max(err_vec)
    return err_vec, err_val

  @parameterized.named_parameters(

0 Source : pfmodel_ts.py
with MIT License
from igp-gravity

    def calc_log_prior_total_det_quiet(self,precision=1.0e-6):
        self._gen_gtg()
        self._gen_btb()
        self.log_prior_det_val = 0
        self.log_total_det_val = 0
        prior_eigs = np.zeros(self._nx*self._ny*self.nz)
        total_eigs = np.zeros(self._nx*self._ny*self.nz)
        tmp_mat = np.zeros((self.ns*self.nx*self.ny,self.ns*self.nx*self.ny))
        for key in self.smooth_components:
            if 't' in key:
                tmp_mat += self.weights[key] * self._btb[key]
            else:
                for i in range(self.ns):
                    tmp_mat[i*self.nx*self.ny:(i+1)*self.nx*self.ny,
                            i*self.nx*self.ny:(i+1)*self.nx*self.ny] += self.weights[key] * self._btb[key]
        prior_eigs = np.linalg.eigvalsh(tmp_mat)
        self.log_prior_det_val = sum(np.log(prior_eigs[prior_eigs > precision]))
        for i,k in enumerate(self.kernel_matrices):
            tmp_mat[i*self.nx*self.ny:(i+1)*self.nx*self.ny,
                    i*self.nx*self.ny:(i+1)*self.nx*self.ny] += self.weights['obs'] * self._gtg[i]
        if 'refer' in self._weights.keys():
            tmp_mat += self._weights['refer']*np.eye(self.nx*self.ny*self.ns)
        total_eigs = np.linalg.eigvalsh(tmp_mat)
        self.log_total_det_val = sum(np.log(total_eigs[total_eigs > precision]))
        self.eigs = {'prior':prior_eigs,'total':total_eigs}

    @timeit

0 Source : tcca.py
with MIT License
from jameschapman19

    def _setup_tensor(self, *views: np.ndarray):
        self.train_views = views
        kernels = [self._get_kernel(i, view) for i, view in enumerate(self.train_views)]
        covs = [
            (1 - self.c[i]) * kernel @ kernel.T / (self.n - 1) + self.c[i] * kernel
            for i, kernel in enumerate(kernels)
        ]
        smallest_eigs = [
            min(0, np.linalg.eigvalsh(cov).min()) - self.eps for cov in covs
        ]
        covs = [
            cov - smallest_eig * np.eye(cov.shape[0])
            for cov, smallest_eig in zip(covs, smallest_eigs)
        ]
        self.covs_invsqrt = [np.linalg.inv(sqrtm(cov)).real for cov in covs]
        kernels = [
            kernel @ cov_invsqrt
            for kernel, cov_invsqrt in zip(kernels, self.covs_invsqrt)
        ]
        return kernels, self.covs_invsqrt

    def transform(self, views: np.ndarray, **kwargs):

0 Source : solvers.py
with MIT License
from josefkaufmann

    def bayes_conv_offdiag(self, A, entr, alpha):
        """Bayesian convergence criterion for classic maxent, offdiagonal version

        Parameters
        ----------
        A : numpy.ndarray
                  spectral function
        entr : float
                  entropy
        alpha : float
                  weight factor of the entropy

        Returns
        -------
        ng : float
                  "number of good data points"
        tr : float
                  trace of the gamma matrix
        conv : float
                   convergence criterion (1 -> converged)
        """
        A_sq = np.power((A ** 2 + 4. * self.model_plus * self.model_minus) / self.dw ** 2, 0.25)
        LambdaMatrix = A_sq[:, None] * self.d2chi2 * A_sq[None, :]
        lam = np.linalg.eigvalsh(LambdaMatrix)
        ng = -2. * alpha * entr
        tr = np.sum(lam / (alpha + lam))
        conv = tr / ng
        return ng, tr, conv

    def posterior_probability(self, A, alpha, entr, chisq):

0 Source : gap.py
with GNU General Public License v3.0
from joselado

def gap_line(h,kpgen,assume_eh = False,sparse=True,nocc=None):
  """Return a function with argument between 0,1, which returns the gap"""
  hk_gen = h.get_hk_gen() # get hamiltonian generator
  def f(k):
    kp = kpgen(k) # get kpoint
    hk = hk_gen(kp) # generate hamiltonian
    if sparse: 
      es,ew = lgs.eigsh(csc_matrix(hk),k=4,which="LM",sigma=0.0)
    else:
      es = lg.eigvalsh(hk) # get eigenvalues
    if assume_eh: g = np.min(es[es>0.])
    else:  
      if nocc is None: # Assume conduction are staets above E = 0.0
        try: g = np.min(es[es>0.]) - np.max(es[es<0.])
        except: g = 0.0 
      else:
        g = es[nocc] - es[nocc-1]
    return g  # return gap
  return f  # return gap


def raw_gap(h,kpgen,sparse=True,nk=100):

0 Source : gap.py
with GNU General Public License v3.0
from joselado

def gap2d(h,nk=40,k0=None,rmap=1.0,recursive=False,
           iterations=10,sparse=True,mode="refine"):
  """Calculates the gap for a 2d Hamiltonian by doing
  a kmesh sampling. It will return the positive energy with smaller value"""
  if mode=="optimize": # using optimize library
    from scipy.optimize import minimize
    hk_gen = h.get_hk_gen() # generator
    def minfun(k): # function to minimize
      hk = hk_gen(k) # Hamiltonian 
      if h.is_sparse: es,ew = lgs.eigsh(hk,k=10,which="LM",sigma=0.0,tol=1e-06)
      else: es = lg.eigvalsh(hk) # get eigenvalues
      es = es[es>0.]
      return np.min(es) # retain positive
    gaps = [minimize(minfun,np.random.random(h.dimensionality),method="Powell").fun  for i in range(iterations)]
#    print(gaps)
    return np.min(gaps)

  else: # classical way
    if k0 is None: k0 = np.random.random(2) # random shift
    if h.dimensionality != 2: raise
    hk_gen = h.get_hk_gen() # get hamiltonian generator
    emin = 1000. # initial values
    for ix in np.linspace(-.5,.5,nk):  
      for iy in np.linspace(-.5,.5,nk):  
        k = np.array([ix,iy]) # generate kvector
        if recursive: k = k0 + k*rmap # scale vector
        hk = hk_gen(k) # generate hamiltonian
        if h.is_sparse: es,ew = lgs.eigsh(csc_matrix(hk),k=4,which="LM",sigma=0.0)
        else: es = lg.eigvalsh(hk) # get eigenvalues
        es = es[es>0.] # retain positive
        if min(es) < emin:
          emin = min(es) # store new minimum 
          kbest = k.copy() # store the best k
    if recursive: # if it has been chosen recursive
      if iterations>0: # if still iterations left
        emin = gap2d(h,nk=nk,k0=kbest,rmap=rmap/4,recursive=recursive,
                        iterations=iterations-1,sparse=sparse)
    return emin # gap



def optimize_gap_single(h,direct=True):

0 Source : gap.py
with GNU General Public License v3.0
from joselado

def optimize_gap_single(h,direct=True):
  """Return the gap, just one time"""
  hkgen = h.get_hk_gen() # get generator
  dim = h.dimensionality # dimensionality
  if direct: # returnt the direct gap
    def fg(k): # minimize the gap
      es = lg.eigvalsh(hkgen(k)) # eigenvalues
      return np.min(es[es>0.])-np.max(es[es<0.]) # return gap
    x0 = np.random.random(dim) # random point
    bounds = [(0,1.) for i in range(dim)] # bounds
    result = opt.minimize(fg,x0,bounds=bounds,method="SLSQP")
    x = result.x # position of the minimum gap
    return (fg(x),x)
  else: # indirect gap
    def fg(k): # minimize the gap
      k1 = np.array([k[2*i] for i in range(dim)])
      k2 = np.array([k[2*i+1] for i in range(dim)])
      es1 = lg.eigvalsh(hkgen(k1)) # eigenvalues
      es2 = lg.eigvalsh(hkgen(k2)) # eigenvalues
      return np.min(es1[es1>0.])-np.max(es2[es2 < 0.]) # return gap
    x0 = np.random.random(dim*2) # random point
    bounds = [(0,1.) for i in range(2*dim)] # bounds
    result = opt.minimize(fg,x0,bounds=bounds,method="SLSQP")
    x = result.x # position of the minimum gap
    return (fg(x),x)
     
def optimize_gap(h,direct=True,ntries=10):
