numpy.diagonal

Here are examples of the Python API numpy.diagonal, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

107 Examples
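
Before the project examples, a minimal standalone sketch of what numpy.diagonal does; only NumPy is assumed, and the array values are purely illustrative:

import numpy as np

a = np.arange(9).reshape(3, 3)
print(np.diagonal(a))      # [0 4 8] -- the main diagonal
print(np.diagonal(a, 1))   # [1 5]   -- first superdiagonal (offset=1)
print(np.diagonal(a, -1))  # [3 7]   -- first subdiagonal (offset=-1)

# On arrays with more than two dimensions, axis1/axis2 select the pair
# of axes whose 2-D planes the diagonals are taken from.
b = np.arange(24).reshape(2, 3, 4)
print(np.diagonal(b, axis1=1, axis2=2).shape)  # (2, 3)

# In recent NumPy versions the result is a read-only view,
# so copy it before writing to it.
d = np.diagonal(a).copy()
d[0] = 99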

3 Source : test_numeric.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_diagonal(self):
        a = [[0, 1, 2, 3],
             [4, 5, 6, 7],
             [8, 9, 10, 11]]
        out = np.diagonal(a)
        tgt = [0, 5, 10]

        assert_equal(out, tgt)

    def test_mean(self):

3 Source : eugenium_mmd.py
with MIT License
from archmaester

def MMD_unbiased(Kxx, Kyy, Kxy):
# The estimate used when the distribution of x is not equal to that of y
    m = Kxx.shape[0]
    n = Kyy.shape[0]
    
    t1 = (1./(m*(m-1)))*np.sum(Kxx - np.diag(np.diagonal(Kxx)))
    t2 = (2./(m*n)) * np.sum(Kxy)
    t3 = (1./(n*(n-1)))* np.sum(Kyy - np.diag(np.diagonal(Kyy)))
    
    MMDsquared = (t1-t2+t3)
    
    return MMDsquared
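
The K - np.diag(np.diagonal(K)) idiom above zeroes the diagonal of a kernel matrix so that only off-diagonal entries are summed; a tiny standalone check with illustrative values:

import numpy as np

K = np.array([[1.0, 0.2],
              [0.2, 1.0]])
K_offdiag = K - np.diag(np.diagonal(K))
print(K_offdiag)          # diagonal is now zero
print(np.sum(K_offdiag))  # 0.4 -- only the off-diagonal entries remain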

3 Source : markov_switching.py
with MIT License
from birforce

    def expected_durations(self):
        """
        (array) Expected duration of a regime, possibly time-varying.
        """
        return 1. / (1 - np.diagonal(self.regime_transition).squeeze())


class KimSmootherResults(HamiltonFilterResults):
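
Because np.diagonal(regime_transition) picks out each regime's self-transition probability p_ii, and durations in a Markov regime are geometric, the expected duration is 1/(1 - p_ii). A sketch with a hypothetical two-regime transition matrix (not taken from the project above):

import numpy as np

regime_transition = np.array([[0.9, 0.1],
                              [0.2, 0.8]])  # hypothetical transition probabilities
expected = 1. / (1 - np.diagonal(regime_transition).squeeze())
print(expected)  # [10.  5.] -- expected number of periods spent in each regime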

3 Source : gp.py
with MIT License
from C-bowman

    def marginal_likelihood(self, theta):
        """
        returns the log-marginal likelihood for the supplied hyper-parameter values.

        This implementation is based on equation (5.8) from Rasmussen & Williams.
        """
        K_xx = self.cov.build_covariance(theta[self.cov_slice]) + self.sig
        mu = self.mean.build_mean(theta[self.mean_slice])
        try:  # protection against singular matrix error crash
            L = cholesky(K_xx)
            alpha = solve_triangular(L.T, solve_triangular(L, self.y - mu, lower=True))
            return -0.5 * dot((self.y - mu).T, alpha) - log(diagonal(L)).sum()
        except:
            warn("Cholesky decomposition failure in marginal_likelihood")
            return -1e50

    def marginal_likelihood_gradient(self, theta):
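
The log(diagonal(L)).sum() term above relies on a standard identity: if L is a Cholesky factor of K, then log|K| = 2 * sum(log(diag(L))), so the sum equals 0.5 * log|K|. A quick numerical check on an illustrative matrix:

import numpy as np
from scipy.linalg import cholesky

K = np.array([[4.0, 1.0],
              [1.0, 3.0]])
L = cholesky(K, lower=True)
print(np.allclose(2 * np.log(np.diagonal(L)).sum(),
                  np.linalg.slogdet(K)[1]))  # True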

3 Source : trainer.py
with MIT License
from ChrisMats

    def get_info_from_conf(self):
        Cij = self.confusion_matrix
        Cii = np.diagonal(Cij)
        Ci = np.sum(Cij, axis=1)
        Cj = np.sum(Cij, axis=0)        
        return Ci, Cj, Cii 
    
    # Calculate IoU and accuracy and return the results as a dictionary
    def get_value(self):

3 Source : _utils.py
with GNU Lesser General Public License v3.0
from deeptime-ml

def is_diagonal_matrix(matrix: _np.ndarray) -> bool:
    r""" Checks whether a provided matrix is a diagonal matrix, i.e., :math:`A = \mathrm{diag}(a_1,\ldots, a_n)`.

    Parameters
    ----------
    matrix : ndarray
        The matrix for which this check is performed.

    Returns
    -------
    is_diagonal : bool
        True if the matrix is a diagonal matrix, otherwise False.
    """
    return _np.all(matrix == _np.diag(_np.diagonal(matrix)))


def is_square_matrix(arr) -> bool:
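
Usage of the check above: np.diag(np.diagonal(m)) rebuilds a matrix keeping only m's diagonal, so the equality holds exactly when every off-diagonal entry is zero. A small illustration:

import numpy as np

d = np.diag([1, 2, 3])  # a genuinely diagonal matrix
m = d.copy()
m[0, 2] = 5             # introduce an off-diagonal entry
print(np.all(d == np.diag(np.diagonal(d))))  # True
print(np.all(m == np.diag(np.diagonal(m))))  # False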

3 Source : segmentation.py
with Apache License 2.0
from eora-ai

    def _calculate_score(self, conf_matrix):
        tp = np.diagonal(conf_matrix)
        pos_pred = conf_matrix.sum(axis=0)
        pos_gt = conf_matrix.sum(axis=1)

        # Check which classes have elements
        valid_classes = (pos_gt > 0) & (pos_gt + pos_pred - tp > 0)

        # Calculate intersections over union for each class
        dice_scores = np.zeros((self._num_classes,))
        dice_scores[valid_classes] = 2 * tp[valid_classes] / (pos_gt[valid_classes] + pos_pred[valid_classes])

        # Calculate mean intersection over union
        mean_dice = self._averaging(dice_scores, valid_classes, pos_gt, conf_matrix)

        return mean_dice

3 Source : process.py
with GNU General Public License v3.0
from GangCaoLab

    def diagonal_mean_std(mat: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        means = []
        stds = []
        for i in range(mat.shape[0]):
            diagonal = np.diagonal(mat, i)
            means.append(diagonal.mean())
            stds.append(diagonal.std())
        stds = np.array(stds)
        try:
            stds[stds == 0] = stds[stds > 0].min()
        except Exception:
            log.error("Error when executing 'stds[stds > 0].min()'")
        return np.array(means), stds


    @staticmethod

3 Source : alignments.py
with MIT License
from GemsLab

def score(alignment_matrix, true_alignments = None):
	if true_alignments is None: #assume it's just identity permutation
		return np.sum(np.diagonal(alignment_matrix))
	else:
		nodes_g1 = [int(node_g1) for node_g1 in true_alignments.keys()]
		nodes_g2 = [int(true_alignments[node_g1]) for node_g1 in true_alignments.keys()]
		return np.sum(alignment_matrix[nodes_g1, nodes_g2])


def score_embeddings_matrices(embed1, embed2, topk = None, similarity_threshold = None, normalize = False, true_alignments = None, sim="cosine"):
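
When true_alignments is None, the score is the sum of the diagonal, which is just the matrix trace; np.sum(np.diagonal(A)) and np.trace(A) are interchangeable here:

import numpy as np

A = np.random.rand(4, 4)
print(np.isclose(np.sum(np.diagonal(A)), np.trace(A)))  # True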

3 Source : pycondor.py
with BSD 2-Clause "Simplified" License
from GiulioRossetti

def bipartite_modularity(B, m, R, T, CO):
    """Computation of the bipartite modularity as described in ""Modularity and community detection in bipartite
    networks" by Michael J. Barber." """
    RtBT = R.transpose().dot(B.dot(T))
    Qcoms = (1 / m) * (np.diagonal(RtBT))
    Q = sum(Qcoms)
    Qcoms = Qcoms[Qcoms > 0]
    CO["Qcoms"] = Qcoms
    return Q, CO


def initial_community(CO, method="LCS"):

3 Source : numpy_backend_test.py
with Apache License 2.0
from google

def test_diagonal(dtype, offset, axis1, axis2):
  shape = (5, 5, 5, 5)
  backend = numpy_backend.NumPyBackend()
  array = backend.randn(shape, dtype=dtype, seed=10)
  if axis1 == axis2:
    with pytest.raises(ValueError):
      actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
  else:
    actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    expected = np.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    np.testing.assert_allclose(actual, expected)


@pytest.mark.parametrize("dtype", np_dtypes)

3 Source : pytorch_backend_test.py
with Apache License 2.0
from google

def test_diagonal(dtype, offset, axis1, axis2):
  shape = (5, 5, 5, 5)
  backend = pytorch_backend.PyTorchBackend()
  array = backend.randn(shape, dtype=dtype, seed=10)
  if axis1 == axis2:
    with pytest.raises(ValueError):
      actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
  else:
    actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    expected = np.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    np.testing.assert_allclose(actual, expected)


@pytest.mark.parametrize("dtype", torch_randn_dtypes)

3 Source : tensorflow_backend_test.py
with Apache License 2.0
from google

def test_diagonal(dtype, offset, axis1, axis2):
  shape = (5, 5, 5, 5)
  backend = tensorflow_backend.TensorFlowBackend()
  array = backend.randn(shape, dtype=dtype, seed=10)
  if axis1 != -2 or axis2 != -1:
    with pytest.raises(NotImplementedError):
      actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
  else:
    actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    expected = np.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    np.testing.assert_allclose(actual, expected)


@pytest.mark.parametrize("dtype", tf_dtypes)

3 Source : target_matrix.py
with Apache License 2.0
from HazyResearch

def krylov_construct(A, v, m):
    n = v.shape[0]
    assert A.shape == (n,n)
    d = np.diagonal(A, 0)
    subd = np.diagonal(A, -1)

    K = np.zeros(shape=(m,n))
    K[0,:] = v
    for i in range(1,m):
        K[i,1:] = subd*K[i-1,:-1]
    return K

def toeplitz_like(G, H):
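
krylov_construct reads the main diagonal (offset 0) and the first subdiagonal (offset -1) of A. A sketch of those offsets on a hypothetical tridiagonal matrix:

import numpy as np

A = np.array([[2, 1, 0],
              [7, 2, 1],
              [0, 7, 2]])
print(np.diagonal(A, 0))   # [2 2 2] -- main diagonal
print(np.diagonal(A, -1))  # [7 7]   -- subdiagonal, one below the main
print(np.diagonal(A, 1))   # [1 1]   -- superdiagonal, one above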

3 Source : permutation.py
with Apache License 2.0
from HazyResearch

def is_2x2_block_diag(mat: np.ndarray) -> bool:
    """Check that each of the 4 blocks of a matrix is diagonal
    (in other words, that the matrix is a butterfly factor).
    Assumes that the matrix is square with even dimension.
    """
    nh = mat.shape[0] // 2
    for block in [mat[:nh, :nh], mat[:nh, nh:], mat[nh:, :nh], mat[nh:, nh:]]:
        if np.count_nonzero(block - np.diag(np.diagonal(block))):
            return False  # there's a nonzero off-diagonal entry
    return True


def is_butterfly_factor(mat: np.ndarray, k: int) -> bool:

3 Source : test_ader_tools.py
with GNU General Public License v3.0
from IhmeGroup

def test_legendre_spacetime_massmatrix(order):
	'''
	This test makes sure that the already-tested mass matrix yields
	the correct inverse mass matrix, with the diagonal inverted.
	'''
	basis = basis_defs.LegendreQuad(order)
	mesh = mesh_common.mesh_1D(num_elems=1, xmin=-1., xmax=1.)
		
	# Set quadrature
	basis.set_elem_quadrature_type("GaussLegendre")
	basis.set_face_quadrature_type("GaussLegendre")

	MM = basis_st_tools.get_elem_mass_matrix_ader(mesh, basis, order, -1)
	iMM = basis_st_tools.get_elem_inv_mass_matrix_ader(mesh, basis, order, -1)

	np.testing.assert_allclose(np.diagonal(iMM), 1./np.diagonal(MM), rtol, atol)


3 Source : baseline-jcsc.py
with GNU General Public License v3.0
from jmshen1994

def objective(Ct, Ca, kt, ka, sim_t, sim_a):
    Ct_mask = mask_mat_from_C(Ct, kt)

    Ca_mask = mask_mat_from_C(Ca, ka)

    return (np.sum(Ct_mask * (1-sim_t))/2 + np.diagonal(sim_t).sum()/2 + np.sum((1-Ct_mask) * sim_t)/2
            + np.sum(Ca_mask * (1-sim_a))/2 + np.diagonal(sim_a).sum()/2 + np.sum((1-Ca_mask) * sim_a)/2)


def constraint_mat(edges, num, i, C, k):

3 Source : covfunctions.py
with MIT License
from microprediction

def cov_to_corrcoef(a):
    if isinstance(a, pd.DataFrame):
        return square_to_square_dataframe(a, cov_to_corrcoef)
    else:
        variances = np.diagonal(a)
        denominator = np.sqrt(variances[np.newaxis, :] * variances[:, np.newaxis])
        return a / denominator


def normalize(x):
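
The conversion above divides each covariance entry by the product of the two standard deviations, corr_ij = cov_ij / (sd_i * sd_j). A standalone check against np.corrcoef on hypothetical data:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 3))
a = np.cov(x, rowvar=False)

variances = np.diagonal(a)
denominator = np.sqrt(variances[np.newaxis, :] * variances[:, np.newaxis])
corr = a / denominator

print(np.allclose(corr, np.corrcoef(x, rowvar=False)))  # True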

3 Source : showContactmap.py
with MIT License
from open2c

def pivotHeatmap(heatmap, diags = 20):
    N = len(heatmap)
    newdata = np.zeros((diags, 2*N), dtype = float)
    for i in range(diags):
        diag = np.diagonal(heatmap,i)
        pad = N - len(diag)
        newdiag = np.zeros(2 * len(diag), dtype = float)
        newdiag[::2] = diag
        newdiag[1::2] = diag
        if pad == 0:
            newdata[i] = newdiag
        else:
            newdata[i][pad:-pad] = newdiag
    return newdata




def showCmap():

3 Source : utilities.py
with MIT License
from OpenSourceEconomics

def cov_to_sds_and_corr(cov):
    sds = np.sqrt(np.diagonal(cov))
    diag = np.diag(1 / sds)
    corr = diag @ cov @ diag
    return sds, corr


def sdcorr_params_to_matrix(sdcorr_params):

3 Source : utilities.py
with MIT License
from OpenSourceEconomics

def _make_cholesky_unique(chol):
    """Make a lower triangular cholesky factor unique.

    Cholesky factors are only unique with the additional requirement that all diagonal
    elements are positive. This is done automatically by np.linalg.cholesky.
    Since we calculate Cholesky factors by QR decompositions, we have to do it manually.
    It is obvious that this is admissible because:
    chol sign_switcher sign_switcher.T chol.T = chol chol.T

    """
    sign_switcher = np.sign(np.diagonal(chol))
    return chol * sign_switcher


def namedtuple_from_dict(field_dict):

3 Source : article_figure.py
with MIT License
from pierreablin

def loss(D):
    n, p, _ = D.shape
    output = 0
    for i in range(n):
        Di = D[i]
        output += np.sum(np.log(np.diagonal(Di))) - np.linalg.slogdet(Di)[1]
    return output / (2 * n)


n, p = 100, 40

3 Source : qndiag.py
with MIT License
from pierreablin

def loss(B, D, is_diag=False, weights=None):
    n, p = D.shape[:2]
    if not is_diag:
        diagonals = np.diagonal(D, axis1=1, axis2=2)
    else:
        diagonals = D
    logdet = -np.linalg.slogdet(B)[1]
    if weights is None:
        return logdet + 0.5 * np.sum(np.log(diagonals)) / n
    else:
        return logdet + 0.5 * np.sum(weights[:, None] * np.log(diagonals)) / n


def gradient(D, weights=None):

3 Source : qndiag.py
with MIT License
from pierreablin

def gradient(D, weights=None):
    n, p, _ = D.shape
    diagonals = np.diagonal(D, axis1=1, axis2=2)
    grad = np.average(D / diagonals[:, :, None], weights=weights, axis=0)
    grad.flat[::p + 1] -= 1  # equivalent to - np.eye(p)
    return grad


def _linesearch(D, B, direction, current_loss, n_ls_tries, diag_only,

3 Source : data_model.py
with Apache License 2.0
from quarkfin

    def _setup_r_square_of_each_predictor(self):
        regressors_df = self.input_data.regressors_df
        corr_matrix = regressors_df.corr()
        corr_matrix = cast_dataframe(corr_matrix, output_type=QFDataFrame)
        vif = np.diagonal(inv(corr_matrix))
        r_squared_values = 1 - (1 / vif)
        self.r_squared_of_each_predictor = QFSeries(data=r_squared_values, index=regressors_df.columns.copy())

    def _setup_autocorrelation(self, residuals):
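
The diagonal of the inverse correlation matrix gives each regressor's variance inflation factor, and r_squared_i = 1 - 1/vif_i recovers how well regressor i is explained by the others. A sketch with hypothetical, deliberately collinear regressors:

import numpy as np
from numpy.linalg import inv

rng = np.random.default_rng(1)
x1 = rng.normal(size=500)
x2 = x1 + 0.5 * rng.normal(size=500)  # strongly correlated with x1
x3 = rng.normal(size=500)             # independent of both
X = np.column_stack([x1, x2, x3])

vif = np.diagonal(inv(np.corrcoef(X, rowvar=False)))
print(vif)          # roughly [5., 5., 1.] -- x1 and x2 inflate each other
print(1 - 1 / vif)  # high R-squared for x1/x2, near 0 for x3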

3 Source : static.py
with BSD 3-Clause "New" or "Revised" License
from RaulAstudillo06

    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None:
            self.variance.gradient = np.diagonal(dL_dK)
        else:
            self.variance.gradient = 0.

    def update_gradients_diag(self, dL_dKdiag, X):

3 Source : linear_ordinal_regression.py
with MIT License
from Shopify

    def _compute_standard_errors(self, coefficients, X_data, y_data):
        hessian_function = Jacobian(self._gradient, method='forward')
        H = hessian_function(coefficients, X_data, y_data)
        P = self._compute_basis_change()
        return np.sqrt(np.diagonal(P.dot(inv(H)).dot(P.T)))

    def _compute_basis_change(self):

3 Source : evaluate_contact_prediction_metrics.py
with MIT License
from songlab-cal

def partition_contacts(full_contact_map: np.ndarray):
    """ Returns lists of short, medium, and long-range contacts.
    """
    length = full_contact_map.shape[0]
    short_contacts = [np.diagonal(full_contact_map, i) for i in range(6,12)] 
    medium_contacts = [np.diagonal(full_contact_map, i) for i in range(12,min(length, 24))]
    if length >= 24:
        long_contacts = [np.diagonal(full_contact_map, i) for i in range(24, length)] 
    else:
        return np.concatenate(short_contacts), np.concatenate(medium_contacts), []
    return np.concatenate(short_contacts), np.concatenate(medium_contacts), np.concatenate(long_contacts)
    
def precision_cutoff(true_labels: List[int],

3 Source : multivariate_normal.py
with Apache License 2.0
from stanfordmlgroup

def cov_to_sigma(cov_mat):
    """
    Parameters:
        cov_mat: Nx2x2 numpy array
    Returns:
        sigma: (N,2) numpy array containing the variances
        corr: (N,) numpy array the correlation [-1,1] extracted from cov_mat
    """

    sigma = np.sqrt(np.diagonal(cov_mat, axis1=1, axis2=2))
    corr = cov_mat[:, 0, 1] / (sigma[:, 0] * sigma[:, 1])
    return sigma, corr


if __name__ == "__main__":

3 Source : callbacks.py
with MIT License
from stevewyl

    def _calc_f1(self, y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        correct_preds = np.diagonal(cm)
        r = correct_preds / np.sum(cm, axis=1)
        p = correct_preds / np.sum(cm, axis=0)
        f1 = 2 * p * r / (p + r)
        return f1


class F1score_seq(Callback):

3 Source : markov_chain.py
with MIT License
from TommasoBelluzzo

    def is_regular(self) -> bool:

        """
        A property indicating whether the Markov chain is regular.
        """

        d = _np.diagonal(self.__p)
        nz = _np.count_nonzero(d)

        if nz > 0:
            k = (2 * self.__size) - nz - 1
        else:
            k = self.__size ** self.__size - (2 * self.__size) + 2

        result = _np.all(self.__p ** k > 0.0)

        return result

    @_cached_property

3 Source : nms.py
with MIT License
from yecharlie

def nmsOverlaps(overlaps, scores, threshold):
    assert (np.diagonal(overlaps) == 1).all()
    assert 0 < threshold < 1

    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)

        inds = np.where(overlaps[i, order] <= threshold)[0]
        order = order[inds]

    return keep

3 Source : test_transform.py
with MIT License
from yecharlie

def assert_is_scaling(transform, min, max):
    assert transform.shape == (3, 3)
    assert_almost_equal(transform, transform.T)
    assert_almost_equal(np.diagonal(transform, 1), 0)
    assert_almost_equal(np.diagonal(transform, 2), 0)
    assert np.greater_equal(np.diagonal(transform), min).all()
    assert np.less(np.diagonal(transform), max).all()


def test_random_scaling():

3 Source : delocalized_coordinates.py
with MIT License
from ZimmermanGroup

    def GInverse_diag(self, xyz):
        t0 = time()
        G = self.GMatrix(xyz)
        # print(G)
        dt = time() - t0
        # print(" total time to get GMatrix %.3f" % dt)
        d = np.diagonal(G)
        # print(d)
        return np.diag(1./d)

    def GInverse_EIG(self, xyz):

0 Source : kde.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.

        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)

        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)

        # make mean a column vector
        mean = mean[:, newaxis]

        sum_cov = self.covariance + cov

        # This will raise LinAlgError if the new cov matrix is not s.p.d
        # cho_factor returns (ndarray, bool) where bool is a flag for whether
        # or not ndarray is upper or lower triangular
        sum_cov_chol = linalg.cho_factor(sum_cov)

        diff = self.dataset - mean
        tdiff = linalg.cho_solve(sum_cov_chol, diff)

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies), axis=0) / norm_const / self.n

        return result

    def integrate_box_1d(self, low, high):

0 Source : kde.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def integrate_kde(self, other):
        """
        Computes the integral of the product of this  kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies), axis=0)

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const * large.n * small.n

        return result

    def resample(self, size=None):

0 Source : metrics.py
with GNU Affero General Public License v3.0
from Aifred-Health

    def get_confusion_matrix_values(targets, predictions):
        """
        Calculate the tp, tn, fp, fn values given targets and predictions.

        Parameters:
            targets: numpy.ndarray of integers
                The target values.
            predictions: numpy.ndarray of integers
                The predicted values.

        Returns:
            tp, tn, fp, fn:  array of np.float32 with classes in sorted order
                The true positive/negative, false positive/negative values.

        """
        # credit: Robert Fratila
        confusion_matrix = skl_metrics.confusion_matrix(targets, predictions)
        tp = np.diagonal(confusion_matrix).astype('float32')
        tn = (np.array(
            [np.sum(confusion_matrix)] *
            confusion_matrix.shape[0]) -
            confusion_matrix.sum(axis=0) -
            confusion_matrix.sum(axis=1) + tp).astype('float32')
        # sum each column and remove diagonal
        fp = (confusion_matrix.sum(axis=0) - tp).astype('float32')
        # sum each row and remove diagonal
        fn = (confusion_matrix.sum(axis=1) - tp).astype('float32')

        return tp, tn, fp, fn

    @staticmethod

0 Source : gui.py
with GNU General Public License v3.0
from andreasfelix

    def create_menubar(self):
        self.menubar = tk.Menu(self.master)
        self.master.config(menu=self.menubar)

        menu_network = tk.Menu(self.menubar, tearoff=0)
        self.menubar.add_cascade(label="Network", menu=menu_network)
        menu_network.add_command(
            label="New network",
            command=lambda: self.initialize_new_network(self.hopfield_network.N),
        )
        menu_new_network_neuron_number = tk.Menu(menu_network, tearoff=0)
        menu_network.add_cascade(
            label="New network with", menu=menu_new_network_neuron_number
        )
        for i in self.n_neurons_vec:
            menu_new_network_neuron_number.add_command(
                label="{} neurons".format(i),
                command=lambda i=i: self.initialize_new_network(N=i),
            )
        menu_network.add_separator()
        menu_network.add_command(
            label="Open network from file", command=self.load_hopfield_network
        )
        menu_network.add_command(
            label="Save network to file", command=self.save_hopfield_network
        )
        menu_network.add_separator()
        menu_network.add_command(
            label="Add images to network", command=self.add_images_to_network
        )
        menu_build_network_neuron_number = tk.Menu(menu_network, tearoff=0)
        menu_network.add_cascade(
            label="Build network from images", menu=menu_build_network_neuron_number
        )
        for i in self.n_neurons_vec:
            menu_build_network_neuron_number.add_command(
                label="{} neurons".format(i),
                command=lambda i=i: self.build_network_from_images(N=i),
            )
        menu_network.add_separator()
        menu_network.add_command(label="Exit", command=self.master.quit)

        menu_options = tk.Menu(self.menubar, tearoff=0)
        self.menubar.add_cascade(label="Options", menu=menu_options)
        menu_options.add_checkbutton(
            label="Finite temperature",
            variable=self.settings.finite_temperature,
            command=self.toggle_finite_temperature,
        )

        menu_view = tk.Menu(self.menubar, tearoff=0)
        self.menubar.add_cascade(label="View", menu=menu_view)
        menu_canvas_size = tk.Menu(self.menubar, tearoff=0)
        menu_view.add_cascade(label="Canvas size", menu=menu_canvas_size)
        menu_canvas_size.add_command(
            label="tiny", command=lambda: self.change_canvas_size(100, 100)
        )
        menu_canvas_size.add_command(
            label="small", command=lambda: self.change_canvas_size(200, 200)
        )
        menu_canvas_size.add_command(
            label="normal", command=lambda: self.change_canvas_size(300, 300)
        )
        menu_canvas_size.add_command(
            label="large", command=lambda: self.change_canvas_size(400, 400)
        )
        menu_colors = tk.Menu(self.menubar, tearoff=0)
        menu_view.add_cascade(label="Pattern colors", menu=menu_colors)
        menu_colors.add_command(
            label="Black/White", command=lambda: self.change_cmap_color("binary")
        )
        menu_colors.add_command(
            label="Reds", command=lambda: self.change_cmap_color("Reds")
        )
        menu_colors.add_command(
            label="Greens", command=lambda: self.change_cmap_color("Greens")
        )
        menu_colors.add_command(
            label="Blues", command=lambda: self.change_cmap_color("Blues")
        )
        menu_colors.add_command(
            label="Blue/Green", command=lambda: self.change_cmap_color("GnBu")
        )
        menu_view.add_checkbutton(
            label="Show ticks",
            variable=self.settings.show_ticks,
            command=self.toggle_ticks,
        )

        menu_examples = tk.Menu(self.menubar, tearoff=0)
        self.menubar.add_cascade(label="Examples", menu=menu_examples)
        menu_examples.add_command(
            label="1 Pictures of famous physicists (10000 neurons)",
            command=lambda: self.build_network_from_images(
                input_path_vec=[
                    os.path.join(PHYSICISTS_PATH, f)
                    for f in os.listdir(PHYSICISTS_PATH)
                ],
                N=10000,
            ),
        )
        menu_examples.add_command(
            label="2 Random patterns (100 neurons, 20 patterns)",
            command=lambda: self.build_network_from_random_patterns(100, 20),
        )
        menu_examples.add_command(
            label="3 ABC (25 neurons, 3 patterns)",
            command=lambda: self.load_hopfield_network(
                path=os.path.join(EXAMPLES_PATH, "abc_25neu_3pat.npz")
            ),
        )
        menu_examples.add_command(
            label="4 Oscillating when sync update  (9 neurons, 3 patterns)",
            command=lambda: self.load_hopfield_network(
                path=os.path.join(
                    EXAMPLES_PATH, "oscillating_when_syn_update_9neu_5pat.npz"
                )
            ),
        )

        menu_debug = tk.Menu(self.menubar, tearoff=0)
        self.menubar.add_cascade(label="Debug", menu=menu_debug)
        menu_debug.add_command(
            label="Update all frames", command=self.update_all_frames
        )
        menu_debug.add_separator()
        menu_debug.add_command(
            label="Print number of neurons",
            command=lambda: print(
                "Number of neurons: {}\n".format(self.hopfield_network.N)
            ),
        )
        menu_debug.add_command(
            label="Print number of saved pattern",
            command=lambda: print(
                "Number of saved pattern: {}\n".format(self.hopfield_network.p)
            ),
        )
        menu_debug.add_command(
            label="Print time step",
            command=lambda: print(
                "Current time step: {}\n".format(self.hopfield_network.t)
            ),
        )
        menu_debug.add_separator()
        menu_debug.add_command(
            label="Print neuron state S",
            command=lambda: print(
                "Shape of S {}\n".format(self.hopfield_network.S.shape),
                repr(self.hopfield_network.S.reshape(self.N_sqrt, self.N_sqrt)),
            ),
        )
        menu_debug.add_command(
            label="Print weight matrix",
            command=lambda: print(
                "Shape of w {}\n".format(self.hopfield_network.w.shape),
                repr(self.hopfield_network.w),
            ),
        )
        menu_debug.add_command(
            label="Print weight matrix diagonal",
            command=lambda: print(
                "Diagonal of w {}\n", np.diagonal(self.hopfield_network.w)
            ),
        )
        menu_debug.add_command(
            label="Print input pattern",
            command=lambda: print(
                "Shape of input pattern {}\n".format(self.input_matrix.shape),
                repr(self.input_matrix),
            ),
        )
        menu_debug.add_command(
            label="Print saved pattern",
            command=lambda: print(
                "Shape of saved pattern {}\n".format(self.hopfield_network.xi.shape),
                repr(
                    self.hopfield_network.xi[:, self.id_current_viewer_pattern].reshape(
                        self.N_sqrt, self.N_sqrt
                    )
                ),
            ),
        )

    # Input frame
    def create_input_frame(self):

0 Source : eugenium_mmd.py
with MIT License
from archmaester

def MMD_Diff_Var(Kyy, Kzz, Kxy, Kxz):
    '''
    Compute the variance of the difference statistic MMDXY-MMDXZ
    See http://arxiv.org/pdf/1511.04581.pdf Appendix for derivations
    '''
    m = Kxy.shape[0];
    n = Kyy.shape[0];
    r = Kzz.shape[0];
    
    
    Kyynd = Kyy-np.diag(np.diagonal(Kyy));
    Kzznd = Kzz-np.diag(np.diagonal(Kzz));
    
    u_yy=np.sum(Kyynd)*( 1./(n*(n-1)) );
    u_zz=np.sum(Kzznd)*( 1./(r*(r-1)) );
    u_xy=np.sum(Kxy)/(m*n);
    u_xz=np.sum(Kxz)/(m*r);
    
    #compute zeta1
    t1=(1./n**3)*np.sum(Kyynd.T.dot(Kyynd))-u_yy**2;
    t2=(1./(n**2*m))*np.sum(Kxy.T.dot(Kxy))-u_xy**2;
    t3=(1./(n*m**2))*np.sum(Kxy.dot(Kxy.T))-u_xy**2;
    t4=(1./r**3)*np.sum(Kzznd.T.dot(Kzznd))-u_zz**2;
    t5=(1./(r*m**2))*np.sum(Kxz.dot(Kxz.T))-u_xz**2;
    t6=(1./(r**2*m))*np.sum(Kxz.T.dot(Kxz))-u_xz**2;
    t7=(1./(n**2*m))*np.sum(Kyynd.dot(Kxy.T))-u_yy*u_xy;
    t8=(1./(n*m*r))*np.sum(Kxy.T.dot(Kxz))-u_xz*u_xy;
    t9=(1./(r**2*m))*np.sum(Kzznd.dot(Kxz.T))-u_zz*u_xz;
    
    zeta1=(t1+t2+t3+t4+t5+t6-2.*(t7+t8+t9)); 
    
    zeta2=(1/m/(m-1))*np.sum((Kyynd-Kzznd-Kxy.T-Kxy+Kxz+Kxz.T)**2)-(u_yy - 2.*u_xy - (u_zz-2.*u_xz))**2;
    
    
    data=dict({'t1':t1,
               't2':t2,
               't3':t3,
               't4':t4,
               't5':t5,
               't6':t6,
               't7':t7,
               't8':t8,
               't9':t9,
               'zeta1':zeta1,
               'zeta2':zeta2,
                })
    #TODO more precise version for zeta2 
    #    xx=(1/m^2)*sum(sum(Kxxnd.*Kxxnd))-u_xx^2;
    # yy=(1/n^2)*sum(sum(Kyynd.*Kyynd))-u_yy^2;
    #xy=(1/(n*m))*sum(sum(Kxy.*Kxy))-u_xy^2;
    #xxy=(1/(n*m^2))*sum(sum(Kxxnd*Kxy))-u_xx*u_xy;
    #yyx=(1/(n^2*m))*sum(sum(Kyynd*Kxy'))-u_yy*u_xy;
    #zeta2=(xx+yy+xy+xy-2*(xxy+xxy +yyx+yyx))
    
    
    Var=(4.*(m-2)/(m*(m-1)))*zeta1;
    Var_z2=Var+(2./(m*(m-1)))*zeta2;

    return Var, Var_z2, data
def grbf(x1, x2, sigma):

0 Source : metrics.py
with MIT License
from Arjun-NA

def metrics(confusions, ignore_unclassified=False):
    """
    Computes different metrics from confusion matrices.
    :param confusions: ([..., n_c, n_c] np.int32). Can be any dimension; the confusion matrices should be described by
    the last two axes. n_c = number of classes
    :param ignore_unclassified: (bool). True if the first class should be ignored in the results
    :return: ([..., n_c] np.float32) precision, recall, F1 score, IoU score
    """

    # If the first class (often "unclassified") should be ignored, erase it from the confusion.
    if (ignore_unclassified):
        confusions[..., 0, :] = 0
        confusions[..., :, 0] = 0

    # Compute TP, FP, FN. This assumes that the second-to-last axis counts the truths (like the first axis of a
    # confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
    TP = np.diagonal(confusions, axis1=-2, axis2=-1)
    TP_plus_FP = np.sum(confusions, axis=-1)
    TP_plus_FN = np.sum(confusions, axis=-2)

    # Compute precision and recall. This assumes that the second-to-last axis counts the truths (like the first axis
    # of a confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
    PRE = TP / (TP_plus_FN + 1e-6)
    REC = TP / (TP_plus_FP + 1e-6)

    # Compute Accuracy
    ACC = np.sum(TP, axis=-1) / (np.sum(confusions, axis=(-2, -1)) + 1e-6)

    # Compute F1 score
    F1 = 2 * TP / (TP_plus_FP + TP_plus_FN + 1e-6)

    # Compute IoU
    IoU = F1 / (2 - F1)

    return PRE, REC, F1, IoU, ACC


def smooth_metrics(confusions, smooth_n=0, ignore_unclassified=False):
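
Using axis1=-2 and axis2=-1 makes the diagonal extraction work identically for a single confusion matrix and for any batch of them: per-class true positives are always the diagonal of the last two axes. A minimal single-matrix sketch with made-up counts (rows = truth, columns = prediction):

import numpy as np

confusion = np.array([[50,  2,  3],
                      [ 4, 40,  6],
                      [ 1,  2, 30]])
TP = np.diagonal(confusion, axis1=-2, axis2=-1)
predicted = np.sum(confusion, axis=-2)  # column sums: predictions per class
actual = np.sum(confusion, axis=-1)     # row sums: ground truth per class
print(TP)              # [50 40 30]
print(TP / predicted)  # precision per class: [0.909 0.909 0.769] (rounded)
print(TP / actual)     # recall per class:    [0.909 0.8   0.909] (rounded)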

0 Source : metrics.py
with MIT License
from Arjun-NA

def IoU_from_confusions(confusions):
    """
    Computes IoU from confusion matrices.
    :param confusions: ([..., n_c, n_c] np.int32). Can be any dimension; the confusion matrices should be described by
    the last two axes. n_c = number of classes
    :param ignore_unclassified: (bool). True if the first class should be ignored in the results
    :return: ([..., n_c] np.float32) IoU score
    """

    # Compute TP, FP, FN. This assumes that the second-to-last axis counts the truths (like the first axis of a
    # confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
    TP = np.diagonal(confusions, axis1=-2, axis2=-1)
    TP_plus_FN = np.sum(confusions, axis=-1)
    TP_plus_FP = np.sum(confusions, axis=-2)

    # Compute IoU
    IoU = TP / (TP_plus_FP + TP_plus_FN - TP + 1e-6)

    # Compute mIoU with only the actual classes
    mask = TP_plus_FN < 1e-3
    counts = np.sum(1 - mask, axis=-1, keepdims=True)
    mIoU = np.sum(IoU, axis=-1, keepdims=True) / (counts + 1e-6)

    # If class is absent, place mIoU in place of 0 IoU to get the actual mean later
    IoU += mask * mIoU

    return IoU

0 Source : vertex_histogram.py
with Apache License 2.0
from automl

    def fit_transform(self, X, **kwargs):
        """Fit and transform, on the same dataset.

        Parameters
        ----------
        X : iterable
            Each element must be an iterable with at most three features and at
            least one. The first, which is obligatory, is a valid graph structure
            (adjacency matrix or edge_dictionary), while the second is
            node_labels and the third edge_labels (each fitting the given graph
            format). If None, the kernel matrix is calculated upon fit data.
            The test samples.

        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter.

        Returns
        -------
        K : numpy array, shape = [n_targets, n_input_graphs]
            corresponding to the kernel matrix, a calculation between
            all pairs of graphs between target and features

        """
        self._method_calling = 2
        self.fit(X, **kwargs)

        # Transform - calculate kernel matrix
        km = self._calculate_kernel_matrix()

        self._X_diag = np.diagonal(km)
        if self.normalize:
            km = km / np.sqrt(np.outer(self._X_diag, self._X_diag))
        # if self.as_tensor:
        #     km = torch.tensor(km)
        return km

    def fit(self, X, y=None, **kwargs):
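
The normalization step divides each kernel entry by the geometric mean of the two corresponding diagonal entries, k(x, y) / sqrt(k(x, x) * k(y, y)), which puts ones on the diagonal (cosine normalization). A standalone check on a hypothetical Gram matrix:

import numpy as np

X = np.random.default_rng(2).normal(size=(5, 3))
km = X @ X.T  # a valid Gram (kernel) matrix
diag = np.diagonal(km)
km_norm = km / np.sqrt(np.outer(diag, diag))
print(np.allclose(np.diagonal(km_norm), 1.0))  # True
print(np.abs(km_norm).max() <= 1.0 + 1e-12)    # True, by Cauchy-Schwarz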

0 Source : utils.py
with MIT License
from autonomousvision

def _mmd2_and_variance(K_XX, K_XY, K_YY, unit_diagonal=False,
                       mmd_est='unbiased', block_size=1024,
                       var_at_m=None, ret_var=True):
    # based on
    # https://github.com/dougalsutherland/opt-mmd/blob/master/two_sample/mmd.py
    # but changed to not compute the full kernel matrix at once
    m = K_XX.shape[0]
    assert K_XX.shape == (m, m)
    assert K_XY.shape == (m, m)
    assert K_YY.shape == (m, m)
    if var_at_m is None:
        var_at_m = m

    # Get the various sums of kernels that we'll use
    # Kts drop the diagonal, but we don't need to compute them explicitly
    if unit_diagonal:
        diag_X = diag_Y = 1
        sum_diag_X = sum_diag_Y = m
        sum_diag2_X = sum_diag2_Y = m
    else:
        diag_X = np.diagonal(K_XX)
        diag_Y = np.diagonal(K_YY)

        sum_diag_X = diag_X.sum()
        sum_diag_Y = diag_Y.sum()

        sum_diag2_X = _sqn(diag_X)
        sum_diag2_Y = _sqn(diag_Y)

    Kt_XX_sums = K_XX.sum(axis=1) - diag_X
    Kt_YY_sums = K_YY.sum(axis=1) - diag_Y
    K_XY_sums_0 = K_XY.sum(axis=0)
    K_XY_sums_1 = K_XY.sum(axis=1)

    Kt_XX_sum = Kt_XX_sums.sum()
    Kt_YY_sum = Kt_YY_sums.sum()
    K_XY_sum = K_XY_sums_0.sum()

    if mmd_est == 'biased':
        mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
                + (Kt_YY_sum + sum_diag_Y) / (m * m)
                - 2 * K_XY_sum / (m * m))
    else:
        assert mmd_est in {'unbiased', 'u-statistic'}
        mmd2 = (Kt_XX_sum + Kt_YY_sum) / (m * (m-1))
        if mmd_est == 'unbiased':
            mmd2 -= 2 * K_XY_sum / (m * m)
        else:
            mmd2 -= 2 * (K_XY_sum - np.trace(K_XY)) / (m * (m-1))

    if not ret_var:
        return mmd2

    Kt_XX_2_sum = _sqn(K_XX) - sum_diag2_X
    Kt_YY_2_sum = _sqn(K_YY) - sum_diag2_Y
    K_XY_2_sum = _sqn(K_XY)

    dot_XX_XY = Kt_XX_sums.dot(K_XY_sums_1)
    dot_YY_YX = Kt_YY_sums.dot(K_XY_sums_0)

    m1 = m - 1
    m2 = m - 2
    zeta1_est = (
        1 / (m * m1 * m2) * (
            _sqn(Kt_XX_sums) - Kt_XX_2_sum + _sqn(Kt_YY_sums) - Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 1 / (m * m * m1) * (
            _sqn(K_XY_sums_1) + _sqn(K_XY_sums_0) - 2 * K_XY_2_sum)
        - 2 / m**4 * K_XY_sum**2
        - 2 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 2 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    zeta2_est = (
        1 / (m * m1) * (Kt_XX_2_sum + Kt_YY_2_sum)
        - 1 / (m * m1)**2 * (Kt_XX_sum**2 + Kt_YY_sum**2)
        + 2 / (m * m) * K_XY_2_sum
        - 2 / m**4 * K_XY_sum**2
        - 4 / (m * m * m1) * (dot_XX_XY + dot_YY_YX)
        + 4 / (m**3 * m1) * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
    )
    var_est = (4 * (var_at_m - 2) / (var_at_m * (var_at_m - 1)) * zeta1_est
               + 2 / (var_at_m * (var_at_m - 1)) * zeta2_est)

    return mmd2, var_est

0 Source : kde.py
with MIT License
from buds-lab

    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.

        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)

        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)

        # make mean a column vector
        mean = mean[:, newaxis]

        sum_cov = self.covariance + cov

        # This will raise LinAlgError if the new cov matrix is not s.p.d
        # cho_factor returns (ndarray, bool) where bool is a flag for whether
        # or not ndarray is upper or lower triangular
        sum_cov_chol = linalg.cho_factor(sum_cov)

        diff = self.dataset - mean
        tdiff = linalg.cho_solve(sum_cov_chol, diff)

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies)*self.weights, axis=0) / norm_const

        return result

    def integrate_box_1d(self, low, high):

0 Source : kde.py
with MIT License
from buds-lab

    def integrate_kde(self, other):
        """
        Computes the integral of the product of this  kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const

        return result

    def resample(self, size=None):

0 Source : gp.py
with MIT License
from C-bowman

    def marginal_likelihood_gradient(self, theta):
        """
        returns the log-marginal likelihood and its gradient with respect
        to the hyperparameters.

        This implementation is based on equations (5.8, 5.9) from Rasmussen & Williams.
        """
        K_xx, grad_K = self.cov.covariance_and_gradients(theta[self.cov_slice])
        K_xx += self.sig
        mu, grad_mu = self.mean.mean_and_gradients(theta[self.mean_slice])
        # get the cholesky decomposition
        L = cholesky(K_xx)
        iK = solve_triangular(L, eye(L.shape[0]), lower=True)
        iK = iK.T @ iK
        # calculate the log-marginal likelihood
        alpha = iK.dot(self.y - mu)
        LML = -0.5 * dot((self.y - mu).T, alpha) - log(diagonal(L)).sum()
        # calculate the mean parameter gradients
        grad = zeros(self.n_hyperpars)
        grad[self.mean_slice] = array([(alpha * dmu).sum() for dmu in grad_mu])
        # calculate the covariance parameter gradients
        Q = alpha[:, None] * alpha[None, :] - iK
        grad[self.cov_slice] = array([0.5 * (Q * dK.T).sum() for dK in grad_K])
        return LML, grad

    def differential_evo(self):

0 Source : behavior.py
with MIT License
from carl24k

    def behave_var(self):
        return np.diagonal(self.behave_cov)

    def generate_customer(self):

0 Source : behavior.py
with MIT License
from carl24k

    def behave_var(self):
        return self.exp_fun( np.diagonal(self.behave_cov))

    def generate_customer(self,start_of_month):

0 Source : mpfit.py
with MIT License
from CIRADA-Tools

	def qrsolv(self, r, ipvt, diag, qtb, sdiag):
		if self.debug:
			print('Entering qrsolv...')
		sz = r.shape
		m = sz[0]
		n = sz[1]

		# copy r and (q transpose)*b to preserve input and initialize s.
		# in particular, save the diagonal elements of r in x.

		for j in range(n):
			r[j:n,j] = r[j,j:n]
		x = numpy.diagonal(r).copy()
		wa = qtb.copy()

		# Eliminate the diagonal matrix d using a givens rotation
		for j in range(n):
			l = ipvt[j]
			if diag[l] == 0:
				break
			sdiag[j:] = 0
			sdiag[j] = diag[l]

			# The transformations to eliminate the row of d modify only a
			# single element of (q transpose)*b beyond the first n, which
			# is initially zero.

			qtbpj = 0.
			for k in range(j,n):
				if sdiag[k] == 0:
					break
				if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
					cotan  = r[k,k]/sdiag[k]
					sine   = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
					cosine = sine*cotan
				else:
					tang   = sdiag[k]/r[k,k]
					cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
					sine   = cosine*tang

				# Compute the modified diagonal element of r and the
				# modified element of ((q transpose)*b,0).
				r[k,k] = cosine*r[k,k] + sine*sdiag[k]
				temp = cosine*wa[k] + sine*qtbpj
				qtbpj = -sine*wa[k] + cosine*qtbpj
				wa[k] = temp

				# Accumulate the transformation in the row of s
				if n > k+1:
					temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
					sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
					r[k+1:n,k] = temp
			sdiag[j] = r[j,j]
			r[j,j] = x[j]

		# Solve the triangular system for z.  If the system is singular
		# then obtain a least squares solution
		nsing = n
		wh = (numpy.nonzero(sdiag == 0))[0]
		if len(wh) > 0:
			nsing = wh[0]
			wa[nsing:] = 0

		if nsing >= 1:
			wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
			# *** Reverse loop ***
			for j in range(nsing-2,-1,-1):
				sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
				wa[j] = (wa[j]-sum0)/sdiag[j]

		# Permute the components of z back to components of x
		x[ipvt] = wa
		return (r, x, sdiag)



	
	#	 Original FORTRAN documentation
	#
	#	 subroutine lmpar
	#
	#	 given an m by n matrix a, an n by n nonsingular diagonal
	#	 matrix d, an m-vector b, and a positive number delta,
	#	 the problem is to determine a value for the parameter
	#	 par such that if x solves the system
	#
	#		a*x = b ,	 sqrt(par)*d*x = 0 ,
	#
	#	 in the least squares sense, and dxnorm is the euclidean
	#	 norm of d*x, then either par is zero and
	#
	#		(dxnorm-delta) .le. 0.1*delta ,
	#
	#	 or par is positive and
	#
	#		abs(dxnorm-delta) .le. 0.1*delta .
	#
	#	 this subroutine completes the solution of the problem
	#	 if it is provided with the necessary information from the
	#	 qr factorization, with column pivoting, of a. that is, if
	#	 a*p = q*r, where p is a permutation matrix, q has orthogonal
	#	 columns, and r is an upper triangular matrix with diagonal
	#	 elements of nonincreasing magnitude, then lmpar expects
	#	 the full upper triangle of r, the permutation matrix p,
	#	 and the first n components of (q transpose)*b. on output
	#	 lmpar also provides an upper triangular matrix s such that
	#
	#		 t   t				   t
	#		p *(a *a + par*d*d)*p = s *s .
	#
	#	 s is employed within lmpar and may be of separate interest.
	#
	#	 only a few iterations are generally needed for convergence
	#	 of the algorithm. if, however, the limit of 10 iterations
	#	 is reached, then the output par will contain the best
	#	 value obtained so far.
	#
	#	 the subroutine statement is
	#
	#	subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
	#					 wa1,wa2)
	#
	#	 where
	#
	#	n is a positive integer input variable set to the order of r.
	#
	#	r is an n by n array. on input the full upper triangle
	#	  must contain the full upper triangle of the matrix r.
	#	  on output the full upper triangle is unaltered, and the
	#	  strict lower triangle contains the strict upper triangle
	#	  (transposed) of the upper triangular matrix s.
	#
	#	ldr is a positive integer input variable not less than n
	#	  which specifies the leading dimension of the array r.
	#
	#	ipvt is an integer input array of length n which defines the
	#	  permutation matrix p such that a*p = q*r. column j of p
	#	  is column ipvt(j) of the identity matrix.
	#
	#	diag is an input array of length n which must contain the
	#	  diagonal elements of the matrix d.
	#
	#	qtb is an input array of length n which must contain the first
	#	  n elements of the vector (q transpose)*b.
	#
	#	delta is a positive input variable which specifies an upper
	#	  bound on the euclidean norm of d*x.
	#
	#	par is a nonnegative variable. on input par contains an
	#	  initial estimate of the levenberg-marquardt parameter.
	#	  on output par contains the final estimate.
	#
	#	x is an output array of length n which contains the least
	#	  squares solution of the system a*x = b, sqrt(par)*d*x = 0,
	#	  for the output par.
	#
	#	sdiag is an output array of length n which contains the
	#	  diagonal elements of the upper triangular matrix s.
	#
	#	wa1 and wa2 are work arrays of length n.
	#
	#	 subprograms called
	#
	#	minpack-supplied ... dpmpar,enorm,qrsolv
	#
	#	fortran-supplied ... dabs,dmax1,dmin1,dsqrt
	#
	#	 argonne national laboratory. minpack project. march 1980.
	#	 burton s. garbow, kenneth e. hillstrom, jorge j. more
	#
	
	def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):

0 Source : mpfit.py
with MIT License
from CIRADA-Tools

	def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):

		if self.debug:
			print('Entering lmpar...')
		dwarf = self.machar.minnum
		machep = self.machar.machep
		sz = r.shape
		m = sz[0]
		n = sz[1]

		# Compute and store in x the gauss-newton direction.  If the
		# jacobian is rank-deficient, obtain a least-squares solution
		nsing = n
		wa1 = qtb.copy()
		rthresh = numpy.max(numpy.abs(numpy.diagonal(r).copy())) * machep
		wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r).copy()) < rthresh))[0]
		if len(wh) > 0:
			nsing = wh[0]
			wa1[wh[0]:] = 0
		if nsing >= 1:
			# *** Reverse loop ***
			for j in range(nsing-1,-1,-1):
				wa1[j] = wa1[j]/r[j,j]
				if j-1 >= 0:
					wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]

		# Note: ipvt here is a permutation array
		x[ipvt] = wa1

		# Initialize the iteration counter.  Evaluate the function at the
		# origin, and test for acceptance of the gauss-newton direction
		iter = 0
		wa2 = diag * x
		dxnorm = self.enorm(wa2)
		fp = dxnorm - delta
		if fp <= 0.1*delta:
			return [r, 0., x, sdiag]

		# If the jacobian is not rank deficient, the newton step provides a
		# lower bound, parl, for the zero of the function.  Otherwise set
		# this bound to zero.

		parl = 0.
		if nsing >= n:
			wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
			wa1[0] = wa1[0] / r[0,0] # Degenerate case
			for j in range(1,n):   # Note "1" here, not zero
				sum0 = sum(r[0:j,j]*wa1[0:j])
				wa1[j] = (wa1[j] - sum0)/r[j,j]

			temp = self.enorm(wa1)
			parl = ((fp/delta)/temp)/temp

		# Calculate an upper bound, paru, for the zero of the function
		for j in range(n):
			sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
			wa1[j] = sum0/diag[ipvt[j]]
		gnorm = self.enorm(wa1)
		paru = gnorm/delta
		if paru == 0:
			paru = dwarf/numpy.min([delta,0.1])

		# If the input par lies outside of the interval (parl,paru), set
		# par to the closer endpoint

		par = numpy.max([par,parl])
		par = numpy.min([par,paru])
		if par == 0:
			par = gnorm/dxnorm

		# Beginning of an iteration
		while(1):
			iter = iter + 1

			# Evaluate the function at the current value of par
			if par == 0:
				par = numpy.max([dwarf, paru*0.001])
			temp = numpy.sqrt(par)
			wa1 = temp * diag
			[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
			wa2 = diag*x
			dxnorm = self.enorm(wa2)
			temp = fp
			fp = dxnorm - delta

			if (numpy.abs(fp) <= 0.1*delta) or \
			   ((parl == 0) and (fp <= temp) and (temp < 0)) or \
			   (iter == 10):
				break

			# Compute the newton correction
			wa1 = diag[ipvt] * wa2[ipvt] / dxnorm

			for j in range(n-1):
				wa1[j] = wa1[j]/sdiag[j]
				wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
			wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case

			temp = self.enorm(wa1)
			parc = ((fp/delta)/temp)/temp

			# Depending on the sign of the function, update parl or paru
			if fp > 0:
				parl = numpy.max([parl,par])
			if fp < 0:
				paru = numpy.min([paru,par])

			# Compute an improved estimate for par
			par = numpy.max([parl, par+parc])

			# End of an iteration

		# Termination
		return [r, par, x, sdiag]

	
	# Procedure to tie one parameter to another.
	def tie(self, p, ptied=None):
