Here are examples of the Python API numpy.linalg.lstsq taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
196 Examples
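Before the project examples, here is a minimal sketch of the basic call, using only NumPy; it fits a straight line y = m*x + c to a few illustrative points (the values are invented for demonstration, not taken from any project below).

import numpy as np

# Fit y = m*x + c by least squares: build the design matrix [x, 1].
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1])
A = np.vstack([x, np.ones(len(x))]).T

# rcond=None selects the newer default cutoff for small singular values
# and avoids the FutureWarning raised when rcond is left unspecified.
m, c = np.linalg.lstsq(A, y, rcond=None)[0]
print(m, c)  # slope and intercept of the best-fit line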
Source: test_linalg.py
with GNU General Public License v3.0
from adityaprakash-bobby
def test_future_rcond(self):
    a = np.array([[0., 1., 0., 1., 2., 0.],
                  [0., 2., 0., 0., 1., 0.],
                  [1., 0., 1., 0., 0., 4.],
                  [0., 0., 0., 2., 3., 0.]]).T
    b = np.array([1, 0, 0, 0, 0, 0])
    with suppress_warnings() as sup:
        w = sup.record(FutureWarning, "`rcond` parameter will change")
        x, residuals, rank, s = linalg.lstsq(a, b)
        assert_(rank == 4)
        x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
        assert_(rank == 4)
        x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
        assert_(rank == 3)
        # Warning should be raised exactly once (first command)
        assert_(len(w) == 1)

class TestMatrixPower(object):
Source: test_regression.py
with GNU General Public License v3.0
from adityaprakash-bobby
def test_lstsq_complex_larger_rhs(self):
    # gh-9891
    size = 20
    n_rhs = 70
    G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
    u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
    b = G.dot(u)
    # This should work without segmentation fault.
    u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
    # check results just in case
    assert_array_almost_equal(u_lstsq, u)

if __name__ == '__main__':
Source: test_bsplines.py
with GNU General Public License v3.0
from adityaprakash-bobby
def test_lstsq(self):
    # check LSQ construction vs a full matrix version
    x, y, t, k = self.x, self.y, self.t, self.k
    c0, AY = make_lsq_full_matrix(x, y, t, k)
    b = make_lsq_spline(x, y, t, k)
    assert_allclose(b.c, c0)
    assert_equal(b.c.shape, (t.size - k - 1,))
    # also check against numpy.lstsq
    aa, yy = AY
    c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
    assert_allclose(b.c, c1)

def test_weights(self):
Source: test_lsq_linear.py
with GNU General Public License v3.0
from adityaprakash-bobby
def test_dense_no_bounds(self):
    for lsq_solver in self.lsq_solvers:
        res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
        assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])

def test_dense_bounds(self):
Source: controllers.py
with MIT License
from AIDynamicAction
def _w_actor_from_action(self, action, observation):
    """
    Compute actor weights from a given action.
    The current implementation is for linearly parametrized models so far.
    """
    if self.actor_struct == 'quad-lin':
        regressor_actor = np.concatenate([uptria2vec(np.outer(observation, observation)), observation])
    elif self.actor_struct == 'quadratic':
        regressor_actor = np.concatenate([uptria2vec(np.outer(observation, observation))])
    elif self.actor_struct == 'quad-nomix':
        regressor_actor = observation * observation
    return reshape(lstsq(np.array([regressor_actor]), np.array([action]))[0].T, self.dim_actor)

def _actor_critic_cost(self, w_all):
Source: test_regression.py
with MIT License
from alvarobartt
def test_lstsq_complex_larger_rhs(self):
    # gh-9891
    size = 20
    n_rhs = 70
    G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
    u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
    b = G.dot(u)
    # This should work without segmentation fault.
    u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
    # check results just in case
    assert_array_almost_equal(u_lstsq, u)
Source: test_least_angle.py
with MIT License
from alvarobartt
def test_lars_lstsq():
    # Test that Lars gives least square solution at the end
    # of the path
    X1 = 3 * diabetes.data  # use un-normalized dataset
    clf = linear_model.LassoLars(alpha=0.)
    clf.fit(X1, y)
    coef_lstsq = np.linalg.lstsq(X1, y)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)

def test_lasso_gives_lstsq_solution():
Source: test_least_angle.py
with MIT License
from alvarobartt
def test_lasso_gives_lstsq_solution():
    # Test that Lars Lasso gives least square solution at the end
    # of the path
    alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])

def test_collinearity():
Source: linear_algebra.py
with GNU General Public License v3.0
from Artikash
def linear_least_squares(a, b, rcond=1.e-10):
    """Returns x, resids, rank, s,
    where x minimizes 2-norm(|b - Ax|),
    resids is the sum of squared residuals,
    rank is the rank of A,
    and s holds the singular values of A in descending order.
    If b is a matrix then x is also a matrix with corresponding columns.
    If the rank of A is less than the number of columns of A or greater than
    the number of rows, then residuals will be returned as an empty array;
    otherwise resids = sum((b - dot(A, x))**2).
    Singular values less than s[0]*rcond are treated as zero.
    """
    return linalg.lstsq(a, b, rcond)

def singular_value_decomposition(A, full_matrices=0):
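As a quick illustration of the return values described in the docstring above, a minimal sketch that unpacks the full numpy.linalg.lstsq result (the arrays are invented for demonstration):

import numpy as np

# Overdetermined system: 4 equations, 2 unknowns.
A = np.array([[1., 0.], [1., 1.], [1., 2.], [1., 3.]])
b = np.array([0.1, 0.9, 2.1, 2.9])

x, resids, rank, s = np.linalg.lstsq(A, b, rcond=None)
# x      -- least-squares solution minimizing ||b - A @ x||
# resids -- sum of squared residuals (empty if A is rank-deficient
#           or has more columns than rows)
# rank   -- effective rank of A
# s      -- singular values of A in descending order
print(x, resids, rank, s)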
Source: test_linalg.py
with Apache License 2.0
from aws-samples
def test_future_rcond(self):
    a = np.array([[0., 1., 0., 1., 2., 0.],
                  [0., 2., 0., 0., 1., 0.],
                  [1., 0., 1., 0., 0., 4.],
                  [0., 0., 0., 2., 3., 0.]]).T
    b = np.array([1, 0, 0, 0, 0, 0])
    with suppress_warnings() as sup:
        w = sup.record(FutureWarning, "`rcond` parameter will change")
        x, residuals, rank, s = linalg.lstsq(a, b)
        assert_(rank == 4)
        x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
        assert_(rank == 4)
        x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
        assert_(rank == 3)
        # Warning should be raised exactly once (first command)
        assert_(len(w) == 1)

@pytest.mark.parametrize(["m", "n", "n_rhs"], [
Source: test_linalg.py
with Apache License 2.0
from aws-samples
def test_empty_a_b(self, m, n, n_rhs):
    a = np.arange(m * n).reshape(m, n)
    b = np.ones((m, n_rhs))
    x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
    if m == 0:
        assert_((x == 0).all())
    assert_equal(x.shape, (n, n_rhs))
    assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
    if m > n and n_rhs > 0:
        # residuals are exactly the squared norms of b's columns
        r = b - np.dot(a, x)
        assert_almost_equal(residuals, (r * r).sum(axis=-2))
    assert_equal(rank, min(m, n))
    assert_equal(s.shape, (min(m, n),))

def test_incompatible_dims(self):
Source: test_linalg.py
with Apache License 2.0
from aws-samples
def test_incompatible_dims(self):
    # use modified version of docstring example
    x = np.array([0, 1, 2, 3])
    y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
    A = np.vstack([x, np.ones(len(x))]).T
    with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
        linalg.lstsq(A, y, rcond=None)

@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
Source: dispatcher.py
with Apache License 2.0
from bentoml
def trigger_refresh(self):
    x = tuple((i, 1) for i, _, _ in self.o_stat)
    y = tuple(i for _, i, _ in self.o_stat)
    _o_a, _o_b = np.linalg.lstsq(x, y, rcond=None)[0]
    _o_w = sum(w for _, _, w in self.o_stat) * 1.0 / len(self.o_stat)
    self.o_a, self.o_b = max(0.000001, _o_a), max(0, _o_b)
    self.wait = max(0, _o_w)
    logger.debug(
        "Dynamic batching optimizer params updated: o_a: %.6f, o_b: %.6f, wait: %.6f",
        _o_a,
        _o_b,
        _o_w,
    )

T_IN = t.TypeVar("T_IN")
Source: contrast_tools.py
with MIT License
from birforce
def __init__(self, d1, d2):
    '''C such that d1 C = d2, with d1 = X, d2 = Z
    should be (x, z) in arguments ?
    '''
    self.transf_matrix = np.linalg.lstsq(d1, d2, rcond=-1)[0]
    self.invtransf_matrix = np.linalg.lstsq(d2, d1, rcond=-1)[0]

def dot_left(self, a):
Source: util.py
with MIT License
from bruel-gabrielsson
def run_trials_ols(beta0, n, sigma, ntrials=100):
    """
    run ntrials regression problems
    return the mse of each trial.
    """
    mses = []
    for i in range(ntrials):
        X, y = generate_problem(beta0, n, sigma)
        beta1 = np.linalg.lstsq(X, y, rcond=1e-4)[0]
        mses.append(np.linalg.norm(beta0 - beta1)**2)
    return np.array(mses)

def get_stats(beta0, n, sigma, lam, pen, ntrials=100, maxiter=100):
Source: formulation.py
with GNU General Public License v3.0
from catkira
def localCoordinate(G, t, x):
    B, x1 = transformationJacobian(G, t)
    xi, _, _, _ = np.linalg.lstsq(B, x - x1, rcond=None)
    return xi

# integral sigma * grad(dof(v)) * tf(a)
def matrix_gradDofV_tfA(fieldV, fieldA, sigmas, region):
Source: ExactSearch.py
with GNU General Public License v3.0
from cmu-phil
def bic_score_node(X, i, structure):
    structure = list(structure)
    n, d = X.shape
    if len(structure) == 0:
        residual = np.sum(X[:, i] ** 2)
    else:
        _, residual, _, _ = np.linalg.lstsq(a=X[:, structure],
                                            b=X[:, i],
                                            rcond=None)
    bic = n * np.log(residual / n) + len(structure) * np.log(n)
    return bic.item()

def insort(parent_graph, structure, score):
Source: Wobbly.py
with GNU General Public License v3.0
from cselab
def _optimize_shifts(i):
    ss = np.linalg.lstsq(-glb.MM[i], glb.XX[i], rcond=-1)[0]
    return np.asarray(np.round(ss), dtype=int)

def smooth_binary(x, width=1):
Source: test_least_angle.py
with Apache License 2.0
from dashanji
def test_lars_lstsq():
    # Test that Lars gives least square solution at the end
    # of the path
    X1 = 3 * X  # use un-normalized dataset
    clf = linear_model.LassoLars(alpha=0.)
    clf.fit(X1, y)
    # Avoid FutureWarning about default value change when numpy >= 1.14
    rcond = None if np_version >= parse_version('1.14') else -1
    coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)

@pytest.mark.filterwarnings('ignore:`rcond` parameter will change')
Source: test_least_angle.py
with Apache License 2.0
from dashanji
def test_lasso_gives_lstsq_solution():
    # Test that Lars Lasso gives least square solution at the end
    # of the path
    _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso')
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])

def test_collinearity():
Source: dmd.py
with MIT License
from datafold-dev
def _compute_koopman_generator(self, X: TSCDataFrame, X_grad: TSCDataFrame):
    # X and X_grad are both in row-wise orientation
    X_numpy = X.to_numpy()
    X_grad_numpy = X_grad.to_numpy()
    # the maths behind it: (X -- row-wise)
    #   L X^T = \dot{X}^T        -- rearrange to standard lstsq problem
    #   X L^T = \dot{X}          -- normal equations
    #   X^T X L^T = X^T \dot{X}  -- solve for L^T
    data_sq = X_numpy.T @ X_numpy
    data_deriv = X_numpy.T @ X_grad_numpy
    generator = np.linalg.lstsq(data_sq, data_deriv, rcond=self.rcond)[0]
    # transpose to get L (in standard lstsq problem setting we solve for L^T)
    return generator.conj().T

def _compute_spectral_components(self, generator_matrix_):
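As a sanity check on the normal-equations rearrangement described in the comments above, the following hedged sketch (synthetic data, names not taken from datafold) compares the direct least-squares solve of X L^T = \dot{X} with the X^T X form used in the snippet:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 3))        # row-wise snapshots
L_true = rng.standard_normal((3, 3))
X_dot = X @ L_true.T                     # \dot{X} = X L^T

# Direct rearranged problem: solve X L^T = \dot{X}.
L_direct = np.linalg.lstsq(X, X_dot, rcond=None)[0].T

# Normal equations, as in the snippet above: X^T X L^T = X^T \dot{X}.
L_normal = np.linalg.lstsq(X.T @ X, X.T @ X_dot, rcond=None)[0].T

assert np.allclose(L_direct, L_normal)
assert np.allclose(L_direct, L_true)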
Source: nlinalg.py
with MIT License
from dmitriy-serdyuk
def perform(self, node, inputs, outputs):
    zz = numpy.linalg.lstsq(inputs[0], inputs[1], inputs[2])
    outputs[0][0] = zz[0]
    outputs[1][0] = zz[1]
    outputs[2][0] = numpy.array(zz[2])
    outputs[3][0] = zz[3]

def matrix_power(M, n):
Source: test_linalg.py
with GNU General Public License v3.0
from dnn-security
def test_incompatible_dims(self):
    # use modified version of docstring example
    x = np.array([0, 1, 2, 3])
    y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
    A = np.vstack([x, np.ones(len(x))]).T
    with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
        linalg.lstsq(A, y, rcond=None)

@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
Source: test.py
with MIT License
from EFavDB
def test_fwd_supervised_cod(TestCase, m=m, n=n, N=N):
    # generate a data set with three target (y) vars.
    X = _generate_normalized_array(m, n)
    y = _generate_normalized_array(m, 3)
    # now carry out reverse selection
    selector = linselect.FwdSelect()
    selector.fit(X, y)
    # check best cod found using N features
    s_at_N = selector.ordered_features[:N]
    cod_at_N = selector.ordered_cods[N - 1]
    # compare to cod of fit from numpy using same features
    X_at_N = X[:, s_at_N]
    squared_error = np.linalg.lstsq(X_at_N, y)[1]
    assert(np.isclose(cod_at_N, 3 - np.sum(squared_error) / m, atol=1e-05))

def test_fwd_unsupervised_cod(TestCase, m=m, n=n, N=N):
Source: test.py
with MIT License
from EFavDB
def test_fwd_unsupervised_cod(TestCase, m=m, n=n, N=N):
    # generate a data set with two target (y) vars.
    X = _generate_normalized_array(m, n)
    # now carry out reverse selection
    selector = linselect.FwdSelect()
    selector.fit(X)
    # check best cod found using N features
    cod_at_N = selector.ordered_cods[N - 1]
    # compare to cod of fit from numpy using same features
    X_at_N = X[:, selector.ordered_features[:N]]
    y_at_N = X[:, selector.ordered_features[N:]]
    squared_error = np.linalg.lstsq(X_at_N, y_at_N)[1]
    assert(np.isclose(cod_at_N, n - np.sum(squared_error) / m, atol=1e-05))

def test_rev_supervised_cod(TestCase, m=m, n=n, N=N):
Source: test.py
with MIT License
from EFavDB
def test_rev_supervised_cod(TestCase, m=m, n=n, N=N):
    # generate a data set with two target (y) vars.
    X = _generate_normalized_array(m, n)
    y = _generate_normalized_array(m, 2)
    # now carry out reverse selection
    selector = linselect.RevSelect()
    selector.fit(X, y)
    # check best cod found using N features
    cod_at_N = selector.ordered_cods[N - 1]
    # compare to cod of fit from numpy using same features
    X_at_N = X[:, selector.ordered_features[:N]]
    squared_error = np.linalg.lstsq(X_at_N, y)[1]
    assert(np.isclose(cod_at_N, 2 - np.sum(squared_error) / m, atol=1e-05))

def test_rev_unsupervised_cod(TestCase, m=m, n=n, N=N):
Source: test.py
with MIT License
from EFavDB
def test_rev_unsupervised_cod(TestCase, m=m, n=n, N=N):
    # generate a data set with two target (y) vars.
    X = _generate_normalized_array(m, n)
    # now carry out reverse selection
    selector = linselect.RevSelect()
    selector.fit(X)
    # check best cod found using N features
    cod_at_N = selector.ordered_cods[N - 1]
    # compare to cod of fit from numpy using same features
    X_at_N = X[:, selector.ordered_features[:N]]
    y_at_N = X[:, selector.ordered_features[N:]]
    squared_error = np.linalg.lstsq(X_at_N, y_at_N)[1]
    assert(np.isclose(cod_at_N, n - np.sum(squared_error) / m, atol=1e-05))

# Tests below ensure we select the best candidate each time.
def test_fwd_supervised_ordering(TestCase, m=m, n=n):
Source: linalg.py
with GNU General Public License v3.0
from evanseitz
def op(A, b):
    try:
        coeff = np.linalg.lstsq(A, b)[0]
        #coeff = np.linalg.solve(A,b)
    except:
        coeff = np.linalg.solve(A, b)
        #coeff = np.linalg.lstsq(A,b)[0]
    return coeff
Source: _correlation_remover.py
with MIT License
from fairlearn
def fit(self, X, y=None):
    """Learn the projection required to make the dataset uncorrelated with sensitive columns."""  # noqa: E501
    self._create_lookup(X)
    X = self._validate_data(X)
    X_use, X_sensitive = self._split_X(X)
    self.sensitive_mean_ = X_sensitive.mean()
    X_s_center = X_sensitive - self.sensitive_mean_
    self.beta_, _, _, _ = np.linalg.lstsq(X_s_center, X_use, rcond=None)
    self.X_shape_ = X.shape
    return self

def transform(self, X):
Source: linalg_test.py
with Apache License 2.0
from google
def testLstsq(self, lhs_shape, rhs_shape, dtype, rcond):
    rng = jtu.rand_default(self.rng())
    np_fun = partial(np.linalg.lstsq, rcond=rcond)
    jnp_fun = partial(jnp.linalg.lstsq, rcond=rcond)
    jnp_fun_numpy_resid = partial(jnp.linalg.lstsq, rcond=rcond, numpy_resid=True)
    tol = {np.float32: 1e-5, np.float64: 1e-12,
           np.complex64: 1e-5, np.complex128: 1e-12}
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun_numpy_resid, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
    # Disabled because grad is flaky for low-rank inputs.
    # TODO:
    # jtu.check_grads(lambda *args: jnp_fun(*args)[0], args_maker(), order=2, atol=1e-2, rtol=1e-2)

# Regression test for incorrect type for eigenvalues of a complex matrix.
def testIssue669(self):
Source: test_fitters.py
with BSD 3-Clause "New" or "Revised" License
from holzschu
def test_poly2D_fitting(self):
    v = self.model.fit_deriv(x=self.x, y=self.y)
    p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
    new_model = self.fitter(self.model, self.x, self.y, self.z)
    assert_allclose(new_model.parameters, p)

def test_eval(self):
Source: exact_search.py
with MIT License
from ignavierng
def bic_score_node(X, i, structure):
    structure = list(structure)
    n, d = X.shape
    if len(structure) == 0:
        residual = np.sum(X[:, i]**2)
    else:
        _, residual, _, _ = np.linalg.lstsq(a=X[:, structure],
                                            b=X[:, i],
                                            rcond=None)
    bic = n * np.log(residual / n) + len(structure) * np.log(n)
    return bic.item()

def insort(parent_graph, structure, score):
Source: sindy_utils.py
with MIT License
from kpchamp
def sindy_fit(RHS, LHS, coefficient_threshold):
    m, n = LHS.shape
    Xi = np.linalg.lstsq(RHS, LHS, rcond=None)[0]
    for k in range(10):
        small_inds = (np.abs(Xi) < coefficient_threshold)
        Xi[small_inds] = 0
        for i in range(n):
            big_inds = ~small_inds[:, i]
            if np.where(big_inds)[0].size == 0:
                continue
            Xi[big_inds, i] = np.linalg.lstsq(RHS[:, big_inds], LHS[:, i], rcond=None)[0]
    return Xi

def sindy_simulate(x0, t, Xi, poly_order, include_sine):
Source: plm.py
with MIT License
from ktraunmueller
def _beta_raw(self):
    """Runs the regression and returns the beta."""
    X = self._x_trans.values
    Y = self._y_trans.values.squeeze()
    beta, _, _, _ = np.linalg.lstsq(X, Y)
    return beta

@cache_readonly
Source: test_math_solve.py
with MIT License
from lenskit
def test_solve_dposv(problem):
    A, b, size = problem
    # square values of A
    A = A * A
    # and solve
    xexp, resid, rank, s = np.linalg.lstsq(A, b, rcond=None)
    F = A.T @ A
    x = A.T @ b
    dposv(F, x, True)
    assert x == approx(xexp, rel=1.0e-3)
Source: calibrate.py
with MIT License
from luphord
def betas_ns_ols(tau: float, t: np.ndarray, y: np.ndarray) \
        -> Tuple[NelsonSiegelCurve, Any]:
    '''Calculate the best-fitting beta-values given tau
    for time-value pairs t and y and return a corresponding
    Nelson-Siegel curve instance.
    '''
    _assert_same_shape(t, y)
    curve = NelsonSiegelCurve(0, 0, 0, tau)
    factors = curve.factor_matrix(t)
    lstsq_res = lstsq(factors, y, rcond=None)
    beta = lstsq_res[0]
    return NelsonSiegelCurve(beta[0], beta[1], beta[2], tau), lstsq_res

def errorfn_ns_ols(tau: float, t: np.ndarray, y: np.ndarray) -> float:
Source: calibrate.py
with MIT License
from luphord
def betas_nss_ols(tau: Tuple[float, float], t: np.ndarray, y: np.ndarray) \
        -> Tuple[NelsonSiegelSvenssonCurve, Any]:
    '''Calculate the best-fitting beta-values given tau (= array of tau1
    and tau2) for time-value pairs t and y and return a corresponding
    Nelson-Siegel-Svensson curve instance.
    '''
    _assert_same_shape(t, y)
    curve = NelsonSiegelSvenssonCurve(0, 0, 0, 0, tau[0], tau[1])
    factors = curve.factor_matrix(t)
    lstsq_res = lstsq(factors, y, rcond=None)
    beta = lstsq_res[0]
    return NelsonSiegelSvenssonCurve(beta[0], beta[1], beta[2], beta[3],
                                     tau[0], tau[1]), lstsq_res

def errorfn_nss_ols(tau: Tuple[float, float], t: np.ndarray, y: np.ndarray) \
Source: matlab_utils.py
with GNU General Public License v3.0
from luphysics
def backslash(x, y):
    """Imitates the MATLAB backslash operator."""
    return np.linalg.lstsq(x, y, rcond=None)[0]  # TODO: check this

def nextpow2(x):
Source: quadtree.py
with MIT License
from MITeaps
def nanvarplane(array, coordinates, ddof=0.):
    '''
    Compute the variance of a set of points relative to the best fitting plane
    @param array: A 2D NumPy array of values
    @param coordinates: A 3D NumPy array with the spatial coordinate of each cell
        of array
    @param ddof: The divisor for the variance is N - ddof, where N is the number
        of points
    @return The variance
    '''
    A, B = prepare_system(array, coordinates)
    C = np.linalg.lstsq(A, B)
    best_fit_plane = C[0][0]*coordinates[0] + C[0][1]*coordinates[1] + C[0][2]
    return np.nansum((array - best_fit_plane)**2)/(B.size - ddof)

@jit(nopython=True)
Source: equilibrium.py
with MIT License
from mohanliu
def chemical_potentials(self):
    if self._chem_pots:
        return self._chem_pots
    A = self.composition_matrix
    b = self.energy_array
    dmus = np.linalg.lstsq(A, b)
    self._chem_pots = dict(zip(self.elements, dmus[0]))
    return self._chem_pots

@property
Source: beamforming.py
with MIT License
from morriswmz
def f_mvdr(A, R):
    r"""Compute the spectrum output of the MVDR beamformer.

    .. math::
        P_{\mathrm{MVDR}}(\theta)
        = \frac{1}{\mathbf{a}(\theta)^H \mathbf{R}^{-1} \mathbf{a}(\theta)}

    Args:
        A: m x k steering matrix of candidate direction-of-arrivals, where
            m is the number of sensors and k is the number of candidate
            direction-of-arrivals.
        R: m x m covariance matrix.
    """
    return 1.0 / np.sum(A.conj() * np.linalg.lstsq(R, A, None)[0], axis=0).real

class BartlettBeamformer(SpectrumBasedEstimatorBase):
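The call np.linalg.lstsq(R, A, None) above stands in for an explicit inverse, since lstsq(R, A)[0] is (up to the rcond cutoff) R^{-1} A and is better behaved for ill-conditioned covariance matrices. A small sketch of that equivalence with synthetic inputs (not taken from the beamforming module):

import numpy as np

rng = np.random.default_rng(1)
m, k = 6, 4
A = rng.standard_normal((m, k)) + 1j * rng.standard_normal((m, k))
B = rng.standard_normal((m, m)) + 1j * rng.standard_normal((m, m))
R = B @ B.conj().T + np.eye(m)               # Hermitian positive definite covariance

lhs = np.linalg.lstsq(R, A, rcond=None)[0]   # approximately R^{-1} A
rhs = np.linalg.inv(R) @ A
assert np.allclose(lhs, rhs)

# Denominator of the MVDR spectrum for each steering vector a(theta):
denom = np.sum(A.conj() * lhs, axis=0).real
spectrum = 1.0 / denom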
Source: contrast_tools.py
with GNU Affero General Public License v3.0
from nccgroup
def __init__(self, d1, d2):
    '''C such that d1 C = d2, with d1 = X, d2 = Z
    should be (x, z) in arguments ?
    '''
    self.transf_matrix = np.linalg.lstsq(d1, d2)[0]
    self.invtransf_matrix = np.linalg.lstsq(d2, d1)[0]

def dot_left(self, a):
Source: test_bsplines.py
with Apache License 2.0
from pierreant
def test_lstsq(self):
    # check LSQ construction vs a full matrix version
    x, y, t, k = self.x, self.y, self.t, self.k
    c0, AY = make_lsq_full_matrix(x, y, t, k)
    b = make_lsq_spline(x, y, t, k)
    assert_allclose(b.c, c0)
    assert_equal(b.c.shape, (t.size - k - 1,))
    # also check against numpy.lstsq
    aa, yy = AY
    c1, _, _, _ = np.linalg.lstsq(aa, y)
    assert_allclose(b.c, c1)

def test_weights(self):
Source: test_lsq_linear.py
with Apache License 2.0
from pierreant
def test_dense_no_bounds(self):
    for lsq_solver in self.lsq_solvers:
        res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
        assert_allclose(res.x, lstsq(A, b)[0])

def test_dense_bounds(self):
Source: shape_matching_dataset.py
with MIT License
from pvnieo
def load_c(self, i, j):
    """Compute functional map matrix from shape `i` to shape `j`.

    Arguments:
        i {int} -- index of source shape.
        j {int} -- index of target shape.

    Returns:
        torch.Tensor -- Tensor representing the functional map. Size: `n_eig x n_eig`.
    """
    # load eigen vectors & vts
    evec_i, evec_j = self.evecs[i], self.evecs[j]
    vts_i, vts_j = self.vts[i], self.vts[j]
    # compute C
    evec_i_a, evec_j_a = evec_i[vts_i], evec_j[vts_j]
    C_i_j = np.linalg.lstsq(evec_i_a, evec_j_a, rcond=None)[0]
    return torch.Tensor(C_i_j.T)

def __len__(self):
Source: equationline_twopoints.py
with MIT License
from Ricardosgeral
def line2(x1, y1, x2, y2):  # x = volts ; y = psi 32767
    points_pu = [(x1, y1), (x2, y2)]
    x_coords_pu, y_coords_pi = zip(*points_pu)
    A = vstack([x_coords_pu, ones(len(x_coords_pu))]).T
    m, c = lstsq(A, y_coords_pi)[0] * psi_to_bar
    #print("Line Solution is y = {m}x + {c}".format(m=m, c=c))
    return round(m, 4), round(c, 4)

pu = line2(0.50, 0, 4.5, 5)
Source: SurfNorm.py
with MIT License
from Robin-WZQ
def compute_surfNorm(I, L, mask):
    '''compute the surface normal vector'''
    N = np.linalg.lstsq(L, I, rcond=-1)[0].T
    N = normalize(N, axis=1)
    return N

def show_surfNorm(img, steps=3):
Source: preprocessing.py
with BSD 2-Clause "Simplified" License
from RUB-Bioinf
def _solve_lstsq(self, spectra):
    if self.weights is None:
        return np.linalg.lstsq(self._model, spectra.T, rcond=None)[0]
    else:
        w = self.weights[:, None]
        return np.linalg.lstsq(self._model * w, spectra.T * w, rcond=None)[0]

def _validate_inputs(self):
Source: solvers.py
with MIT License
from steven7woo
def fit(self, X, Y, W):
    matX, vecY = approximate_data(X, Y, W, self.Theta)
    self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=None)
    self.weights = pd.Series(self.lsqinfo[0], index=list(matX))

def predict(self, X):
Source: solvers.py
with MIT License
from steven7woo
def fit(self, x, y, sens_attr):
    """
    assume sens_attr is contained in x
    """
    lsqinfo_SEO = np.linalg.lstsq(x, y, rcond=None)
    weights_SEO = pd.Series(lsqinfo_SEO[0], index=list(x))
    self.weights_SEO = weights_SEO.drop(sens_attr)

def predict(self, x, sens_attr):