Here are examples of the Python API numpy.testing.assert_array_less taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
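To fix ideas before the project examples, here is a minimal sketch (not taken from any of the projects below) of what the assertion checks: every element of the first argument must be strictly less than the corresponding element of the second, scalars broadcast against arrays, and a failure raises an AssertionError.

import numpy as np

np.testing.assert_array_less([1.0, 2.0, 3.0], [2.0, 3.0, 4.0])  # passes: strict elementwise <
np.testing.assert_array_less(0.0, np.array([0.5, 1.5]))         # a scalar broadcasts against the array

try:
    np.testing.assert_array_less([1.0, 2.0], [1.0, 3.0])        # fails: 1.0 < 1.0 does not hold
except AssertionError as exc:
    print(exc)  # the report lists the mismatched elements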
52 Examples
Example 1
def test_4_2(self):
    t4 = timescales(self.P4)[1]
    lags = [int(t4)]
    its = msm.timescales_msm([self.dtraj4_2], lags=lags)
    est = its.timescales[0]
    np.testing.assert_array_less(est, t4 + 20.0)
    np.testing.assert_array_less(t4 - 20.0, est)
Example 2
Project: seaborn Source File: test_linearmodels.py
def test_estimate_cis(self):
    # set known good seed to avoid the test stochastically failing
    np.random.seed(123)
    p = lm._RegressionPlotter(self.df.d, self.df.y,
                              x_estimator=np.mean, ci=95)
    _, _, ci_big = p.estimate_data
    p = lm._RegressionPlotter(self.df.d, self.df.y,
                              x_estimator=np.mean, ci=50)
    _, _, ci_wee = p.estimate_data
    npt.assert_array_less(np.diff(ci_wee), np.diff(ci_big))
    p = lm._RegressionPlotter(self.df.d, self.df.y,
                              x_estimator=np.mean, ci=None)
    _, _, ci_nil = p.estimate_data
    npt.assert_array_equal(ci_nil, [None] * len(ci_nil))
Example 3
Project: PyEMMA Source File: test_its.py
def test_2_parallel(self):
    t2 = timescales(self.P2)[1]
    lags = [1, 2, 3, 4, 5]
    its = timescales_msm([self.dtraj2], lags=lags, n_jobs=2)
    est = its.timescales[0]
    np.testing.assert_array_less(est, t2 + 2.0)
    np.testing.assert_array_less(t2 - 2.0, est)
Example 4
def test_lars_cv():
    # Test the LassoLarsCV object by checking that the optimal alpha
    # increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
    # property of the given dataset, with the given steps chosen.
    old_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    for length in (400, 200, 100):
        X = diabetes.data[:length]
        y = diabetes.target[:length]
        lars_cv.fit(X, y)
        np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
        old_alpha = lars_cv.alpha_
Example 5
Project: scipy Source File: test_matfuncs.py
def test_expm_cond_smoke(self):
    np.random.seed(1234)
    for n in range(1, 4):
        A = np.random.randn(n, n)
        kappa = expm_cond(A)
        assert_array_less(0, kappa)
Example 6
Project: vbmfa Source File: fa_test.py
def test_fit(self):
    np.random.seed(0)
    P = 50
    Q = 10
    N = 100
    mu = 10.0
    Y, lambda_, x, mu = sample_cluster(P, Q, N, mu, 1.0)
    fa = vbfa.VbFa(Y, Q)
    fa.init()
    for i in range(10):
        fa.update()
    npt.assert_array_less(np.abs(fa.q_mu.mean - mu), 1.0)
    self.assertLess(fa.mse(), 60.0)
Example 7
def test_2(self):
    t2 = timescales(self.P2)[1]
    lags = [1, 2, 3, 4, 5]
    its = msm.timescales_msm([self.dtraj2], lags=lags)
    est = its.timescales[0]
    np.testing.assert_array_less(est, t2 + 2.0)
    np.testing.assert_array_less(t2 - 2.0, est)
Example 8
Project: supersmoother Source File: test_basic_smoothers.py
def test_sine_cv():
    t, y, dy = make_sine(N=100, err=0.05, rseed=0)
    ytrue = np.sin(np.sort(t)[1:-1])

    def check_model(Model, span, err):
        model = Model(span).fit(t, y, dy)
        yfit = model.cv_values()[1:-1]
        obs_err = np.mean((yfit - ytrue) ** 2)
        assert_array_less(obs_err, err)

    spans = [0.05, 0.2, 0.5]
    errs = [0.005, 0.02, 0.1]
    for Model in [MovingAverageSmoother, LinearSmoother]:
        for span, err in zip(spans, errs):
            yield check_model, Model, span, err
Example 9
Project: moss Source File: test_statistical.py
def test_randomize_onesample_correction():
    """Test that maximum based correction (seems to) work."""
    a = rs.normal(0, 1, (100, 10))
    t_un, p_un = stat.randomize_onesample(a, 1000, corrected=False)
    t_corr, p_corr = stat.randomize_onesample(a, 1000, corrected=True)
    assert_array_equal(t_un, t_corr)
    npt.assert_array_less(p_un, p_corr)
Example 10
Project: python-qinfer Source File: test_distributions.py
def test_postselected_validity(self):
    """
    Distributions: Checks that the postselected
    samples are valid.
    """
    ud = NormalDistribution(0, 1)

    class FakeModel(object):
        def are_models_valid(self, mps):
            return mps >= 0

    dist = PostselectedDistribution(
        ud, FakeModel()
    )
    samples = dist.sample(40000)
    assert_array_less(0, samples)
Example 11
Project: dipy Source File: test_ivim.py
@dec.skipif(SCIPY_VERSION < LooseVersion('0.17'),
            "Gives wrong value for f")
def test_noisy_fit():
    """
    Test fitting for noisy signals. This tests whether the threshold
    condition applies correctly and returns the linear fitting parameters.

    For older scipy versions, the returned value of `f` from a linear fit
    is around 135 and the D and D_star values are equal, so the test is
    gated on the scipy version.
    """
    model_one_stage = IvimModel(gtab)
    fit_one_stage = model_one_stage.fit(noisy_single)
    assert_array_less(fit_one_stage.model_params, [10000., 0.3, .01, 0.001])
Example 12
Project: statsmodels Source File: test_proportion.py
def test_ztost():
    xfair = np.repeat([1, 0], [228, 762 - 228])
    # comparing to SAS last output at
    # http://support.sas.com/docuementation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect028.htm
    # confidence interval for tost
    # generic ztost is moved to weightstats
    from statsmodels.stats.weightstats import zconfint, ztost
    ci01 = zconfint(xfair, alpha=0.1, ddof=0)
    assert_almost_equal(ci01, [0.2719, 0.3265], 4)
    res = ztost(xfair, 0.18, 0.38, ddof=0)
    assert_almost_equal(res[1][0], 7.1865, 4)
    assert_almost_equal(res[2][0], -4.8701, 4)
    assert_array_less(res[0], 0.0001)
Example 13
Project: seaborn Source File: test_linearmodels.py
def test_estimate_units(self):
    # Seed the RNG locally
    np.random.seed(345)
    p = lm._RegressionPlotter("x", "y", data=self.df,
                              units="s", x_bins=3)
    _, _, ci_big = p.estimate_data
    ci_big = np.diff(ci_big, axis=1)
    p = lm._RegressionPlotter("x", "y", data=self.df, x_bins=3)
    _, _, ci_wee = p.estimate_data
    ci_wee = np.diff(ci_wee, axis=1)
    npt.assert_array_less(ci_wee, ci_big)
Example 14
def test_sine():
    t, y, dy = make_sine(N=100, err=0.05, rseed=0)
    tfit = np.linspace(1, 5.3, 50)
    ytrue = np.sin(tfit)

    def check_model(Model, span, err):
        model = Model(span).fit(t, y, dy)
        yfit = model.predict(tfit)
        obs_err = np.mean((yfit - ytrue) ** 2)
        assert_array_less(obs_err, err)

    spans = [0.05, 0.2, 0.5]
    errs = [0.005, 0.01, 0.1]
    for Model in [MovingAverageSmoother, LinearSmoother]:
        for span, err in zip(spans, errs):
            yield check_model, Model, span, err
Example 15
Project: scipy Source File: test_matfuncs.py
def test_expm_bad_condition_number(self):
    A = np.array([
        [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
        [0, -1.201010529, 9.634696872e4, -4.681048289e9],
        [0, 0, -1.132893222, 9.532491830e4],
        [0, 0, 0, -1.179475332],
    ])
    kappa = expm_cond(A)
    assert_array_less(1e36, kappa)
Example 16
Project: supersmoother Source File: test_supersmoother.py
def test_sine_cv():
    t, y, dy = make_sine(N=100, err=0.05, rseed=0)
    ytrue = np.sin(np.sort(t)[1:-1])
    model = SuperSmoother().fit(t, y, dy)
    yfit = model.cv_values()[1:-1]
    obs_err = np.mean((yfit - ytrue) ** 2)
    assert_array_less(obs_err, 0.001)
Example 17
def test_sine():
    t, y, dy = make_sine(N=100, err=0.05, rseed=0)
    tfit = np.linspace(1, 5.3, 50)
    ytrue = np.sin(tfit)
    model = SuperSmoother().fit(t, y, dy)
    yfit = model.predict(tfit)
    obs_err = np.mean((yfit - ytrue) ** 2)
    assert_array_less(obs_err, 0.001)
Example 18
def test_randomize_corrmat_correction():
    """Test that FWE correction works."""
    a = rs.randn(3, 20)
    p_mat = stat.randomize_corrmat(a, "upper", False)
    p_mat_corr = stat.randomize_corrmat(a, "upper", True)
    triu = np.triu_indices(3, 1)
    npt.assert_array_less(p_mat[triu], p_mat_corr[triu])
Example 19
Project: imusim Source File: transforms_test.py
def checkMeanAndCovariance(mean, covariance, samples):
    axis = 1 if mean.shape[0] > 1 else 0
    mean = mean.flatten()
    sampleMean = np.mean(samples, axis=axis)
    sdom = stats.sem(samples, axis=axis)
    testing.assert_array_less(np.abs(mean - sampleMean), 3 * sdom,
        "Error between Monte Carlo and UT mean estimates greater "
        "than 3 standard deviations of the mean. (p=0.01)")
    # TODO: make this support the full covariance matrix, not just the diagonals
    covariance = np.diag(covariance)
    sampleCovariance = np.diag(np.atleast_2d(np.cov(samples)))
    N = samples.shape[axis]
    secov = np.sqrt(2.0 / (N - 1)) * sampleCovariance
    testing.assert_array_less(np.abs(covariance - sampleCovariance),
        3 * secov,
        "Error between Monte Carlo and UT covariance estimates greater "
        "than 3 times the standard error of the covariance. (p=0.01)")
Example 20
def test_lars_cv():
    # Test the LassoLarsCV object by checking that the optimal alpha
    # increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
    # property of the given dataset, with the given steps chosen.
    old_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    for length in (400, 200, 100):
        X = diabetes.data[:length]
        y = diabetes.target[:length]
        lars_cv.fit(X, y)
        np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
        old_alpha = lars_cv.alpha_
Example 21
Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_regression.py
def test_svd_no_uv(self):
    # gh-4733
    for shape in (3, 4), (4, 4), (4, 3):
        for t in float, complex:
            a = np.ones(shape, dtype=t)
            w = linalg.svd(a, compute_uv=False)
            c = np.count_nonzero(np.absolute(w) > 0.5)
            assert_equal(c, 1)
            assert_equal(np.linalg.matrix_rank(a), 1)
            assert_array_less(1, np.linalg.norm(a, ord=2))
Example 22
def test_randomize_corrmat_correction():
    """Test that FWE correction works."""
    a = rs.randn(3, 20)
    p_mat = algo.randomize_corrmat(a, "upper", False)
    p_mat_corr = algo.randomize_corrmat(a, "upper", True)
    triu = np.triu_indices(3, 1)
    npt.assert_array_less(p_mat[triu], p_mat_corr[triu])
Example 23
Project: statsmodels Source File: test_generic_mle.py
def test_minsupport(self):
    # rough sanity checks for convergence
    params = self.res1.params
    x_min = self.res1.endog.min()
    p_min = params[1] + params[2]
    assert_array_less(p_min, x_min)
    assert_almost_equal(p_min, x_min, decimal=2)
Example 24
@skipif(_no_statsmodels)
def test_logistic_regression(self):
    p = lm._RegressionPlotter("x", "c", data=self.df,
                              logistic=True, n_boot=self.n_boot)
    _, yhat, _ = p.fit_regression(x_range=(-3, 3))
    npt.assert_array_less(yhat, 1)
    npt.assert_array_less(0, yhat)
Example 25
def assert_array_less(x, y, err_msg='', verbose=True):
    """Raises an AssertionError if array_like objects are not ordered by less than.

    Args:
        x(numpy.ndarray or cupy.ndarray): The smaller object to check.
        y(numpy.ndarray or cupy.ndarray): The larger object to compare.
        err_msg(str): The error message to be printed in case of failure.
        verbose(bool): If ``True``, the conflicting values
            are appended to the error message.

    .. seealso:: :func:`numpy.testing.assert_array_less`
    """
    numpy.testing.assert_array_less(
        cupy.asnumpy(x), cupy.asnumpy(y), err_msg=err_msg,
        verbose=verbose)
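A hypothetical call against this wrapper might look as follows (assuming CuPy is installed; cupy.asnumpy copies device arrays to host memory and passes NumPy arrays through unchanged, so mixed inputs are fine):

import cupy
import numpy

# hypothetical inputs: one device array, one host array; the wrapper
# converts both to NumPy before delegating to numpy.testing
x = cupy.asarray([0.0, 1.0, 2.0])
y = numpy.array([1.0, 2.0, 3.0])
assert_array_less(x, y, err_msg='x must be strictly below y')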
Example 26
Project: scipy Source File: test_procrustes.py
def test_orthogonal_procrustes():
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        # Sample a random target matrix.
        B = np.random.randn(m, n)
        # Sample a random orthogonal matrix
        # by computing eigh of a sampled symmetric matrix.
        X = np.random.randn(n, n)
        w, V = eigh(X.T + X)
        assert_allclose(inv(V), V.T)
        # Compute a matrix with a known orthogonal transformation that gives B.
        A = np.dot(B, V.T)
        # Check that an orthogonal transformation from A to B can be recovered.
        R, s = orthogonal_procrustes(A, B)
        assert_allclose(inv(R), R.T)
        assert_allclose(A.dot(R), B)
        # Create a perturbed input matrix.
        A_perturbed = A + 1e-2 * np.random.randn(m, n)
        # Check that the orthogonal procrustes function can find an orthogonal
        # transformation that is better than the orthogonal transformation
        # computed from the original input matrix.
        R_prime, s = orthogonal_procrustes(A_perturbed, B)
        assert_allclose(inv(R_prime), R_prime.T)
        # Compute the naive and optimal transformations of the perturbed input.
        naive_approx = A_perturbed.dot(R)
        optim_approx = A_perturbed.dot(R_prime)
        # Compute the Frobenius norm errors of the matrix approximations.
        naive_approx_error = norm(naive_approx - B, ord='fro')
        optim_approx_error = norm(optim_approx - B, ord='fro')
        # Check that the orthogonal Procrustes approximation is better.
        assert_array_less(optim_approx_error, naive_approx_error)
Example 27
Project: python-qinfer Source File: test_region_estimates.py
def test_in_credible_region(self):
    """
    Tests that in_credible_region works.
    """
    dist = MultivariateNormalDistribution(self.MEAN, self.COV)
    # The model is irrelevant; we just want the updater to have some
    # particles with the desired normal distribution.
    u = SMCUpdater(MockModel(4), self.N_PARTICLES, dist)

    # some points to test with
    test_points = np.random.multivariate_normal(self.MEAN, self.COV, self.N_PARTICLES)

    # method='pce'
    results = [
        u.in_credible_region(test_points, level=0.9, method='pce'),
        u.in_credible_region(test_points, level=0.84, method='pce'),
        u.in_credible_region(test_points, level=0.5, method='pce'),
    ]
    assert_almost_equal(
        np.array([np.mean(x.astype('float')) for x in results]),
        np.array([0.9, 0.84, 0.5]),
        3
    )

    # method='hpd-hull'
    results1 = [
        u.in_credible_region(test_points, level=0.9, method='hpd-hull'),
        u.in_credible_region(test_points, level=0.84, method='hpd-hull'),
        u.in_credible_region(test_points, level=0.5, method='hpd-hull'),
    ]
    assert_array_less(
        np.array([0.9, 0.84, 0.5]),
        np.array([np.mean(x.astype('float')) for x in results1])
    )

    # method='hpd-mvee'
    results2 = [
        u.in_credible_region(test_points, level=0.9, method='hpd-mvee'),
        u.in_credible_region(test_points, level=0.84, method='hpd-mvee'),
        u.in_credible_region(test_points, level=0.5, method='hpd-mvee'),
    ]
    assert_array_less(
        np.array([0.9, 0.84, 0.5]),
        np.array([np.mean(x.astype('float')) for x in results2])
    )

    # The mvee should be bigger than the convex hull.
    # This passes iff all points in the ellipses are
    # also in the hulls.
    assert_array_less(
        np.hstack([x.astype('float') for x in results1]),
        np.hstack([x.astype('float') for x in results2]) + 0.5
    )

    # check for no failures with slices
    u.in_credible_region(test_points[:100, self.SLICE], level=0.9, method='pce', modelparam_slice=self.SLICE)
    u.in_credible_region(test_points[:100, self.SLICE], level=0.9, method='hpd-hull', modelparam_slice=self.SLICE)
    u.in_credible_region(test_points[:100, self.SLICE], level=0.9, method='hpd-mvee', modelparam_slice=self.SLICE)

    # check for no failures with single inputs
    assert(u.in_credible_region(test_points[0, :], level=0.9, method='pce').size == 1)
    assert(u.in_credible_region(test_points[0, :], level=0.9, method='hpd-hull').size == 1)
    assert(u.in_credible_region(test_points[0, :], level=0.9, method='hpd-mvee').size == 1)
Example 28
Project: scipy Source File: test_optimize.py
def test_minimize_l_bfgs_b_maxfun_interruption(self):
    # gh-6162
    f = optimize.rosen
    g = optimize.rosen_der
    values = []
    x0 = np.ones(7) * 1000

    def objfun(x):
        value = f(x)
        values.append(value)
        return value

    # Look for an interesting test case.
    # Request a maxfun that stops at a particularly bad function
    # evaluation somewhere between 100 and 300 evaluations.
    low, medium, high = 30, 100, 300
    optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
    v, k = max((y, i) for i, y in enumerate(values[medium:]))
    maxfun = medium + k
    # If the minimization strategy is reasonable,
    # the minimize() result should not be worse than the best
    # of the first 30 function evaluations.
    target = min(values[:low])
    xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
    assert_array_less(fmin, target)
Example 29
Project: python-control Source File: mateqn_test.py
def test_dare_g(self):
    A = matrix([[-0.6, 0], [-0.1, -0.4]])
    Q = matrix([[2, 1], [1, 3]])
    B = matrix([[1, 5], [2, 4]])
    R = matrix([[1, 0], [0, 1]])
    S = matrix([[1, 0], [2, 0]])
    E = matrix([[2, 1], [1, 2]])
    X, L, G = dare(A, B, Q, R, S, E)
    # print("The solution obtained is", X)
    assert_array_almost_equal(
        A.T * X * A - E.T * X * E -
        (A.T * X * B + S) * inv(B.T * X * B + R) * (B.T * X * A + S.T) + Q,
        zeros((2, 2)))
    assert_array_almost_equal(inv(B.T * X * B + R) * (B.T * X * A + S.T), G)
    # check for stable closed loop
    lam = eigvals(A - B * G, E)
    assert_array_less(abs(lam), 1.0)

    A = matrix([[-0.6, 0], [-0.1, -0.4]])
    Q = matrix([[2, 1], [1, 3]])
    B = matrix([[1], [2]])
    R = 1
    S = matrix([[1], [2]])
    E = matrix([[2, 1], [1, 2]])
    X, L, G = dare(A, B, Q, R, S, E)
    # print("The solution obtained is", X)
    assert_array_almost_equal(
        A.T * X * A - E.T * X * E -
        (A.T * X * B + S) * inv(B.T * X * B + R) * (B.T * X * A + S.T) + Q,
        zeros((2, 2)))
    assert_array_almost_equal((B.T * X * A + S.T) / (B.T * X * B + R), G)
    # check for stable closed loop
    lam = eigvals(A - B * G, E)
    assert_array_less(abs(lam), 1.0)
Example 30
def test_morlet(self):
    x = wavelets.morlet(50, 4.1, complete=True)
    y = wavelets.morlet(50, 4.1, complete=False)
    # Test if complete and incomplete wavelet have same lengths:
    assert_equal(len(x), len(y))
    # Test if complete wavelet is less than incomplete wavelet:
    assert_array_less(x, y)

    x = wavelets.morlet(10, 50, complete=False)
    y = wavelets.morlet(10, 50, complete=True)
    # For large widths complete and incomplete wavelets should be
    # identical within numerical precision:
    assert_equal(x, y)

    # miscellaneous tests:
    x = np.array([1.73752399e-09 + 9.84327394e-25j,
                  6.49471756e-01 + 0.00000000e+00j,
                  1.73752399e-09 - 9.84327394e-25j])
    y = wavelets.morlet(3, w=2, complete=True)
    assert_array_almost_equal(x, y)

    x = np.array([2.00947715e-09 + 9.84327394e-25j,
                  7.51125544e-01 + 0.00000000e+00j,
                  2.00947715e-09 - 9.84327394e-25j])
    y = wavelets.morlet(3, w=2, complete=False)
    assert_array_almost_equal(x, y, decimal=2)

    x = wavelets.morlet(10000, s=4, complete=True)
    y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
    assert_array_almost_equal(x, y, decimal=2)

    x = wavelets.morlet(10000, s=4, complete=False)
    assert_array_almost_equal(y, x, decimal=2)
    y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
    assert_array_almost_equal(x, y, decimal=2)

    x = wavelets.morlet(10000, w=3, s=5, complete=True)
    y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
    assert_array_almost_equal(x, y, decimal=2)

    x = wavelets.morlet(10000, w=3, s=5, complete=False)
    assert_array_almost_equal(y, x, decimal=2)
    y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
    assert_array_almost_equal(x, y, decimal=2)

    x = wavelets.morlet(10000, w=7, s=10, complete=True)
    y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
    assert_array_almost_equal(x, y, decimal=2)

    x = wavelets.morlet(10000, w=7, s=10, complete=False)
    assert_array_almost_equal(x, y, decimal=2)
    y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
    assert_array_almost_equal(x, y, decimal=2)
Example 31
Project: scipy Source File: test_windows.py
def test_windowfunc_basics():
    for window_name, params in window_funcs:
        window = getattr(signal, window_name)
        with warnings.catch_warnings(record=True):  # window is not suitable...
            w1 = window(7, *params, sym=True)
            w2 = window(7, *params, sym=False)
            assert_array_almost_equal(w1, w2)

            # Check that functions run and output lengths are correct
            assert_equal(len(window(6, *params, sym=True)), 6)
            assert_equal(len(window(6, *params, sym=False)), 6)
            assert_equal(len(window(7, *params, sym=True)), 7)
            assert_equal(len(window(7, *params, sym=False)), 7)

            # Check invalid lengths
            assert_raises(ValueError, window, 5.5, *params)
            assert_raises(ValueError, window, -7, *params)

            # Check degenerate cases
            assert_array_equal(window(0, *params, sym=True), [])
            assert_array_equal(window(0, *params, sym=False), [])
            assert_array_equal(window(1, *params, sym=True), [1])
            assert_array_equal(window(1, *params, sym=False), [1])

            # Check dtype
            assert_(window(0, *params, sym=True).dtype == 'float')
            assert_(window(0, *params, sym=False).dtype == 'float')
            assert_(window(1, *params, sym=True).dtype == 'float')
            assert_(window(1, *params, sym=False).dtype == 'float')
            assert_(window(6, *params, sym=True).dtype == 'float')
            assert_(window(6, *params, sym=False).dtype == 'float')

            # Check normalization
            assert_array_less(window(10, *params, sym=True), 1.01)
            assert_array_less(window(10, *params, sym=False), 1.01)
            assert_array_less(window(9, *params, sym=True), 1.01)
            assert_array_less(window(9, *params, sym=False), 1.01)

            # Check periodic spectrum
            assert_allclose(fftpack.fft(window(10, *params, sym=False)).imag,
                            0, atol=1e-14)
Example 32
Project: neural-network-animation Source File: test_triangulation.py
def test_triinterpcubic_C1_continuity():
    # Below the 4 tests which demonstrate C1 continuity of the
    # TriCubicInterpolator (testing the cubic shape functions on arbitrary
    # triangle):
    #
    # 1) Testing continuity of function & derivatives at corner for all 9
    #    shape functions. Testing also function values at same location.
    # 2) Testing C1 continuity along each edge (as gradient is polynomial of
    #    2nd order, it is sufficient to test at the middle).
    # 3) Testing C1 continuity at triangle barycenter (where the 3 subtriangles
    #    meet)
    # 4) Testing C1 continuity at median 1/3 points (midside between 2
    #    subtriangles)

    # Utility test function check_continuity
    def check_continuity(interpolator, loc, values=None):
        """
        Checks the continuity of interpolator (and its derivatives) near
        location loc. Can check the value at loc itself if *values* is
        provided.

        *interpolator* TriInterpolator
        *loc* location to test (x0, y0)
        *values* (optional) array [z0, dzx0, dzy0] to check the value at *loc*
        """
        n_star = 24  # Number of continuity points in a boundary of loc
        epsilon = 1.e-10  # Distance for loc boundary
        k = 100.  # Continuity coefficient
        (loc_x, loc_y) = loc
        star_x = loc_x + epsilon*np.cos(np.linspace(0., 2*np.pi, n_star))
        star_y = loc_y + epsilon*np.sin(np.linspace(0., 2*np.pi, n_star))
        z = interpolator([loc_x], [loc_y])[0]
        (dzx, dzy) = interpolator.gradient([loc_x], [loc_y])
        if values is not None:
            assert_array_almost_equal(z, values[0])
            assert_array_almost_equal(dzx[0], values[1])
            assert_array_almost_equal(dzy[0], values[2])
        diff_z = interpolator(star_x, star_y) - z
        (tab_dzx, tab_dzy) = interpolator.gradient(star_x, star_y)
        diff_dzx = tab_dzx - dzx
        diff_dzy = tab_dzy - dzy
        assert_array_less(diff_z, epsilon*k)
        assert_array_less(diff_dzx, epsilon*k)
        assert_array_less(diff_dzy, epsilon*k)

    # Drawing arbitrary triangle (a, b, c) inside a unit square.
    (ax, ay) = (0.2, 0.3)
    (bx, by) = (0.33367, 0.80755)
    (cx, cy) = (0.669, 0.4335)
    x = np.array([ax, bx, cx, 0., 1., 1., 0.])
    y = np.array([ay, by, cy, 0., 0., 1., 1.])
    triangles = np.array([[0, 1, 2], [3, 0, 4], [4, 0, 2], [4, 2, 5],
                          [1, 5, 2], [6, 5, 1], [6, 1, 0], [6, 0, 3]])
    triang = mtri.Triangulation(x, y, triangles)

    for idof in range(9):
        z = np.zeros(7, dtype=np.float64)
        dzx = np.zeros(7, dtype=np.float64)
        dzy = np.zeros(7, dtype=np.float64)
        values = np.zeros([3, 3], dtype=np.float64)
        case = idof//3
        values[case, idof % 3] = 1.0
        if case == 0:
            z[idof] = 1.0
        elif case == 1:
            dzx[idof % 3] = 1.0
        elif case == 2:
            dzy[idof % 3] = 1.0
        interp = mtri.CubicTriInterpolator(triang, z, kind='user',
                                           dz=(dzx, dzy))
        # Test 1) Checking values and continuity at nodes
        check_continuity(interp, (ax, ay), values[:, 0])
        check_continuity(interp, (bx, by), values[:, 1])
        check_continuity(interp, (cx, cy), values[:, 2])
        # Test 2) Checking continuity at midside nodes
        check_continuity(interp, ((ax+bx)*0.5, (ay+by)*0.5))
        check_continuity(interp, ((ax+cx)*0.5, (ay+cy)*0.5))
        check_continuity(interp, ((cx+bx)*0.5, (cy+by)*0.5))
        # Test 3) Checking continuity at barycenter
        check_continuity(interp, ((ax+bx+cx)/3., (ay+by+cy)/3.))
        # Test 4) Checking continuity at median 1/3-point
        check_continuity(interp, ((4.*ax+bx+cx)/6., (4.*ay+by+cy)/6.))
        check_continuity(interp, ((ax+4.*bx+cx)/6., (ay+4.*by+cy)/6.))
        check_continuity(interp, ((ax+bx+4.*cx)/6., (ay+by+4.*cy)/6.))
Example 33
Project: scikit-learn Source File: test_theil_sen.py
def test_modweiszfeld_step_1d():
    X = np.array([1., 2., 3.]).reshape(3, 1)
    # Check startvalue is element of X and solution
    median = 2.
    new_y = _modified_weiszfeld_step(X, median)
    assert_array_almost_equal(new_y, median)
    # Check startvalue is not the solution
    y = 2.5
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check startvalue is not the solution but element of X
    y = 3.
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check that a single vector is identity
    X = np.array([1., 2., 3.]).reshape(1, 3)
    y = X[0, ]
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_equal(y, new_y)
Example 34
Project: trackpy Source File: test_static.py
def test_norm_circle_trunc(self):
    dist = np.array([1.0001, 1.1, 1.2, np.sqrt(2)-0.01])*self.R
    result = arclen_2d_bounded(dist, self.point, self.box)
    assert_array_less(result, 2*np.pi*dist)
Example 35
Project: pymc3 Source File: test_diagnostics.py
def test_geweke_positive(self):
    """Confirm Geweke diagnostic is smaller than 1 for a reasonable number of samples."""
    n_samples = 2000
    n_intervals = 20
    switchpoint = self.get_switchpoint(n_samples)

    with self.assertRaises(ValueError):
        # first and last must be between 0 and 1
        geweke(switchpoint, first=-0.3, last=1.1, intervals=n_intervals)

    with self.assertRaises(ValueError):
        # first and last must add to < 1
        geweke(switchpoint, first=0.3, last=0.7, intervals=n_intervals)

    first = 0.1
    last = 0.7
    # returns an (intervals x 2) matrix: first column start indexes,
    # second column z-scores
    z_switch = geweke(switchpoint, first=first,
                      last=last, intervals=n_intervals)
    start = z_switch[:, 0]
    z_scores = z_switch[:, 1]

    # Ensure `intervals` argument is honored
    self.assertEqual(z_switch.shape[0], n_intervals)

    # Start index should not be in the last <last>% of samples
    assert_array_less(start, (1 - last) * n_samples)

    # These z-scores should be small, since there are more samples.
    self.assertLess(max(abs(z_scores)), 1)
Example 36
Project: scipy Source File: test_basic.py
def _assert_close_in_norm(x, y, rtol, size, rdt):
    # helper function for testing
    err_msg = "size: %s rdt: %s" % (size, rdt)
    assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
Example 37
Project: trackpy Source File: test_static.py
def test_norm_sphere_trunc(self):
    dist = np.array([1.0001, 1.1, 1.2, np.sqrt(3)-0.01])*self.R
    result = area_3d_bounded(dist, self.point, self.box)
    assert_array_less(result, 4*np.pi*dist**2)
Example 38
Project: scipy Source File: test_lobpcg.py
def _check_fiedler(n, p):
    # This is not necessarily the recommended way to find the Fiedler vector.
    np.random.seed(1234)
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    D = np.diag(A.sum(axis=1))
    L = D - A
    # Compute the full eigendecomposition using tricks, e.g.
    # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
    tmp = np.pi * np.arange(n) / n
    analytic_w = 2 * (1 - np.cos(tmp))
    analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
    _check_eigen(L, analytic_w, analytic_V)
    # Compute the full eigendecomposition using eigh.
    eigh_w, eigh_V = eigh(L)
    _check_eigen(L, eigh_w, eigh_V)
    # Check that the first eigenvalue is near zero and that the rest agree.
    assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
    assert_allclose(eigh_w[1:], analytic_w[1:])
    # Check small lobpcg eigenvalues.
    X = analytic_V[:, :p]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
    assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
    # Check large lobpcg eigenvalues.
    X = analytic_V[:, -p:]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
    # Look for the Fiedler vector using good but not exactly correct guesses.
    fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
    X = np.vstack((np.ones(n), fiedler_guess)).T
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    # Mathematically, the smaller eigenvalue should be zero
    # and the larger should be the algebraic connectivity.
    lobpcg_w = np.sort(lobpcg_w)
    assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
Example 39
Project: statsmodels Source File: test_kernels.py
def test_smoothconf(self):
    kern_name = self.kern_name
    kern = self.kern
    # fittedg = np.array([kernels.Epanechnikov().smoothconf(x, y, xi) for xi in xg])
    fittedg = np.array([kern.smoothconf(x, y, xi) for xi in xg])
    # attach for inspection from outside of test run
    self.fittedg = fittedg
    res_fitted = results['s_' + kern_name]
    res_se = results['se_' + kern_name]
    crit = 1.9599639845400545  # norm.isf(0.05 / 2)
    # implied standard deviation from conf_int
    se = (fittedg[:, 2] - fittedg[:, 1]) / crit
    fitted = fittedg[:, 1]

    # check both rtol & atol
    assert_allclose(fitted, res_fitted, rtol=5e-7, atol=1e-20)
    assert_allclose(fitted, res_fitted, rtol=0, atol=1e-6)

    # TODO: check we are using a different algorithm for se
    # The following are very rough checks
    self.se = se
    self.res_se = res_se
    se_valid = np.isfinite(res_se)
    # if np.any(~se_valid):
    #     print('nan in stata result', self.__class__.__name__)
    assert_allclose(se[se_valid], res_se[se_valid], rtol=self.se_rtol, atol=0.2)

    # check that most values are closer
    mask = np.abs(se - res_se) > (0.2 + 0.2 * res_se)
    if not hasattr(self, 'se_n_diff'):
        se_n_diff = 40 * 0.125
    else:
        se_n_diff = self.se_n_diff
    assert_array_less(mask.sum(), se_n_diff + 1)  # at most 5 large diffs

    if DEBUG:
        # raises: RuntimeWarning: invalid value encountered in divide
        print(fitted / res_fitted - 1)
        print(se / res_se - 1)

    # Stata only displays ci, doesn't save it
    res_upp = res_fitted + crit * res_se
    res_low = res_fitted - crit * res_se
    self.res_fittedg = np.column_stack((res_low, res_fitted, res_upp))
    if DEBUG:
        print(fittedg[:, 2] / res_upp - 1)
        print(fittedg[:, 2] - res_upp)
        print(fittedg[:, 0] - res_low)
        print(np.max(np.abs(fittedg[:, 2] / res_upp - 1)))

    assert_allclose(fittedg[se_valid, 2], res_upp[se_valid],
                    rtol=self.upp_rtol, atol=0.2)
    assert_allclose(fittedg[se_valid, 0], res_low[se_valid],
                    rtol=self.low_rtol, atol=self.low_atol)
Example 40
Project: scipy Source File: test_quadpack.py
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
    value, err = value_and_err
    assert_allclose(value, tabled_value, atol=err, rtol=0)
    if errTol is not None:
        assert_array_less(err, errTol)
Example 41
Project: statsmodels Source File: test_glsar_gretl.py
def test_GLSARlag():
    # test that results for lag > 1 are close to lag=1, with smaller ssr
    from statsmodels.datasets import macrodata
    d2 = macrodata.load().data
    g_gdp = 400*np.diff(np.log(d2['realgdp']))
    g_inv = 400*np.diff(np.log(d2['realinv']))
    exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1]], prepend=False)

    mod1 = GLSAR(g_inv, exogg, 1)
    res1 = mod1.iterative_fit(5)

    mod4 = GLSAR(g_inv, exogg, 4)
    res4 = mod4.iterative_fit(10)

    assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
    assert_array_less(res4.ssr, res1.ssr)
    assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
    assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
                      0.015)
    assert_equal(len(mod4.rho), 4)
Example 42
Project: python-qinfer Source File: test_precession_model.py
def test_smc_fitting(self):
    """
    Checks that the fitters converge on the true value for the simple
    precession model. This is a stochastic test, but I ran it 100 times
    with these parameters and saw no failures.
    """
    self.updater.batch_update(self.outcomes, self.expparams)
    self.updater_bayes.batch_update(self.outcomes, self.expparams)
    self.num_updater.batch_update(self.outcomes, self.expparams)
    self.num_updater_bayes.batch_update(self.outcomes, self.expparams)

    # Assert that the models have learned the true model parameters
    # from the data: test the means.
    assert_almost_equal(self.updater.est_mean(), TestSMCUpdater.MODELPARAMS, 2)
    assert_almost_equal(self.updater_bayes.est_mean(), TestSMCUpdater.MODELPARAMS, 2)
    assert_almost_equal(self.num_updater.est_mean(), TestSMCUpdater.MODELPARAMS, 2)
    assert_almost_equal(self.num_updater_bayes.est_mean(), TestSMCUpdater.MODELPARAMS, 2)

    # Assert that the covariances have been reduced below the thresholds:
    # test the covariance matrices.
    assert_array_less(self.updater.est_covariance_mtx(), TestSMCUpdater.TEST_TARGET_COV)
    assert_array_less(self.updater_bayes.est_covariance_mtx(), TestSMCUpdater.TEST_TARGET_COV)
    assert_array_less(self.num_updater.est_covariance_mtx(), TestSMCUpdater.TEST_TARGET_COV)
    assert_array_less(self.num_updater_bayes.est_covariance_mtx(), TestSMCUpdater.TEST_TARGET_COV)
Example 43
Project: statsmodels Source File: test_norm_expan.py
def test_ks(self):
    # cdf is slow
    # Kolmogorov-Smirnov test against the generating sample
    stat, pvalue = stats.kstest(self.rvs, self.dist2.cdf)
    assert_array_less(0.25, pvalue)
Example 44
Project: statsmodels Source File: test_random_panel.py
def assert_maxabs(actual, expected, value):
    npt.assert_array_less(em.maxabs(actual, expected, None), value)
Example 45
Project: statsmodels Source File: test_proportion.py
def test_binom_rejection_interval():
    # consistency check with binom_test
    # some code duplication but limit checks are different
    alpha = 0.05
    nobs = 200
    prop = 12. / 20

    alternative = 'smaller'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_upp, nobs)
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    alternative = 'larger'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_low, 0)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    alternative = 'two-sided'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
Example 46
Project: statsmodels Source File: test_rootfinding.py
def test_brentq_expanding():
    cases = [
        (0, {}),
        (50, {}),
        (-50, {}),
        (500000, dict(low=10000)),
        (-50000, dict(upp=-1000)),
        (500000, dict(low=300000, upp=700000)),
        (-50000, dict(low=-70000, upp=-1000))
    ]
    funcs = [(func, None),
             (func, True),
             (funcn, None),
             (funcn, False)]
    for f, inc in funcs:
        for a, kwds in cases:
            kw = {'increasing': inc}
            kw.update(kwds)
            res = brentq_expanding(f, args=(a,), **kw)
            # print('%10d' % a, ['dec', 'inc'][f is func], res - a)
            assert_allclose(res, a, rtol=1e-5)

    # wrong sign for start bounds
    # doesn't raise yet during development TODO: activate this
    # it kind of works in some cases, but not correctly or in a useful way
    # assert_raises(ValueError, brentq_expanding, func, args=(-500,), start_upp=-1000)
    # assert_raises(ValueError, brentq_expanding, func, args=(500,), start_low=1000)

    # low and upp given, but they do not bound the root; leave brentq exception
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError, brentq_expanding, funcn, args=(-50000,), low=-40000, upp=-10000)

    # max_it too low to find root bounds
    # ValueError: f(a) and f(b) must have different signs
    assert_raises(ValueError, brentq_expanding, func, args=(-50000,), max_it=2)

    # maxiter_bq too low
    # RuntimeError: Failed to converge after 3 iterations.
    assert_raises(RuntimeError, brentq_expanding, func, args=(-50000,), maxiter_bq=3)

    # cannot determine whether increasing: all 4 low trial points return nan
    assert_raises(ValueError, brentq_expanding, func_nan, args=(-20, 0.6))

    # test for full_output
    a = 500
    val, info = brentq_expanding(func, args=(a,), full_output=True)
    assert_allclose(val, a, rtol=1e-5)
    info1 = {'iterations': 63, 'start_bounds': (-1, 1),
             'brentq_bounds': (100, 1000), 'flag': 'converged',
             'function_calls': 64, 'iterations_expand': 3, 'converged': True}
    # adjustments for scipy 0.8.0 with changed convergence criteria
    assert_array_less(info.__dict__['iterations'], 70)
    assert_array_less(info.__dict__['function_calls'], 70)
    for k in info1:
        if k in ['iterations', 'function_calls']:
            continue
        assert_equal(info1[k], info.__dict__[k])
    assert_allclose(info.root, a, rtol=1e-5)
Example 47
Project: python-control Source File: mateqn_test.py
def test_dare(self):
    A = matrix([[-0.6, 0], [-0.1, -0.4]])
    Q = matrix([[2, 1], [1, 0]])
    B = matrix([[2, 1], [0, 1]])
    R = matrix([[1, 0], [0, 1]])
    X, L, G = dare(A, B, Q, R)
    # print("The solution obtained is", X)
    assert_array_almost_equal(
        A.T * X * A - X -
        A.T * X * B * inv(B.T * X * B + R) * B.T * X * A + Q, zeros((2, 2)))
    assert_array_almost_equal(inv(B.T * X * B + R) * B.T * X * A, G)
    # check for stable closed loop
    lam = eigvals(A - B * G)
    assert_array_less(abs(lam), 1.0)

    A = matrix([[1, 0], [-1, 1]])
    Q = matrix([[0, 1], [1, 1]])
    B = matrix([[1], [0]])
    R = 2
    X, L, G = dare(A, B, Q, R)
    # print("The solution obtained is", X)
    assert_array_almost_equal(
        A.T * X * A - X -
        A.T * X * B * inv(B.T * X * B + R) * B.T * X * A + Q, zeros((2, 2)))
    assert_array_almost_equal(B.T * X * A / (B.T * X * B + R), G)
    # check for stable closed loop
    lam = eigvals(A - B * G)
    assert_array_less(abs(lam), 1.0)
Example 48
Project: python-qinfer Source File: test_region_estimates.py
def test_region_est_hull(self):
    """
    Tests that region_est_hull works.
    """
    dist = MultivariateNormalDistribution(self.MEAN, self.COV)
    # The model is irrelevant; we just want the updater to have some
    # particles with the desired normal distribution.
    u = SMCUpdater(MockModel(self.N_MPS), self.N_PARTICLES, dist)
    faces, vertices = u.region_est_hull(level=0.95)

    # In this multinormal case, the convex hull surface
    # should be centered at MEAN
    assert_almost_equal(
        np.round(np.mean(vertices, axis=0)),
        np.round(self.MEAN)
    )

    # And a lower level should result in a smaller hull
    # and therefore smaller sample variance
    faces2, vertices2 = u.region_est_hull(level=0.2)
    assert_array_less(np.var(vertices2, axis=0), np.var(vertices, axis=0))
Example 49
Project: scipy Source File: test_matfuncs.py
@decorators.slow
def test_expm_cond_fuzz(self):
    np.random.seed(12345)
    eps = 1e-5
    nsamples = 10
    for i in range(nsamples):
        n = np.random.randint(2, 5)
        A = np.random.randn(n, n)
        A_norm = scipy.linalg.norm(A)
        X = expm(A)
        X_norm = scipy.linalg.norm(X)
        kappa = expm_cond(A)

        # Look for the small perturbation that gives the greatest
        # relative error.
        f = functools.partial(_help_expm_cond_search,
                              A, A_norm, X, X_norm, eps)
        guess = np.ones(n*n)
        out = minimize(f, guess, method='L-BFGS-B')
        xopt = out.x
        yopt = f(xopt)
        p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
        p_best_relerr = _relative_error(expm, A, p_best)
        assert_allclose(p_best_relerr, -yopt * eps)

        # Check that the identified perturbation indeed gives greater
        # relative error than random perturbations with similar norms.
        for j in range(5):
            p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
            assert_allclose(norm(p_best), norm(p_rand))
            p_rand_relerr = _relative_error(expm, A, p_rand)
            assert_array_less(p_rand_relerr, p_best_relerr)

        # The greatest relative error should not be much greater than
        # eps times the condition number kappa.
        # In the limit as eps approaches zero it should never be greater.
        assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
Example 50
def testSample(self):
    with self.test_session():
        scale = make_pd(1., 2)
        df = 4

        chol_w = distributions.WishartCholesky(
            df, chol(scale), cholesky_input_output_matrices=False)
        x = chol_w.sample(1, seed=42).eval()
        chol_x = [chol(x[0])]

        full_w = distributions.WishartFull(
            df, scale, cholesky_input_output_matrices=False)
        self.assertAllClose(x, full_w.sample(1, seed=42).eval())

        chol_w_chol = distributions.WishartCholesky(
            df, chol(scale), cholesky_input_output_matrices=True)
        self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
        eigen_values = tf.matrix_diag_part(chol_w_chol.sample(1000, seed=42))
        np.testing.assert_array_less(0., eigen_values.eval())

        full_w_chol = distributions.WishartFull(
            df, scale, cholesky_input_output_matrices=True)
        self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
        eigen_values = tf.matrix_diag_part(full_w_chol.sample(1000, seed=42))
        np.testing.assert_array_less(0., eigen_values.eval())

        # Check first and second moments.
        df = 4.
        chol_w = distributions.WishartCholesky(
            df=df,
            scale=chol(make_pd(1., 3)),
            cholesky_input_output_matrices=False)
        x = chol_w.sample(10000, seed=42)
        self.assertAllEqual((10000, 3, 3), x.get_shape())

        moment1_estimate = tf.reduce_mean(x, reduction_indices=[0]).eval()
        self.assertAllClose(chol_w.mean().eval(),
                            moment1_estimate,
                            rtol=0.05)

        # The Variance estimate uses the squares rather than outer-products
        # because Wishart.Variance is the diagonal of the Wishart covariance
        # matrix.
        variance_estimate = (
            tf.reduce_mean(tf.square(x), reduction_indices=[0]) -
            tf.square(moment1_estimate)).eval()
        self.assertAllClose(chol_w.variance().eval(),
                            variance_estimate,
                            rtol=0.05)