Here are examples of the Python API numpy.testing.utils.assert_almost_equal, taken from open-source projects.
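Before the examples, a minimal sketch of the function's behavior (shown with the modern numpy.testing import path; numpy.testing.utils is the legacy location of the same function):

import numpy as np
from numpy.testing import assert_almost_equal

# Scalars: passes because the values agree to 7 decimal places (the default).
assert_almost_equal(1.0 + 1e-10, 1.0)

# Arrays: the comparison is applied elementwise.
assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-9]))

# Would raise AssertionError: the difference exceeds the requested precision.
# assert_almost_equal(1.001, 1.0, decimal=5)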
8 Examples
Example 1
def test_1D_single(self):
    session = None
    ary = np.array([3.0, 2.1, 1.3, 1.8, 5.7])
    sources = make_data_sources(session, "none", ary)
    assert_almost_equal(sources[0][0].get_data(), np.arange(len(ary)))
    assert_almost_equal(sources[0][1].get_data(), ary)
    return
Example 2
Project: chaco Source File: make_data_sources_test_case.py
def test_1d_multiple(self):
    session = None
    index = np.arange(-np.pi, np.pi, np.pi / 30.0)
    s = np.sin(index)
    c = np.cos(index)
    t = np.tan(index)
    sources = make_data_sources(session, "ascending", index, s, c, t)
    assert_almost_equal(sources[0][0].get_data(), index)
    # All value arrays should share a single index data source.
    self.assertTrue(sources[0][0] == sources[1][0])
    self.assertTrue(sources[0][0] == sources[2][0])
    assert_almost_equal(sources[0][1].get_data(), s)
    assert_almost_equal(sources[1][1].get_data(), c)
    assert_almost_equal(sources[2][1].get_data(), t)
    return
Example 3
@staticmethod
def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg='',
                           verbose=True):
    """
    Tests whether two arrays are almost equal, to the given number of
    decimal places. The CamelCase naming keeps it consistent with the
    standard unittest assertion methods.
    """
    return nptu.assert_almost_equal(actual, desired, decimal, err_msg,
                                    verbose)
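As a usage note, such a wrapper would typically be defined on a unittest.TestCase subclass so it can be called like the built-in assertions; a hypothetical sketch (the class and test names are illustrative, not from the original project):

import unittest
import numpy as np
from numpy import testing as nptu  # modern home of assert_almost_equal

class ArrayTestCase(unittest.TestCase):
    @staticmethod
    def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg='',
                               verbose=True):
        return nptu.assert_almost_equal(actual, desired, decimal, err_msg,
                                        verbose)

    def test_small_perturbation(self):
        x = np.linspace(0.0, 1.0, 5)
        # Differences of 1e-9 are below the default decimal=7 tolerance.
        self.assertArrayAlmostEqual(x + 1e-9, x)

if __name__ == '__main__':
    unittest.main()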
Example 4
Project: py-earth Source File: test_knot_search.py
def test_outcome_dependent_data():
    np.random.seed(10)
    m = 1000
    max_terms = 100
    y = np.random.normal(size=m)
    w = np.random.normal(size=m) ** 2
    weight = SingleWeightDependentData.alloc(w, m, max_terms, 1e-16)
    data = SingleOutcomeDependentData.alloc(y, weight, m, max_terms)
    # Test updating
    B = np.empty(shape=(m, max_terms))
    for k in range(max_terms):
        b = np.random.normal(size=m)
        B[:, k] = b
        code = weight.update_from_array(b)
        data.update()
        assert_equal(code, 0)
        # The stored rows of Q_t must remain orthonormal after every update.
        assert_almost_equal(
            np.dot(weight.Q_t[:k + 1, :], np.transpose(weight.Q_t[:k + 1, :])),
            np.eye(k + 1))
    # Once max_terms columns have been added, further updates are rejected.
    assert_equal(weight.update_from_array(b), -1)
    # Test downdating
    q = np.array(weight.Q_t).copy()
    theta = np.array(data.theta[:max_terms]).copy()
    weight.downdate()
    data.downdate()
    weight.update_from_array(b)
    data.update()
    # A downdate followed by re-adding the same column must be a no-op.
    assert_almost_equal(q, np.array(weight.Q_t))
    assert_almost_equal(theta, np.array(data.theta[:max_terms]))
    assert_almost_equal(
        np.array(data.theta[:max_terms]), np.dot(weight.Q_t, w * y))
    wB = B * w[:, None]
    Q, _ = qr(wB, pivoting=False, mode='economic')
    assert_almost_equal(np.abs(np.dot(weight.Q_t, Q)), np.eye(max_terms))
    # Test that reweighting works
    assert_equal(data.k, max_terms)
    w2 = np.random.normal(size=m) ** 2
    weight.reweight(w2, B, max_terms)
    data.synchronize()
    assert_equal(data.k, max_terms)
    w2B = B * w2[:, None]
    Q2, _ = qr(w2B, pivoting=False, mode='economic')
    assert_almost_equal(np.abs(np.dot(weight.Q_t, Q2)), np.eye(max_terms))
    assert_almost_equal(
        np.array(data.theta[:max_terms]), np.dot(weight.Q_t, w2 * y))
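The invariant asserted inside the loop above, that the rows of Q_t stay orthonormal as columns are added, can be checked without any py-earth internals; a minimal sketch of the same check using scipy.linalg.qr:

import numpy as np
from numpy.testing import assert_almost_equal
from scipy.linalg import qr

np.random.seed(0)
m, k = 50, 10
B = np.random.normal(size=(m, k))

# Economic QR: Q has k orthonormal columns spanning the columns of B.
Q, _ = qr(B, mode='economic')

# Orthonormality: Q^T Q = I_k, the same property the test asserts on Q_t.
assert_almost_equal(np.dot(Q.T, Q), np.eye(k))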
Example 5
Project: py-earth Source File: test_knot_search.py
def test_knot_search():
    seed = 10
    np.random.seed(seed)
    m = 100
    q = 5
    r = 10
    n_outcomes = 3
    # Generate some problem data
    x, B, p, knot, candidates, outcomes = generate_problem(
        m, q, r, n_outcomes, False)
    y = np.concatenate([y_[:, None] for y_, _ in outcomes], axis=1)
    w = np.concatenate([w_[:, None] for _, w_ in outcomes], axis=1)
    # Formulate the inputs for the fast version
    data = form_inputs(x, B, p, knot, candidates, y, w)
    # Get the answer using the slow version
    best_knot, best_k, best_e = slow_knot_search(p, x, B, candidates, outcomes)
    # Test the test
    assert_almost_equal(best_knot, knot)
    assert_equal(r, len(candidates))
    assert_equal(m, B.shape[0])
    assert_equal(q, B.shape[1])
    assert_equal(len(outcomes), n_outcomes)
    # Run fast knot search and compare results to slow knot search
    fast_best_knot, fast_best_k, fast_best_e = knot_search(data, candidates,
                                                           p, q, m, r,
                                                           len(outcomes), 0)
    assert_almost_equal(fast_best_knot, best_knot)
    assert_equal(candidates[fast_best_k], candidates[best_k])
    assert_almost_equal(fast_best_e, best_e)
Example 6
Project: dask-learn Source File: test_averaged.py
def test_multiclass_merge_estimators():
    coef1 = np.array([[1., 2, 3],
                      [4, 5, 6],
                      [7., 8, 9]])
    coef2 = np.array([[10., 11, 12]])
    coef3 = np.array([[13., 14, 15],
                      [16, 17, 18],
                      [19, 20, 21]])
    intercept1 = np.array([[1.], [2], [3]])
    intercept2 = np.array([[4.]])
    intercept3 = np.array([[5.], [6], [7]])
    classes1 = np.array([0, 1, 2])
    classes2 = np.array([1, 2])
    classes3 = np.array([1, 2, 3])

    def f(coef, intercept, classes):
        s = SGDClassifier()
        s.coef_ = coef
        s.intercept_ = intercept
        s.classes_ = classes
        return s

    ests = [f(coef1, intercept1, classes1),
            f(coef2, intercept2, classes2),
            f(coef3, intercept3, classes3)]
    # Build the expected averages: scatter-add each estimator's rows into a
    # full (n_classes, n_features) matrix, then divide by the estimator count.
    coef = np.zeros((4, 3), dtype='f8')
    coef[classes1] += coef1
    coef[[classes2[1]]] += coef2
    coef[classes3] += coef3
    coef /= 3
    intercept = np.zeros((4, 1), dtype='f8')
    intercept[classes1] += intercept1
    intercept[[classes2[1]]] += intercept2
    intercept[classes3] += intercept3
    intercept /= 3
    res = merge_estimators(ests)
    tm.assert_almost_equal(res.coef_, coef)
    tm.assert_almost_equal(res.intercept_, intercept)
    tm.assert_almost_equal(res.classes_, np.arange(4))
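The expected values above are built with a scatter-add-then-average pattern: each estimator contributes only to the rows of the full (n_classes, n_features) matrix that correspond to its own classes. A small self-contained illustration of that pattern (the values are made up):

import numpy as np
from numpy.testing import assert_almost_equal

n_classes, n_features, n_estimators = 4, 3, 3
coef = np.zeros((n_classes, n_features))
# Fancy indexing with a class-index array scatter-adds rows in place.
coef[np.array([0, 1, 2])] += 1.0  # an estimator that saw classes 0, 1, 2
coef[np.array([2])] += 2.0        # a binary estimator: one row, at class 2
coef[np.array([1, 2, 3])] += 3.0  # an estimator that saw classes 1, 2, 3
coef /= n_estimators
# Class 2 received contributions from all three estimators.
assert_almost_equal(coef[2], np.full(n_features, (1.0 + 2.0 + 3.0) / 3))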
Example 7
Project: dask-learn Source File: test_grid_search.py
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kinds of prediction
    # errors across CV folds and parameter settings
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)
    # XXX: as of today (scipy 0.12) it's not possible to set the random seed
    # of scipy.stats distributions: the assertions in this test should thus
    # not depend on the randomization
    params = dict(C=expon(scale=10),
                  gamma=expon(scale=0.1))
    n_cv_iter = 3
    n_search_iter = 30
    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
                                param_distributions=params, iid=False)
    search.fit(X, y)
    assert len(search.grid_scores_) == n_search_iter

    # Check the consistency of the structure of each cv_score item
    for cv_score in search.grid_scores_:
        assert len(cv_score.cv_validation_scores) == n_cv_iter
        # Because we set iid to False, the mean_validation_score is the
        # mean of the fold mean scores instead of the aggregate sample-wise
        # mean score
        tm.assert_almost_equal(np.mean(cv_score.cv_validation_scores),
                               cv_score.mean_validation_score)
        assert (list(sorted(cv_score.parameters.keys())) ==
                list(sorted(params.keys())))

    # Check consistency with the best_score_ and best_params_ attributes
    sorted_grid_scores = list(sorted(search.grid_scores_,
                                     key=lambda x: x.mean_validation_score))
    best_score = sorted_grid_scores[-1].mean_validation_score
    assert search.best_score_ == best_score
    tied_best_params = [s.parameters for s in sorted_grid_scores
                        if s.mean_validation_score == best_score]
    assert search.best_params_ in tied_best_params
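The iid=False behavior the inline comment refers to reduces to an unweighted average of the per-fold scores; a tiny illustration:

import numpy as np
from numpy.testing import assert_almost_equal

fold_scores = np.array([0.80, 0.85, 0.90])
# With iid=False the reported mean ignores fold sizes: it is the plain
# mean of the per-fold scores.
assert_almost_equal(np.mean(fold_scores), 0.85)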
Example 8
Project: dask-learn Source File: test_grid_search.py
def test_grid_search_score_consistency():
    # Test that the correct scorer is used for each scoring metric
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    for score in ['f1', 'roc_auc']:
        grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
        grid_search.fit(X, y)
        cv = StratifiedKFold(n_folds=3, y=y)
        for C, scores in zip(Cs, grid_search.grid_scores_):
            clf.set_params(C=C)
            scores = scores[2]  # get the separate per-fold scores
            i = 0
            for train, test in cv:
                clf.fit(X[train], y[train])
                if score == "f1":
                    correct_score = f1_score(y[test], clf.predict(X[test]))
                elif score == "roc_auc":
                    dec = clf.decision_function(X[test])
                    correct_score = roc_auc_score(y[test], dec)
                # Each stored fold score must match a manual re-computation.
                tm.assert_almost_equal(correct_score, scores[i])
                i += 1
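Both grid-search examples compare floating-point scores with assert_almost_equal rather than ==; a one-liner shows why exact comparison is fragile for floats:

from numpy.testing import assert_almost_equal

a = 0.1 + 0.2                # 0.30000000000000004 in binary floating point
assert a != 0.3              # exact equality fails
assert_almost_equal(a, 0.3)  # passes: equal to the default 7 decimal places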