# numpy.errstate

Here are examples of the Python API `numpy.errstate`, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

## 152 Examples

#### Example 1

Project: scikit-learn
Source File: test_forest.py
```python
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
```

#### Example 2

Project: scikit-learn
Source File: test_neighbors.py
```python
def _weight_func(dist):
    """ Weight function to replace lambda d: d ** -2.
    The lambda function is not valid because:
    if d==0 then 0^-2 is not valid. """

    # Dist could be multidimensional, flatten it so all values
    # can be looped
    with np.errstate(divide='ignore'):
        retval = 1. / dist
    return retval ** 2
```

#### Example 3

Project: scipy
Source File: linesearch.py
```python
def _quadmin(a, fa, fpa, b, fb):
    """
    Finds the minimizer for a quadratic polynomial that goes through
    the points (a,fa), (b,fb) with derivative at a of fpa,

    """
    # f(x) = B*(x-a)^2 + C*(x-a) + D
    with np.errstate(divide='raise', over='raise', invalid='raise'):
        try:
            D = fa
            C = fpa
            db = b - a * 1.0
            B = (fb - D - C * db) / (db * db)
            xmin = a - C / (2.0 * B)
        except ArithmeticError:
            return None
    if not np.isfinite(xmin):
        return None
    return xmin
```

#### Example 4

Project: scipy
Source File: test_slsqp.py
```python
    def test_minimize_bounded_approximated(self):
        # Minimize, method='SLSQP': bounded, approximated jacobian.
        with np.errstate(invalid='ignore'):
            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                           bounds=((2.5, None), (None, 0.5)),
                           method='SLSQP', options=self.opts)
            assert_(res['success'], res['message'])
            assert_allclose(res.x, [2.5, 0.5])
            assert_(2.5 <= res.x[0])
            assert_(res.x[1] <= 0.5)
```

#### Example 5

Project: sfs-python
Source File: util.py
```python
def db(x, power=False):
    """Convert *x* to decibel.

    Parameters
    ----------
    x : array_like
        Input data.  Values of 0 lead to negative infinity.
    power : bool, optional
        If ``power=False`` (the default), *x* is squared before
        conversion.

    """
    with np.errstate(divide='ignore'):
        return 10 if power else 20 * np.log10(np.abs(x))
```

#### Example 6

Project: bayespy
Source File: test_bernoulli.py
```python
    def test_random(self):
        """
        Test random sampling in Bernoulli node
        """
        p = [1.0, 0.0]
        with np.errstate(divide='ignore'):
            Z = Bernoulli(p, plates=(3,2)).random()
        self.assertArrayEqual(Z, np.ones((3,2))*p)
```

#### Example 7

Project: bayespy
Source File: test_binomial.py
```python
    def test_random(self):
        """
        Test random sampling in Binomial node
        """
        N = [ [5], [50] ]
        p = [1.0, 0.0]
        with np.errstate(divide='ignore'):
            Z = Binomial(N, p, plates=(3,2,2)).random()
        self.assertArrayEqual(Z, np.ones((3,2,2))*N*p)
```

#### Example 8

Project: paramz
Source File: parameterized_tests.py
```python
    def test_optimize_org_bfgs(self):
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            with np.errstate(divide='ignore'):
                self.testmodel.optimize_restarts(1, messages=0, optimizer='org-bfgs', xtol=0, ftol=0, gtol=1e-6)
                self.testmodel.optimize(messages=1, optimizer='org-bfgs')
```

#### Example 9

Project: gplearn
Source File: fitness.py
```python
def weighted_pearson(x1, x2, w):
    """Calculate the weighted Pearson correlation coefficient."""
    with np.errstate(divide='ignore', invalid='ignore'):
        x1_demean = x1 - np.average(x1, weights=w)
        x2_demean = x2 - np.average(x2, weights=w)
        corr = ((np.sum(w * x1_demean * x2_demean) / np.sum(w)) /
                np.sqrt((np.sum(w * x1_demean ** 2) *
                         np.sum(w * x2_demean ** 2)) /
                        (np.sum(w) ** 2)))
    if np.isfinite(corr):
        return np.abs(corr)
    return 0
```

#### Example 10

Source File: reductions.py
```python
    @staticmethod
    def _finalize(bases, **kwargs):
        sums, counts = bases
        with np.errstate(divide='ignore', invalid='ignore'):
            x = sums/counts
        return xr.DataArray(x, **kwargs)
```

#### Example 11

Source File: reductions.py
```python
    @staticmethod
    def _finalize(bases, **kwargs):
        sums, counts, m2s = bases
        with np.errstate(divide='ignore', invalid='ignore'):
            x = m2s/counts
        return xr.DataArray(x, **kwargs)
```

#### Example 12

Source File: reductions.py
```python
    @staticmethod
    def _finalize(bases, **kwargs):
        sums, counts, m2s = bases
        with np.errstate(divide='ignore', invalid='ignore'):
            x = np.sqrt(m2s/counts)
        return xr.DataArray(x, **kwargs)
```

#### Example 13

Project: deap
Source File: symbreg_numpy.py
```python
def protectedDiv(left, right):
    with numpy.errstate(divide='ignore',invalid='ignore'):
        x = numpy.divide(left, right)
        if isinstance(x, numpy.ndarray):
            x[numpy.isinf(x)] = 1
            x[numpy.isnan(x)] = 1
        elif numpy.isinf(x) or numpy.isnan(x):
            x = 1
    return x
```

#### Example 14

```python
def sigmoid(z):
    """Computes sigmoid function.

    z: array of input values.

    Returns array of outputs, sigmoid(z).
    """
    # Note: this version of sigmoid tries to avoid overflows in the computation
    # of e^(-z), by using an alternative formulation when z is negative, to get
    # 0. e^z / (1+e^z) is equivalent to the definition of sigmoid, but we won't
    # get e^(-z) to overflow when z is very negative.
    # Since both the x and y arguments to np.where are evaluated by Python, we
    # may still get overflow warnings for large z elements; therefore we ignore
    # warnings during this computation.
    with np.errstate(over='ignore', invalid='ignore'):
        return np.where(z >= 0,
                        1 / (1 + np.exp(-z)),
                        np.exp(z) / (1 + np.exp(z)))
```

#### Example 15

```python
    def __init__(self, *args, **kwargs):
        super(PlotUI, self).__init__(*args, **kwargs)
        # FIXME: 'with' wrapping is temporary fix for infinite range in initial
        # color map, which can cause a distracting warning print. This 'with'
        # wrapping should be unnecessary after fix in color_mapper.py.
        with errstate(invalid='ignore'):
            self.create_plot()
```

#### Example 16

Project: PyFNND
Source File: _fnndeconv.py
```python
def _post_LL(n_hat, res, scale_var, lD, z):

    # barrier term
    with np.errstate(invalid='ignore'):     # suppress log(0) error messages
        barrier = np.log(n_hat).sum()       # this is currently a bottleneck

    # sum of squared (predicted - actual) fluorescence
    res_ss = res.ravel().dot(res.ravel())   # fast sum-of-squares

    # weighted posterior log-likelihood of the fluorescence
    LL = -(scale_var * res_ss) - (n_hat.sum() / lD) + (z * barrier)

    return LL
```

#### Example 17

Project: hyperspy
Source File: model1d.py
```python
    def _poisson_likelihood_function(self, param, y, weights=None):
        """Returns the likelihood function of the model for the given
        data and parameters
        """
        mf = self._model_function(param)
        with np.errstate(invalid='ignore'):
            return -(y * np.log(mf) - mf).sum()
```

#### Example 18

Project: flopy
Source File: netcdf.py
```python
    def __truediv__(self,other):
        new_net = NetCdf.zeros_like(self)
        with np.errstate(invalid="ignore"):
            if np.isscalar(other) or isinstance(other,np.ndarray):
                for vname in self.var_attr_dict.keys():
                    new_net.nc.variables[vname][:] = self.nc.variables[vname][:] /\
                                                     other
            elif isinstance(other,NetCdf):
                for vname in self.var_attr_dict.keys():
                    new_net.nc.variables[vname][:] = self.nc.variables[vname][:] /\
                                                     other.nc.variables[vname][:]
            else:
                raise Exception("NetCdf.__sub__(): unrecognized other:{0}".\
                                format(str(type(other))))
        return new_net
```

#### Example 19

Project: flopy
Source File: netcdf.py
```python
    def __truediv__(self,other):
        new_net = NetCdf.zeros_like(self)
        with np.errstate(invalid="ignore"):
            if np.isscalar(other) or isinstance(other,np.ndarray):
                for vname in self.var_attr_dict.keys():
                    new_net.nc.variables[vname][:] = self.nc.variables[vname][:] /\
                                                     other
            elif isinstance(other,NetCdf):
                for vname in self.var_attr_dict.keys():
                    new_net.nc.variables[vname][:] = self.nc.variables[vname][:] /\
                                                     other.nc.variables[vname][:]
            else:
                raise Exception("NetCdf.__sub__(): unrecognized other:{0}".\
                                format(str(type(other))))
        return new_net
```

#### Example 20

```python
    @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
    def test_invalid(self):
        with np.errstate(all='raise', under='ignore'):
            a = -np.arange(3)
            # This should work
            with np.errstate(invalid='ignore'):
                np.sqrt(a)
            # While this should fail!
            try:
                np.sqrt(a)
            except FloatingPointError:
                pass
            else:
                self.fail("Did not raise an invalid error")
```

#### Example 21

```python
    def test_divide(self):
        with np.errstate(all='raise', under='ignore'):
            a = -np.arange(3)
            # This should work
            with np.errstate(divide='ignore'):
                a // 0
            # While this should fail!
            try:
                a // 0
            except FloatingPointError:
                pass
            else:
                self.fail("Did not raise divide by zero error")
```

#### Example 22

```python
    @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
    def test_invalid(self):
        with np.errstate(all='raise', under='ignore'):
            a = -np.arange(3)
            # This should work
            with np.errstate(invalid='ignore'):
                np.sqrt(a)
            # While this should fail!
            try:
                np.sqrt(a)
            except FloatingPointError:
                pass
            else:
                self.fail("Did not raise an invalid error")
```

#### Example 23

```python
    def test_divide(self):
        with np.errstate(all='raise', under='ignore'):
            a = -np.arange(3)
            # This should work
            with np.errstate(divide='ignore'):
                a // 0
            # While this should fail!
            try:
                a // 0
            except FloatingPointError:
                pass
            else:
                self.fail("Did not raise divide by zero error")
```

#### Example 24

```python
    def test_underlow(self):
        # Regression test for #759:
        # instanciating MachAr for dtype = np.float96 raises spurious warning.
        with errstate(all='raise'):
            try:
                self._run_machar_highprec()
            except FloatingPointError as e:
                self.fail("Caught %s exception, should not have been raised." % e)
```

#### Example 25

```python
    def test_underlow(self):
        # Regression test for #759:
        # instanciating MachAr for dtype = np.float96 raises spurious warning.
        with errstate(all='raise'):
            try:
                self._run_machar_highprec()
            except FloatingPointError as e:
                self.fail("Caught %s exception, should not have been raised." % e)
```

#### Example 26

```python
    def test_zero_division(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                a = t(0.0)
                b = t(1.0)
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.nan))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.nan))
                assert_(np.isnan(b/a))
                b = t(0.)
                assert_(np.isnan(b/a))
```

#### Example 27

```python
    def test_zero_division(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                a = t(0.0)
                b = t(1.0)
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.nan))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.nan))
                assert_(np.isnan(b/a))
                b = t(0.)
                assert_(np.isnan(b/a))
```

#### Example 28

```python
    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = isposinf(np.array((-1., 0, 1))/0.)
        assert_(vals[0] == 0)
        assert_(vals[1] == 0)
        assert_(vals[2] == 1)
```

#### Example 29

```python
    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = isneginf(np.array((-1., 0, 1))/0.)
        assert_(vals[0] == 1)
        assert_(vals[1] == 0)
        assert_(vals[2] == 0)
```

#### Example 30

```python
    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.)
        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
```

#### Example 31

```python
    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0+1.j)/0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_all(np.isfinite(vals))
```

#### Example 32

```python
    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1+1.j)/0.
        vals = nan_to_num(v)
        assert_all(np.isfinite(vals))
```

#### Example 33

```python
    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = isposinf(np.array((-1., 0, 1))/0.)
        assert_(vals[0] == 0)
        assert_(vals[1] == 0)
        assert_(vals[2] == 1)
```

#### Example 34

```python
    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = isneginf(np.array((-1., 0, 1))/0.)
        assert_(vals[0] == 1)
        assert_(vals[1] == 0)
        assert_(vals[2] == 0)
```

#### Example 35

```python
    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.)
        assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
```

#### Example 36

```python
    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0+1.j)/0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_all(np.isfinite(vals))
```

#### Example 37

```python
    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1+1.j)/0.
        vals = nan_to_num(v)
        assert_all(np.isfinite(vals))
```

#### Example 38

```python
    def test_testScalarArithmetic(self):
        #TODO FIXME: Find out what the following raises a warning in r8247
        with np.errstate(divide='ignore'):
            self.assertTrue(xm.filled().dtype is xm._data.dtype)
            self.assertTrue(x.filled() == x._data)
```

#### Example 39

```python
    def test_masked_unary_operations(self):
        (x, mx) = self.data
        with np.errstate(divide='ignore'):
            self.assertTrue(isinstance(log(mx), mmatrix))
            assert_equal(log(x), np.log(x))
```

#### Example 40

```python
    def test_testScalarArithmetic(self):
        #TODO FIXME: Find out what the following raises a warning in r8247
        with np.errstate(divide='ignore'):
            self.assertTrue(xm.filled().dtype is xm._data.dtype)
            self.assertTrue(x.filled() == x._data)
```

#### Example 41

```python
    def test_masked_unary_operations(self):
        (x, mx) = self.data
        with np.errstate(divide='ignore'):
            self.assertTrue(isinstance(log(mx), mmatrix))
            assert_equal(log(x), np.log(x))
```

#### Example 42

```python
def _quadmin(a, fa, fpa, b, fb):
    """
    Finds the minimizer for a quadratic polynomial that goes through
    the points (a,fa), (b,fb) with derivative at a of fpa,

    """
    # f(x) = B*(x-a)^2 + C*(x-a) + D
    with np.errstate(divide='raise', over='raise', invalid='raise'):
        try:
            D = fa
            C = fpa
            db = b - a * 1.0
            B = (fb - D - C * db) / (db * db)
            xmin = a - C / (2.0 * B)
        except ArithmeticError:
            return None
    if not np.isfinite(xmin):
        return None
    return xmin
```

#### Example 43

```python
    def test_minimize_bounded_approximated(self):
        # Minimize, method='SLSQP': bounded, approximated jacobian.
        with np.errstate(invalid='ignore'):
            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                           bounds=((2.5, None), (None, 0.5)),
                           method='SLSQP', options=self.opts)
            assert_(res['success'], res['message'])
            assert_allclose(res.x, [2.5, 0.5])
            assert_(2.5 <= res.x[0])
            assert_(res.x[1] <= 0.5)
```

#### Example 44

```python
def _quadmin(a, fa, fpa, b, fb):
    """
    Finds the minimizer for a quadratic polynomial that goes through
    the points (a,fa), (b,fb) with derivative at a of fpa,

    """
    # f(x) = B*(x-a)^2 + C*(x-a) + D
    with np.errstate(divide='raise', over='raise', invalid='raise'):
        try:
            D = fa
            C = fpa
            db = b - a * 1.0
            B = (fb - D - C * db) / (db * db)
            xmin = a - C / (2.0 * B)
        except ArithmeticError:
            return None
    if not np.isfinite(xmin):
        return None
    return xmin
```

#### Example 45

```python
    def test_minimize_bounded_approximated(self):
        # Minimize, method='SLSQP': bounded, approximated jacobian.
        with np.errstate(invalid='ignore'):
            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                           bounds=((2.5, None), (None, 0.5)),
                           method='SLSQP', options=self.opts)
            assert_(res['success'], res['message'])
            assert_allclose(res.x, [2.5, 0.5])
            assert_(2.5 <= res.x[0])
            assert_(res.x[1] <= 0.5)
```

#### Example 46

```python
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
```

#### Example 47

```python
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
```

#### Example 48

```python
def test_numerical_stability_large_gradient():
    # Non regression test case for numerical stability on scaled problems
    # where the gradient can still explode with some losses
    model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
                          penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
                          eta0=0.001, random_state=0)
    with np.errstate(all='raise'):
        model.fit(iris.data, iris.target)
    assert_true(np.isfinite(model.coef_).all())
```

#### Example 49

```python
def test_large_regularization():
    # Non regression tests for numerical stability issues caused by large
    # regularization parameters
    for penalty in ['l2', 'l1', 'elasticnet']:
        model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
                              n_iter=5, penalty=penalty, shuffle=False)
        with np.errstate(all='raise'):
            model.fit(iris.data, iris.target)
        assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
```

#### Example 50

```def test_numerical_stability_large_gradient():