Here are examples of the Python API numpy.random.normal, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
187 Examples
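For reference before the examples: the call signature is numpy.random.normal(loc=0.0, scale=1.0, size=None), where loc is the mean of the distribution, scale is the standard deviation, and size is the output shape; called with no arguments it returns a single draw from the standard normal distribution. A minimal sketch:

import numpy as np

x = np.random.normal()                             # single draw from N(0, 1)
v = np.random.normal(loc=5.0, scale=2.0, size=10)  # ten draws from N(5, 2**2)
M = np.random.normal(size=(3, 4))                  # 3x4 array of standard-normal draws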
Example 1 (5 votes)

def generate(self, m):
    X = numpy.random.normal(size=(m, self.n))
    # Including the intercept
    num_terms = numpy.random.randint(2, self.max_terms)
    coef = 10 * numpy.random.normal(size=num_terms)
    B = numpy.ones(shape=(m, num_terms))
    B[:, 0] += coef[0]
    for i in range(1, num_terms):
        degree = numpy.random.randint(1, self.max_degree)
        for bf in range(degree):
            knot = numpy.random.normal()
            dir = 1 - 2 * numpy.random.binomial(1, .5)
            var = numpy.random.randint(0, self.n)
            B[:, i] *= (dir * (X[:, var] - knot)) * \
                       (dir * (X[:, var] - knot) > 0)
    y = numpy.dot(B, coef) + numpy.random.normal(size=m)
    return X, y
Example 2 (5 votes)

def takeAction(self, intAction):
    x = 0. if intAction == 0 else 10.**(intAction - 1)
    self.counter += 1
    # If noisy, create noise on cost/payoff
    paynoise = numpy.random.normal(scale=self.noise) if self.noise > 0 else 0.0
    costnoise = numpy.random.normal(scale=self.noise) if self.noise > 0 else 0.0
    # Update random demand
    self.state[1] = min(self.max_quantity,
                        max(0., numpy.random.normal(self.demand[0], scale=self.demand[1])))
    reward = (self.payoff + paynoise) * self.state.min() - (self.cost + costnoise) * x
    self.state[0] = min(self.max_quantity, max(0., self.state[0] - self.state[1]) + x)
    return reward / 600.
Example 3 (4 votes)

def update2():
    global data3, ptr3
    data3[ptr3] = np.random.normal()
    ptr3 += 1
    if ptr3 >= data3.shape[0]:
        tmp = data3
        data3 = np.empty(data3.shape[0] * 2)
        data3[:tmp.shape[0]] = tmp
    curve3.setData(data3[:ptr3])
    curve3.setPos(-ptr3, 0)
    curve4.setData(data3[:ptr3])
Example 4 (3 votes)

def test_logsumexp_b():
    a = np.random.normal(size=(200, 500, 5))
    b = np.random.normal(size=(200, 500, 5)) ** 2.
    for axis in range(a.ndim):
        ans_ne = pymbar.utils.logsumexp(a, b=b, axis=axis)
        ans_no_ne = pymbar.utils.logsumexp(a, b=b, axis=axis, use_numexpr=False)
        ans_scipy = scipy.misc.logsumexp(a, b=b, axis=axis)
        eq(ans_ne, ans_no_ne)
        eq(ans_ne, ans_scipy)
Example 5 (3 votes)
Project: ldsc  Source File: test_jackknife.py

def test_eq_slow(self):
    x = np.atleast_2d(np.random.normal(size=(100, 2)))
    y = np.atleast_2d(np.random.normal(size=(100, 1)))
    print x.shape
    for n_blocks in xrange(2, 49):
        b1 = jk.LstsqJackknifeFast(x, y, n_blocks=n_blocks).est
        b2 = jk.LstsqJackknifeSlow(x, y, n_blocks=n_blocks).est
        assert_array_almost_equal(b1, b2)
Example 6 (3 votes)
Project: morb  Source File: rbms.py

def _initial_W(self):
    # return np.asarray(np.random.uniform(
    #     low=-4*np.sqrt(6./(self.n_hidden+self.n_visible)),
    #     high=4*np.sqrt(6./(self.n_hidden+self.n_visible)),
    #     size=(self.n_visible, self.n_hidden)),
    #     dtype=theano.config.floatX)
    return np.asarray(np.random.normal(0, 0.01,
                                       size=(self.n_visible, self.n_hidden)),
                      dtype=theano.config.floatX)
Example 7 (3 votes)
Project: attention-lvcsr  Source File: test_bricks.py

def test_maxout():
    x = tensor.tensor3()
    maxout = Maxout(num_pieces=3)
    y = maxout.apply(x)
    x_val = numpy.asarray(numpy.random.normal(0, 1, (4, 5, 24)),
                          dtype=theano.config.floatX)
    assert_allclose(
        y.eval({x: x_val}),
        x_val.reshape(4, 5, 8, 3).max(3))
    assert y.eval({x: x_val}).shape == (4, 5, 8)
Example 8 (3 votes)

def init(args):
    layers = [784] + [args.hidden_size] * args.num_hidden + [10]
    biases = [np.random.normal(scale=0.001, size=(1, x)) for x in layers[1:]]
    weights = [
        np.random.normal(
            scale=0.001, size=(x, y)) for x, y in zip(layers[:-1], layers[1:])
    ]
    return weights, biases
Example 9 (3 votes)
Project: mystic  Source File: example12.py

def noisy_data(params):
    """generate noisy data from polynomial coefficients"""
    from numpy import random
    x, y = data(params)
    y = [random.normal(0, 1) + i for i in y]
    return x, y
Example 10 (3 votes)

def test_symmetrize():
    X = np.random.normal(size=(10, 10))
    Xo = X.copy()
    sym = Symmetrize(copy=True)
    Xs = sym.fit_transform(X)
    assert np.all(X == Xo)
    assert np.allclose(Xs, (X + X.T) / 2)
    assert np.all(sym.transform(X + 1) == X + 1)
    Xs2 = Symmetrize(copy=False).fit_transform(X)
    assert np.allclose(Xs2, (X + X.T) / 2)
    sym = Symmetrize()
    assert_raises(NotImplementedError, lambda: sym.fit(X))
    assert_raises(TypeError, lambda: sym.fit_transform(np.zeros((5, 3))))
Example 11 (3 votes)

def _sample_Z(self):
    """
    Samples a point in latent space according to the prior distribution
    p(Z)
    """
    return numpy.random.normal(size=(400,))
Example 12 (3 votes)

def createData():
    data = np.random.normal(size=(20, 3))
    mult = np.random.normal(size=(3, 3))
    data = np.dot(data, mult)
    xy = pv.Plot(title="X vs Y", x_range=(-5, 5), y_range=(-5, 5))
    xy.points(data[:, [0, 1]])
    xy.show(window='X vs Y', delay=1)
    xy = pv.Plot(title="X vs Z", x_range=(-5, 5), y_range=(-5, 5), ylabel="Z Axis")
    xy.points(data[:, [0, 2]])
    xy.show(window='X vs Z')
    print repr(data)
    return data
Example 13 (3 votes)
Project: sherpa  Source File: sample.py

def get_sample(self, fit, myscales=None, num=1):
    vals = numpy.array(fit.model.thawedpars)
    scales = self.scale.get_scales(fit, myscales)
    samples = [numpy.random.normal(
        val, scale, int(num)) for val, scale in izip(vals, scales)]
    return numpy.asarray(samples).T
Example 14 (3 votes)
Project: lifelines  Source File: test_statistics.py

def test_equal_intensity_with_negative_data():
    data1 = np.random.normal(0, size=(2000, 1))
    data1 -= data1.mean()
    data1 /= data1.std()
    data2 = np.random.normal(0, size=(2000, 1))
    data2 -= data2.mean()
    data2 /= data2.std()
    result = stats.logrank_test(data1, data2)
    assert not result.is_significant
Example 15 (3 votes)
Project: pysb  Source File: anneal_mod.py

def update_guess(self, x0):
    std = minimum(sqrt(self.T) * ones(self.dims),
                  (self.upper - self.lower) / 3.0 / self.learn_rate)
    x0 = asarray(x0)
    xc = squeeze(random.normal(0, 1.0, size=self.dims))
    xnew = x0 + xc * std * self.learn_rate
    return xnew
Example 16 (3 votes)

def initialize_value(self, vw):
    magnitude = xavier_magnitude(vw.shape,
                                 in_axes=self.in_axes,
                                 out_axes=self.out_axes,
                                 gain=self.gain)
    return np.random.normal(loc=0,
                            scale=magnitude,
                            size=vw.shape)
Example 17 (3 votes)
Project: matplotlib-style-gallery  Source File: artist-demo.py

def circle_and_text_demo(ax):
    # Circles with colors from default color cycle
    for i, color in enumerate(plt.rcParams['axes.color_cycle']):
        xy = np.random.normal(size=2)
        ax.add_patch(plt.Circle(xy, radius=0.3, color=color))
    ax.axis('equal')
    ax.margins(0)
    # Text label centered on the axes.
    ax.text(0.5, 0.5, 'hello world', ha='center', va='center',
            transform=ax.transAxes)
    ax.set_xlabel('x-label')
    ax.set_ylabel('y-label')
    ax.set_title('title')
Example 18 (3 votes)

def test_normal():
    for dim in [1, 4, 10]:
        for k in [1, 2, 5]:
            data = np.random.normal(scale=100, size=(1000, dim))
            query = np.random.normal(scale=100, size=(100, dim))
            py = pyflann.FLANN(algorithm='kdtree_single')
            cy = cyflann.FLANNIndex(algorithm='kdtree_single')
            py.build_index(data)
            cy.build_index(data)
            py_ids, py_dists = py.nn_index(query, k)
            cy_ids, cy_dists = cy.nn_index(query, k)
            f = partial(check_match, py_ids, py_dists, cy_ids, cy_dists)
            f.description = 'normal vs pyflann - dim {} - k {}'.format(dim, k)
            yield f
Example 19 (3 votes)
Project: pymc3  Source File: test_distributions_timeseries.py

def _gen_sde_path(sde, pars, dt, n, x0):
    xs = [x0]
    wt = np.random.normal(size=(n,) if isinstance(x0, float) else (n, x0.size))
    for i in range(n):
        f, g = sde(xs[-1], *pars)
        xs.append(
            xs[-1] + f * dt + np.sqrt(dt) * g * wt[i]
        )
    return np.array(xs)
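The helper above is a plain Euler-Maruyama integrator: each step adds the drift f * dt plus the diffusion g scaled by sqrt(dt) and a standard-normal draw. A hedged usage sketch (the Ornstein-Uhlenbeck drift and diffusion below are my own illustration, not part of the pymc3 test):

def ou_sde(x, theta, mu, sigma):
    # drift pulls x back toward mu; diffusion is constant
    return theta * (mu - x), sigma

path = _gen_sde_path(ou_sde, pars=(1.0, 0.0, 0.5), dt=0.01, n=1000, x0=0.0)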
Example 20 (3 votes)
Project: convolupy  Source File: tests.py

def test_convolutional_feature_map_agrees_with_naive_version_fprop():
    cmap = ConvolutionalFeatureMap((5, 5), (20, 20))
    cmap.initialize()
    cmap_naive = NaiveConvolutionalFeatureMap((5, 5), (20, 20))
    assert cmap_naive.fsize == (5, 5)
    assert cmap_naive.imsize == (20, 20)
    # Sync up their parameters
    cmap_naive.convolution.params[:] = cmap.params
    inputs = random.normal(size=cmap.imsize)
    assert_array_almost_equal(cmap.fprop(inputs), cmap_naive.fprop(inputs))
Example 21 (3 votes)
Project: panns  Source File: test_object_construction.py

def test_parallel_build():
    print 'Test building index in parallel mode ...'
    rows, cols = 100000, 50
    print 'Build a %i x %i dataset ...' % (rows, cols)
    vecs = numpy.random.normal(0, 1, (rows, cols))
    pidx = PannsIndex(cols)
    pidx.load_matrix(vecs)
    pidx.parallelize(True)
    pidx.build(100)
    print 'Parallel building is done.'
    v = gaussian_vector(cols, True)
    r1 = pidx.linear_search(v, 10)
    r2 = pidx.query(v, 10)
    m1 = precision(r1, r2)
    m2 = recall(r1, r2)
    print "Precision: %.3f, Recall: %.3f" % (m1, m2)
Example 22 (3 votes)

@classmethod
def setUpClass(cls):
    super(TestGLM, cls).setUpClass()
    cls.intercept = 1
    cls.slope = 3
    cls.sd = .05
    x_linear, cls.y_linear = generate_data(cls.intercept, cls.slope, size=1000)
    cls.y_linear += np.random.normal(size=1000, scale=cls.sd)
    cls.data_linear = dict(x=x_linear, y=cls.y_linear)
    x_logistic, y_logistic = generate_data(cls.intercept, cls.slope, size=3000)
    y_logistic = 1 / (1 + np.exp(-y_logistic))
    bern_trials = [np.random.binomial(1, i) for i in y_logistic]
    cls.data_logistic = dict(x=x_logistic, y=bern_trials)
Example 23 (3 votes)

def generate_noise(self, ts):
    noise = []
    for t, V, sigma0, sigma, tau in zip(ts, self.params.V, self.params.sigma0,
                                        self.params.sigma, self.params.tau):
        cov = cl.generate_covariance(t, sigma0, sigma, tau)
        A = nl.cholesky(cov)
        xs = nr.normal(size=len(t))
        noise.append(V + np.dot(A, xs))
    return noise
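The Cholesky factorization here is the standard trick for drawing correlated Gaussian noise: if cov = A @ A.T and z ~ N(0, I), then A @ z has covariance cov. A self-contained sketch of just that step, with an arbitrary example covariance:

import numpy as np

cov = np.array([[1.0, 0.8],
                [0.8, 1.0]])           # target covariance matrix
A = np.linalg.cholesky(cov)            # cov == A @ A.T
z = np.random.normal(size=(2, 100000))
samples = A @ z                        # columns now have covariance cov
print(np.cov(samples))                 # close to cov for large sample counts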
Example 24 (3 votes)
Project: scikit-learn  Source File: plot_digits_linkage.py

def nudge_images(X, y):
    # Having a larger dataset shows more clearly the behavior of the
    # methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods are strongly
    # super-linear in n_samples
    shift = lambda x: ndimage.shift(x.reshape((8, 8)),
                                    .3 * np.random.normal(size=2),
                                    mode='constant',
                                    ).ravel()
    X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
    Y = np.concatenate([y, y], axis=0)
    return X, Y
Example 25 (3 votes)

def update(ev):
    m = 50
    data = np.random.normal(size=(N, m), scale=0.3)
    data[0] = 0
    data[1] = 1
    data[100] = 2
    data[data > 1] += 4
    lines.roll_data(data)
Example 26 (3 votes)

def add_noise(seq, scale):
    """return seq + a 0-centered normal noise vector.

    Parameters
    ----------
    seq : ndarray
        base sequence
    scale : float
        stdev of noise

    Returns
    -------
    ndarray
    """
    if scale == 0:
        return seq
    else:
        return seq + np.random.normal(0, scale, len(seq))
Example 27 (3 votes)
Project: SparkADMM  Source File: LogisticRegressionSolver.py

def generateData(n, d, m):
    """
    Generates a synthetic logistic regression dataset.
    Used for testing purposes.
    """
    features = ['input_' + str(i) for i in range(d)]
    outputs = ['output_' + str(i) for i in range(m)]
    X = np.random.normal(size=(n, d))
    beta = np.random.normal(size=(m, d))
    beta = normalize_row(beta)
    Y = np.sign(2.0 / (1.0 + np.exp(-np.dot(X, beta.T))) - 1.0)
    inputDF = SparseDataFrame(X, columns=features)
    outputDF = SparseDataFrame(Y, columns=outputs)
    true_beta = SparseDataFrame(beta, index=outputs, columns=features)
    return inputDF, outputDF, true_beta
Example 28 (3 votes)
Project: datashader  Source File: test_transfer_functions.py

def test_eq_hist():
    # Float
    data = np.random.normal(size=300**2)
    data[np.random.randint(300**2, size=100)] = np.nan
    data = (data - np.nanmin(data)).reshape((300, 300))
    mask = np.isnan(data)
    eq = tf.eq_hist(data, mask)
    check_eq_hist_cdf_slope(eq)
    assert (np.isnan(eq) == mask).all()
    # Integer
    data = np.random.normal(scale=100, size=(300, 300)).astype('i8')
    data = data - data.min()
    eq = tf.eq_hist(data)
    check_eq_hist_cdf_slope(eq)
Example 29 (3 votes)

def __init__(self, input_dim, variance=1., active_dims=None, ARD=False,
             name='basis func kernel'):
    """
    Abstract superclass for kernels with explicit basis functions for use in GPy.

    This class does NOT automatically add an offset to the design matrix phi!
    """
    super(BasisFuncKernel, self).__init__(input_dim, active_dims, name)
    assert self.input_dim == 1, "Basis Function Kernel only implemented for one dimension. Use one kernel per dimension (and add them together) for more dimensions"
    self.ARD = ARD
    if self.ARD:
        phi_test = self._phi(np.random.normal(0, 1, (1, self.input_dim)))
        variance = variance * np.ones(phi_test.shape[1])
    else:
        variance = np.array(variance)
    self.variance = Param('variance', variance, Logexp())
    self.link_parameter(self.variance)
Example 30 (3 votes)
Project: borg  Source File: test_statistics.py

def test_log_normal_estimate_ml():
    def assert_ok(mu, sigma, theta, terminus):
        values = numpy.exp(numpy.random.normal(mu, sigma, 64000)) + theta
        uncensored = values[values < terminus]
        (e_mu, e_sigma, e_theta) = \
            borg.statistics.log_normal_estimate_ml(
                uncensored,
                numpy.zeros(uncensored.size, dtype=numpy.intc),
                numpy.array([1.0]),
                numpy.array([values.size - uncensored.size], dtype=numpy.intc),
                terminus,
            )
        nose.tools.assert_almost_equal(e_mu, mu, places=2)
        nose.tools.assert_almost_equal(e_sigma, sigma, places=2)
        nose.tools.assert_almost_equal(e_theta, theta, places=2)

    yield (assert_ok, 10.0, 1.0, 0.0, 10.0)
Example 31 (3 votes)
Project: lifelines  Source File: generate_datasets.py

def constant_coefficients(d, timelines, constant=False, independent=0):
    """
    Proportional hazards model.

    d: the dimension of the dataset
    timelines: the observational times
    constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is
        independent of survival), or a list of covariates to make independent.

    Returns a matrix (t, d+1) of coefficients.
    """
    return time_varying_coefficients(d, timelines, constant=True, independent=independent, randgen=random.normal)
Example 32 (3 votes)
Project: kaggle-ndsb  Source File: nn_plankton.py

def sample(self, shape):
    if len(shape) != 2:
        raise RuntimeError("Only shapes of length 2 are supported.")
    a = np.random.normal(0.0, 1.0, shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == shape else v  # pick the one with the correct shape
    # size = np.maximum(shape[0], shape[1])
    # a = np.random.normal(0.0, 1.0, (size, size))
    # q, _ = np.linalg.qr(a)
    return nn.utils.floatX(self.gain * q[:shape[0], :shape[1]])
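This sampler draws a Gaussian matrix and keeps the orthonormal factor of its SVD, giving an orthogonal weight initialization. A standalone sketch of the same idea (the function name and verification line are my own, not from nn_plankton):

import numpy as np

def orthogonal_init(shape, gain=1.0):
    a = np.random.normal(0.0, 1.0, shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == shape else v  # factor with the requested shape
    return gain * q

W = orthogonal_init((64, 32))
print(np.allclose(W.T @ W, np.eye(32)))  # True: columns are orthonormal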
Example 33 (3 votes)
Project: APGL  Source File: Util.py

@staticmethod
def randNormalInt(mean, sd, min, max):
    """
    Returns a normally distributed integer within a range (inclusive of min, max)
    """
    i = round(random.normal(mean, sd))
    while i < min or i > max:
        i = round(random.normal(mean, sd))
    return i
Example 34 (3 votes)
Project: RoBO  Source File: base_prior.py

def sample_from_prior(self, n_samples):
    """
    Returns N samples from the prior.

    Parameters
    ----------
    n_samples : int
        The number of samples that will be drawn.

    Returns
    -------
    (N, D) np.array
        The samples from the prior.
    """
    p0 = np.random.normal(loc=self.mean,
                          scale=self.sigma,
                          size=n_samples)
    return p0[:, np.newaxis]
Example 35 (3 votes)
Project: agnez  Source File: test_output.py

def test_timesseries2d():
    data = np.random.normal(0, 1, (3, 5, 8))
    labels = np.arange(5)
    ebd, mtd = timeseries2d(data)
    assert ebd.shape == (3, 5, 2)
    fig, ax, sc, txts = timeseries2dplot(ebd, labels)
    assert isinstance(fig, plt.Figure)
Example 36 (3 votes)

def get_weights(self, n, mean=10.0, stddev=5):
    """Returns random positive weight values."""
    assert mean > 0, 'Weights have to be positive.'
    results = []
    while len(results) < n:
        v = numpy.random.normal(mean, stddev)
        if v > 0:
            results.append(v)
    return results
Example 37 (3 votes)

def initialize_value(self, vw):
    magnitude = he_magnitude(vw.shape,
                             in_axes=self.in_axes,
                             out_axes=self.out_axes,
                             gain=self.gain)
    return np.random.normal(loc=0,
                            scale=magnitude,
                            size=vw.shape)
Example 38 (3 votes)

def test_initialization():
    x = np.random.normal(size=(13, 5))
    y = np.random.randint(2, size=(13, 3))
    # no edges make independent model
    model = MultiLabelClf()
    model.initialize(x, y)
    assert_equal(model.n_states, 2)
    assert_equal(model.n_labels, 3)
    assert_equal(model.n_features, 5)
    assert_equal(model.size_joint_feature, 5 * 3)
    # setting and then initializing is no-op
    model = MultiLabelClf(n_features=5, n_labels=3)
    model.initialize(x, y)  # smoketest
    model = MultiLabelClf(n_features=3, n_labels=3)
    assert_raises(ValueError, model.initialize, X=x, Y=y)
Example 39 (3 votes)

def __init__(self, mean=0.0, std=1, size=1000):
    self.data = np.random.normal(mean, std, size)
    self.mean = mean
    self.std = std
    self.size = size
    # For caching plots that may be expensive to compute
    self._png_data = None
Example 40 (3 votes)
Project: outputty  Source File: test_Table_histogram.py

def test_vertical_histogram(self):
    seed(1234)  # Setting the seed to get repeatable results
    numbers = normal(size=1000)
    my_table = Table(headers=['values'])
    my_table.extend([[value] for value in numbers])
    output = my_table.write('histogram', column='values', height=5,
                            orientation='vertical', bins=10)
    expected = dedent('''
        265 |
        ||
        |||
        ||||
        ||||||
        -3.56 2.76
        ''').strip()
    self.assertEquals(output, expected)
Example 41 (3 votes)
Project: py-sdm  Source File: test_typedbytes_utils.py

def test_typedbytes_flann():
    pts = np.random.normal(size=(100, 2))
    for algorithm in ['kdtree_single', 'linear']:
        idx = cyflann.FLANNIndex(algorithm=algorithm)
        idx.build_index(pts)
        with closing(StringIO()) as sio:
            out = tb.Output(sio)
            register_write(out)
            out.write(idx)
            sio.seek(0)
            inp = tb.Input(sio)
            register_read(inp)
            idx2 = inp.read()
        fn = partial(_check_flann, idx, idx2)
        fn.description = "flann typedbytes io - {}".format(algorithm)
        yield fn
Example 42 (3 votes)

@seed
def test_fit(self):
    data = numpy.random.normal(loc=2.0, scale=6.7, size=37)
    params = self.dist.fit(data)
    check_params(
        (params.mu, 4.1709713618),
        (params.sigma, 7.2770395662),
    )
Example 43 (3 votes)
Project: sklearn-compiledtrees  Source File: test_compiled.py

def test_predictions_with_invalid_input(self):
    num_features = 100
    num_examples = 100
    X = np.random.normal(size=(num_examples, num_features))
    X = X.astype(np.float32)
    y = np.random.choice([-1, 1], size=num_examples)
    for cls in REGRESSORS:
        clf = cls()
        clf.fit(X, y)
        compiled = CompiledRegressionPredictor(clf)
        assert_raises(ValueError, compiled.predict,
                      np.resize(X, (1, num_features, num_features)))
        assert_allclose(compiled.score(X, y), clf.score(X, y))
Example 44 (3 votes)

def _noise(self, scale=None, x=None):
    # HACK: remove any duplicate points by adding noise
    import numpy as np
    if x is None: x = self.x
    if scale is None: scale = self.noise
    if not scale: return x
    return x + np.random.normal(scale=scale, size=x.shape)
Example 45 (3 votes)
Project: tsfresh  Source File: test_feature_significance.py

def test_all_features_bad(self):
    # Mixed case with real target
    y = pd.Series(np.random.normal(0, 1, 1000))
    X = pd.DataFrame(index=range(1000))
    X["irr1"] = np.random.binomial(0, 0.1, 1000)
    X["irr2"] = np.random.binomial(0, 0.15, 1000)
    X["irr3"] = np.random.binomial(0, 0.05, 1000)
    X["irr4"] = np.random.binomial(0, 0.2, 1000)
    X["irr5"] = np.random.binomial(0, 0.25, 1000)
    X["irr6"] = np.random.binomial(0, 0.01, 1000)
    df_bh = tsfresh.feature_selection.feature_selector.check_fs_sig_bh(X, y, self.settings)
    feat_rej = df_bh[df_bh.rejected].Feature
    self.assertEqual(len(feat_rej), 0)
Example 46 (3 votes)
Project: paramnormal  Source File: test_dist.py

def setup(self):
    self.dist = dist.normal
    self.cargs = []
    self.ckwds = dict(mu=4, sigma=1.75)
    self.np_rand_fxn = numpy.random.normal
    self.npargs = []
    self.npkwds = dict(loc=4, scale=1.75)
Example 47 (3 votes)
Project: lstm-anomaly-detect  Source File: lstm-synthetic-wave-anomaly-detect.py

def gen_wave():
    """Generate a synthetic wave by adding up a few sine waves and some noise.

    :return: the final wave
    """
    t = np.arange(0.0, 10.0, 0.01)
    wave1 = sin(2 * 2 * pi * t)
    noise = random.normal(0, 0.1, len(t))
    wave1 = wave1 + noise
    print("wave1", len(wave1))
    wave2 = sin(2 * pi * t)
    print("wave2", len(wave2))
    t_rider = arange(0.0, 0.5, 0.01)
    wave3 = sin(10 * pi * t_rider)
    print("wave3", len(wave3))
    insert = round(0.8 * len(t))
    wave1[insert:insert + 50] = wave1[insert:insert + 50] + wave3
    return wave1 + wave2
Example 48 (3 votes)

def update1():
    global data1, curve1, ptr1
    data1[:-1] = data1[1:]  # shift data in the array one sample left
                            # (see also: np.roll)
    data1[-1] = np.random.normal()
    curve1.setData(data1)
    ptr1 += 1
    curve2.setData(data1)
    curve2.setPos(ptr1, 0)
Example 49 (3 votes)
Project: deepx  Source File: test_lstm.py

def test_stateful_lstm(self):
    self.lstm.reset_states()
    self.set_weights(self.lstm.right, 2)
    X = np.random.normal(size=(1, 1, 1))
    weights = np.ones((self.lstm.right.get_dim_in(), self.lstm.right.get_dim_out())) * 2
    state = np.zeros((1, self.lstm.right.get_dim_out()))
    out = np.zeros((1, self.lstm.right.get_dim_out()))
    for i in range(1000):
        out, state = self.lstm_forward(X, out, state, weights)
        lstm_out = self.lstm.predict(X)
        lstm_state = T.get_value(self.lstm.right.states[1])
        np.testing.assert_almost_equal(lstm_out[0], out[0], 3)
        np.testing.assert_almost_equal(lstm_state, state[0], 3)
Example 50 (3 votes)

def states(self, jam):
    # Sample the deformation
    for state in AbstractPitchShift.states(self, jam):
        for _ in range(self.n_samples):
            state['n_semitones'] = np.random.normal(loc=self.mean,
                                                    scale=self.sigma,
                                                    size=None)
            yield state
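One closing note, not drawn from the examples above: newer NumPy code usually routes the same draws through a Generator, which also makes them easy to reproduce:

import numpy as np

rng = np.random.default_rng(seed=42)               # Generator replacement for the legacy API
x = rng.normal(loc=0.0, scale=1.0, size=(100, 3))  # same loc/scale/size semantics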