Here are examples of the Python API numpy.random.random, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
180 Examples
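As a quick orientation before the project snippets: numpy.random.random returns uniform samples from the half-open interval [0.0, 1.0), either as a single float (no argument) or as an ndarray of the requested shape. A minimal sketch, not taken from any of the projects below:

import numpy as np

x = np.random.random()        # one float drawn from [0.0, 1.0)
v = np.random.random(5)       # 1-D array of 5 floats
m = np.random.random((3, 4))  # a shape tuple gives a multidimensional array
# a rescaling idiom that recurs in the examples below: uniform values in [-1.0, 1.0)
u = np.random.random((3, 4)) * 2 - 1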
Example 51 (3 votes)
Project: milk Source File: test_pca.py
def test_mds_list():
    from milk.unsupervised.pca import mds
    data = np.random.random((128,16))
    V = mds(data,2)
    V2 = mds(list(data),2)
    assert np.all(V == V2)
Example 52 (3 votes)
def get_data(num_samples, num_features, groundtruth):
    """Returns randomly sampled X, y."""
    X = np.random.random((num_samples, num_features))
    mu_y, std_y = groundtruth.predict(X)
    y = mu_y + std_y * np.random.normal(size=num_samples)
    return X, y
Example 53 (3 votes)
Project: trimesh Source File: test_utility.py
def test_horn(self):
    log.info('Testing absolute orientation')
    for i in range(10):
        points_A = (np.random.random(self.test_dim) - .5) * 100
        angle = 4*np.pi*(np.random.random() - .5)
        vector = trimesh.unitize(np.random.random(3) - .5)
        offset = 100*(np.random.random(3)-.5)
        T = trimesh.transformations.rotation_matrix(angle, vector)
        T[0:3,3] = offset
        points_B = trimesh.transformations.transform_points(points_A, T)
        M, error = trimesh.points.absolute_orientation(points_A, points_B, return_error=True)
        self.assertTrue(np.all(error < TOL_ZERO))
Example 54 (3 votes)
Project: gavel Source File: judge.py
def choose_next(annotator):
    items = preferred_items(annotator)
    shuffle(items)  # useful for argmax case as well in the case of ties
    if items:
        if random() < crowd_bt.EPSILON:
            return items[0]
        else:
            return crowd_bt.argmax(lambda i: crowd_bt.expected_information_gain(
                annotator.alpha,
                annotator.beta,
                annotator.prev.mu,
                annotator.prev.sigma_sq,
                i.mu,
                i.sigma_sq), items)
    else:
        return None
Example 55 (3 votes)
def test_add_dataset(self):
    """Test if the add-dataset subcommand adds datasets to projects."""
    tempdir = tempfile.mkdtemp()
    outfile = op.join(tempdir, "testdata.csv")
    dframe = pd.DataFrame(np.random.random((10, 2)), columns=['a', 'b'])
    dframe.to_csv(outfile, index=False)
    cmd = ("semantic add-dataset testdata --project pysemantic --path {}"
           " --dlm ,")
    cmd = cmd.format(outfile).split(" ")
    try:
        subprocess.check_call(cmd, env=self.testenv)
        _pr = pr.Project("pysemantic")
        self.assertIn("testdata", _pr.datasets)
        specs = dict(path=outfile, delimiter=',')
        actual = pr.get_schema_specs("pysemantic", "testdata")
        self.assertKwargsEqual(specs, actual)
    finally:
        pr.remove_dataset("pysemantic", "testdata")
        shutil.rmtree(tempdir)
Example 56 (3 votes)
Project: pIDLy Source File: pidly.py
def test_20_function_calls_explicit(self):
    x = numpy.random.random(20)
    y = numpy.zeros(20)
    self.start_time = now()
    for i in range(20):
        y[i] = self.idl.func('sin', x[i])
    self.end_time = now()
    self.assertEqual(y.tolist(), numpy.sin(x).tolist())
Example 57 (3 votes)
def testUndo(self):
    what = np.random.random(self.whatshape)
    wheres = [
        (20, 11),
        (21, 12),
        (22, 13),
        (50, 60),
    ]
    for whs in wheres:
        where = np.zeros(whs)
        embd = utils.embed_to(where, what.copy())
        undone = utils.undo_embed(embd, what.shape)
        self.assertEqual(what.shape, undone.shape)
        np.testing.assert_equal(what, undone)
Example 58 (3 votes)
def setUp(self):
    data = np.random.random((10, 10, 600))
    s = EELSSpectrum(data)
    s.axes_manager[-1].offset = -150.
    s.axes_manager[-1].scale = 0.5
    s.metadata.set_item(
        'Acquisition_instrument.TEM.Detector.EELS.collection_angle',
        3.0)
    s.metadata.set_item('Acquisition_instrument.TEM.beam_energy', 1.0)
    s.metadata.set_item(
        'Acquisition_instrument.TEM.convergence_angle',
        2.0)
    m = s.create_model(
        ll=s + 1,
        auto_background=False,
        auto_add_edges=False)
    g = Gaussian()
    m.append(g)
    self.model = m
Example 59 (3 votes)
def read_frames(self, frames_to_read):
    if self.has_broken_header and self.seekpoint + frames_to_read > self.num_frames // 2:
        raise IOError()
    num_frames_left = self.num_frames - self.seekpoint
    if num_frames_left < frames_to_read:
        will_read = num_frames_left
    else:
        will_read = frames_to_read
    self.seekpoint += will_read
    return np.random.random(will_read) * 2 - 1
Example 60 (3 votes)
Project: sklearn_tutorial Source File: plot_gui_example.py
def nonlinear_model(rseed=42, Npts=30):
    radius = 40 * np.random.random(Npts)
    far_pts = radius > 20
    radius[far_pts] *= 1.2
    radius[~far_pts] *= 1.1
    theta = np.random.random(Npts) * np.pi * 2
    data = np.empty((Npts, 2))
    data[:, 0] = radius * np.cos(theta)
    data[:, 1] = radius * np.sin(theta)
    labels = np.ones(Npts)
    labels[far_pts] = -1
    return data, labels
Example 61 (3 votes)
def main():
    fig, axes = plt.subplots(ncols=2)
    num = 5
    xy = np.random.random((num, 2))
    lines = []
    for i in range(num):
        line, = axes[0].plot((i + 1) * np.arange(10))
        lines.append(line)
    points = []
    for x, y in xy:
        point, = axes[1].plot([x], [y], linestyle='none', marker='o')
        points.append(point)
    MultiHighlight(zip(points, lines))
    plt.show()
Example 62 (3 votes)
Project: lhcb_trigger_ml Source File: categorical.py
def generate_slice(length, subsample):
    if subsample == 1.0:
        return slice(None, None, None)
    else:
        p = numpy.random.random() * (1. - subsample)
        start = int(p * length)
        stop = int((p + subsample) * length)
        return slice(start, stop, None)
Example 63 (3 votes)
Project: rio-color Source File: test_colorspace.py
def test_bad_array_dims():
    bad = np.random.random((3, 3))
    with pytest.raises(ValueError) as exc:
        saturate_rgb(bad, 1.1)
    assert 'wrong number of dimensions' in str(exc.value)
    with pytest.raises(ValueError) as exc:
        convert_arr(bad, cs.rgb, cs.lch)
    assert 'wrong number of dimensions' in str(exc.value)
Example 64 (3 votes)
def setUp(self):
    shape = (4,7)
    rand = np.random.random
    self.x = rand(shape) + rand(shape).astype(np.complex)*1j
    self.dtype = self.x.dtype
Example 65 (3 votes)
Project: sima Source File: test_sequence.py
def test_all_1_mask(self):
    frame_mask = np.random.random((128, 256))
    frame_mask = frame_mask > 0.5
    masked = self.tiff_seq.mask([(None, None, frame_mask, None)])
    self.masked_mask[:, :, frame_mask, :] = True
    assert_equal(masked.shape, (5, 2, 128, 256, 2))
    assert_(np.all(np.isnan(masked)[self.masked_mask]))
    assert_(np.all(np.isfinite(masked)[~self.masked_mask]))
Example 66 (3 votes)
Project: universal-portfolios Source File: tools.py
def mc_simplex(d, points):
    """ Sample random points from a simplex with dimension d.
    :param d: Number of dimensions.
    :param points: Total number of points.
    """
    a = np.sort(np.random.random((points, d)))
    a = np.hstack([np.zeros((points,1)), a, np.ones((points,1))])
    return np.diff(a)
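The trick above is the standard one: sorting d uniform draws and padding with 0 and 1 turns the sorted values into d+1 nonnegative gaps that sum to one, i.e. a uniform point on the standard simplex. A quick sanity check of the claimed shape (the call values here are made up for illustration):

pts = mc_simplex(2, 4)                    # 4 samples from the 2-simplex
assert pts.shape == (4, 3)                # each row has d+1 entries
assert np.allclose(pts.sum(axis=1), 1.0)  # nonnegative gaps summing to 1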
Example 67 (3 votes)
Project: msmtools Source File: test_assessment.py
def test_is_transition_matrix(self):
    self.assertTrue(is_transition_matrix(self.T))
    """Larger test-case to prevent too restrictive tolerance settings"""
    X = np.random.random((2000, 2000))
    Tlarge = X / X.sum(axis=1)[:, np.newaxis]
    self.assertTrue(is_transition_matrix(Tlarge))
Example 68 (3 votes)
Project: brainx Source File: test_modularity.py
def test_graphpartition():
    """ test GraphPartition correctly handles graph whose
    nodes are strings"""
    graph = nx.Graph()
    graph.add_edge('a','b')
    graph.add_edge('c','d')
    index = {0:set([0,1]), 1:set([2,3])}
    gpart = mod.GraphPartition(graph, index)
    assert gpart._node_set == set([0,1,2,3])
    # test raise error if matrix unweighted
    jnk = np.random.random((10,10))
    jnk = np.triu(jnk,1)
    graph = nx.from_numpy_matrix(jnk, nx.Graph(weighted=False))
    npt.assert_raises(ValueError, mod.GraphPartition, graph, index)
Example 69 (3 votes)
def test_stat():
    a = np.random.random((3, 3))
    a[1][1] = 0
    a = a / a.sum()
    print dist_stats(a)
Example 70 (3 votes)
def test_to_array():
    """Test export to a numpy array"""
    X = np.random.random((10, 6))

    def check_toarray(transfer_bytes):
        Xsdb = sdb.from_array(X)
        Xnp = Xsdb.toarray(transfer_bytes=transfer_bytes)
        # set ATOL high because we're translating text
        assert_allclose(Xnp, X, atol=1E-5)

    for transfer_bytes in (True, False):
        yield check_toarray, transfer_bytes
Example 71 (3 votes)
def step(self):
    from speedup.speedup import pyx_set_distances
    from speedup.speedup import pyx_iteration
    pyx_set_distances(self.X,self.Y,self.A,self.R,self.num)
    pyx_iteration(self.X,self.Y,self.A,self.R,self.F,self.num,
                  self.stp,self.farl,self.nearl)
    if random()<self.friendship_initiate_prob:
        k = randint(self.num)
        self.make_friends(k)
Example 72 (3 votes)
Project: sand-spline Source File: helpers.py
def _rnd_interpolate(xy, num_points, ordered=False):
    tck,u = splprep([
        xy[:,0],
        xy[:,1]],
        s=0
    )
    unew = random(num_points)
    if ordered:
        unew = sort(unew)
    out = splev(unew, tck)
    return column_stack(out)
Example 73 (3 votes)
def __init__(self, inputs, outputs, numHiddenNeurons, activationFunction):
    self.activationFunction = activationFunction
    self.inputs = inputs
    self.outputs = outputs
    self.numHiddenNeurons = numHiddenNeurons
    # input to hidden weights
    self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs))
    # bias of hidden units
    self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1
    # hidden to output layer connection
    self.beta = np.random.random((self.numHiddenNeurons, self.outputs))
    # auxiliary matrix used for sequential learning
    self.M = None
Example 74 (3 votes)
def make_callback(i):
    ds = r.data_source

    def func():
        if i == N-1:
            ds.data['x'].append(50)
            ds.data['y'].append(95)
            ds.data['text'].append("DONE")
            ds.data['text_color'].append("white")
        else:
            ds.data['x'].append(np.random.random()*70 + 15)
            ds.data['y'].append(np.random.random()*70 + 15)
            ds.data['text_color'].append(RdYlBu3[i%3])
            ds.data['text'].append(str(i))
        ds.trigger('data', ds.data, ds.data)

    func.interval = i * 100
    return func
Example 75 (3 votes)
Project: render Source File: render.py
def random_parallelogram(self,x1,y1,x2,y2,x3,y3,grains):
    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill
    v1 = array((x2-x1, y2-y1))
    v2 = array((x3-x1, y3-y1))
    a1 = random((grains, 1))
    a2 = random((grains, 1))
    dd = v1*a1 + v2*a2
    dd[:,0] += x1
    dd[:,1] += y1
    for x,y in dd:
        rectangle(x,y,pix,pix)
    fill()
Example 76 (3 votes)
Project: nli_generation Source File: adverse_alg.py
def adverse_generator(train, gen_model, word_index, cache_prob, batch_size, hypo_len):
    cache = []
    while True:
        mb = load_data.get_minibatches_idx(len(train), batch_size, shuffle=True)
        for i, train_index in mb:
            if len(train_index) != batch_size:
                continue
            orig_batch = [train[k] for k in train_index]
            if np.random.random() > cache_prob or len(cache) < 100:
                gen_batch, _ = make_gen_batch(orig_batch, gen_model, word_index, hypo_len)
                cache.append(gen_batch)
            else:
                gen_batch = cache[np.random.random_integers(0, len(cache) - 1)]
            train_batch = make_train_batch(orig_batch, word_index, hypo_len)
            yield [train_batch, gen_batch], np.zeros(batch_size)
Example 77 (3 votes)
Project: progressivis Source File: test_input.py
def do_line(inp,s):
    sleep(2)
    for r in xrange(10):
        inp.from_input('line#%d'%r)
        sleep(np.random.random())
    sleep(1)
    s.stop()
Example 78 (3 votes)
def addObject(self, color=None):
    label = self.labelmgr.request()
    if color is None:
        color = colorsys.hsv_to_rgb(numpy.random.random(), 1.0, 1.0)
    self.setColor(label, color)
    return label
Example 79 (3 votes)
Project: sand-glyphs Source File: main-export.py
def get_word_generator():
    def word_generator():
        while True:
            word = []
            while random()>0.15:
                r = (0.9 + random()*1.1)*GLYPH_WIDTH
                word.append(r)
            if len(word)>2:
                yield word
    return word_generator
Example 80 (3 votes)
def test_copyto():
    a_np = np.random.random(size=(7, 11))
    a_ca = ca.array(a_np)
    b_np = np.zeros_like(a_np)
    b_ca = np.zeros_like(a_np)
    ca.copyto(b_np, a_ca)
    print(np.allclose(a_np, b_np))
    ca.copyto(b_ca, a_np)
    print(np.allclose(np.array(a_ca), np.array(b_ca)))
    ca.copyto(b_ca, a_ca)
    print(np.allclose(np.array(a_ca), np.array(b_ca)))
Example 81 (2 votes)
Project: KMCLib Source File: KMCConfigurationTest.py
def testQueries(self):
    """ Test the configuration's query functions. """
    config = KMCConfiguration.__new__(KMCConfiguration)
    config._KMCConfiguration__use_buckets = False
    c0_ref = numpy.random.random()
    c1_ref = numpy.random.random()
    c2_ref = numpy.random.random()

    # Set the backend proxy.
    class BackendProxy(object):
        def __init__(self):
            pass
        def elements(self):
            return ("B", "F", "A")
        def atomIDElements(self):
            return ("A", "B", "F")
        def atomIDCoordinates(self):
            return (Backend.Coordinate(c0_ref,c1_ref,c2_ref),)
        def movedAtomIDs(self):
            return (123, 234)
        def latestEventProcess(self):
            return 995976943
        def latestEventSite(self):
            return 556462676
        def particlesPerType(self):
            return (123, 467, 432)

    config._KMCConfiguration__backend = BackendProxy()
    # Query and check.
    atom_id_types = config.atomIDTypes()
    atom_id_coords = config.atomIDCoordinates()
    lattice_types = config.types()
    moved_atom_ids = config.movedAtomIDs()
    latest_event_process = config.latestEventProcess()
    latest_event_site = config.latestEventSite()
    particles_per_type = config.particlesPerType()
    self.assertEqual(atom_id_types, ("A","B","F"))
    self.assertEqual(lattice_types, ["B","F","A"])
    self.assertAlmostEqual(atom_id_coords[0][0], c0_ref, 10)
    self.assertAlmostEqual(atom_id_coords[0][1], c1_ref, 10)
    self.assertAlmostEqual(atom_id_coords[0][2], c2_ref, 10)
    self.assertEqual(moved_atom_ids, (123, 234))
    self.assertEqual(latest_event_process, 995976943)
    self.assertEqual(latest_event_site, 556462676)
    self.assertEqual(particles_per_type, (123, 467, 432))
Example 82 (2 votes)
Project: dolo Source File: taylor_expansion.py
def test_taylor_expansion():
    import numpy
    from numpy import array
    s0 = array([0.2, 0.4, 1.1])
    x0 = array([1.2, 0.9])
    N = 1000
    points = numpy.random.random((N,3))
    X_s = numpy.random.random((2,3))
    X_ss = numpy.random.random((2,3,3))
    X_sss = numpy.random.random((2,3,3,3))
    dr1 = TaylorExpansion(s0,x0,X_s)  #, X_ss, X_sss])
    dr2 = TaylorExpansion(s0,x0,X_s,X_ss)  #, X_sss])
    dr3 = TaylorExpansion(s0,x0,X_s, X_ss,X_sss)
    out1 = dr1(points)
    out2 = dr2(points)
    out3 = dr3(points)
    out1_1d = dr1(points[0,:])
    out2_1d = dr2(points[0,:])
    out3_1d = dr3(points[0,:])
    assert(abs(out1_1d - out1[0,:]).max()==0)
    assert(abs(out2_1d - out2[0,:]).max()==0)
    assert(abs(out3_1d - out3[0,:]).max()==0)
    ds = points-s0[None,:]
    ds2 = ds[:,:,None].repeat(3, axis=2)
    ds3 = ds2[:,:,:,None].repeat(3, axis=3)
    from numpy import dot
    verif1 = x0[None,:] + numpy.dot(ds, X_s.T)
    verif2 = dr2.__call2__(points)
    verif3 = dr3.__call2__(points)
    assert(abs(out1-verif1).max()<1e-12)
    assert(abs(out2-verif2).max()<1e-12)
    assert(abs(out3-verif3).max()<1e-12)
Example 83 (2 votes)
Project: attention-lvcsr Source File: test_ifelse.py
def test_multiple_out_crash(self):
    # This test failed up to commit 2faeb62c38
    p0 = self.shared(numpy.asarray(numpy.random.random([4, 8]),
                                   dtype=self.dtype))
    p1 = self.shared(numpy.asarray(numpy.random.random(8),
                                   dtype=self.dtype))
    p2 = self.shared(numpy.asarray(numpy.random.random([8, 3]),
                                   dtype=self.dtype))
    p3 = self.shared(numpy.asarray(numpy.random.random(3),
                                   dtype=self.dtype))
    p = [p0, p1, p2, p3]
    # in my code these vars are the result of applying scan
    ften0 = tensor.tensor3('ft0', dtype=self.dtype)
    fmat1 = tensor.matrix('fm1', dtype=self.dtype)
    ften2 = tensor.tensor3('ft2', dtype=self.dtype)
    fmat3 = tensor.matrix('fm3', dtype=self.dtype)
    # then I keep only the last iteration
    fsub0 = ften0[-1]
    fsub1 = fmat1[-1]
    fsub2 = ften2[-1]
    fsub3 = fmat3[-1]
    fsub = [fsub0, fsub1, fsub2, fsub3]
    acc = theano.tensor.constant(1, 'int8') >= 0
    new_positions = theano.ifelse.ifelse(acc, fsub, p)
    new_updates = [(p[0], new_positions[0])]
    f = theano.function([ften0, fmat1, ften2, fmat3], [],
                        updates=new_updates, mode=self.mode)
    self.assertFunctionContains1(f, self.get_ifelse(4))
    i1 = numpy.asarray(numpy.random.random([19, 4, 8]), dtype=self.dtype)
    i2 = numpy.asarray(numpy.random.random([19, 8]), dtype=self.dtype)
    i3 = numpy.asarray(numpy.random.random([19, 8, 3]), dtype=self.dtype)
    i4 = numpy.asarray(numpy.random.random([19, 3]), dtype=self.dtype)
    f(i1, i2, i3, i4)
Example 84 (0 votes)
def initialize(self, module_name, custom_rc_file=None):
    """
    Loads rcParams by first starting with the default parameter values from
    rcparams.default (already stored in the attribute 'default_params'), and
    then by checking for an rc file in:

        1) the path specified by the 'custom_rc_file' parameter
        2) the current working directory
        3) the user's home directory
        4) the directory in which the calling module is located

    The script searches in each of these locations, in order, and reads the
    first and only the first rc file that is found. If an rc file is found,
    the default_params are updated with the values from the rc file. The
    rc_params are then returned.

    The name of the rc file can be specified as a parameter ``rcfile_name``
    to the script. If not given, the rc file name defaults to the name of
    the calling module passed as an input parameter, with an 'rc' suffix.

    Note that this function can be called more than once, in order to
    initialize different sets of parameters from different rc files.
    """
    rc_file_params = None
    rc_file_paths = [os.getcwd(), _get_home_dir(), sys.path[0]]
    rcfile_name = os.path.split(module_name)[-1] + 'rc'
    rc_file_paths = [os.path.join(path, rcfile_name) for path in rc_file_paths]
    if custom_rc_file != None: rc_file_paths = [custom_rc_file] + rc_file_paths
    for rc_file_path in rc_file_paths:
        if os.path.exists(rc_file_path):
            logger.info("Loading custom rc_file %s"%rc_file_path)
            rc_file_params = read_rc_file(self._default_rcparams_dict, rc_file_path)
            break
    self._rcParams._validation = False
    # If an rc file was found, update the default_params with the values
    # from that rc file.
    if rc_file_params != None:
        for key in rc_file_params.iterkeys():
            self._rcParams[key] = rc_file_params.original_value[key]
            logger.info("custom '%s' parameter loaded"%key)
    else:
        logger.info("no rc file found. Using parameters from rcparams.default")
    # Now run the validation on all the items in the default_params
    # instance (as values read from rcparams.defaults have not yet been
    # validated).
    self.validate_params()
    self._rcParams._validation = True
    self._initialized = True
    # Check if a random_seed was loaded from the rcfile. If not (if
    # random_seed==None), then choose a random random_seed, and store it in
    # rcParams so that it can be written to a file at the end of model
    # runs, and saved for later reuse (for testing, etc.).
    if self._rcParams['random_seed'] == None:
        # Seed the random_seed with a known random integer, and save the seed for
        # later reuse (for testing, etc.).
        self._rcParams['random_seed'] = int(10**8 * np.random.random())
    np.random.seed(int(self._rcParams['random_seed']))
    logger.debug("Random seed set to %s"%int(self._rcParams['random_seed']))
Example 85 (0 votes)
Project: qutip Source File: test_dimensions.py
def test_type_from_dims():
    def dims_case(dims, expected_type, enforce_square=True):
        actual_type = type_from_dims(dims, enforce_square=enforce_square)
        assert_equal(
            actual_type,
            expected_type,
            "Expected {} to be type='{}', but was type='{}'.".format(
                dims, expected_type, actual_type
            )
        )

    def qobj_case(qobj):
        assert_equal(type_from_dims(qobj.dims), qobj.type)

    dims_case([[2], [2]], 'oper')
    dims_case([[2, 3], [2, 3]], 'oper')
    dims_case([[2], [3]], 'other')
    dims_case([[2], [3]], 'oper', False)
    dims_case([[2], [1]], 'ket')
    dims_case([[1], [2]], 'bra')
    dims_case([[[2, 3], [2, 3]], [1]], 'operator-ket')
    dims_case([[1], [[2, 3], [2, 3]]], 'operator-bra')
    dims_case([[[3], [3]], [[2, 3], [2, 3]]], 'other')
    dims_case([[[3], [3]], [[2, 3], [2, 3]]], 'super', False)
    dims_case([[[2], [3, 3]], [[3], [2, 3]]], 'other')

    ## Qobj CASES ##
    N = int(np.ceil(10.0 * np.random.random())) + 5
    ket_data = np.random.random((N, 1))
    ket_qobj = Qobj(ket_data)
    qobj_case(ket_qobj)
    bra_data = np.random.random((1, N))
    bra_qobj = Qobj(bra_data)
    qobj_case(bra_qobj)
    oper_data = np.random.random((N, N))
    oper_qobj = Qobj(oper_data)
    qobj_case(oper_qobj)
    N = 9
    super_data = np.random.random((N, N))
    super_qobj = Qobj(super_data, dims=[[[3]], [[3]]])
    qobj_case(super_qobj)
Example 86 (0 votes)
Project: pymdptoolbox Source File: example.py
def _randSparse(states, actions, mask):
    """Generate random sparse ``P`` and ``R``. See ``rand`` for details.
    """
    # definition of transition matrix : square stochastic matrix
    P = [None] * actions
    # definition of reward matrix (values between -1 and +1)
    R = [None] * actions
    for action in range(actions):
        # it may be more efficient to implement this by constructing lists
        # of rows, columns and values then creating a coo_matrix, but this
        # works for now
        PP = _sp.dok_matrix((states, states))
        RR = _sp.dok_matrix((states, states))
        for state in range(states):
            if mask is None:
                m = _np.random.random(states)
                m[m <= 2/3.0] = 0
                m[m > 2/3.0] = 1
            elif mask.shape == (actions, states, states):
                m = mask[action][state]  # mask[action, state, :]
            else:
                m = mask[state]
            n = int(m.sum())  # m[state, :]
            if n == 0:
                m[_np.random.randint(0, states)] = 1
                n = 1
            # find the columns of the vector that have non-zero elements
            nz = m.nonzero()
            if len(nz) == 1:
                cols = nz[0]
            else:
                cols = nz[1]
            vals = _np.random.random(n)
            vals = vals / vals.sum()
            reward = 2*_np.random.random(n) - _np.ones(n)
            PP[state, cols] = vals
            RR[state, cols] = reward
        # PP.tocsr() takes the same amount of time as PP.tocoo().tocsr()
        # so constructing PP and RR as coo_matrix in the first place is
        # probably "better"
        P[action] = PP.tocsr()
        R[action] = RR.tocsr()
    return(P, R)
Example 87 (0 votes)
Project: pydy Source File: test_code.py
def test_generate_ode_function(self):
    system = multi_mass_spring_damper(1, True, True)
    args = (system.eom_method.mass_matrix_full,
            system.eom_method.forcing_full,
            system.constants_symbols,
            system.coordinates,
            system.speeds)
    kwargs = {'specified': tuple(system.specifieds_symbols)}
    mass_matrix, forcing_vector, constants, coordinates, speeds = args
    specifieds = kwargs['specified']
    F, x, v = np.random.random(3)
    states = np.array([x, v])
    constants_map = dict(zip(constants, np.random.random(len(constants))))
    m = constants_map[sm.symbols('m0')]
    k = constants_map[sm.symbols('k0')]
    c = constants_map[sm.symbols('c0')]
    g = constants_map[sm.symbols('g')]
    expected_dx = np.array([v, 1.0 / m * (-c * v + m * g - k * x + F)])
    rhs_args = {'constants': constants_map,
                'specified': {specifieds[0]: F}}
    backends = ['lambdify']
    from ..ode_function_generators import theano, Cython
    if theano:
        backends.append('theano')
    if Cython:
        backends.append('cython')
    for backend in backends:
        rhs = generate_ode_function(mass_matrix, forcing_vector,
                                    constants, coordinates, speeds,
                                    specifieds, generator=backend)
        dx = rhs(states, 0.0, rhs_args)
        testing.assert_allclose(dx, expected_dx)
    # Now try it with a function defining the specified quantities.
    rhs_args['specified'] = {specifieds[0]: lambda x, t: np.sin(t)}
    t = 14.345
    expected_dx = np.array([v, 1.0 / m * (-c * v + m * g - k * x +
                                          np.sin(t))])
    for backend in backends:
        rhs = generate_ode_function(mass_matrix, forcing_vector,
                                    constants, coordinates, speeds,
                                    specifieds, generator=backend)
        dx = rhs(states, t, rhs_args)
        testing.assert_allclose(dx, expected_dx)
    # Now try it without specified values.
    system = multi_mass_spring_damper(1, True)
    args = (system.eom_method.mass_matrix_full,
            system.eom_method.forcing_full,
            system.constants_symbols,
            system.coordinates,
            system.speeds)
    mass_matrix, forcing_vector, constants, coordinates, speeds = args
    expected_dx = np.array([v, 1.0 / m * (-c * v + m * g - k * x)])
    for backend in backends:
        rhs = generate_ode_function(mass_matrix, forcing_vector,
                                    constants, coordinates, speeds,
                                    specifieds, generator=backend)
        dx = rhs(states, 0.0, rhs_args)
        testing.assert_allclose(dx, expected_dx)
Example 88 (0 votes)
Project: pyoptools Source File: gs.py
def ffGS(z,target,estimate=None, iterations=20,error=None):
    '''
    Far field Gerchberg - Saxton Algorithm

    Calculates the phase distribution in an object plane (for a given
    amplitude constraint) to obtain a specific amplitude distribution in
    the target plane.
    It uses the Gerchberg - Saxton algorithm for Fraunhofer propagation.
    A FFT implementation of the Fraunhofer Transform is used.

    **ARGUMENTS:**

        ========== ======================================================
        z          Propagation distance. This is used to calculate the
                   resolution needed in the object plane, for a given
                   target resolution.
        target     :class:`Field` instance whose amplitude distribution
                   is used to represent the amplitude constraint to be
                   applied in the target plane. The phase of this field
                   is not used.
        estimate   :class:`Field` instance used as initial estimate for
                   the problem. The amplitude of this field is taken as
                   the reference amplitude and the phase is obtained. The
                   resolution used to define this field must match the
                   value needed to obtain the required target resolution
                   when the FFT-Fraunhofer transform is used. If the
                   wrong value is given an exception is raised.
                   If not given, a unitary amplitude wave, with random
                   phase and the correct resolution, is used.
        iterations Maximum number of iterations
        error      Expected error
        ========== ======================================================

    .. note : target and object must have the same wavelength

    **RETURN VALUE:**
        (holo,err)

        ==== ==========================================================
        holo Field instance, containing the reference amplitude
             information and the phase obtained from the iterative
             algorithm. The holo.res attribute contains the
             resolution of the calculated hologram for the given
             propagation distance. The holo.l attribute contains the
             wavelength used to calculate the hologram.
        err  Final error obtained
        ==== ==========================================================
    '''
    if estimate==None:
        edata=exp(2.j*pi*random(target.shape))
        sx,sy=target.size
        dxe=target.l*z/sx
        dye=target.l*z/sy
        estimate=Field(data=edata,psize=(dxe,dye),l=target.l)
    assert estimate.shape==target.shape,\
        "The estimate field, and the target field, must have the same shape"
    assert target.l==estimate.l,\
        "The wavelengths for the reference beam, and the target must be equal"
    sx,sy=target.size
    dxe=target.l*z/sx
    dye=target.l*z/sy
    dx,dy=estimate.res
    assert (dxe==dx) and (dye==dy),\
        "The resolution for the reference beam, and the target must be equal"
    holo=estimate
    eabs=estimate.abs()
    #Normalized Target amplitude
    ntarget=target.abs()/target.abs().max()
    for n in range(iterations):
        if n!=0: holo=imp.propagate_fraunhofer(-z)
        #Keep only the phase in the hologram plane
        holo.data=exp(1.j*holo.angle)
        holo=holo*eabs
        #Calculate the new image plane
        imp=holo.propagate_fraunhofer(z)
        err=(ntarget-imp.abs()/imp.abs().max()).std()
        if error!=None and err<error: break
        d=exp(1.j*imp.angle)
        imp=Field(data=d, psize=imp.psize, l=imp.l)
        imp=imp*target.abs()
    return holo,err
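For orientation, a hedged usage sketch of ffGS. The grid size, pixel size, wavelength, and propagation distance below are invented; only the Field(data=..., psize=..., l=...) construction and the holo.angle attribute already shown in the snippet itself are assumed:

# hypothetical target: a bright square on a dark 256x256 background
target_data = zeros((256, 256), dtype=complex)
target_data[96:160, 96:160] = 1.0
target = Field(data=target_data, psize=(10e-6, 10e-6), l=633e-9)
holo, err = ffGS(z=0.5, target=target, iterations=50)
# holo.angle holds the recovered phase mask; err is the residual amplitude error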
Example 89 (0 votes)
Project: scikit-beam Source File: test_histogram.py
def test_2d_histogram():
    ten = [10, 0, 10.01]
    nine = [9, 0, 9.01]
    onexf = random.random()*ten[2]
    onexi = int(onexf)
    oneyf = random.random()*ten[2]
    oneyi = int(oneyf)
    xf = np.random.random(1000000)*40
    yf = np.random.random(1000000)*40
    xi = xf.astype(int)
    yi = yf.astype(int)
    xl = xf.tolist()
    yl = yf.tolist()
    wf = np.linspace(1, 10, len(xf))
    wi = wf.copy()
    wl = wf.tolist()
    vals = [
        [[ten, ten], xf, yf, wf],
        [[ten, nine], xf, yf, 1],
        [[ten, ten], xi, yi, wi],
        [[ten, ten], xi, yi, 1],
        [[ten, nine], xf, yf, wi],
        [[ten, nine], xi, yi, wf],
        [[ten, nine], xl, yl, wl],
        [[ten, nine], xi, yi, wl],
        [[ten, nine], xf, yf, wl],
        [[ten, nine], xl, yl, wi],
        [[ten, nine], xl, yl, wf],
        [[ten, nine], onexf, oneyf, 1],
        [[ten, nine], onexi, oneyi, 1],
    ]
    for binlowhigh, x, y, w in vals:
        yield _2d_histogram_tester, binlowhigh, x, y, w
Example 90 (0 votes)
Project: pyoptools Source File: gs.py
def fftGS(z,target,estimate=None, iterations=20,error=None,flagRand=True):
    '''
    Far field Gerchberg - Saxton Algorithm

    Calculates the phase distribution in an object plane (for a given
    amplitude constraint) to obtain a specific amplitude distribution in
    the target plane.
    It uses the Gerchberg - Saxton algorithm for far-field propagation,
    using a standard FFT.

    **ARGUMENTS:**

        ========== ======================================================
        z          Propagation distance. This is used to calculate the
                   resolution needed in the object plane, for a given
                   target resolution.
        target     :class:`Field` instance whose amplitude distribution
                   is used to represent the amplitude constraint to be
                   applied in the target plane. The phase of this field
                   is not used.
        estimate   :class:`Field` instance used as initial estimate for
                   the problem. The amplitude of this field is taken as
                   the reference amplitude and the phase is obtained. The
                   resolution used to define this field must match the
                   value needed to obtain the required target resolution
                   when the FFT-Fraunhofer transform is used. If the
                   wrong value is given an exception is raised.
                   If not given, a unitary amplitude wave, with random
                   phase and the correct resolution, is used.
        iterations Maximum number of iterations
        error      Expected error
        ========== ======================================================

    .. note : target and object must have the same wavelength

    **RETURN VALUE:**
        (holo,err)

        ==== ==========================================================
        holo Field instance, containing the reference amplitude
             information and the phase obtained from the iterative
             algorithm. The holo.res attribute contains the
             resolution of the calculated hologram for the given
             propagation distance. The holo.l attribute contains the
             wavelength used to calculate the hologram.
        err  Final error obtained
        ==== ==========================================================
    '''
    if estimate==None:
        if flagRand:
            edata=exp(2.j*pi*random(target.shape))
        else:
            edata=exp(2.j*pi*ones(target.shape))
        sx,sy=target.size
        dxe=target.l*z/sx
        dye=target.l*z/sy
        estimate=Field(data=edata,psize=(dxe,dye),l=target.l)
    assert estimate.shape==target.shape,\
        "The estimate field, and the target field, must have the same shape"
    assert target.l==estimate.l,\
        "The wavelengths for the reference beam, and the target must be equal"
    sx,sy=target.size
    dxe=target.l*z/sx
    dye=target.l*z/sy
    dx,dy=estimate.res
    assert (dxe==dx) and (dye==dy),\
        "The resolution for the reference beam, and the target must be equal"
    holo=estimate.data
    eabs=estimate.abs()
    #Normalized Target amplitude
    ntarget=target.abs()/target.abs().max()
    for n in range(iterations):
        if n!=0: holo=fftshift(fft2(ifftshift(imp)))
        #Keep only the phase in the hologram plane
        holo=exp(1.j*angle(holo))
        holo=holo*eabs
        #Calculate the new image plane
        imp=ifftshift(ifft2(fftshift(holo)))
        err=(ntarget-abs(imp)/abs(imp).max()).std()
        if error!=None and err<error: break
        d=exp(1.j*angle(imp))
        imp=d*target.abs()
    holo=Field(data=holo, psize=(dxe,dye), l=target.l)
    return holo,err
Example 91 (0 votes)
Project: pele Source File: run_ptmc.py
def runptmc(nsteps_tot = 100000):
    natoms = 31
    nreplicas = 4
    Tmin = 0.2
    Tmax = 0.4
    nsteps_equil = 10000
    nsteps_tot = 100000
    histiprint = nsteps_tot / 10
    exchange_frq = 100*nreplicas
    coords=np.random.random(3*natoms)
    #quench the coords so we start from a reasonable location
    mypot = lj.LJ()
    ret = quench(coords, mypot)
    coords = ret.coords
    Tlist = getTemps(Tmin, Tmax, nreplicas)
    replicas = []
    ostreams = []
    histograms = []
    takesteplist = []
    radius = 2.5
    # create all the replicas which will be passed to PTMC
    for i in range(nreplicas):
        T = Tlist[i]
        potential = lj.LJ()
        takestep = RandomDisplacement( stepsize=0.01)
        adaptive = AdaptiveStepsize(takestep, last_step = nsteps_equil)
        takesteplist.append( adaptive )
        file = "mcout." + str(i+1)
        ostream = open(file, "w")
        hist = EnergyHistogram( -134., 10., 1000)
        histograms.append(hist)
        event_after_step=[hist]
        radiustest = SphericalContainer(radius)
        accept_tests = [radiustest]
        mc = MonteCarlo(coords, potential, takeStep=takestep, temperature=T, \
                        outstream=ostream, event_after_step = event_after_step, \
                        confCheck = accept_tests)
        mc.histogram = hist #for convienence
        mc.printfrq = 1
        replicas.append(mc)
    #is it possible to pickle a mc object?
    #cp = copy.deepcopy(replicas[0])
    #import pickle
    #with open("mc.pickle", "w") as fout:
    #    pickle.dump(takesteplist[0], fout)
    #attach an event to print xyz coords
    from pele.printing.print_atoms_xyz import PrintEvent
    printxyzlist = []
    for n, rep in enumerate(replicas):
        outf = "dumpstruct.%d.xyz" % (n+1)
        printxyz = PrintEvent(outf, frq=500)
        printxyzlist.append( printxyz)
        rep.addEventAfterStep(printxyz)
    #attach an event to print histograms
    for n, rep in enumerate(replicas):
        outf = "hist.%d" % (n+1)
        histprint = PrintHistogram(outf, rep.histogram, histiprint)
        rep.addEventAfterStep(histprint)
    ptmc = PTMC(replicas)
    ptmc.use_independent_exchange = True
    ptmc.exchange_frq = exchange_frq
    ptmc.run(nsteps_tot)
    #do production run
    #fix the step sizes
    #for takestep in takesteplist:
    #    takestep.useFixedStep()
    #ptmc.run(30000)
    if False: #this doesn't work
        print "final energies"
        for rep in ptmc.replicas:
            print rep.temperature, rep.markovE
        for rep in ptmc.replicas_par:
            print rep.mcsys.markovE
        for k in range(nreplicas):
            e,T = ptmc.getRepEnergyT(k)
            print T, e
    if False: #this doesn't work
        print "histograms"
        for i,hist in enumerate(histograms):
            fname = "hist." + str(i)
            print fname
            with open(fname, "w") as fout:
                for (e, visits) in hist:
                    fout.write( "%g %d\n" % (e, visits) )
    ptmc.end() #close the open threads
Example 92 (0 votes)
Project: pydy Source File: test_ode_function_generator.py
def test_generate_with_specifieds(self):
    # This model has three specifieds.
    sys = models.n_link_pendulum_on_cart(2, True, True)
    kin_diff_eqs = sys.eom_method.kindiffdict()
    coord_derivs = sm.Matrix([kin_diff_eqs[c.diff()] for c in
                              sys.coordinates])
    rhs = sys.eom_method.rhs()
    common_args = [sys.coordinates, sys.speeds, sys.constants_symbols]
    args_dct = {}
    args_dct['full rhs'] = [rhs] + common_args
    args_dct['full mass matrix'] = [sys.eom_method.forcing_full] + common_args
    args_dct['min mass matrix'] = [sys.eom_method.forcing] + common_args
    kwargs_dct = {}
    kwargs_dct['full rhs'] = {}
    kwargs_dct['full mass matrix'] = \
        {'mass_matrix': sys.eom_method.mass_matrix_full}
    kwargs_dct['min mass matrix'] = \
        {'mass_matrix': sys.eom_method.mass_matrix,
         'coordinate_derivatives': coord_derivs}
    for Subclass in self.ode_function_subclasses:
        for system_type, args in args_dct.items():
            g = Subclass(*args, specifieds=sys.specifieds_symbols,
                         **kwargs_dct[system_type])
            f = g.generate()
            rand = np.random.random
            x = rand(g.num_states)
            r = rand(g.num_specifieds)
            p = rand(g.num_constants)
            subs = {}
            for arr, syms in zip([x, r, p], [sys.states,
                                             sys.specifieds_symbols,
                                             sys.constants_symbols]):
                for val, sym in zip(arr, syms):
                    subs[sym] = val
            try:
                expected = sm.matrix2numpy(rhs.subs(subs),
                                           dtype=float).squeeze()
            except TypeError:
                # Earlier SymPy versions don't support the dtype kwarg.
                expected = np.asarray(sm.matrix2numpy(rhs.subs(subs)),
                                      dtype=float).squeeze()
            xdot = f(x, 0.0, r, p)
            np.testing.assert_allclose(xdot, expected)
Example 93 (0 votes)
Project: pyoptools Source File: gs.py
def frGS(z,target,estimate=None, iterations=20,error=None):
    '''
    Fresnel transform Gerchberg - Saxton Algorithm

    Calculates the phase distribution in an object plane (for a given
    amplitude constraint) to obtain a specific amplitude distribution in
    the target plane.
    A FFT implementation of the Fresnel Transform is used.

    **ARGUMENTS:**

        ========== ======================================================
        z          Propagation distance. This is used to calculate the
                   resolution needed in the object plane, for a given
                   target resolution.
        target     :class:`Field` instance whose amplitude distribution
                   is used to represent the amplitude constraint to be
                   applied in the target plane. The phase of this field
                   is not used.
        estimate   :class:`Field` instance used as initial estimate for
                   the problem. The amplitude of this field is taken as
                   the reference amplitude and the phase is obtained. The
                   resolution used to define this field must match the
                   value needed to obtain the required target resolution
                   when the FFT-Fresnel transform is used. If the
                   wrong value is given an exception is raised.
                   If not given, a unitary amplitude wave, with random
                   phase and the correct resolution, is used.
        iterations Maximum number of iterations
        error      Expected error
        ========== ======================================================

    .. note : target and object must have the same wavelength

    **RETURN VALUE:**
        (holo,err)

        ==== ===========================================================
        holo Field instance, containing the reference amplitude
             information and the phase obtained from the iterative
             algorithm. The holo.res attribute contains the
             resolution of the calculated hologram for the given
             propagation distance. The holo.l attribute contains the
             wavelength used to calculate the hologram.
        err  Final error obtained
        ==== ===========================================================
    '''
    if estimate==None:
        edata=exp(2.j*pi*random(target.shape))
        sx,sy=target.size
        dxe=target.l*z/sx
        dye=target.l*z/sy
        estimate=Field(data=edata,psize=(dxe,dye),l=target.l)
    assert estimate.shape==target.shape,\
        "The estimate field, and the target field, must have the same shape"
    assert target.l==estimate.l,\
        "The wavelengths for the reference beam, and the target must be equal"
    sx,sy=target.size
    dxe=target.l*z/sx
    dye=target.l*z/sy
    dx,dy=estimate.res
    assert (dxe==dx) and (dye==dy),\
        "The resolution for the reference beam, and the target must be equal"
    holo=estimate
    eabs=estimate.abs()
    #Normalized Target amplitude
    ntarget=target.abs()/target.abs().max()
    for n in range(iterations):
        if n!=0: holo=imp.propagate_fresnel(-z)
        #Keep only the phase in the hologram plane
        holo.data=exp(1.j*holo.angle)
        #Apply the amplitude constraint
        holo=holo*eabs
        #Calculate the new image plane
        imp=holo.propagate_fresnel(z)
        err=(ntarget-imp.abs()/imp.abs().max()).std()
        if error!=None and err<error: break
        d=exp(1.j*imp.angle)
        imp=Field(data=d, psize=imp.psize, l=imp.l)
        #Apply the amplitude constraint
        imp=imp*target.abs()
    return holo,err
Example 94 (0 votes)
Project: pyoptools Source File: gs.py
def asGS(z,target,estimate=None, iterations=20,error=None):
    '''
    Angular spectrum Gerchberg - Saxton Algorithm

    Calculates the phase distribution in an object plane (for a given
    amplitude constraint) to obtain a specific amplitude distribution in
    the target plane.
    It uses the Gerchberg - Saxton algorithm for the angular spectrum
    propagation.

    **ARGUMENTS:**

        ========== =====================================================
        z          Propagation distance. This is used to calculate the
                   resolution needed in the object plane, for a given
                   target resolution.
        target     :class:`Field` instance whose amplitude distribution
                   is used to represent the amplitude constraint to be
                   applied in the target plane. The phase of this field
                   is not used.
        estimate   :class:`Field` instance used as initial estimate for
                   the problem. The amplitude of this field is taken as
                   the reference amplitude and the phase is obtained. It
                   must have the same resolution as the target field.
                   If not given, a unitary amplitude wave, with random
                   phase and the correct resolution, is used.
        iterations Maximum number of iterations
        error      Expected error
        ========== =====================================================

    .. note : target and object must have the same wavelength

    **RETURN VALUE:**
        (holo,err)

        ==== ===========================================================
        holo Field instance, containing the reference amplitude
             information and the phase obtained from the iterative
             algorithm. The holo.res attribute contains the
             resolution of the calculated hologram for the given
             propagation distance. The holo.l attribute contains the
             wavelength used to calculate the hologram.
        err  Final error obtained
        ==== ===========================================================
    '''
    if estimate==None:
        edata=exp(2.j*pi*random(target.shape))
        sx,sy=target.size
        dxe=target.l*z/sx
        dye=target.l*z/sy
        estimate=Field(data=edata,psize=(dxe,dye),l=target.l)
    assert estimate.shape==target.shape,\
        "The estimate field, and the target field, must have the same shape"
    assert target.l==estimate.l,\
        "The wavelengths for the reference beam, and the target must be equal"
    dxe,dye=target.res
    dx,dy=estimate.res
    assert (dxe==dx) and (dye==dy),\
        "The resolution for the estimate beam, and the target must be equal"
    holo=estimate
    eabs=estimate.abs()
    #Normalized Target amplitude
    ntarget=target.abs()/target.abs().max()
    for n in range(iterations):
        if n!=0: holo=imp.propagate_ae(-z)
        #Keep only the phase in the hologram plane
        holo.data=exp(1.j*holo.angle)
        holo=holo*eabs
        #Calculate the new image plane
        imp=holo.propagate_ae(z)
        err=(ntarget-imp.abs()/imp.abs().max()).std()
        if error!=None and err<error: break
        d=exp(1.j*imp.angle)
        imp=Field(data=d, psize=imp.psize, l=imp.l)
        imp=imp*target.abs()
    return holo,err
Example 95 (0 votes)
Project: glymur Source File: test_openjp2.py
def tile_encoder(**kwargs):
    """Fixture used by many tests."""
    num_tiles = ((kwargs['image_width'] / kwargs['tile_width']) *
                 (kwargs['image_height'] / kwargs['tile_height']))
    tile_size = ((kwargs['tile_width'] * kwargs['tile_height']) *
                 (kwargs['num_comps'] * kwargs['comp_prec'] / 8))
    data = np.random.random((kwargs['tile_height'],
                             kwargs['tile_width'],
                             kwargs['num_comps']))
    data = (data * 255).astype(np.uint8)
    l_param = openjp2.set_default_encoder_parameters()
    l_param.tcp_numlayers = 1
    l_param.cp_fixed_quality = 1
    l_param.tcp_distoratio[0] = 20
    # position of the tile grid aligned with the image
    l_param.cp_tx0 = 0
    l_param.cp_ty0 = 0
    # tile size, we are using tile based encoding
    l_param.tile_size_on = 1
    l_param.cp_tdx = kwargs['tile_width']
    l_param.cp_tdy = kwargs['tile_height']
    # use irreversible encoding
    l_param.irreversible = kwargs['irreversible']
    l_param.numresolution = 6
    l_param.prog_order = glymur.core.LRCP
    l_params = (openjp2.ImageComptParmType * kwargs['num_comps'])()
    for j in range(kwargs['num_comps']):
        l_params[j].dx = 1
        l_params[j].dy = 1
        l_params[j].h = kwargs['image_height']
        l_params[j].w = kwargs['image_width']
        l_params[j].sgnd = 0
        l_params[j].prec = kwargs['comp_prec']
        l_params[j].x0 = 0
        l_params[j].y0 = 0
    codec = openjp2.create_compress(kwargs['codec'])
    openjp2.set_info_handler(codec, None)
    openjp2.set_warning_handler(codec, None)
    openjp2.set_error_handler(codec, None)
    cspace = openjp2.CLRSPC_SRGB
    l_image = openjp2.image_tile_create(l_params, cspace)
    l_image.contents.x0 = 0
    l_image.contents.y0 = 0
    l_image.contents.x1 = kwargs['image_width']
    l_image.contents.y1 = kwargs['image_height']
    l_image.contents.color_space = openjp2.CLRSPC_SRGB
    openjp2.setup_encoder(codec, l_param, l_image)
    stream = openjp2.stream_create_default_file_stream(kwargs['filename'],
                                                       False)
    openjp2.start_compress(codec, l_image, stream)
    for j in np.arange(num_tiles):
        openjp2.write_tile(codec, j, data, tile_size, stream)
    openjp2.end_compress(codec, stream)
    openjp2.stream_destroy(stream)
    openjp2.destroy_codec(codec)
    openjp2.image_destroy(l_image)
Example 96 (0 votes)
Project: attention-lvcsr Source File: test_conv.py
def validate(self, image_shape, filter_shape, out_dim, verify_grad=True):
    image_dim = len(image_shape)
    filter_dim = len(filter_shape)
    input = T.TensorType('float64', [False] * image_dim)()
    filters = T.TensorType('float64', [False] * filter_dim)()
    bsize = image_shape[0]
    if image_dim != 3:
        bsize = 1
    nkern = filter_shape[0]
    if filter_dim != 3:
        nkern = 1

    # THEANO IMPLEMENTATION ############
    # we create a symbolic function so that verify_grad can work
    def sym_conv2d(input, filters):
        return conv.conv2d(input, filters)

    output = sym_conv2d(input, filters)
    assert output.ndim == out_dim
    theano_conv = theano.function([input, filters], output)
    # initialize input and compute result
    image_data = numpy.random.random(image_shape)
    filter_data = numpy.random.random(filter_shape)
    theano_output = theano_conv(image_data, filter_data)

    # REFERENCE IMPLEMENTATION ############
    out_shape2d = numpy.array(image_shape[-2:]) - numpy.array(filter_shape[-2:]) + 1
    ref_output = numpy.zeros(tuple(out_shape2d))
    # reshape as 3D input tensors to make life easier
    image_data3d = image_data.reshape((bsize,) + image_shape[-2:])
    filter_data3d = filter_data.reshape((nkern,) + filter_shape[-2:])
    # reshape theano output as 4D to make life easier
    theano_output4d = theano_output.reshape((bsize, nkern,) +
                                            theano_output.shape[-2:])
    # loop over mini-batches (if required)
    for b in range(bsize):
        # loop over filters (if required)
        for k in range(nkern):
            image2d = image_data3d[b, :, :]
            filter2d = filter_data3d[k, :, :]
            output2d = numpy.zeros(ref_output.shape)
            for row in range(ref_output.shape[0]):
                for col in range(ref_output.shape[1]):
                    output2d[row, col] += (
                        image2d[row:row + filter2d.shape[0],
                                col:col + filter2d.shape[1]] *
                        filter2d[::-1, ::-1]
                    ).sum()
            self.assertTrue(_allclose(theano_output4d[b, k, :, :],
                                      output2d))

    # TEST GRADIENT ############
    if verify_grad:
        utt.verify_grad(sym_conv2d, [image_data, filter_data])
Example 97 (0 votes)
def test():  # pragma: no cover
    natoms = 3
    x = np.random.random([natoms, 3]) * 5
    masses = [1., 1., 16.]  # np.random.random(natoms)
    print masses
    x -= np.average(x, axis=0, weights=masses)
    cog = np.average(x, axis=0)
    S = np.zeros([3, 3])
    for i in xrange(3):
        for j in xrange(3):
            S[i][j] = np.sum(x[:, i] * x[:, j])
    site = AASiteType(M=natoms, S=S, W=natoms, cog=cog)
    X1 = 10.1 * np.random.random(3)
    X2 = 10.1 * np.random.random(3)
    p1 = rotations.random_aa()
    p2 = rotations.random_aa()
    R1 = rotations.aa2mx(p1)
    R2 = rotations.aa2mx(p2)
    x1 = np.dot(R1, x.transpose()).transpose() + X1
    x2 = np.dot(R2, x.transpose()).transpose() + X2
    import _aadist
    print "site representation:", np.sum((x1 - x2) ** 2)
    print "distance function: ", site.distance_squared(X1, p1, X2, p2)
    print "fortran function: ", _aadist.sitedist(X2 - X1, p1, p2, site.S, site.W, cog)
    import time
    t0 = time.time()
    for i in xrange(1000):
        site.distance_squared(X1, p1, X2, p2)
    t1 = time.time()
    print "time python", t1 - t0
    for i in xrange(1000):
        sitedist(X2 - X1, p1, p2, site.S, site.W, cog)
        # _aadist.aadist(coords1, coords2, site.S, site.W, cog)
    t2 = time.time()
    print "time fortran", t2 - t1
    # for i in xrange(1000/20):
    #     #_aadist.sitedist(X1, p1, X2, p2, site.S, site.W, cog)
    #     _aadist.aadist(coords1, coords2, site.S, site.W, cog)
    t2 = time.time()
    print "time fortran acc", t2 - t1
    print site.distance_squared_grad(X1, p1, X2, p2)
    g_M = np.zeros(3)
    g_P = np.zeros(3)
    for i in xrange(3):
        eps = 1e-6
        delta = np.zeros(3)
        delta[i] = eps
        g_M[i] = (site.distance_squared(X1 + delta, p1, X2, p2)
                  - site.distance_squared(X1, p1, X2, p2)) / eps
        g_P[i] = (site.distance_squared(X1, p1 + delta, X2, p2)
                  - site.distance_squared(X1, p1, X2, p2)) / eps
    print g_M, g_P
    xx = site.distance_squared_grad(X1, p1, X2, p2)
    print g_M / xx[0], g_P / xx[1]
    print _aadist.sitedist_grad(X2 - X1, p1, p2, site.S, site.W, cog)
Example 98 (0 votes)
def add_some(n,n2=None,allocator=allocator):
    '''add n random polygons to allocator

    There are two types of polygons. If only n is given, it will be
    distributed randomly between the two types; if n2 is given, n goes
    to one type and n2 to the other.
    '''
    assert isinstance(n,int) and (1 if n2 is None else isinstance(n2,int)),\
        "n1 and n2 (if given) must be integers"
    if n2 is None:
        a = random.random()
        n1 = int(n*a)
        n2 = n-n1
    else:
        n1 = n  # n2 keeps the value passed in
    positions = [(x*width,y*height,z) for x,y,z in np.random.random((n1,3))]
    rs = [r*50 for r in np.random.random(n1)]
    ns = [int(m*10)+3 for m in np.random.random(n1)]
    colors = np.random.random((n1,3)).astype(color_dtype.np)
    rates = np.random.random(n1)*.01
    for n,r, position, color, rate in zip(ns,rs, positions, colors, rates):
        add_rotating_regular_polygon(n,r,position,rate,color)
    #from before indicies could be calculated
    #indices = np.array(reduce(add, [[x,]*7 for x in range(n1)], []),dtype=np.int)
    positions = [(x*width,y*height,z) for x,y,z in np.random.random((n2,3))]
    rs = [r*50 for r in np.random.random(n2)]
    ns = [int(m*10)+3 for m in np.random.random(n2)]
    colors = np.random.random((n2,3)).astype(color_dtype.np)
    for n_sides,radius, position, color in zip(ns,rs, positions, colors):
        add_regular_polygon(n_sides,radius,position,color)
Example 99 (0 votes)
Project: qutip Source File: pulsegen.py
def init_coeffs(self, num_coeffs=None):
    """
    Generate the initial coefficient values.

    Parameters
    ----------
    num_coeffs : integer
        Number of coefficients used for each basis function.
        If given this overrides the default and sets the attribute
        of the same name.
    """
    if num_coeffs:
        self.num_coeffs = num_coeffs
        self._num_coeffs_estimated = False
    if not self.num_coeffs:
        if isinstance(self.parent, dynamics.Dynamics):
            dim = self.parent.get_drift_dim()
            self.num_coeffs = self.estimate_num_coeffs(dim)
            self._num_coeffs_estimated = True
        else:
            self.num_coeffs = self.DEF_NUM_COEFFS
    self.num_optim_vars = self.num_coeffs*self.num_basis_funcs
    if self._num_coeffs_estimated:
        if self.log_level <= logging.INFO:
            logger.info(
                "The number of CRAB coefficients per basis function "
                "has been estimated as {}, which means a total of {} "
                "optimisation variables for this pulse. Based on the "
                "dimension ({}) of the system".format(
                    self.num_coeffs, self.num_optim_vars, dim))
        # Issue warning if beyond the recommended level
        if self.log_level <= logging.WARN:
            if self.num_coeffs > self.NUM_COEFFS_WARN_LVL:
                logger.warn(
                    "The estimated number of coefficients {} exceeds "
                    "the amount ({}) recommended for efficient "
                    "optimisation. You can set this level explicitly "
                    "to suppress this message.".format(
                        self.num_coeffs, self.NUM_COEFFS_WARN_LVL))
    if self.randomize_coeffs:
        r = np.random.random([self.num_coeffs, self.num_basis_funcs])
        self.coeffs = (2*r - 1.0) * self.scaling
    else:
        self.coeffs = np.ones([self.num_coeffs,
                               self.num_basis_funcs])*self.scaling
Example 100 (0 votes)
Project: filterpy Source File: resampling.py
def residual_resample(weights):
    """ Performs the residual resampling algorithm used by particle filters.

    Based on observation that we don't need to use random numbers to select
    most of the weights. Take int(N*w^i) samples of each particle i, and then
    resample any remaining using a standard resampling algorithm [1]

    Parameters
    ----------
    weights : list-like of float
        list of weights as floats

    Returns
    -------
    indexes : ndarray of ints
        array of indexes into the weights defining the resample. i.e. the
        index of the zeroth resample is indexes[0], etc.

    References
    ----------
    .. [1] J. S. Liu and R. Chen. Sequential Monte Carlo methods for dynamic
       systems. Journal of the American Statistical Association,
       93(443):1032–1044, 1998.
    """
    N = len(weights)
    indexes = np.zeros(N, 'i')
    # take int(N*w) copies of each weight, which ensures particles with the
    # same weight are drawn uniformly
    num_copies = (np.floor(N*np.asarray(weights))).astype(int)
    k = 0
    for i in range(N):
        for _ in range(num_copies[i]):  # make n copies
            indexes[k] = i
            k += 1
    # use multinomial resample on the residual to fill up the rest. This
    # maximizes the variance of the samples
    residual = weights - num_copies  # get fractional part
    residual /= sum(residual)        # normalize
    cumulative_sum = np.cumsum(residual)
    cumulative_sum[-1] = 1.  # avoid round-off errors: ensures sum is exactly one
    indexes[k:N] = np.searchsorted(cumulative_sum, random(N-k))
    return indexes
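A short usage sketch (the weight vector below is invented for illustration): the loop above guarantees each particle i exactly floor(N*weights[i]) deterministic copies in the result, and only the remaining slots consume draws from random().

weights = np.array([0.1, 0.2, 0.3, 0.4])  # hypothetical normalized particle weights
idx = residual_resample(weights)          # N=4 indexes into the weight vector
# floor(N*w) = [0, 0, 1, 1]: particles 2 and 3 each get one guaranteed copy,
# and the last two slots are filled by sampling from the residual distribution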