Here are examples of the Python API numpy.cast, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
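Before the examples, a minimal sketch of what np.cast does (plain NumPy; note that np.cast was deprecated in NumPy 1.25 and removed in NumPy 2.0, where np.asarray(x, dtype=...) is the replacement):

import numpy as np

# np.cast is a dict-like mapping from a dtype key to a conversion function;
# indexing it returns a caster equivalent to np.asarray(..., dtype=...).
y = np.cast['float32']([1, 2, 3])            # array([1., 2., 3.], dtype=float32)
y2 = np.asarray([1, 2, 3], dtype='float32')  # the modern spelling
assert (y == y2).all() and y.dtype == y2.dtype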
50 Examples
3
Source : model_utils.py
with Apache License 2.0
from bishanyang
def as_floatX(variable):
    if isinstance(variable, float):
        return np.cast[theano.config.floatX](variable)
    if isinstance(variable, np.ndarray):
        return np.cast[theano.config.floatX](variable)
    return theano.tensor.cast(variable, theano.config.floatX)
def sgd_updates_adadelta(params, cost, rho=0.95, epsilon=1e-6, norm_lim=9, word_vec_name='Words'):
3
Source : factor_distributions.py
with Apache License 2.0
from deepmind
def sample(self, rng=None):
    """Sample value in [self.minval, self.maxval) and return dict."""
    rng = self._get_rng(rng)
    out = rng.uniform(low=self.minval, high=self.maxval)
    out = np.cast[self.dtype](out)
    return {self.key: out}
def contains(self, spec):
3
Source : bn.py
with MIT License
from dmitriy-serdyuk
def _compute_training_statistics(self, input_):
    axes = (0,) + tuple((i + 1) for i, b in
                        enumerate(self.population_mean.broadcastable)
                        if b)
    mean = input_.mean(axis=axes, keepdims=True)
    assert mean.broadcastable[1:] == self.population_mean.broadcastable
    add_role(mean, BATCH_NORM_MINIBATCH_ESTIMATE)
    if self.mean_only:
        stdev = tensor.ones_like(mean)
    else:
        stdev = tensor.sqrt(tensor.var(input_, axis=axes, keepdims=True) +
                            numpy.cast[theano.config.floatX](self.epsilon))
    assert (stdev.broadcastable[1:] ==
            self.population_stdev.broadcastable)
    add_role(stdev, BATCH_NORM_MINIBATCH_ESTIMATE)
    return mean, stdev
def _prepare_population_statistics(self):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if x.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return gz * exp2(x) * log(numpy.cast[x.type](2)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if gz.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return - gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if gz.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return gz / sqrt(numpy.cast[x.type](1) - sqr(x)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if gz.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return gz / (numpy.cast[x.type](1) + sqr(x)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if x.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return gz / sqrt(sqr(x) - numpy.cast[x.type](1)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if x.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return gz / sqrt(sqr(x) + numpy.cast[x.type](1)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : basic.py
with MIT License
from dmitriy-serdyuk
def grad(self, inputs, gout):
    (x,) = inputs
    (gz,) = gout
    if x.type in complex_types:
        raise NotImplementedError()
    if self(x).type in discrete_types:
        if x.type in discrete_types:
            return [x.zeros_like(dtype=theano.config.floatX)]
        else:
            return [x.zeros_like()]
    return gz / (numpy.cast[x.type](1) - sqr(x)),
def c_code(self, node, name, inputs, outputs, sub):
3
Source : test_determinism.py
with MIT License
from dmitriy-serdyuk
def sharedX(x, name=None):
    x = np.cast[config.floatX](x)
    return shared(x, name)
def test_determinism_1():
3
Source : snippet.py
with Apache License 2.0
from dockerizeme
def as_shared(arr, name=None):
    """ Quick wrapper for theano.shared """
    if type(arr) in [float, int]:
        if name is not None:
            return theano.shared(np.cast[theano.config.floatX](arr))
        else:
            return theano.shared(np.cast[theano.config.floatX](arr), name=name)
    if name is not None:
        return theano.shared(value=arr, borrow=True)
    else:
        return theano.shared(value=arr, name=name, borrow=True)
def apply_shared(list_of_numpy):
3
Source : snippet.py
with Apache License 2.0
from dockerizeme
def as_shared(arr, name=None):
    if type(arr) in [float, int]:
        if name is not None:
            return theano.shared(np.cast[theano.config.floatX](arr))
        else:
            return theano.shared(np.cast[theano.config.floatX](arr), name=name)
    if name is not None:
        return theano.shared(value=arr, borrow=True)
    else:
        return theano.shared(value=arr, name=name, borrow=True)
def np_zeros(shape):
3
Source : deep_rnn.py
with GNU General Public License v3.0
from filby89
def parameter_prediction(self, test_set_x):  #, batch_size
    """ This function is to predict the output of NN
    :param test_set_x: input features for a testing sentence
    :type test_set_x: python array variable
    :returns: predicted features
    """
    n_test_set_x = test_set_x.shape[0]
    test_out = theano.function([], self.final_layer.output,
                               givens={self.x: test_set_x[0:n_test_set_x],
                                       self.is_train: np.cast['int32'](0)},
                               on_unused_input='ignore')
    predict_parameter = test_out()
    return predict_parameter
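The is_train substitution above uses a pattern that recurs throughout these examples; a minimal sketch of what np.cast['int32'](0) actually builds (plain NumPy, independent of this repo):

import numpy as np

# `givens` needs a value whose dtype matches the symbolic variable it
# replaces; np.cast['int32'](0) yields a 0-d int32 ndarray, the same as
# np.asarray(0, dtype='int32').
flag = np.cast['int32'](0)
assert flag.dtype == np.int32 and flag.ndim == 0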
3
Source : iaf_modules.py
with MIT License
from Ghadjeres
def get_masks(dim, dh, num_layers, num_outlayers, fixed_order=False, derank=1):
    ms, rx = get_masks_all([dim,] + [dh for i in range(num_layers-1)] + [dim,],
                           fixed_order, derank)
    ml = ms[-1]
    ml_ = (ml.transpose(1, 0)[:, :, None] * ([np.cast['float32'](1),] *
           int(num_outlayers))).reshape(
           dh, int(dim*num_outlayers)).transpose(1, 0)
    ms[-1] = ml_
    return ms, rx
class MADE(Module):
3
Source : utils.py
with MIT License
from guxd
def as_floatX(variable):
    """
    This code is taken from pylearn2:
    Casts a given variable into dtype config.floatX
    numpy ndarrays will remain numpy ndarrays
    python floats will become 0-D ndarrays
    all other types will be treated as theano tensors
    """
    if isinstance(variable, float):
        return numpy.cast[theano.config.floatX](variable)
    if isinstance(variable, numpy.ndarray):
        return numpy.cast[theano.config.floatX](variable)
    return theano.tensor.cast(variable, theano.config.floatX)
def copy(x):
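A quick check of the three cases the docstring lists (a sketch, assuming theano.config.floatX == 'float32'):

import numpy
import theano
import theano.tensor

print(as_floatX(0.5).dtype, as_floatX(0.5).ndim)  # float32 0 (a 0-d ndarray)
print(as_floatX(numpy.arange(3.0)).dtype)         # float32 (stays an ndarray)
print(type(as_floatX(theano.tensor.dscalar())))   # a symbolic TensorVariable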
3
Source : conjugate_gradient_optimizer.py
with MIT License
from iclavera
def __init__(self, base_eps=1e-5, symmetric=True, grad_clip=None):
    self.base_eps = np.cast['float32'](base_eps)
    self.symmetric = symmetric
    self.grad_clip = grad_clip
    self._target = None
    self.reg_coeff = None
    self._constraint_gradient = None
    self._input_ph_dict = None
def build_graph(self, constraint_obj, target, input_val_dict, reg_coeff):
3
Source : Optimization.py
with MIT License
from iofu728
def as_floatX(variable):
    if isinstance(variable, float):
        return np.cast[theano.config.floatX](variable)
    if isinstance(variable, np.ndarray):
        return np.cast[theano.config.floatX](variable)
    return T.cast(variable, theano.config.floatX)
class RMSprop(object):
3
Source : ppo.py
with MIT License
from openai
def __init__(self, env, model, nsteps, gamma, lam, norm_adv, subtract_rew_avg):
    self.env = env
    self.model = model
    nenv = env.num_envs
    self.gamma = gamma
    self.lam = lam
    self.norm_adv = norm_adv
    self.subtract_rew_avg = subtract_rew_avg
    self.nsteps = nsteps
    self.num_steps_to_cut_left = nsteps//2
    self.num_steps_to_cut_right = 0
    obs = [np.cast[model.train_model.X.dtype.name](env.reset())]
    states = [model.initial_state]
    dones = [np.array([False for _ in range(nenv)])]
    random_res = [np.array([False for _ in range(nenv)])]
    # mb_obs, mb_increase_ent, mb_rewards, mb_reward_avg, mb_actions, mb_values, mb_valids, mb_random_resets, mb_dones, mb_neglogpacs, mb_states
    self.mb_stuff = [obs, [np.zeros(obs[0].shape[0], dtype=np.uint8)], [], [], [], [], [], [random_res], dones, [], states]
def run(self):
3
Source : env_helpers.py
with MIT License
from thanard
def reset(self, dones=None):
    if dones is None:
        dones = np.asarray([True] * self.n_envs)
    else:
        dones = np.cast['bool'](dones)
    for i, done in enumerate(dones):
        if done:
            self.states[i] = self.env.reset()
            self.cur_model_idx[i] = np.random.randint(len(self.env.dynamics_outs))
    self.ts[dones] = 0
    return self.states[dones]
def step(self, actions):
0
Source : test_bernoulli_mlp_regressor.py
with MIT License
from artberryx
def test_fit_normalized(self, input_shape, output_dim):
    bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                output_dim=output_dim)
    observations, returns = get_train_data(input_shape, output_dim)
    for _ in range(150):
        bmr.fit(observations, returns)
    paths, expected = get_test_data(input_shape, output_dim)
    prediction = np.cast['int'](bmr.predict(paths['observations']))
    assert np.allclose(prediction, expected, rtol=0, atol=0.1)
    x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
    x_mean_expected = np.mean(observations, axis=0, keepdims=True)
    x_std = self.sess.run(bmr.model._networks['default'].x_std)
    x_std_expected = np.std(observations, axis=0, keepdims=True)
    assert np.allclose(x_mean, x_mean_expected)
    assert np.allclose(x_std, x_std_expected)
# yapf: disable
@pytest.mark.parametrize('input_shape, output_dim', [
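A note on the np.cast['int'] lookup in these tests (a plain-NumPy sketch): the 'int' key resolves to NumPy's default integer type, so the float predictions are truncated toward zero to platform ints.

import numpy as np

pred = np.cast['int'](np.array([0.9, 0.1, 1.0]))
assert pred.dtype == np.int_
assert pred.tolist() == [0, 0, 1]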
0
Source : test_bernoulli_mlp_regressor.py
with MIT License
from artberryx
def test_fit_unnormalized(self, input_shape, output_dim):
    bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                output_dim=output_dim,
                                normalize_inputs=False)
    observations, returns = get_train_data(input_shape, output_dim)
    for _ in range(150):
        bmr.fit(observations, returns)
    paths, expected = get_test_data(input_shape, output_dim)
    prediction = np.cast['int'](bmr.predict(paths['observations']))
    assert np.allclose(prediction, expected, rtol=0, atol=0.1)
    x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
    x_mean_expected = np.zeros_like(x_mean)
    x_std = self.sess.run(bmr.model._networks['default'].x_std)
    x_std_expected = np.ones_like(x_std)
    assert np.allclose(x_mean, x_mean_expected)
    assert np.allclose(x_std, x_std_expected)
# yapf: disable
@pytest.mark.parametrize('input_shape, output_dim', [
0
Source : test_bernoulli_mlp_regressor.py
with MIT License
from artberryx
def test_fit_with_no_trust_region(self, input_shape, output_dim):
    bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                output_dim=output_dim,
                                use_trust_region=False)
    observations, returns = get_train_data(input_shape, output_dim)
    for _ in range(150):
        bmr.fit(observations, returns)
    paths, expected = get_test_data(input_shape, output_dim)
    prediction = np.cast['int'](bmr.predict(paths['observations']))
    assert np.allclose(prediction, expected, rtol=0, atol=0.1)
    x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
    x_mean_expected = np.mean(observations, axis=0, keepdims=True)
    x_std = self.sess.run(bmr.model._networks['default'].x_std)
    x_std_expected = np.std(observations, axis=0, keepdims=True)
    assert np.allclose(x_mean, x_mean_expected)
    assert np.allclose(x_std, x_std_expected)
def test_sample_predict(self):
0
Source : yolov3_postprocessing.py
with Apache License 2.0
from Ascend
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    """Get corrected boxes"""
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = np.cast[box_yx.dtype](input_shape)
    image_shape = np.cast[box_yx.dtype](image_shape)
    new_shape = np.round(image_shape * np.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale
    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = np.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ], axis=-1)
    # Scale boxes back to original image shape.
    boxes *= np.concatenate([image_shape, image_shape], axis=-1)
    return boxes
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
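To make the letterbox correction concrete, worked numbers for one assumed configuration (416x416 network input, 480x640 image; these shapes are illustrative, not from the repo):

# input_shape = (416, 416), image_shape = (480, 640)
# new_shape = round(image_shape * min(416/480, 416/640)) = (312, 416)
# offset    = (input_shape - new_shape) / 2 / input_shape = (0.125, 0.0)
# scale     = input_shape / new_shape = (1.333..., 1.0)
# i.e. box centers are shifted by the letterbox padding and then stretched
# back to the un-padded aspect ratio before rescaling to pixel coordinates.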
0
Source : injectFault.py
with MIT License
from DependableSystemsLab
def createInjectFaultCast(type):
    "Returns a Function to call injectFault on cast nodes"
    castInto = castType(type)  # get the appropriate casting function for the type
    def injectFaultCast(a, b = None):
        "Inject a fault into a Cast instruction"
        logging.debug("Calling Operator Cast " + getArgs(a, b))
        # If we're given 2 parameters, treat it as the default case
        if b != None:
            # NB: kept as in the source; np.cast is a dict-like mapping, not a
            # callable, so np.cast(a, b) would raise TypeError if this branch ran.
            res = np.cast(a, b)
        else:
            # Call the function for this type with 'a'
            res = castInto(a)
        res = condPerturb(Ops.CAST, res)
        if logReturn: logging.debug("\tReturning " + str(res))
        return res
    # Return the injectFaultCast function
    return injectFaultCast
def injectFaultNoop():
0
Source : test_bn.py
with MIT License
from dmitriy-serdyuk
def test_batch_normalization_train_apply():
    def check(input_dim, variable_dim, broadcastable=None,
              conserve_memory=True, mean_only=False):
        # Default epsilon value.
        epsilon = numpy.cast[theano.config.floatX](1e-4)
        bn, x = apply_setup(input_dim, broadcastable, conserve_memory,
                            mean_only)
        with bn:
            y_hat = bn.apply(x)
        rng = numpy.random.RandomState((2015, 12, 16))
        input_ = random_unif(rng, (9,) +
                             (input_dim
                              if isinstance(input_dim, collections.Sequence)
                              else (input_dim,)))
        # i + 1 because the axes are all shifted one over when the batch
        # axis is added.
        axes = (0,) + tuple((i + 1) for i, b in
                            enumerate(bn.population_mean.broadcastable) if b)

        # NumPy implementation of the batch-normalization transform.
        def normalize(x, mean_only):
            centered = x - x.mean(axis=axes, keepdims=True,
                                  dtype=theano.config.floatX)
            if not mean_only:
                var = numpy.var(x, axis=axes, keepdims=True,
                                dtype=theano.config.floatX)
                return centered / numpy.sqrt(var + epsilon)
            else:
                return centered

        # Check that batch norm is doing what it should be.
        assert_allclose(y_hat.eval({x: input_}), normalize(input_, mean_only),
                        atol=(1e-3 if theano.config.floatX == 'float32'
                              else 1e-7))
        if not mean_only:
            # Check that the scale parameters are still getting applied.
            gamma = random_unif(rng, variable_dim)
            bn.scale.set_value(gamma)
            assert_allclose(y_hat.eval({x: input_}),
                            normalize(input_, mean_only) * gamma,
                            atol=(1e-3 if theano.config.floatX == 'float32'
                                  else 1e-7))
            beta = random_unif(rng, variable_dim)
            bn.shift.set_value(beta)
            # Check that the shift parameters are still getting applied.
            assert_allclose(y_hat.eval({x: input_}),
                            normalize(input_, mean_only) * gamma + beta,
                            atol=(1e-3 if theano.config.floatX == 'float32'
                                  else 1e-7))
            # Double check that setting the population parameters doesn't
            # affect anything.
            bn.population_mean.set_value(numpy.nan *
                                         bn.population_mean.get_value())
            bn.population_stdev.set_value(numpy.nan *
                                          bn.population_mean.get_value())
            assert_allclose(y_hat.eval({x: input_}),
                            normalize(input_, mean_only) * gamma + beta,
                            atol=(1e-3 if theano.config.floatX == 'float32'
                                  else 1e-7))
        else:
            beta = random_unif(rng, variable_dim)
            bn.shift.set_value(beta)
            # Check that the shift parameters are still getting applied.
            assert_allclose(y_hat.eval({x: input_}),
                            normalize(input_, mean_only) + beta,
                            atol=(1e-3 if theano.config.floatX == 'float32'
                                  else 1e-7))
            # Double check that setting the population parameters doesn't
            # affect anything.
            bn.population_mean.set_value(numpy.nan *
                                         bn.population_mean.get_value())
            assert_allclose(y_hat.eval({x: input_}),
                            normalize(input_, mean_only) + beta,
                            atol=(1e-3 if theano.config.floatX == 'float32'
                                  else 1e-7))
    yield check, 9, (9,)
    yield check, (5, 4), (5, 4), None, False
    yield check, (5, 4), (5, 4), None, False, True
    yield check, (2, 9, 7), (2, 1, 1), (False, True, True)
    yield check, (2, 9, 7), (2, 1, 1), (False, True, True), True, True
def test_batch_normalization_image_size_setter():
0
Source : test_convert_ilsvrc2010.py
with MIT License
from dmitriy-serdyuk
def test_images_consumer_randomized():
    mock_messages = MOCK_CONSUMER_MESSAGES + [
        {'type': 'recv_pyobj', 'flags': zmq.SNDMORE, 'obj': ('jenny.jpeg', 1)},
        {'type': 'recv', 'flags': 0,
         'data': numpy.cast['uint8']([8, 6, 7, 5, 3, 0, 9])}
    ]
    hdf5_file = MockH5PYFile()
    prepare_hdf5_file(hdf5_file, 4, 5, 8)
    socket = MockSocket(zmq.PULL, to_recv=mock_messages)
    image_consumer(socket, hdf5_file, 5, offset=4, shuffle_seed=0)
    written_data = set(tuple(s) for s in hdf5_file['encoded_images'][4:9])
    expected_data = set(tuple(s['data']) for s in mock_messages[1::2])
    assert written_data == expected_data
    written_targets = set(hdf5_file['targets'][4:9].flatten())
    expected_targets = set(s['obj'][1] for s in mock_messages[::2])
    assert written_targets == expected_targets
    written_filenames = set(hdf5_file['filenames'][4:9].flatten())
    expected_filenames = set(s['obj'][0].encode('ascii')
                             for s in mock_messages[::2])
    assert written_filenames == expected_filenames
def test_other_set_producer():
0
Source : test_gradient.py
with MIT License
from dmitriy-serdyuk
def test_known_grads_integers():
    # Tests that known_grads works on integers
    x = theano.tensor.iscalar()
    g_expected = theano.tensor.scalar()
    g_grad = theano.gradient.grad(cost=None,
                                  known_grads={x: g_expected},
                                  wrt=x)
    f = theano.function([g_expected], g_grad)
    x = -3
    gv = np.cast[theano.config.floatX](.6)
    g_actual = f(gv)
    assert np.allclose(g_actual, gv)
def test_undefined_cost_grad():
0
Source : deep_rnn.py
with GNU General Public License v3.0
from filby89
def build_finetune_functions(self, train_shared_xy, valid_shared_xy):
    """ This function is to build finetune functions and to update gradients
    :param train_shared_xy: theano shared variable for input and output training data
    :type train_shared_xy: tuple of shared variable
    :param valid_shared_xy: theano shared variable for input and output development data
    :type valid_shared_xy: tuple of shared variable
    :returns: finetune functions for training and development
    """
    (train_set_x, train_set_y) = train_shared_xy
    (valid_set_x, valid_set_y) = valid_shared_xy
    lr = T.scalar('lr', dtype=theano.config.floatX)
    mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
    # index = T.scalar('index', dtype='int32')
    # batch_size = T.scalar('batch_size', dtype='int32')
    cost = self.finetune_cost  # + self.L2_reg * self.L2_sqr
    gparams = T.grad(cost, self.params)
    # zip just concatenate two lists
    updates = OrderedDict()
    for param, gparam in zip(self.params, gparams):
        weight_update = self.updates[param]
        upd = mom * weight_update - lr * gparam
        updates[weight_update] = upd
        updates[param] = param + upd
    train_model = theano.function(inputs=[lr, mom],  # index, batch_size
                                  outputs=self.errors,
                                  updates=updates,
                                  givens={self.x: train_set_x,  # [index*batch_size:(index + 1)*batch_size]
                                          self.y: train_set_y,
                                          self.is_train: np.cast['int32'](1)},
                                  on_unused_input='ignore')
    valid_model = theano.function(inputs=[],
                                  outputs=self.errors,
                                  givens={self.x: valid_set_x,
                                          self.y: valid_set_y,
                                          self.is_train: np.cast['int32'](0)},
                                  on_unused_input='ignore')
    return train_model, valid_model
def parameter_prediction(self, test_set_x):  #, batch_size
0
Source : exp_rnn.py
with GNU General Public License v3.0
from filby89
def build_finetune_functions(self, train_shared_xy, valid_shared_xy):
    """ This function is to build finetune functions and to update gradients
    :param train_shared_xy: theano shared variable for input and output training data
    :type train_shared_xy: tuple of shared variable
    :param valid_shared_xy: theano shared variable for input and output development data
    :type valid_shared_xy: tuple of shared variable
    :returns: finetune functions for training and development
    """
    (train_set_x, train_set_y) = train_shared_xy
    (valid_set_x, valid_set_y) = valid_shared_xy
    lr = T.scalar('lr', dtype=theano.config.floatX)
    mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
    # index = T.scalar('index', dtype='int32')
    # batch_size = T.scalar('batch_size', dtype='int32')
    cost = self.finetune_cost  # + self.L2_reg * self.L2_sqr
    gparams = T.grad(cost, self.params)
    # zip just concatenate two lists
    updates = OrderedDict()
    for param, gparam in zip(self.params, gparams):
        weight_update = self.updates[param]
        upd = mom * weight_update - lr * gparam
        updates[weight_update] = upd
        updates[param] = param + upd
    train_model = theano.function(inputs=[lr, mom],  # index, batch_size
                                  outputs=self.errors,
                                  updates=updates,
                                  givens={self.x: train_set_x,  # [index*batch_size:(index + 1)*batch_size]
                                          self.y: train_set_y,
                                          self.is_train: np.cast['int32'](1)},
                                  on_unused_input='ignore')
    valid_model = theano.function(inputs=[],
                                  outputs=self.errors,
                                  givens={self.x: valid_set_x,
                                          self.y: valid_set_y,
                                          self.is_train: np.cast['int32'](0)},
                                  on_unused_input='ignore')
    return train_model, valid_model
def build_finetune_functions_S2S(self, train_shared_xyd, valid_shared_xyd):
0
Source : exp_rnn.py
with GNU General Public License v3.0
from filby89
def build_finetune_functions_S2S(self, train_shared_xyd, valid_shared_xyd):
    """ This function is to build finetune functions and to update gradients
    :param train_shared_xy: theano shared variable for input and output training data
    :type train_shared_xy: tuple of shared variable
    :param valid_shared_xy: theano shared variable for input and output development data
    :type valid_shared_xy: tuple of shared variable
    :returns: finetune functions for training and development
    """
    (train_set_x, train_set_y, train_set_d) = train_shared_xyd
    (valid_set_x, valid_set_y, valid_set_d) = valid_shared_xyd
    lr = T.scalar('lr', dtype=theano.config.floatX)
    mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
    cost = self.finetune_cost  # + self.L2_reg * self.L2_sqr
    gparams = T.grad(cost, self.params)
    # zip just concatenate two lists
    updates = OrderedDict()
    for param, gparam in zip(self.params, gparams):
        weight_update = self.updates[param]
        upd = mom * weight_update - lr * gparam
        updates[weight_update] = upd
        updates[param] = param + upd
    train_model = theano.function(inputs=[lr, mom],
                                  outputs=self.errors,
                                  updates=updates,
                                  givens={self.x: train_set_x,
                                          self.y: train_set_y,
                                          self.d: train_set_d,
                                          self.is_train: np.cast['int32'](1)},
                                  on_unused_input='ignore')
    valid_model = theano.function(inputs=[],
                                  outputs=self.errors,
                                  givens={self.x: valid_set_x,
                                          self.y: valid_set_y,
                                          self.d: valid_set_d,
                                          self.is_train: np.cast['int32'](0)},
                                  on_unused_input='ignore')
    return train_model, valid_model
def build_finetune_functions_S2SPF(self, train_shared_xydf, valid_shared_xydf):
0
Source : exp_rnn.py
with GNU General Public License v3.0
from filby89
def build_finetune_functions_S2SPF(self, train_shared_xydf, valid_shared_xydf):
    """ This function is to build finetune functions and to update gradients
    :param train_shared_xy: theano shared variable for input and output training data
    :type train_shared_xy: tuple of shared variable
    :param valid_shared_xy: theano shared variable for input and output development data
    :type valid_shared_xy: tuple of shared variable
    :returns: finetune functions for training and development
    """
    (train_set_x, train_set_y, train_set_d, train_set_f) = train_shared_xydf
    (valid_set_x, valid_set_y, valid_set_d, valid_set_f) = valid_shared_xydf
    lr = T.scalar('lr', dtype=theano.config.floatX)
    mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
    cost = self.finetune_cost  # + self.L2_reg * self.L2_sqr
    gparams = T.grad(cost, self.params)
    # zip just concatenate two lists
    updates = OrderedDict()
    for param, gparam in zip(self.params, gparams):
        weight_update = self.updates[param]
        upd = mom * weight_update - lr * gparam
        updates[weight_update] = upd
        updates[param] = param + upd
    train_model = theano.function(inputs=[lr, mom],
                                  outputs=self.errors,
                                  updates=updates,
                                  givens={self.x: train_set_x,
                                          self.y: train_set_y,
                                          self.d: train_set_d,
                                          self.f: train_set_f,
                                          self.is_train: np.cast['int32'](1)},
                                  on_unused_input='ignore')
    valid_model = theano.function(inputs=[],
                                  outputs=self.errors,
                                  givens={self.x: valid_set_x,
                                          self.y: valid_set_y,
                                          self.d: valid_set_d,
                                          self.f: valid_set_f,
                                          self.is_train: np.cast['int32'](0)},
                                  on_unused_input='ignore')
    return train_model, valid_model
def parameter_prediction(self, test_set_x):  #, batch_size
0
Source : exp_rnn.py
with GNU General Public License v3.0
from filby89
def parameter_prediction(self, test_set_x):  #, batch_size
    """ This function is to predict the output of NN
    :param test_set_x: input features for a testing sentence
    :type test_set_x: python array variable
    :returns: predicted features
    """
    n_test_set_x = test_set_x.shape[0]
    test_out = theano.function([], self.final_layer.output,
                               givens={self.x: test_set_x[0:n_test_set_x],
                                       self.is_train: np.cast['int32'](0)},
                               on_unused_input='ignore')
    predict_parameter = test_out()
    return predict_parameter
def parameter_prediction_S2S(self, test_set_x, test_set_d):
0
Source : exp_rnn.py
with GNU General Public License v3.0
from filby89
def parameter_prediction_S2S(self, test_set_x, test_set_d):
    """ This function is to predict the output of NN
    :param test_set_x: input features for a testing sentence
    :param test_set_d: phone durations for a testing sentence
    :type test_set_x: python array variable
    :type test_set_d: python array variable
    :returns: predicted features
    """
    n_test_set_x = test_set_x.shape[0]
    test_out = theano.function([], self.final_layer.output,
                               givens={self.x: test_set_x[0:n_test_set_x],
                                       self.d: test_set_d[0:n_test_set_x],
                                       self.is_train: np.cast['int32'](0)},
                               on_unused_input='ignore')
    predict_parameter = test_out()
    return predict_parameter
def parameter_prediction_S2SPF(self, test_set_x, test_set_d, test_set_f):
0
Source : exp_rnn.py
with GNU General Public License v3.0
from filby89
def parameter_prediction_S2SPF(self, test_set_x, test_set_d, test_set_f):
    """ This function is to predict the output of NN
    :param test_set_x: input features for a testing sentence
    :param test_set_d: phone durations for a testing sentence
    :type test_set_x: python array variable
    :type test_set_d: python array variable
    :returns: predicted features
    """
    n_test_set_x = test_set_x.shape[0]
    num_of_frames = sum(test_set_d)
    test_out = theano.function([], self.final_layer.output,
                               givens={self.x: test_set_x[0:n_test_set_x],
                                       self.d: test_set_d[0:n_test_set_x],
                                       self.f: test_set_f[0:num_of_frames],
                                       self.is_train: np.cast['int32'](0)},
                               on_unused_input='ignore')
    predict_parameter = test_out()
    return predict_parameter
def parameter_prediction_CTC(self, test_set_x):  #, batch_size
0
Source : TM_dataset.py
with MIT License
from guxd
def next(self):
    if self.stop != -1 and self.offset >= self.stop:
        self.offset = 0
        raise StopIteration
    else:
        while True:
            source_data = []
            target_data = []
            for source_lang in self.source_langs:
                inc_offset = self.offset + self.batch_size
                npos = 0
                while not npos and inc_offset <= self.data_len:
                    npos = len([x for x in
                                source_lang[self.offset:inc_offset].tolist()
                                if len(x) > 0])
                    nzeros = self.batch_size - npos
                    inc_offset += nzeros
                sents = np.asarray([np.cast[self.dtype](si) for si in
                                    source_lang[self.offset:inc_offset].tolist()
                                    if len(si) > 0])
                if self.order:
                    sents = sents.T
                source_data.append(sents)
            for target_lang in self.target_langs:
                inc_offset = self.offset + self.batch_size
                npos = 0
                while not npos and inc_offset <= self.data_len:
                    npos = len([x for x in
                                target_lang[self.offset:inc_offset].tolist()
                                if len(x) > 0])
                    nzeros = self.batch_size - npos
                    inc_offset += nzeros
                sents = np.asarray([np.cast[self.dtype](si) for si in
                                    target_lang[self.offset:inc_offset].tolist()
                                    if len(si) > 0])
                if self.order:
                    sents = sents.T
                target_data.append(sents)
            if inc_offset > self.data_len and self.use_infinite_loop:
                print("Restarting the dataset iterator.")
                inc_offset = 0  # self.offset + self.batch_size
            elif inc_offset > self.data_len:
                self.offset = 0
                raise StopIteration
            if len(source_data[0]) < 1 or len(target_data[0]) < 1:
                self.offset = inc_offset
                inc_offset = self.offset + self.batch_size
                continue
            break
        self.offset = inc_offset
    if not self.output_format:
        return source_data, target_data
    else:
        return self.output_format(source_data, target_data)
class PytablesBitextFetcher(threading.Thread):
0
Source : TM_dataset.py
with MIT License
from guxd
def next(self):
    if self.stop != -1 and self.offset >= self.stop:
        self.offset = 0
        raise StopIteration
    else:
        while True:
            inc_offset = self.offset + self.batch_size
            if inc_offset > self.data_len and self.use_infinite_loop:
                print("Restarting the dataset iterator.")
                inc_offset = 0
            elif inc_offset > self.data_len:
                self.offset = 0
                raise StopIteration
            sents_s = np.asarray([np.cast[self.dtype](si) for si in
                                  self.source_ctxt[self.offset:inc_offset].tolist()
                                  if len(si) > 0])
            if self.order:
                sents_s = sents_s.T
            source_ctxt = sents_s
            sents_t = np.asarray([np.cast[self.dtype](si) for si in
                                  self.target_ctxt[self.offset:inc_offset].tolist()
                                  if len(si) > 0])
            if self.order:
                sents_t = sents_t.T
            target_ctxt = sents_t
            targets = np.asarray([np.cast[self.dtype](si) for si in
                                  self.targets[self.offset:inc_offset].tolist()
                                  if len(si) > 0])
            if len(source_ctxt) < 1 or len(target_ctxt) < 1 or len(targets) < 1:
                self.offset = inc_offset
                inc_offset = self.offset + self.batch_size
                continue
            break
        self.offset = inc_offset
    if not self.output_format:
        return source_ctxt, target_ctxt, targets
    else:
        return self.output_format(source_ctxt, target_ctxt, targets)
0
Source : nn.py
with MIT License
from imatge-upc
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1*v + (1. - mom1)*g
        mg_t = mom2*mg + (1. - mom2)*T.square(g)
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    updates.append((t, t+1))
    return updates
class WeightNormLayer(lasagne.layers.Layer):
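The graph above is standard bias-corrected Adam; a plain-NumPy sketch of the per-parameter step it encodes (illustrative only, names v/mg mirror the snippet):

import numpy as np

def adam_step(p, g, v, mg, t, lr=0.001, mom1=0.9, mom2=0.999, eps=1e-8):
    v = mom1 * v + (1. - mom1) * g          # biased first-moment estimate
    mg = mom2 * mg + (1. - mom2) * g ** 2   # biased second-moment estimate
    v_hat = v / (1. - mom1 ** t)            # bias corrections
    mg_hat = mg / (1. - mom2 ** t)
    p = p - lr * v_hat / np.sqrt(mg_hat + eps)
    return p, v, mg, t + 1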
0
Source : audio_tools.py
with MIT License
from jfsantos
def _mgc_newton(mgc_stored, periodogram, order, alpha, gamma,
                recursion_order, iter_number, y_fft, z_fft, cr, pr, rr, ri,
                qr, qi, Tm, Hm, Tm_plus_Hm, b):
    # a lot of inplace operations to match the Julia code
    cr[1:order + 1] = mgc_stored[1:order + 1]
    if alpha != 0:
        cr_res = _mgc_b2c(cr[:recursion_order + 1], cr[:order + 1], -alpha)
        cr[:recursion_order + 1] = cr_res[:]
    y = sp.fftpack.fft(np.cast["float64"](cr))
    c = mgc_stored
    x = periodogram
    if gamma != 0.:
        gamma_inv = 1. / gamma
    else:
        gamma_inv = np.inf
    if gamma == -1.:
        pr[:] = copy.deepcopy(x)
        new_pr = copy.deepcopy(pr)
    elif gamma == 0.:
        pr[:] = copy.deepcopy(x) / np.exp(2 * np.real(y))
        new_pr = copy.deepcopy(pr)
    else:
        tr = 1. + gamma * np.real(y)
        ti = -gamma * np.imag(y)
        trr = tr * tr
        tii = ti * ti
        s = trr + tii
        t = x * np.power(s, (-gamma_inv))
        t /= s
        pr[:] = t
        rr[:] = tr * t
        ri[:] = ti * t
        t /= s
        qr[:] = (trr - tii) * t
        s = tr * ti * t
        qi[:] = (s + s)
        new_pr = copy.deepcopy(pr)
    if gamma != -1.:
        """
        print()
        print(pr.sum())
        print(rr.sum())
        print(ri.sum())
        print(qr.sum())
        print(qi.sum())
        print()
        """
        pass
    y_fft[:] = copy.deepcopy(pr) + 0.j
    z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
    pr[:] = copy.deepcopy(np.real(z_fft))
    if alpha != 0.:
        idx_1 = pr[:2 * order + 1]
        idx_2 = pr[:recursion_order + 1]
        idx_3 = _mgc_b2c(idx_1, idx_2, alpha)
        pr[:2 * order + 1] = idx_3[:]
    if gamma == 0. or gamma == -1.:
        qr[:2 * order + 1] = pr[:2 * order + 1]
        rr[:order + 1] = copy.deepcopy(pr[:order + 1])
    else:
        for i in range(len(qr)):
            y_fft[i] = qr[i] + 1j * qi[i]
        z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
        qr[:] = np.real(z_fft)
        for i in range(len(rr)):
            y_fft[i] = rr[i] + 1j * ri[i]
        z_fft[:] = np.fft.fft(y_fft) / len(y_fft)
        rr[:] = np.real(z_fft)
        if alpha != 0.:
            qr_new = _mgc_b2c(qr[:recursion_order + 1], qr[:recursion_order + 1], alpha)
            qr[:recursion_order + 1] = qr_new[:]
            rr_new = _mgc_b2c(rr[:order + 1], rr[:recursion_order + 1], alpha)
            rr[:order + 1] = rr_new[:]
    if alpha != 0:
        _mgc_ptrans(pr, order, alpha)
        _mgc_qtrans(qr, order, alpha)
    eta = 0.
    if gamma != -1.:
        eta = _mgc_gain(rr, c, order, gamma)
        c[0] = np.sqrt(eta)
    if gamma == -1.:
        qr[:] = 0.
    elif gamma != 0.:
        for i in range(2, 2 * order + 1):
            qr[i] *= 1. + gamma
    te = pr[:order]
    _mgc_fill_toeplitz(Tm, te)
    he = qr[2: 2 * order + 1]
    _mgc_fill_hankel(Hm, he)
    Tm_plus_Hm[:] = Hm[:] + Tm[:]
    b[:order] = rr[1:order + 1]
    res = np.linalg.solve(Tm_plus_Hm, b)
    b[:] = res[:]
    c[1:order + 1] += res[:order]
    if gamma == -1.:
        eta = _mgc_gain(rr, c, order, gamma)
        c[0] = np.sqrt(eta)
    return np.log(eta), new_pr
def _mgc_mgcepnorm(b_gamma, alpha, gamma, otype):
0
Source : HyperDenseNet.py
with MIT License
from josedolz
def __init__(self):
    # --- containers for Theano compiled functions ----
    self.networkModel_Train = ""
    self.networkModel_Test = ""
    # --- shared variables will be stored in the following variables ----
    self.trainingData_x = ""
    self.testingData_x = ""
    self.trainingData_y = ""
    self.trainingData_x_Bottom = ""
    self.testingData_x_Bottom = ""
    self.lastLayer = ""
    self.networkLayers = []
    self.intermediate_ConnectedLayers = []
    self.networkName = ""
    self.folderName = ""
    self.cnnLayers = []
    self.n_classes = -1
    self.sampleSize_Train = []
    self.sampleSize_Test = []
    self.kernel_Shapes = []
    self.pooling_scales = []
    self.dropout_Rates = []
    self.activationType = -1
    self.weight_Initialization = -1
    self.dropoutRates = []
    self.batch_Size = -1
    self.receptiveField = 0
    self.initialLearningRate = ""
    self.learning_rate = theano.shared(np.cast["float32"](0.01))
    # Symbolic variables,
    self.inputNetwork_Train = None
    self.inputNetwork_Test = None
    self.L1_reg_C = 0
    self.L2_reg_C = 0
    self.costFunction = 0
    # Params for optimizers
    self.initialMomentum = ""
    self.momentum = theano.shared(np.cast["float32"](0.))
    self.momentumNormalized = 0
    self.momentumType = 0
    self.vel_Momentum = []
    self.rho_RMSProp = 0
    self.epsilon_RMSProp = 0
    self.params_RmsProp = []
    self.numberOfEpochsTrained = 0
    self.applyBatchNorm = ""
    self.numberEpochToApplyBatchNorm = 0
    self.softmax_Temp = 1.0
    self.centralVoxelsTrain = ""
    self.centralVoxelsTest = ""
# -------------------------------------------------------------------- END Function ------------------------------------------------------------------- #
""" ####### Function to generate the network architecture ######### """
0
Source : calibration.py
with MIT License
from microsoft
def calibration_curve(probabilities: np.ndarray, targets: np.ndarray, num_bins: int,
                      top_class_only: bool = True, equal_size_bins: bool = False, min_p: float = 0.0):
    """Calculates the calibration of a classifier (binary or multi-class). Specifically it takes
    predicted probability values, assigns them to a given number of bins (keeping either the width
    of the bins fixed or the number of predictions assigned to each bin) and then returns for each
    bin the mean predicted probability of the positive class occurring as well as the empirically observed
    frequency as per the targets. Additionally the relative size of each bin is returned. Note that
    all inputs are assumed to be well-specified, i.e. probabilities between 0 and 1 and, for multi-class
    targets, to sum to 1 across the final dimension.
    Using the default options top_class_only=True and equal_size_bins=False returns mean probabilities,
    bin_frequency and bin_weights values as used for the standard ECE formulation, e.g. in
    http://openaccess.thecvf.com/content_CVPRW_2019/papers/Uncertainty%20and%20Robustness%20in%20Deep%20Visual%20Learning/Nixon_Measuring_Calibration_in_Deep_Learning_CVPRW_2019_paper.pdf
    top_class_only=False gives results for the Static Calibration Error, equal_size_bins=True the Adaptive Calibration
    Error (the paper does not specify whether to set top_class_only to True or False). Setting min_p > 0
    corresponds to the Thresholded Adaptive Calibration Error. To calculate these calibration errors, the outputs
    of this function can directly be passed into the expected_calibration_error function.
    Args:
        probabilities: Array containing probability predictions.
        targets: Array containing classification targets.
        num_bins: Number of bins for probability values.
        top_class_only: Whether to only use the maximum predicted probability for multi-class classification
            or all probabilities.
        equal_size_bins: Whether to have each bin an equal number of predictions assigned vs. equal width.
        min_p: Minimum threshold for the probabilities to count.
    Returns:
        bin_probability: Average predicted probability. NaN for empty bins.
        bin_frequency: Average observed true class frequency. NaN for empty bins.
        bin_weights: Relative size of each bin. Zero for empty bins.
    """
    if probabilities.ndim == targets.ndim + 1:
        # multi-class
        if top_class_only:
            # targets are converted to per-datapoint accuracies, i.e. checking whether or not the predicted
            # class was observed
            predictions = np.cast[targets.dtype](probabilities.argmax(-1))
            targets = targets == predictions
            probabilities = probabilities.max(-1)
        else:
            # convert the targets to one-hot encodings and flatten both those targets and the probabilities,
            # treating them as independent predictions for binary classification
            num_classes = probabilities.shape[-1]
            one_hot_targets = np.cast[targets.dtype](targets[..., np.newaxis] == np.arange(num_classes))
            targets = one_hot_targets.reshape(*targets.shape[:-1], -1)
            probabilities = probabilities.reshape(*probabilities.shape[:-2], -1)
    elif probabilities.ndim != targets.ndim:
        raise ValueError("Shapes of probabilities and targets do not match. "
                         "Must be either equal (binary classification) or probabilities "
                         "must have exactly one dimension more (multi-class).")
    else:
        # binary predictions, no pre-processing to do
        pass
    if equal_size_bins:
        quantiles = np.linspace(0, 1, num_bins + 1)
        bin_edges = np.quantile(probabilities, quantiles)
        # explicitly set upper and lower edge to be 0/1
        bin_edges[0] = 0
        bin_edges[-1] = 1
    else:
        bin_edges = np.linspace(0, 1, num_bins + 1)
    # bin membership has to be checked with strict inequality to either the lower or upper
    # edge to avoid predictions exactly on a boundary to be included in multiple bins.
    # Therefore the exclusive boundary has to be slightly below or above the actual value
    # to avoid 0 or 1 predictions to not be assigned to any bin
    bin_edges[0] -= 1e-6
    lower = bin_edges[:-1]
    upper = bin_edges[1:]
    probabilities = probabilities.reshape(-1, 1)
    targets = targets.reshape(-1, 1)
    # set up masks for checking which bin probabilities fall into and whether they are above the minimum
    # threshold. I'm doing this by multiplication with those booleans rather than indexing in order to
    # allow for the code to be extensible for broadcasting
    bin_membership = (probabilities > lower) & (probabilities <= upper)
    exceeds_threshold = probabilities >= min_p
    bin_sizes = (bin_membership * exceeds_threshold).sum(-2)
    non_empty = bin_sizes > 0
    bin_probability = np.full(num_bins, np.nan)
    np.divide((probabilities * bin_membership * exceeds_threshold).sum(-2), bin_sizes,
              out=bin_probability, where=non_empty)
    bin_frequency = np.full(num_bins, np.nan)
    np.divide((targets * bin_membership * exceeds_threshold).sum(-2), bin_sizes,
              out=bin_frequency, where=non_empty)
    bin_weights = np.zeros(num_bins)
    np.divide(bin_sizes, bin_sizes.sum(), out=bin_weights, where=non_empty)
    return bin_probability, bin_frequency, bin_weights
def expected_calibration_error(mean_probability_predicted: np.ndarray, observed_frequency: np.ndarray,
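A minimal usage sketch with hypothetical data (ten binary predictions, five equal-width bins):

import numpy as np

probs = np.array([0.1, 0.3, 0.35, 0.6, 0.62, 0.7, 0.8, 0.85, 0.9, 0.95])
targets = np.array([0, 0, 1, 1, 0, 1, 1, 1, 1, 1])
bin_p, bin_f, bin_w = calibration_curve(probs, targets, num_bins=5)
# The standard ECE would then be the bin-weighted gap:
# np.nansum(bin_w * np.abs(bin_p - bin_f))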
0
Source : test_gradient.py
with MIT License
from muhanzhang
def test_known_grads_integers():
    # Tests that known_grads works on integers
    x = theano.tensor.iscalar()
    g_expected = theano.tensor.scalar()
    g_grad = theano.gradient.grad(cost=None, known_grads={x: g_expected}, wrt=x)
    f = theano.function([g_expected], g_grad)
    x = -3
    gv = np.cast[theano.config.floatX](.6)
    g_actual = f(gv)
    assert np.allclose(g_actual, gv)
def test_undefined_cost_grad():
0
Source : test_metrics.py
with MIT License
from NEGU93
def test_metric():
    y_true = [[0, 0, 0],
              [0, 0, 1],
              [0, 1, 0], [0, 1, 0],
              [1, 0, 0]]
    y_pred = [[0.1, 0.9, 0.8],
              [0.1, 0.9, 0.8],
              [0.05, 0.95, 0], [0.95, 0.05, 0],
              [0, 1, 0]]
    m = ComplexCategoricalAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == 0.25
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](1/6)  # I want 0.5/3 = 1/6
    y_true = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [0., 1.]
    ])
    y_pred = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]
    ])
    m = ComplexCategoricalAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](.9)
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.5)
def test_null_label():
0
Source : cphd.py
with MIT License
from ngageoint
def _fetch(self, range1, range2, index):
    """
    Parameters
    ----------
    range1 : tuple
    range2 : tuple
    index : int

    Returns
    -------
    numpy.ndarray
    """
    chipper = self._chipper[index]
    # NB: it is critical that there is no reorientation operation in CPHD.
    # noinspection PyProtectedMember
    range1, range2 = chipper._reorder_arguments(range1, range2)
    data = chipper(range1, range2)
    # fetch the scale data, if there is any
    scale = self.read_pvp_variable('AmpSF', index, the_range=range1)
    if scale is None:
        return data
    scale = numpy.cast['float32'](scale)
    # recast from double, so our data will remain float32
    if scale.size == 1:
        return scale[0]*data
    elif data.ndim == 1:
        return scale*data
    else:
        return scale[:, numpy.newaxis]*data
def __call__(self, range1, range2, index=0):
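Why the float32 cast of the scale matters here: multiplying complex64 data by a float64 array would silently promote the product to complex128. A plain-NumPy sketch (illustrative shapes, not from the repo):

import numpy as np

data = np.zeros(4, dtype=np.complex64)
scale = np.ones(4, dtype=np.float64)
assert (scale.astype(np.float32) * data).dtype == np.complex64  # stays 32-bit
assert (scale * data).dtype == np.complex128                    # unwanted promotion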
0
Source : crsd.py
with MIT License
from ngageoint
def _fetch(self, range1, range2, index):
    """
    Parameters
    ----------
    range1 : tuple
    range2 : tuple
    index : int

    Returns
    -------
    numpy.ndarray
    """
    chipper = self._chipper[index]
    # NB: it is critical that there is no reorientation operation in CRSD.
    # noinspection PyProtectedMember
    range1, range2 = chipper._reorder_arguments(range1, range2)
    data = chipper(range1, range2)
    # fetch the scale data, if there is any
    scale = self.read_pvp_variable('AmpSF', index, the_range=range1)
    if scale is None:
        return data
    scale = numpy.cast['float32'](scale)
    # recast from double, so our data will remain float32
    if scale.size == 1:
        return scale[0]*data
    elif data.ndim == 1:
        return scale*data
    else:
        return scale[:, numpy.newaxis]*data
def __call__(self, range1, range2, index=0):
0
Source : ppo.py
with MIT License
from openai
def run(self):
    # shift forward
    if len(self.mb_stuff[2]) >= self.nsteps + self.num_steps_to_cut_left + self.num_steps_to_cut_right:
        self.mb_stuff = [l[self.nsteps:] for l in self.mb_stuff]
    mb_obs, mb_increase_ent, mb_rewards, mb_reward_avg, mb_actions, mb_values, mb_valids, mb_random_resets, \
        mb_dones, mb_neglogpacs, mb_states = self.mb_stuff
    epinfos = []
    while len(mb_rewards) < self.nsteps + self.num_steps_to_cut_left + self.num_steps_to_cut_right:
        actions, values, states, neglogpacs = self.model.step(mb_obs[-1], mb_states[-1], mb_dones[-1], mb_increase_ent[-1])
        mb_actions.append(actions)
        mb_values.append(values)
        mb_states.append(states)
        mb_neglogpacs.append(neglogpacs)
        obs, rewards, dones, infos = self.env.step(actions)
        mb_obs.append(np.cast[self.model.train_model.X.dtype.name](obs))
        mb_increase_ent.append(np.asarray([info.get('increase_entropy', False) for info in infos], dtype=np.uint8))
        mb_rewards.append(rewards)
        mb_dones.append(dones)
        mb_valids.append([(not info.get('replay_reset.invalid_transition', False)) for info in infos])
        mb_random_resets.append(np.array([info.get('replay_reset.random_reset', False) for info in infos]))
        for info in infos:
            maybeepinfo = info.get('episode')
            if maybeepinfo: epinfos.append(maybeepinfo)
    # GAE
    mb_advs = [np.zeros_like(mb_values[0])] * (len(mb_rewards) + 1)
    for t in reversed(range(len(mb_rewards))):
        if t < self.num_steps_to_cut_left:
            mb_valids[t] = np.zeros_like(mb_valids[t])
        else:
            if t == len(mb_values) - 1:
                next_value = self.model.value(mb_obs[-1], mb_states[-1], mb_dones[-1])
            else:
                next_value = mb_values[t + 1]
            use_next = np.logical_not(mb_dones[t + 1])
            adv_mask = np.logical_not(mb_random_resets[t + 1])
            delta = mb_rewards[t] + self.gamma * use_next * next_value - mb_values[t]
            mb_advs[t] = adv_mask * (delta + self.gamma * self.lam * use_next * mb_advs[t + 1])
    # extract arrays
    end = self.nsteps + self.num_steps_to_cut_left
    ar_mb_obs = np.asarray(mb_obs[:end], dtype=self.model.train_model.X.dtype.name)
    ar_mb_ent = np.stack(mb_increase_ent[:end], axis=0)
    ar_mb_valids = np.asarray(mb_valids[:end], dtype=np.float32)
    ar_mb_actions = np.asarray(mb_actions[:end])
    ar_mb_values = np.asarray(mb_values[:end], dtype=np.float32)
    ar_mb_neglogpacs = np.asarray(mb_neglogpacs[:end], dtype=np.float32)
    ar_mb_dones = np.asarray(mb_dones[:end], dtype=np.bool)
    ar_mb_advs = np.asarray(mb_advs[:end], dtype=np.float32)
    ar_mb_rets = ar_mb_values + ar_mb_advs
    if self.norm_adv:
        adv_mean, adv_std, _ = mpi_moments(ar_mb_advs.ravel())
        ar_mb_advs = (ar_mb_advs - adv_mean) / (adv_std + 1e-7)
    # obs, increase_ent, advantages, masks, actions, values, neglogpacs, valids, returns, states, epinfos = runner.run()
    return (*map(sf01, (ar_mb_obs, ar_mb_ent, ar_mb_advs, ar_mb_dones, ar_mb_actions, ar_mb_values, ar_mb_neglogpacs, ar_mb_valids, ar_mb_rets)),
            mb_states[0], epinfos)
def sf01(arr):
0
Source : readerinfo.py
with GNU General Public License v3.0
from SMByC
def getNoDataValueFor(self, block, band=1):
    """
    Returns the 'no data' value for the dataset
    underlying the block. This should be the
    same as what was set for the stats ignore value
    when that dataset was created.
    The value is cast to the same data type as the
    dataset.
    """
    ds = self.getGDALDatasetFor(block)
    band = ds.GetRasterBand(band)
    novalue = band.GetNoDataValue()
    # if there is a valid novalue, cast it to the type
    # of the dataset. Note this creates a numpy 0-d array
    if novalue is not None:
        numpytype = imageio.GDALTypeToNumpyType(band.DataType)
        novalue = numpy.cast[numpytype](novalue)
    return novalue
def getPercent(self):
0
Source : explorer.py
with Apache License 2.0
from stellargraph
def run(
    self, nodes, *, n=None, length=None, p=None, q=None, seed=None, weighted=None
):
    """
    Perform a random walk starting from the root nodes. Optional parameters default to using the
    values passed in during construction.

    Args:
        nodes (list): The root nodes as a list of node IDs
        n (int, optional): Total number of random walks per root node
        length (int, optional): Maximum length of each random walk
        p (float, optional): Defines probability, 1/p, of returning to source node
        q (float, optional): Defines probability, 1/q, for moving to a node away from the source node
        seed (int, optional): Random number generator seed; default is None
        weighted (bool, optional): Indicates whether the walk is unweighted or weighted

    Returns:
        List of lists of nodes ids for each of the random walks
    """
    n = _default_if_none(n, self.n, "n")
    length = _default_if_none(length, self.length, "length")
    p = _default_if_none(p, self.p, "p")
    q = _default_if_none(q, self.q, "q")
    weighted = _default_if_none(weighted, self.weighted, "weighted")
    self._validate_walk_params(nodes, n, length)
    self._check_weights(p, q, weighted)
    rs, _ = self._get_random_state(seed)
    nodes = self.graph.node_ids_to_ilocs(nodes)
    if weighted:
        self._check_weights_valid()
    weight_dtype = self.graph._edges.weights.dtype
    cast_func = np.cast[weight_dtype]
    ip = cast_func(1.0 / p)
    iq = cast_func(1.0 / q)
    if np.isinf(ip):
        raise ValueError(
            f"p: value ({p}) is too small. It must be possible to represent 1/p in {weight_dtype}, but this value overflows to infinity."
        )
    if np.isinf(iq):
        raise ValueError(
            f"q: value ({q}) is too small. It must be possible to represent 1/q in {weight_dtype}, but this value overflows to infinity."
        )
    walks = []
    for node in nodes:  # iterate over root nodes
        for walk_number in range(n):  # generate n walks per root node
            # the walk starts at the root
            walk = [node]
            previous_node = None
            previous_node_neighbours = []
            current_node = node
            for _ in range(length - 1):
                # select one of the neighbours using the
                # appropriate transition probabilities
                if weighted:
                    neighbours, weights = self.graph.neighbor_arrays(
                        current_node, include_edge_weight=True, use_ilocs=True
                    )
                else:
                    neighbours = self.graph.neighbor_arrays(
                        current_node, use_ilocs=True
                    )
                    weights = np.ones(neighbours.shape, dtype=weight_dtype)
                if len(neighbours) == 0:
                    break
                mask = neighbours == previous_node
                weights[mask] *= ip
                mask |= np.isin(neighbours, previous_node_neighbours)
                weights[~mask] *= iq
                choice = naive_weighted_choices(rs, weights)
                if choice is None:
                    break
                previous_node = current_node
                previous_node_neighbours = neighbours
                current_node = neighbours[choice]
                walk.append(current_node)
            walks.append(list(self.graph.node_ilocs_to_ids(walk)))
    return walks
def _check_weights(self, p, q, weighted):
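The np.cast[weight_dtype] lookup above works because np.cast accepts any dtype-like key, not only a string; a minimal sketch (NumPy < 2.0, where np.cast still exists):

import numpy as np

cast_func = np.cast[np.dtype('float32')]  # same caster as np.cast['float32']
assert cast_func(1.0 / 3).dtype == np.float32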
0
Source : data_utils.py
with MIT License
from WANG-Chaoyue
def coloring(xmb, ymb, p=0.2):
    xmb_color = xmb
    B, C, H, W = xmb.shape
    # print H
    for i in range(B):
        img_input = np.cast['float32'](xmb[i, :, :, :])
        img_real = np.cast['float32'](ymb[i, :, :, :])
        img_input = convert_img_back(img_input)
        img_real = convert_img_back(img_real)
        img_gray = img_real[:, :, 0] + img_real[:, :, 1] + img_real[:, :, 2]
        img_gray = (765 - img_gray)
        img_gray[img_gray > 10] = 255
        img_gray[img_gray < 255] = 0
        img_gray = img_gray / 255
        img_gray = [img_gray] * 3
        img_gray = np.array(img_gray)
        img_gray = img_gray.reshape([3, H, W])
        img_mask = convert_img_back(img_gray)
        rand = np.random.random((H, W))
        rand[rand > p] = 1
        rand[rand < 1] = 0
        rand = [rand] * 3
        rand = np.array(rand)
        rand = rand.reshape([3, H, W])
        rand = convert_img_back(rand)
        img_color = (img_input) * (1 - img_mask) + img_real * img_mask + rand * 1000
        img_color[img_color > 255] = 255
        img_input = img_color * (img_input / 255)
        img_input = np.cast['uint8'](img_input)
        # img_real = np.cast['uint8'](img_real)
        # img_mask = np.cast['uint8'](img_mask*255)
        xmb_color[i, :, :, :] = convert_img(img_input)
    return xmb_color
def processing_img(img, center=True, scale=True, convert=True):
0
Source : LiviaNet.py
with Apache License 2.0
from YongLiuLab
def __init__(self):
    # --- containers for Theano compiled functions ----
    self.networkModel_Train = ""
    self.networkModel_Test = ""
    # --- shared variables will be stored in the following variables ----
    self.trainingData_x = ""
    self.testingData_x = ""
    self.trainingData_y = ""
    self.lastLayer = ""
    self.networkLayers = []
    self.intermediate_ConnectedLayers = []
    self.networkName = ""
    self.folderName = ""
    self.cnnLayers = []
    self.n_classes = -1
    self.sampleSize_Train = []
    self.sampleSize_Test = []
    self.kernel_Shapes = []
    self.pooling_scales = []
    self.dropout_Rates = []
    self.activationType = -1
    self.weight_Initialization = -1
    self.dropoutRates = []
    self.batch_Size = -1
    self.receptiveField = 0
    self.initialLearningRate = ""
    self.learning_rate = theano.shared(np.cast["float32"](0.01))
    # Symbolic variables,
    self.inputNetwork_Train = None
    self.inputNetwork_Test = None
    self.L1_reg_C = 0
    self.L2_reg_C = 0
    self.costFunction = 0
    # Params for optimizers
    self.initialMomentum = ""
    self.momentum = theano.shared(np.cast["float32"](0.))
    self.momentumNormalized = 0
    self.momentumType = 0
    self.vel_Momentum = []
    self.rho_RMSProp = 0
    self.epsilon_RMSProp = 0
    self.params_RmsProp = []
    self.numberOfEpochsTrained = 0
    self.applyBatchNorm = ""
    self.numberEpochToApplyBatchNorm = 0
    self.softmax_Temp = 1.0
    self.centralVoxelsTrain = ""
    self.centralVoxelsTest = ""
# -------------------------------------------------------------------- END Function ------------------------------------------------------------------- #
""" ####### Function to generate the network architecture ######### """