Here are examples of the Python API numpy.asscalar taken from open source projects. By voting up an example, you can indicate which examples are most useful and appropriate.
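For context before the examples: numpy.asscalar(a) returns the single element of a size-1 NumPy array (or a NumPy scalar) as the equivalent plain Python scalar. Note that numpy.asscalar was deprecated in NumPy 1.16 and removed in NumPy 1.23; ndarray.item() is the documented replacement. The minimal sketch below is illustrative only and is not taken from any of the projects listed here:

import numpy as np

a = np.array([2.5])       # size-1 array
x = a.item()              # modern replacement for np.asscalar(a)
print(type(x), x)         # <class 'float'> 2.5

# On NumPy releases older than 1.23 the same result is obtained with:
# x = np.asscalar(a)      # emits a DeprecationWarning since NumPy 1.16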
123 Examples
Source : driver.py, with MIT License, from aicherc (3 votes)
def convert_gradient(gradient, parameters):
""" Convert gradient """
new_gradient = np.array([
gradient['log_mu'],
gradient['logit_phi'],
gradient['logit_lambduh'],
gradient['LRinv_vec']*(-np.asscalar(parameters.LRinv)**-1), # grad w.r.t. tau
]).flatten()
return new_gradient
###############################################################################
## Run Script ---------------------------------------------------------------
###############################################################################
if __name__=='__main__':
Source : driver.py, with MIT License, from aicherc (3 votes)
def convert_gradient(gradient, parameters):
""" Convert gradient w.r.t. LRinv, LQinv, C, A to gradient w.r.t phi, sigma, tau """
new_gradient = np.array([
gradient['A'], # grad w.r.t. A <-> grad w.r.t. phi
gradient['LQinv_vec']*(-np.asscalar(parameters.LQinv)**-1), # grad w.r.t. sigma
gradient['LRinv_vec']*(-np.asscalar(parameters.LRinv)**-1), # grad w.r.t. tau
]).flatten()
return new_gradient
###############################################################################
## Run Script ---------------------------------------------------------------
###############################################################################
if __name__=='__main__':
Source : base_parameters.py, with MIT License, from aicherc (3 votes)
def logprior(self, parameters, **kwargs):
""" Return the log prior density for parameters
Args:
parameters (Parameters)
Returns:
logprior (double)
"""
logprior = 0.0
for prior_helper in self._prior_helper_list:
logprior = prior_helper.logprior(
self, logprior, parameters, **kwargs)
return np.asscalar(logprior)
def grad_logprior(self, parameters, **kwargs):
Source : parameters.py, with MIT License, from aicherc (3 votes)
def __str__(self):
my_str = "GARCHParameters:"
my_str +="\nalpha:{0}, beta:{1}, gamma:{2}, tau:{3}\n".format(
np.around(np.asscalar(self.alpha), 6),
np.around(np.asscalar(self.beta), 6),
np.around(np.asscalar(self.gamma), 6),
np.around(np.asscalar(self.tau), 6))
return my_str
@property
Source : probweight.py, with MIT License, from aicherc (3 votes)
def logprior(self, prior, logprior, parameters, **kwargs):
alpha = prior.hyperparams[self._alpha_name]
beta = prior.hyperparams[self._beta_name]
logprior += scipy.stats.beta.logpdf(
getattr(parameters, self.name),
a=alpha,
b=beta,
)
return np.asscalar(logprior)
def grad_logprior(self, prior, grad, parameters, **kwargs):
Source : colors.py, with MIT License, from alvarobartt (3 votes)
def _sanitize_extrema(ex):
if ex is None:
return ex
try:
ret = np.asscalar(ex)
except AttributeError:
ret = float(ex)
return ret
def _is_nth_color(c):
Source : util_misc.py, with MIT License, from CIRADA-Tools (3 votes)
def toscalar(a):
"""
Returns a scalar version of a Numpy object.
"""
try:
return np.asscalar(a)
except Exception:
return a
#-----------------------------------------------------------------------------#
def MAD(a, c=0.6745, axis=None):
Source : rbf.py, with MIT License, from david-lindner (3 votes)
def k_func(self, x: np.ndarray, y: np.ndarray) -> float:
"""
Return the kernel function evaluated between two points `x` and `y`.
This is determined using the RBF function and the custom `distance`
function.
Importantly, this function implement caching to avoid recomputing the
same value multiple times.
"""
key = (x.tostring(), y.tostring())
if key in self.k_cache:
return self.k_cache[key]
else:
r = self.distance(x, y) / self.lengthscale
k = self.variance ** 2 * np.exp(-0.5 * r ** 2)
k = np.asscalar(k)
self.k_cache[key] = k
return k
Source : error_utils.py, with MIT License, from dcmoyer (3 votes)
def zero_one_thold(c,c_hat,thold=0.5):
c_hat[c_hat < thold] = 0
c_hat[c_hat >= thold] = 1
return np.asscalar(sum((c_hat == c).astype(int)) / c.shape[0])
def zero_one_abs(c,c_hat):
Source : gym_wrapper.py, with Apache License 2.0, from deepmind (3 votes)
def _spec_to_space(spec):
"""Convert dm_env.specs to gym.Spaces."""
if isinstance(spec, list):
return spaces.Tuple([_spec_to_space(s) for s in spec])
elif isinstance(spec, specs.DiscreteArray):
return spaces.Discrete(spec.num_values)
elif isinstance(spec, specs.BoundedArray):
return spaces.Box(
np.asscalar(spec.minimum),
np.asscalar(spec.maximum),
shape=spec.shape,
dtype=spec.dtype)
else:
raise ValueError('Unknown type for specs: {}'.format(spec))
class GymWrapper(object):
Source : continuous.py, with MIT License, from dragonfly (3 votes)
def pdf(self, x):
""" Returns value of pdf at x """
if x < self.lower or x > self.upper:
return 0
return np.asscalar(1/(self.upper - self.lower))
def draw_samples_random(self, size=None):
Source : discrete.py, with MIT License, from dragonfly (3 votes)
def pmf(self, i):
""" Returns pmf of distribution at x. """
if i < 0 or i >= self.k:
return 0
return np.asscalar(self.p[i])
def draw_samples_random(self, size=None):
Source : discrete.py, with MIT License, from dragonfly (3 votes)
def logp(self, value):
""" Returns log probability of category """
if hasattr(value, '__len__'):
if len(value) == 1:
value = np.asscalar(value)
else:
raise ValueError('Input dimension should be 1.')
if value < 0 or value >= self.k:
return -np.inf
return np.log(self.p[value])
def get_id(self, category):
Source : kernel.py, with MIT License, from dragonfly (3 votes)
def __init__(self, dim, scale, order, dim_bandwidths):
kernel_list = []
for i in range(dim):
kernel_list.append(SEKernel(1, 1.0, np.asscalar(dim_bandwidths[i])))
super(ESPKernelSE, self).__init__(scale, order, kernel_list)
class ESPKernelMatern(ESPKernel):
Source : kernel.py, with MIT License, from dragonfly (3 votes)
def __init__(self, dim, nu, scale, order, dim_bandwidths):
kernel_list = []
for i in range(dim):
kernel_list.append(MaternKernel(1, nu[i], 1.0, np.asscalar(dim_bandwidths[i])))
super(ESPKernelMatern, self).__init__(scale, order, kernel_list)
# Ancillary functions for the ExpSumOfDists and SumOfExpSumOfDistsKernel classes ---------
def _compute_raised_scaled_sum(dist_arrays, betas, powers):
Source : salsa_estimator.py, with MIT License, from dragonfly (3 votes)
def __init__(self, dim, nu, scale, order, dim_bandwidths):
kernel_list = []
for i in range(dim):
kernel_list.append(MaternKernel(1, nu[i], 1.0, np.asscalar(dim_bandwidths[i])))
super(ESPKernelMatern, self).__init__(scale, order, kernel_list)
# SALSA class defined here ---------------------------------------------------------------
def _check_feature_label_lengths_and_format(X, Y):
Source : mortgage.py, with MIT License, from edelgm6 (3 votes)
def get_interest_payment(self, year):
"""Return principal payment for a given year post-investment.
Args:
year (int): Number of years after the purchase of the asset.
Returns:
Decimal: Interest payment in given year.
"""
yearly_rate = self.yearly_interest_rate
years = self.term_in_years
mortgage_amount = self.mortgage_amount
ipmt = npf.ipmt(yearly_rate, year, years, mortgage_amount)
# asscalar needed to enable conversion of the ndarray to Decimal values
return Decimal(numpy.asscalar(ipmt))
def get_pmi_payment(self, debt):
Source : Multi-GPU_Training.py, with Apache License 2.0, from facebookarchive (3 votes)
def accuracy(model):
accuracy = []
prefix = model.net.Proto().name
for device in model._devices:
accuracy.append(
np.asscalar(workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))))
return np.average(accuracy)
# In[ ]:
# SOLUTION for Part 11
# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy
num_epochs = 2
Source : prunet_models.py, with MIT License, from gcastex (3 votes)
def get_prune_frac(self):
zeros = 0
total = 0
for index,params in enumerate(self.parameters()):
zeros+=torch.sum(self.prune_mask[index].view(-1))
total+=(self.prune_mask[index].view(-1)).size(0)
return np.asscalar((zeros.double()/total).detach().numpy())
######### LightCNN implementation. Adapted from code by Alfred Xiang Wu.
class mfm(nn.Module):
Source : color_diff.py, with GNU General Public License v3.0, from HHHHhgqcdxhg (3 votes)
def delta_e_cie1976(color1, color2):
"""
Calculates the Delta E (CIE1976) of two colors.
"""
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie1976(color1_vector, color2_matrix)[0]
return numpy.asscalar(delta_e)
# noinspection PyPep8Naming
def delta_e_cie1994(color1, color2, K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
Source : color_diff.py, with GNU General Public License v3.0, from HHHHhgqcdxhg (3 votes)
def delta_e_cie2000(color1, color2, Kl=1, Kc=1, Kh=1):
"""
Calculates the Delta E (CIE2000) of two colors.
"""
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cie2000(
color1_vector, color2_matrix, Kl=Kl, Kc=Kc, Kh=Kh)[0]
return numpy.asscalar(delta_e)
# noinspection PyPep8Naming
def delta_e_cmc(color1, color2, pl=2, pc=1):
Source : color_diff.py, with GNU General Public License v3.0, from HHHHhgqcdxhg (3 votes)
def delta_e_cmc(color1, color2, pl=2, pc=1):
"""
Calculates the Delta E (CMC) of two colors.
CMC values
Acceptability: pl=2, pc=1
Perceptability: pl=1, pc=1
"""
color1_vector = _get_lab_color1_vector(color1)
color2_matrix = _get_lab_color2_matrix(color2)
delta_e = color_diff_matrix.delta_e_cmc(
color1_vector, color2_matrix, pl=pl, pc=pc)[0]
return numpy.asscalar(delta_e)
Source : db.py, with GNU Affero General Public License v3.0, from HyeongseokSon1 (3 votes)
def on_epoch_end(self, epoch, logs={}):
self.et=time.time()-self.et
print("ending")
print(epoch)
logs['epoch']=epoch
logs['time']=datetime.utcnow()
logs['stepTime']=self.et
logs['acc']=np.asscalar(logs['acc'])
print(logs)
w=self.model.Params
fid=self.db.save_params(w,logs)
logs.update({'params':fid})
self.db.valid_log(logs)
def on_batch_begin(self, batch,logs={}):
Source : db.py, with GNU Affero General Public License v3.0, from HyeongseokSon1 (3 votes)
def on_batch_end(self, batch, logs={}):
self.t2=time.time()-self.t
logs['acc']=np.asscalar(logs['acc'])
#logs['loss']=np.asscalar(logs['loss'])
logs['step_time']=self.t2
logs['time']=datetime.utcnow()
logs['epoch']=self.epoch
logs['batch']=self.batch
self.db.train_log(logs)
Source : rewriter.py, with MIT License, from iCGY96 (3 votes)
def store_const_to_top(self, match_result):
top_node = list(match_result._pattern_to_op.values())[0]
kwargs = dict()
for pattern, op in match_result._pattern_to_op.items():
if pattern.name and pattern.type == 'Const':
if tensor_util.MakeNdarray(op.get_attr('value')).shape == (1, ):
kwargs[pattern.name] = np.asscalar(tensor_util.MakeNdarray(op.get_attr('value')))
else:
kwargs[pattern.name] = np.squeeze(tensor_util.MakeNdarray(op.get_attr('value')))
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def create_scope(self, result, pattern, scope_names_dict, parent_scope_name=''):
Source : nn.py, with MIT License, from JasonFengGit (3 votes)
def as_scalar(node):
"""
Returns the value of a Node as a standard Python number. This only works
for nodes with one element (e.g. SquareLoss and SoftmaxLoss, as well as
DotProduct with a batch size of 1 element).
"""
assert isinstance(node, Node), (
"Input must be a node object, instead has type {!r}".format(
type(node).__name__))
assert node.data.size == 1, (
"Node has shape {}, cannot convert to a scalar".format(
format_shape(node.data.shape)))
return np.asscalar(node.data)
Source : bounded_mlp_policy.py, with MIT License, from lorelupo (3 votes)
def eval_iw_stats(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
batch_size, horizon, _states, _actions, _rewards, _mask = (
self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon))
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
results = self._get_iw_stats(_states, _actions, _rewards, gamma, _mask)
return tuple(map(np.asscalar, results))
def eval_ret_stats(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
Source : bounded_mlp_policy.py, with MIT License, from lorelupo (3 votes)
def eval_ret_stats(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
behavioral=None, per_decision=False, normalize=False, truncate_at=np.infty):
batch_size, horizon, _states, _actions, _rewards, _mask = self._prepare_data(states, actions, rewards, lens_or_batch_size, horizon)
self._build(batch_size, horizon, behavioral, per_decision, normalize, truncate_at)
results = self._get_ret_stats(_states, _actions, _rewards, gamma, _mask)
return tuple(map(np.asscalar, results))
def eval_grad_J(self, states, actions, rewards, lens_or_batch_size=1, horizon=None, gamma=.99,
Source : utils.py, with Mozilla Public License 2.0, from mozilla (3 votes)
def default(self, obj):
try:
return np.asscalar(obj)
except (ValueError, IndexError, AttributeError, TypeError):
pass
return super().default(obj)
class ExpQueue:
Source : tensor_util.py, with MIT License, from PacktPublishing (3 votes)
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend([np.asscalar(v)
for x in proto_values
for v in [x.real, x.imag]])
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
Source : tensor_util.py, with MIT License, from PacktPublishing (3 votes)
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.dcomplex_val.extend([np.asscalar(v)
for x in proto_values
for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
Source : ppo.py, with MIT License, from StarBeta (3 votes)
def get_values(self, obs):
v_preds = self.sess.run(self.v_preds, feed_dict={self.obs: obs.reshape([1, -1])})
v_preds = np.asscalar(v_preds)
return v_preds
def get_variables(self):
Source : AugmentationGenerator.py, with MIT License, from suhitaghosh10 (3 votes)
def similarity3D_parameter_space_regular_sampling(thetaX, thetaY, thetaZ, tx, ty, tz, scale):
'''
Create a list representing a regular sampling of the 3D similarity transformation parameter space. As the
SimpleITK rotation parameterization uses the vector portion of a versor we don't have an
intuitive way of specifying rotations. We therefor use the ZYX Euler angle parametrization and convert to
versor.
Args:
thetaX, thetaY, thetaZ: numpy ndarrays with the Euler angle values to use.
tx, ty, tz: numpy ndarrays with the translation values to use.
scale: numpy array with the scale values to use.
Return:
List of lists representing the parameter space sampling (vx,vy,vz,tx,ty,tz,s).
'''
return [list(eul2quat(parameter_values[0], parameter_values[1], parameter_values[2])) +
[np.asscalar(p) for p in parameter_values[3:]] for parameter_values in
np.nditer(np.meshgrid(thetaX, thetaY, thetaZ, tx, ty, tz, scale))]
def eul2quat(ax, ay, az, atol=1e-8):
Source : model.py, with MIT License, from wenhuchen (2 votes)
def evaluate(val_dataloader, encoder_stat, encoder_prog):
mapping = {}
back_mapping = {}
all_idexes = set()
accuracy = 0
TP, TN, FN, FP = 0, 0, 0, 0
for val_step, batch in enumerate(val_dataloader):
batch = tuple(t.to(device) for t in batch)
input_ids, prog_ids, labels, index, true_lab, pred_lab = batch
enc_stat = encoder_stat(input_ids)
enc_prog, logits = encoder_prog(prog_ids, input_ids, enc_stat)
similarity = torch.sigmoid(logits)
#similarity = torch.sigmoid(classifier(torch.cat([enc_stat, enc_prog], -1)).squeeze())
similarity = similarity.cpu().data.numpy()
sim = (similarity > args.threshold).astype('float32')
labels = labels.cpu().data.numpy()
index = index.cpu().data.numpy()
true_lab = true_lab.cpu().data.numpy()
pred_lab = pred_lab.cpu().data.numpy()
TP += ((sim == 1) & (labels == 1)).sum()
TN += ((sim == 0) & (labels == 0)).sum()
FN += ((sim == 0) & (labels == 1)).sum()
FP += ((sim == 1) & (labels == 0)).sum()
if not args.voting:
for i, s, p, t, inp_id, prog_id in zip(index, similarity, pred_lab, true_lab, input_ids, prog_ids):
if args.analyze:
inp = back_to_words(inp_id)
r = back_to_words(prog_id[1:])
else:
inp = None
r = None
if i not in mapping:
mapping[i] = [s, numpy.asscalar(p), numpy.asscalar(t), inp, r]
else:
if s > mapping[i][0]:
mapping[i] = [s, numpy.asscalar(p), numpy.asscalar(t), inp, r]
else:
factor = 2
for i, s, p, t in zip(index, similarity, pred_lab, true_lab):
if i not in mapping:
if p == 1:
mapping[i] = [factor * s, s, t]
else:
mapping[i] = [-s, s, t]
else:
if p == 1:
mapping[i][0] += factor * s
else:
mapping[i][0] -= s
precision = TP / (TP + FP + 0.001)
recall = TP / (TP + FN + 0.001)
print("TP: {}, FP: {}, FN: {}, TN: {}. precision = {}: recall = {}".format(TP, FP, FN, TN, precision, recall))
results = []
if not args.voting:
success, fail = 0, 0
for i, line in mapping.items():
if line[1] == line[2]:
success += 1
else:
fail += 1
results.append({'pred': line[1], 'gold': line[2], 'fact': line[3], 'program': line[4]})
print("success = {}, fail = {}, accuracy = {}".format(success, fail, success / (success + fail + 0.001)))
accuracy = success / (success + fail + 0.001)
else:
success, fail = 0, 0
for i, ent in mapping.items():
if (ent[0] > 0 and ent[2] == 1) or (ent[0] < 0 and ent[2] == 0):
success += 1
else:
fail += 1
print("success = {}, fail = {}, accuracy = {}".format(success, fail, success / (success + fail + 0.001)))
accuracy = success / (success + fail + 0.001)
if args.analyze:
if args.do_test or args.do_small_test:
with open('/tmp/test_eval_results.json', 'w') as f:
json.dump(results, f, indent=2)
if args.do_val:
with open('/tmp/val_eval_results.json', 'w') as f:
json.dump(results, f, indent=2)
return precision, recall, accuracy
if args.resume:
Source : expreplay.py, with Apache License 2.0, from amiralansary (0 votes)
def _trigger(self):
# log player statistics in training
v = self._player_scores
dist = self._player_distError
try:
mean, max = v.average, v.max
self.trainer.monitors.put_scalar('expreplay/mean_score', mean)
self.trainer.monitors.put_scalar('expreplay/max_score', max)
mean, max = dist.average, dist.max
self.trainer.monitors.put_scalar('expreplay/mean_dist', mean)
self.trainer.monitors.put_scalar('expreplay/max_dist', max)
except Exception:
logger.exception("Cannot log training scores.")
v.reset()
dist.reset()
# monitor number of played games and successes of reaching the target
if self.player.num_games.count:
self.trainer.monitors.put_scalar('n_games',
np.asscalar(self.player.num_games.sum))
else:
self.trainer.monitors.put_scalar('n_games', 0)
if self.player.num_success.count:
self.trainer.monitors.put_scalar('n_success',
np.asscalar(self.player.num_success.sum))
self.trainer.monitors.put_scalar('n_success_ratio',
self.player.num_success.sum/self.player.num_games.sum)
else:
self.trainer.monitors.put_scalar('n_success', 0)
self.trainer.monitors.put_scalar('n_success_ratio',0)
# reset stats
self.player.reset_stat()
Source : expreplay.py, with Apache License 2.0, from amiralansary (0 votes)
def _trigger(self):
# log player statistics in training
v = self._player_scores
dist = self._player_distError
try:
mean, max = v.average, v.max
self.trainer.monitors.put_scalar('expreplay/mean_score', mean)
self.trainer.monitors.put_scalar('expreplay/max_score', max)
mean, max = dist.average, dist.max
self.trainer.monitors.put_scalar('expreplay/mean_dist', mean)
self.trainer.monitors.put_scalar('expreplay/max_dist', max)
except Exception:
logger.exception("Cannot log training scores.")
v.reset()
dist.reset()
# monitor number of played games and successes of reaching the target
if self.player.num_games.count:
self.trainer.monitors.put_scalar('n_games',
np.asscalar(self.player.num_games.sum))
else:
self.trainer.monitors.put_scalar('n_games', 0)
if self.player.num_success.count:
self.trainer.monitors.put_scalar('n_success',
np.asscalar(self.player.num_success.sum))
self.trainer.monitors.put_scalar('n_success_ratio',
self.player.num_success.sum / self.player.num_games.sum)
else:
self.trainer.monitors.put_scalar('n_success', 0)
self.trainer.monitors.put_scalar('n_success_ratio', 0)
# reset stats
self.player.reset_stat()
Source : metric.py, with MIT License, from autonomousvision (0 votes)
def add(self, es, ta, ma=None):
if ma is not None:
raise Exception('mask is not implemented')
es = es.ravel()
ta = ta.ravel()
if es.shape[0] != ta.shape[0]:
raise Exception('invalid shape of es, or ta')
if es.min() < 0 or es.max() > 1:
raise Exception('estimate has wrong value range')
ta_p = (ta == 1)
ta_n = (ta == 0)
es_p = es[ta_p]
es_n = es[ta_n]
for idx, wp in enumerate(self.thresholds):
wp = np.asscalar(wp)
self.tps[idx] += (es_p > wp).sum()
self.fps[idx] += (es_n > wp).sum()
self.fns[idx] += (es_p <= wp).sum()
self.tns[idx] += (es_n <= wp).sum()
self.n_pos += ta_p.sum()
self.n_neg += ta_n.sum()
def get(self):
Source : run_summarization.py, with Apache License 2.0, from bigabig (0 votes)
def run_eval(model, batcher, vocab):
"""Repeatedly runs eval iterations, logging to screen and writing summaries. Saves the model with the best loss seen so far."""
model.build_graph() # build the graph
saver = tf.train.Saver(max_to_keep=3) # we will keep 3 best checkpoints at a time
sess = tf.Session(config=util.get_config())
eval_dir = os.path.join(FLAGS.log_root, "eval") # make a subdir of the root dir for eval data
bestmodel_save_path = os.path.join(eval_dir, 'bestmodel') # this is where checkpoints of best models are saved
summary_writer = tf.summary.FileWriter(eval_dir)
running_avg_loss = 0 # the eval job keeps a smoother, running average loss to tell it when to implement early stopping
best_loss = None # will hold the best loss achieved so far
while True:
_ = util.load_ckpt(saver, sess) # load a new checkpoint
batch = batcher.next_batch() # get the next batch
# run eval on the batch
t0=time.time()
results = model.run_eval_step(sess, batch)
t1=time.time()
tf.logging.info('seconds for batch: %.2f', t1-t0)
# print the loss and coverage loss to screen
loss = results['loss']
tf.logging.info('loss: %f', loss)
if FLAGS.coverage:
coverage_loss = results['coverage_loss']
tf.logging.info("coverage_loss: %f", coverage_loss)
# add summaries
summaries = results['summaries']
train_step = results['global_step']
summary_writer.add_summary(summaries, train_step)
# calculate running avg loss
running_avg_loss = calc_running_avg_loss(np.asscalar(loss), running_avg_loss, summary_writer, train_step)
# If running_avg_loss is best so far, save this checkpoint (early stopping).
# These checkpoints will appear as bestmodel-<iteration_number> in the eval dir
if best_loss is None or running_avg_loss < best_loss:
tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path)
saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best')
best_loss = running_avg_loss
# flush the summary writer every so often
if train_step % 100 == 0:
summary_writer.flush()
def main_old(unused_argv):
Source : categorical.py, with MIT License, from buds-lab (0 votes)
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
facecolor=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["facecolor"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
Source : constraint.py, with Apache License 2.0, from cvxgrp (0 votes)
def prox_soc_base(v, t):
"""Proximal operator of the set indicator that :math:`\\|v_{1:n}\\_2 \\leq v_{n+1}`, where :math:`v` is a vector of
length n+1, `v_{1:n}` symbolizes its first n elements, and :math:`v_{n+1}` is its last element. This is equivalent
to the projection of :math:`v` onto the second-order cone :math:`C = {(u,s):\\|u\\|_2 \\leq s}`.
Parikh and Boyd (2013). "Proximal Algorithms." Foundations and Trends in Optimization. vol. 1, no. 3, Sect. 6.3.2.
"""
if sparse.issparse(v):
v = v.tocsr()
u = v[:-1] # u = (v_1,...,v_n)
s = v[-1] # s = v_{n+1}
s = np.asscalar(s.todense())
u_norm = sparse.linalg.norm(u,'fro')
if u_norm <= -s:
return np.zeros(v.shape)
elif u_norm <= s:
return v
else:
scale = (1 + s / u_norm) / 2
return scale * sparse.vstack((u, u_norm))
else:
u = v[:-1] # u = (v_1,...,v_n)
s = v[-1] # s = v_{n+1}
s = np.asscalar(s)
u_norm = np.linalg.norm(u,2)
if u_norm <= -s:
return np.zeros(v.shape)
elif u_norm <= s:
return v
else:
scale = (1 + s / u_norm) / 2
u_all = np.zeros(v.shape)
u_all[:-1] = u
u_all[-1] = u_norm
return scale * u_all
Source : pynwb_utils.py, with Apache License 2.0, from dandi (0 votes)
def _get_pynwb_metadata(path: Union[str, Path]) -> Dict[str, Any]:
out = {}
with NWBHDF5IO(path, "r", load_namespaces=True) as io:
nwb = io.read()
for key in metadata_nwb_file_fields:
value = getattr(nwb, key)
if isinstance(value, h5py.Dataset):
# serialize into a basic container (list), since otherwise
# it would be a closed Dataset upon return
value = list(value)
if isinstance(value, (list, tuple)) and all(
isinstance(v, bytes) for v in value
):
value = type(value)(v.decode("utf-8") for v in value)
out[key] = value
# .subject can be None as the test shows
for subject_feature in metadata_nwb_subject_fields:
out[subject_feature] = getattr(nwb.subject, subject_feature, None)
# Add a few additional useful fields
# "Custom" DANDI extension by Ben for now to contain additional metadata
# not present in nwb-schema
dandi_icephys = getattr(nwb, "lab_meta_data", {}).get(
"DandiIcephysMetadata", None
)
if dandi_icephys:
out.update(dandi_icephys.fields)
# Go through devices and see if there any probes used to record this file
probe_ids = [
np.asscalar(v.probe_id) # .asscalar to avoid numpy types
for v in getattr(nwb, "devices", {}).values()
if hasattr(v, "probe_id") # duck typing
]
if probe_ids:
out["probe_ids"] = probe_ids
# Counts
for f in metadata_nwb_computed_fields:
if f in ("nwb_version", "nd_types"):
continue
if not f.startswith("number_of_"):
raise NotImplementedError(
"ATM can only compute number_of_ fields. Got {}".format(f)
)
key = f[len("number_of_") :]
out[f] = len(getattr(nwb, key, []) or [])
# get external_file data:
out["external_file_objects"] = _get_image_series(nwb)
return out
def _get_image_series(nwb: pynwb.NWBFile) -> List[dict]:
Source : resnet50_trainer.py, with MIT License, from davidglavas (0 votes)
def RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog,
):
'''
Run one epoch of the trainer.
TODO: add checkpointing here.
'''
# TODO: add loading from checkpoint
log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
test_epoch_iters = int(args.test_epoch_size / total_batch_size / num_shards)
for i in range(epoch_iters):
# This timeout is required (temporarily) since CUDA-NCCL
# operators might deadlock when synchronizing between GPUs.
timeout = args.first_iter_timeout if i == 0 else args.timeout
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
train_fmt = "Training loss: {}, accuracy: {}"
log.info(train_fmt.format(loss, accuracy))
num_images = epoch * epoch_iters * total_batch_size
prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
learning_rate = workspace.FetchBlob(
data_parallel_model.GetLearningRateBlobNames(train_model)[0]
)
test_accuracy = 0
test_accuracy_top5 = 0
if test_model is not None:
# Run 100 iters of testing
ntests = 0
for _ in range(test_epoch_iters):
workspace.RunNet(test_model.net.Proto().name)
for g in test_model._devices:
test_accuracy += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy'
))
test_accuracy_top5 += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy_top5'
))
ntests += 1
test_accuracy /= ntests
test_accuracy_top5 /= ntests
else:
test_accuracy = (-1)
test_accuracy_top5 = (-1)
explog.log(
input_count=num_images,
batch_count=(i + epoch * epoch_iters),
additional_values={
'accuracy': accuracy,
'loss': loss,
'learning_rate': learning_rate,
'epoch': epoch,
'top1_test_accuracy': test_accuracy,
'top5_test_accuracy': test_accuracy_top5,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
def Train(args):
Source : rank_loss_operator_test.py, with MIT License, from davidglavas (0 votes)
def test_pair_wise_loss_gradient(self, X, label, dY, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('dY', dY)
workspace.FeedBlob('label', label)
net = core.Net('net')
net.PairWiseLossGradient(
['X', 'label', 'dY'],
['dX'],
)
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data',
[net], num_iter=1))
workspace.RunPlan(plan)
dx = workspace.FetchBlob('dX')
sign = 1 if label[0] > label[1] else -1
if label[0] == label[1]:
self.assertEqual(np.asscalar(dx[0]), 0)
return
self.assertAlmostEqual(
np.asscalar(dx[0]),
np.asscalar(-dY[0] * sign / (1 + np.exp(sign * (X[0] - X[1])))),
delta=1e-2 * abs(np.asscalar(dx[0])))
self.assertEqual(np.asscalar(dx[0]), np.asscalar(-dx[1]))
delta = 1e-3
up_x = np.array([[X[0] + delta], [X[1]]], dtype=np.float32)
down_x = np.array([[X[0] - delta], [X[1]]], dtype=np.float32)
workspace.FeedBlob('up_x', up_x)
workspace.FeedBlob('down_x', down_x)
new_net = core.Net('new_net')
new_net.PairWiseLoss(['up_x', 'label'], ['up_output'])
new_net.PairWiseLoss(['down_x', 'label'], ['down_output'])
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data', [new_net], num_iter=1))
workspace.RunPlan(plan)
down_output_pred = workspace.FetchBlob('down_output')
up_output_pred = workspace.FetchBlob('up_output')
np.testing.assert_allclose(
np.asscalar(dx[0]),
np.asscalar(
0.5 * dY[0] *
(up_output_pred[0] - down_output_pred[0]) / delta),
rtol=1e-2, atol=1e-2)
@serial.given(n=st.integers(0, 10), k=st.integers(1, 5), **hu.gcs_cpu_only)
Source : utilities.py, with GNU General Public License v3.0, from denisecailab (0 votes)
def xrconcat_recursive(var: Union[dict, list], dims: List[str]) -> xr.Dataset:
"""
Recursively concatenate `xr.DataArray` over multiple dimensions.
Parameters
----------
var : Union[dict, list]
Either a `dict` or a `list` of `xr.DataArray` to be concatenated. If a
`dict` then keys should be `tuple`, with length same as the length of
`dims` and values corresponding to the coordinates that uniquely
identify each `xr.DataArray`. If a `list` then each `xr.DataArray`
should contain valid coordinates for each dimensions specified in
`dims`.
dims : List[str]
Dimensions to be concatenated over.
Returns
-------
ds : xr.Dataset
The concatenated dataset.
Raises
------
NotImplementedError
if input `var` is neither a `dict` nor a `list`
"""
if len(dims) > 1:
if type(var) is dict:
var_dict = var
elif type(var) is list:
var_dict = {tuple([np.asscalar(v[d]) for d in dims]): v for v in var}
else:
raise NotImplementedError("type {} not supported".format(type(var)))
try:
var_dict = {k: v.to_dataset() for k, v in var_dict.items()}
except AttributeError:
pass
data = np.empty(len(var_dict), dtype=object)
for iv, ds in enumerate(var_dict.values()):
data[iv] = ds
index = pd.MultiIndex.from_tuples(list(var_dict.keys()), names=dims)
var_ps = pd.Series(data=data, index=index)
xr_ls = []
for idx, v in var_ps.groupby(level=dims[0]):
v.index = v.index.droplevel(dims[0])
xarr = xrconcat_recursive(v.to_dict(), dims[1:])
xr_ls.append(xarr)
return xr.concat(xr_ls, dim=dims[0])
else:
if type(var) is dict:
var = list(var.values())
return xr.concat(var, dim=dims[0])
def update_meta(dpath, pattern=r"^minian\.nc$", meta_dict=None, backend="netcdf"):
Source : test_heterograph.py, with Apache License 2.0, from dmlc (0 votes)
def test_convert(idtype):
hg = create_test_heterograph(idtype)
hs = []
for ntype in hg.ntypes:
h = F.randn((hg.number_of_nodes(ntype), 5))
hg.nodes[ntype].data['h'] = h
hs.append(h)
hg.nodes['user'].data['x'] = F.randn((3, 3))
ws = []
for etype in hg.canonical_etypes:
w = F.randn((hg.number_of_edges(etype), 5))
hg.edges[etype].data['w'] = w
ws.append(w)
hg.edges['plays'].data['x'] = F.randn((4, 3))
g = dgl.to_homogeneous(hg, ndata=['h'], edata=['w'])
assert g.idtype == idtype
assert g.device == hg.device
assert F.array_equal(F.cat(hs, dim=0), g.ndata['h'])
assert 'x' not in g.ndata
assert F.array_equal(F.cat(ws, dim=0), g.edata['w'])
assert 'x' not in g.edata
src, dst = g.all_edges(order='eid')
src = F.asnumpy(src)
dst = F.asnumpy(dst)
etype_id, eid = F.asnumpy(g.edata[dgl.ETYPE]), F.asnumpy(g.edata[dgl.EID])
ntype_id, nid = F.asnumpy(g.ndata[dgl.NTYPE]), F.asnumpy(g.ndata[dgl.NID])
for i in range(g.number_of_edges()):
srctype = hg.ntypes[ntype_id[src[i]]]
dsttype = hg.ntypes[ntype_id[dst[i]]]
etype = hg.etypes[etype_id[i]]
src_i, dst_i = hg.find_edges([eid[i]], (srctype, etype, dsttype))
assert np.asscalar(F.asnumpy(src_i)) == nid[src[i]]
assert np.asscalar(F.asnumpy(dst_i)) == nid[dst[i]]
mg = nx.MultiDiGraph([
('user', 'user', 'follows'),
('user', 'game', 'plays'),
('user', 'game', 'wishes'),
('developer', 'game', 'develops')])
for _mg in [None, mg]:
hg2 = dgl.to_heterogeneous(
g, hg.ntypes, hg.etypes,
ntype_field=dgl.NTYPE, etype_field=dgl.ETYPE, metagraph=_mg)
assert hg2.idtype == hg.idtype
assert hg2.device == hg.device
assert set(hg.ntypes) == set(hg2.ntypes)
assert set(hg.canonical_etypes) == set(hg2.canonical_etypes)
for ntype in hg.ntypes:
assert hg.number_of_nodes(ntype) == hg2.number_of_nodes(ntype)
assert F.array_equal(hg.nodes[ntype].data['h'], hg2.nodes[ntype].data['h'])
for canonical_etype in hg.canonical_etypes:
src, dst = hg.all_edges(etype=canonical_etype, order='eid')
src2, dst2 = hg2.all_edges(etype=canonical_etype, order='eid')
assert F.array_equal(src, src2)
assert F.array_equal(dst, dst2)
assert F.array_equal(hg.edges[canonical_etype].data['w'], hg2.edges[canonical_etype].data['w'])
# hetero_from_homo test case 2
g = dgl.graph(([0, 1, 2, 0], [2, 2, 3, 3]), idtype=idtype, device=F.ctx())
g.ndata[dgl.NTYPE] = F.tensor([0, 0, 1, 2])
g.edata[dgl.ETYPE] = F.tensor([0, 0, 1, 2])
hg = dgl.to_heterogeneous(g, ['l0', 'l1', 'l2'], ['e0', 'e1', 'e2'])
assert hg.idtype == idtype
assert hg.device == g.device
assert set(hg.canonical_etypes) == set(
[('l0', 'e0', 'l1'), ('l1', 'e1', 'l2'), ('l0', 'e2', 'l2')])
assert hg.number_of_nodes('l0') == 2
assert hg.number_of_nodes('l1') == 1
assert hg.number_of_nodes('l2') == 1
assert hg.number_of_edges('e0') == 2
assert hg.number_of_edges('e1') == 1
assert hg.number_of_edges('e2') == 1
assert F.array_equal(hg.ndata[dgl.NID]['l0'], F.tensor([0, 1], F.int64))
assert F.array_equal(hg.ndata[dgl.NID]['l1'], F.tensor([2], F.int64))
assert F.array_equal(hg.ndata[dgl.NID]['l2'], F.tensor([3], F.int64))
assert F.array_equal(hg.edata[dgl.EID][('l0', 'e0', 'l1')], F.tensor([0, 1], F.int64))
assert F.array_equal(hg.edata[dgl.EID][('l0', 'e2', 'l2')], F.tensor([3], F.int64))
assert F.array_equal(hg.edata[dgl.EID][('l1', 'e1', 'l2')], F.tensor([2], F.int64))
# hetero_from_homo test case 3
mg = nx.MultiDiGraph([
('user', 'movie', 'watches'),
('user', 'TV', 'watches')])
g = dgl.graph(((0, 0), (1, 2)), idtype=idtype, device=F.ctx())
g.ndata[dgl.NTYPE] = F.tensor([0, 1, 2])
g.edata[dgl.ETYPE] = F.tensor([0, 0])
for _mg in [None, mg]:
hg = dgl.to_heterogeneous(g, ['user', 'TV', 'movie'], ['watches'], metagraph=_mg)
assert hg.idtype == g.idtype
assert hg.device == g.device
assert set(hg.canonical_etypes) == set(
[('user', 'watches', 'movie'), ('user', 'watches', 'TV')])
assert hg.number_of_nodes('user') == 1
assert hg.number_of_nodes('TV') == 1
assert hg.number_of_nodes('movie') == 1
assert hg.number_of_edges(('user', 'watches', 'TV')) == 1
assert hg.number_of_edges(('user', 'watches', 'movie')) == 1
assert len(hg.etypes) == 2
# hetero_to_homo test case 2
hg = dgl.heterograph({
('_U', '_E', '_V'): ([0, 1], [0, 1])
}, {'_U': 2, '_V': 3}, idtype=idtype, device=F.ctx())
g = dgl.to_homogeneous(hg)
assert hg.idtype == g.idtype
assert hg.device == g.device
assert g.number_of_nodes() == 5
# hetero_to_subgraph_to_homo
hg = dgl.heterograph({
('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2])
}, idtype=idtype, device=F.ctx())
hg.nodes['user'].data['h'] = F.copy_to(
F.tensor([[1, 0], [0, 1], [1, 1]], dtype=idtype), ctx=F.ctx())
sg = dgl.node_subgraph(hg, {'user': [1, 2]})
assert len(sg.ntypes) == 2
assert len(sg.etypes) == 2
assert sg.num_nodes('user') == 2
assert sg.num_nodes('game') == 0
g = dgl.to_homogeneous(sg, ndata=['h'])
assert 'h' in g.ndata.keys()
assert g.num_nodes() == 2
@unittest.skipIf(F._default_context_str == 'gpu', reason="Test on cpu is enough")
Source : continuous.py, with MIT License, from dragonfly (0 votes)
def grad_logp(self, x):
""" Returns gradient of log pdf at x """
return np.asscalar(-(x - self.mean)/self.var)
def get_mean(self):
Source : model.py, with MIT License, from dragonfly (0 votes)
def pdf(self, x):
""" Returns pdf of distribution at x """
return np.asscalar(self._pdf(x))
def logp(self, x):
Source : plot_utils.py, with MIT License, from dragonfly (0 votes)
def gen_curves(
plot_order,
results,
x_label,
y_label,
plot_legends,
plot_colours,
plot_markers,
plot_linestyles,
options,
x_bounds=None,
y_bounds=None,
outlier_frac=0.1,
log_y=False,
log_x=False,
plot_title=None,
study_name=None,
num_workers=None,
time_distro_str=None,
fill_error=False,
err_bar_freq=5,
plot_type='semilogy',
to_plot_legend=True,
true_maxval=None,
):
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=unused-argument
# pylint: disable=unused-variable
""" Plots the curves given the experiment results.
"""
num_methods, num_experiments = results['curr_opt_vals'].shape
if true_maxval is None:
true_maxval = np.asscalar(results['true_maxval'])
if true_maxval is None or not np.isfinite(true_maxval):
to_plot_regret = False
else:
to_plot_regret = True
plot_type = 'semilogy'
y_label = 'Regret'
methods = [str(method).strip() for method in results['methods']]
# Exclude incomplete experiments if present in any of the methods
for i in range(num_methods):
for vals in results['curr_opt_vals'][i, num_experiments::-1]:
if isinstance(vals, str) or vals.dtype == np.dtype('<U1'):
num_experiments = num_experiments - 1
if x_bounds is None or x_bounds == []:
x_bounds = [0.0, np.asscalar(results['max_capital'])]
if log_x:
grid_pts = np.logspace(
np.log10(x_bounds[0]),
np.log10(x_bounds[1]),
num=NUM_GRID_PTS
)
else:
grid_pts = np.linspace(
x_bounds[0],
x_bounds[1],
num=NUM_GRID_PTS
)
err_bar_idx_half_gap = 0.5 * NUM_GRID_PTS / NUM_ERR_BARS
err_bar_idxs = np.round(np.linspace(
err_bar_idx_half_gap,
NUM_GRID_PTS - err_bar_idx_half_gap,
num=NUM_ERR_BARS
)).astype(np.int)
unordered_plot_means = np.zeros((num_methods, NUM_GRID_PTS))
unordered_plot_stds = np.zeros((num_methods, NUM_GRID_PTS))
init_opt_vals = None
for i in range(num_methods):
meth_curr_opt_vals = results['curr_true_opt_vals'][i, :]
meth_costs = results['query_eval_times'][i, :]
cum_costs = results['query_receive_times'][i, :]
meth_plot_mean, meth_plot_std = get_plot_info(
meth_curr_opt_vals,
cum_costs,
meth_costs,
grid_pts,
outlier_frac,
init_opt_vals,
num_experiments
)
if to_plot_regret:
unordered_plot_means[i, :] = true_maxval - meth_plot_mean
else:
unordered_plot_means[i, :] = meth_plot_mean
unordered_plot_stds[i, :] = meth_plot_std
# re-order plot_means
plot_means = np.zeros((num_methods, NUM_GRID_PTS))
plot_stds = np.zeros((num_methods, NUM_GRID_PTS))
for plot_idx, method in enumerate(plot_order):
saved_order = methods.index(method)
plot_means[plot_idx, :] = unordered_plot_means[saved_order, :]
plot_stds[plot_idx, :] = unordered_plot_stds[saved_order, :]
# Print out some statistics about plot_means
all_plot_vals = plot_means.flatten()
all_plot_vals = all_plot_vals[np.isfinite(all_plot_vals)]
percentiles = [0.001, 0.1, 0.5, 0.9, 0.999]
percentile_vals = np.percentile(all_plot_vals, percentiles)
print_percentile_list = ['%0.3f:%0.4f'%(p, v) for (p, v) in
zip(percentiles, percentile_vals)]
print('Percentiles:: %s.'%(', '.join(print_percentile_list)))
err_bar_pts = grid_pts[err_bar_idxs]
err_bar_means = plot_means[:, err_bar_idxs]
err_bar_stds = plot_stds[:, err_bar_idxs]
if plot_type == 'plot':
plot_func = plt.plot
elif plot_type == 'loglog':
plot_func = plt.loglog
elif plot_type == 'semilogy':
plot_func = plt.semilogy
elif plot_type == 'semilogx':
plot_func = plt.semilogx
else:
raise ValueError('Unknown plot function.')
first_lines_for_legend = []
# First the bars for the legend
# for i, method in enumerate(methods):
for plot_idx, method in enumerate(plot_order):
# First plot the error bars
curr_leg_line, = plot_func(
err_bar_pts,
err_bar_means[plot_idx],
marker=plot_markers[plot_idx],
color=plot_colours[plot_idx],
label=plot_legends[plot_idx],
linewidth=LINE_WIDTH,
linestyle=plot_linestyles[plot_idx],
markersize=MARKER_SIZE,
)
first_lines_for_legend.append(curr_leg_line)
if to_plot_legend:
plt.legend(loc=options.legend_location, fontsize='large')
# Now plot the whole curve
for plot_idx, method in enumerate(plot_order):
plot_func(
grid_pts,
plot_means[plot_idx, :],
marker=',',
color=plot_colours[plot_idx],
linestyle=plot_linestyles[plot_idx],
linewidth=LINE_WIDTH,
)
# Now do the error bars
for plot_idx, method in enumerate(plot_order):
if not fill_error:
plt.errorbar(
err_bar_pts,
err_bar_means[plot_idx],
err_bar_stds[plot_idx],
marker=plot_markers[plot_idx],
color=plot_colours[plot_idx],
linewidth=LINE_WIDTH,
markersize=MARKER_SIZE,
linestyle='',
)
else:
plt.fill_between(
grid_pts,
plot_means[plot_idx, :] - plot_stds[plot_idx, :],
plot_means[plot_idx, :] + plot_stds[plot_idx, :],
color=transparent(*plot_colours[plot_idx], opacity=0.3),
)
if plot_title is not None:
plt.title(plot_title, fontsize=TITLE_FONT_SIZE)
# if to_plot_regret:
# plt.title('{}; workers={}; {}; Exps={}; Max Val={}'.format(study_name, num_workers,
# time_distro_str, num_experiments, true_maxval).replace('_', '-'))
# else:
# plt.title('{}; Workers={}; {}; Exps={}'.format(study_name, num_workers,
# time_distro_str, num_experiments).replace('_', '-'))
plt.xlabel(x_label, fontsize=AXIS_FONT_SIZE)
plt.ylabel(y_label, fontsize=AXIS_FONT_SIZE)
if y_bounds is not None:
plt.ylim(y_bounds)
# Remove duplicate lines
for leg_line in first_lines_for_legend:
leg_line.remove()
plt.draw()
plt.show()
# Utilities to read and process data -----------------------------------------------
def get_plot_info(
Source : plot_utils.py, with MIT License, from dragonfly (0 votes)
def plot_results(results, plot_order, method_legend_colour_marker_dict, x_label,
y_label, x_bounds=None, y_bounds=None, to_plot_legend=True,
true_maxval=None, outlier_frac=0.0, plot_title=None, options=None):
# pylint: disable=too-many-arguments
""" Plots the results using Matplotlib. """
if options is None:
options = get_plot_options()
if plot_title == None:
plot_title = options.title
if x_bounds is None:
x_bounds = []
# Get order of legends, colours, etc.
plot_legends, plot_colours, plot_markers, plot_linestyles = \
generate_legend_marker_colour_orders(plot_order, method_legend_colour_marker_dict)
return gen_curves(
plot_order,
results,
x_label,
y_label,
plot_legends,
plot_colours,
plot_markers,
plot_linestyles,
options,
x_bounds=x_bounds,
y_bounds=y_bounds,
outlier_frac=outlier_frac,
log_y=True,
plot_title=plot_title,
study_name=str(np.asscalar(results['study_name'])),
num_workers=str(np.asscalar(results['num_workers'])),
time_distro_str=str(np.asscalar(results['time_distro_str'])),
plot_type=options.type,
to_plot_legend=to_plot_legend,
true_maxval=true_maxval,
)
Source : Marshallers.py, with Apache License 2.0, from Enigma644 (0 votes)
def read(self, f, dsetgrp, attributes, options):
# Use the parent class version to read it and do most of the
# work.
data = NumpyScalarArrayMarshaller.read(self, f, dsetgrp,
attributes, options)
# The type string determines how to convert it back to a Python
# type (just look up the entry in types). As it might be
# returned as an ndarray, it needs to be run through
# np.asscalar. Now, since int and long are unified in Python 3.x
# and the size of int in Python 2.x is not always the same, if
# the type_string is 'int', then we need to check to see if it
# can fit into an int if we are in Python 2.x. If it will fit,
# it is returned as an int. If it would not fit, it is returned
# as a long.
type_string = convert_attribute_to_string( \
attributes['Python.Type'])
if type_string in self.typestring_to_type:
tp = self.typestring_to_type[type_string]
sdata = np.asscalar(data)
if sys.hexversion >= 0x03000000 or tp != int:
return tp(sdata)
else:
num = long(sdata)
if num > sys.maxint or num < -(sys.maxint - 1):
return num
else:
return int(num)
else:
# Must be some other type, so return it as is.
return data
class PythonStringMarshaller(NumpyScalarArrayMarshaller):
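Since numpy.asscalar is gone from current NumPy releases, many of the call sites shown above will not run unmodified on a recent install. A small compatibility helper, in the same spirit as the toscalar function from util_misc.py above, is one way to keep such code portable. This is a sketch under that assumption, not code from any of the listed projects:

import numpy as np

def to_python_scalar(a):
    """Return a plain Python scalar for a size-1 array or NumPy scalar, else return a unchanged."""
    try:
        return a.item()   # ndarray.item() exists on all NumPy versions
    except AttributeError:
        return a          # already a plain Python object; pass it through

x = to_python_scalar(np.float64(3.0))   # -> 3.0, a Python float
y = to_python_scalar(42)                # -> 42, returned unchanged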