Here are examples of the Python API numpy.load taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
182 Examples
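Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the save/load round trip that most of them build on; the file names are illustrative.

import numpy as np

# Save an array to a binary .npy file, then load it back.
a = np.arange(12).reshape(3, 4)
np.save('example.npy', a)           # writes example.npy
b = np.load('example.npy')          # returns an ndarray with the same dtype and shape
assert (a == b).all()

# Several arrays can be bundled into one .npz archive and read back by key.
np.savez('example.npz', x=a, y=a * 2)
with np.load('example.npz') as npz:
    x, y = npz['x'], npz['y']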
5
Example 1
def load_npy_or_text(self, filename):
    '''Load an array from an existing .npy file, or read a text file and
    convert to a NumPy array. In either case, return a NumPy array. If a
    pickled NumPy dataset is found, memory-map it read-only. If the specified
    file does not contain a pickled NumPy array, attempt to read the file using
    numpy.loadtxt(filename).'''
    if self.is_npy(filename):
        return numpy.load(filename, 'r')
    else:
        return numpy.loadtxt(filename)
5
Example 2
Project: anna Source File: supervised_data_loader.py
def _load_with_folds(self, fold):
    X = numpy.load(os.path.join(self.dataset_path, 'X.npy'))
    y = numpy.load(os.path.join(self.dataset_path, 'y.npy'))
    folds = numpy.load(os.path.join(self.dataset_path, 'folds.npy'))
    assert fold <= folds.max(), \
        'Fold number exceeds available number of folds. Please try again.'
    mask = (folds == fold)
    X = X[mask, :, :, :]
    y = y[mask]
    # Create supervised data container and return it
    supervised_data_container = SupervisedDataContainer(X, y)
    return supervised_data_container
4
Example 3
def load_params(path, params):
    logging.info("Loading model from file '%s'...", path)
    pp = numpy.load(path)
    for k in pp:
        params[k] = pp[k]
    path = "%s.pkl" % (path,)
    logging.info("Loading model from file '%s'...", path)
    with open(path, 'rb') as fin:
        data = pickle.load(fin)
    for k in ['dim_proj_chars', 'char_dict', 'pos_dict', 'word_dict']:
        params[k] = data[k]
    return params
4
Example 4
Project: CTC-LSTM Source File: timit.py
def setup_datastream(path, batch_size, sort_batch_count, valid=False):
    A = numpy.load(os.path.join(path, ('valid_x_raw.npy' if valid else 'train_x_raw.npy')))
    B = numpy.load(os.path.join(path, ('valid_phn.npy' if valid else 'train_phn.npy')))
    C = numpy.load(os.path.join(path, ('valid_seq_to_phn.npy' if valid else 'train_seq_to_phn.npy')))
    D = [B[x[0]:x[1], 2] for x in C]
    ds = IndexableDataset({'input': A, 'output': D})
    stream = DataStream(ds, iteration_scheme=ShuffledExampleScheme(len(A)))
    stream = Batch(stream, iteration_scheme=ConstantScheme(batch_size * sort_batch_count))
    comparison = _balanced_batch_helper(stream.sources.index('input'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)
    stream = Batch(stream, iteration_scheme=ConstantScheme(batch_size, num_examples=len(A)))
    stream = Padding(stream, mask_sources=['input', 'output'])
    return ds, stream
3
Example 5
Project: drmad Source File: loaddataSubClass.py
def loadCifar10():
    # with open(train_path, "rb") as f:
    #     data_train = cPickle.load(f)
    #     label_train = cPickle.load(f)
    # with open(test_path, "rb") as f:
    #     data_test = cPickle.load(f)
    #     label_test = cPickle.load(f)
    npzfile = np.load(train_path)
    data_train = npzfile['X']
    label_train = npzfile['y']
    npzfile = np.load(test_path)
    data_test = npzfile['X']
    label_test = npzfile['y']
    return data_train, label_train, data_test, label_test
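The train_path and test_path archives that Example 5 reads would typically have been written with numpy.savez using matching keyword names. A hypothetical sketch of the saving side (the array contents and file name are illustrative, not taken from the drmad project):

import numpy as np

X_train = np.random.rand(50000, 3, 32, 32).astype('float32')  # placeholder CIFAR-10-shaped data
y_train = np.random.randint(0, 10, size=50000)

# Keyword arguments become the keys that np.load(...)['X'] / ['y'] look up later.
np.savez('cifar10_train.npz', X=X_train, y=y_train)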
3
Example 6
def _load(self):
    with open(self._npz_path, 'rb') as f:
        dic = np.load(f)
        x = dic['x']
        y = dic['y']
    return x, y
3
Example 7
Project: learning-wordnet Source File: utils.py
def numpy_load_fnc(key):
    if os.path.exists(os.path.join(c["CACHE_DIR"], key + ".npz")):
        # Listed numpy array
        savez_file = np.load(os.path.join(c["CACHE_DIR"], key + ".npz"))
        ar = []
        for k in sorted(list((int(x) for x in savez_file))):
            logger.info("Loading " + str(k) + " from " + str(key) + " " + str(savez_file[str(k)].shape))
            ar.append(savez_file[str(k)])
        return ar
    else:
        return np.load(os.path.join(c["CACHE_DIR"], key + ".npy"))
3
Example 8
def get_mean(dataset_path='data/data.hf5'):
    try:
        h5f = h5py.File(dataset_path, 'r')
        return h5f['mean'][:]
    except IOError:
        return np.load("data/mean.npy")
3
Example 9
Project: tweet2vec Source File: w2v.py
def load_params_shared(path):
    """
    Load previously saved model
    """
    params = OrderedDict()
    with open(path,'r') as f:
        npzfile = np.load(f)
        for kk, vv in npzfile.iteritems():
            params[kk] = theano.shared(vv, name=kk)
    return params
3
Example 10
Project: Deep-Learning-TensorFlow Source File: utilities.py
def get_weights_as_images(weights_npy, width, height, outdir='img/',
                          n_images=10, img_type='grey'):
    """Create and save the weights of the hidden units as images.

    :param weights_npy: path to the weights .npy file
    :param width: width of the images
    :param height: height of the images
    :param outdir: output directory
    :param n_images: number of images to generate
    :param img_type: 'grey' or 'color' (RGB)
    """
    weights = np.load(weights_npy)
    perm = np.random.permutation(weights.shape[1])[:n_images]
    for p in perm:
        w = np.array([i[p] for i in weights])
        image_path = outdir + 'w_{}.png'.format(p)
        gen_image(w, width, height, image_path, img_type)
3
Example 11
def test_single_slice(self):
    stack = dcmstack.DicomStack()
    stack.add_dcm(self.inputs[0])
    affine = stack.get_affine()
    ref = np.load(path.join(self.data_dir, 'single_slice_aff.npy'))
    ok_(np.allclose(affine, ref))
3
Example 12
def metadata(self, filename):
    npzfile = numpy.load(self._loadpath(filename))
    metadata = (npzfile['metadata'].tolist()
                if 'metadata' in list(npzfile.keys()) else {})
    # Numpy load may return a Python dictionary.
    if not isinstance(npzfile, dict): npzfile.close()
    return metadata
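Example 12's comment points out that numpy.load can hand back a pickled Python object rather than an NpzFile. In current NumPy versions that path is opt-in: allow_pickle=True must be passed, and the object comes back wrapped in a 0-d object array that is unwrapped with .item(). A minimal sketch, independent of the project above (the file name and dictionary are illustrative):

import numpy as np

meta = {'subject': 'S01', 'fs': 250.0}
np.save('metadata.npy', meta, allow_pickle=True)    # pickles the dict inside the .npy file

loaded = np.load('metadata.npy', allow_pickle=True)
metadata = loaded.item()    # 0-d object array -> the original dict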
3
Example 13
Project: PyFR Source File: test_ele_mats.py
def test_hex_gleg_ord3():
    # Config for a third order DG scheme
    cfg = Inifile()
    cfg.set('solver', 'order', '3')
    cfg.set('solver-interfaces-quad', 'flux-pts', 'gauss-legendre')
    cfg.set('solver-elements-hex', 'soln-pts', 'gauss-legendre')
    # Generate the shape
    hs = HexShape(None, cfg)
    # Load and import the reference values
    fobj = BytesIO(pkgutil.get_data(__name__, 'hex-gleg-ord3.npz'))
    refm = np.load(fobj)
    assert np.allclose(refm['m0'], hs.m0)
    assert np.allclose(refm['m1'], hs.m1)
    assert np.allclose(refm['m2'], hs.m2)
    assert np.allclose(refm['m3'], hs.m3)
3
Example 14
def load_images(path):
    """Load the built images for training."""
    imgs = get_files(path, lambda x: 'images' in x, recursive=False)
    img_arr = np.vstack([np.load(f) for f in imgs])
    labels = get_files(path, lambda x: 'labels' in x, recursive=False)
    labels_arr = np.concatenate([np.load(f) for f in labels])
    return img_arr, labels_arr
3
Example 15
Project: LASIF Source File: adjoint_src_manager.py
def get_adjoint_src(self, channel_id, starttime, endtime):
    filename = os.path.join(self.directory, self._get_tag(
        channel_id, starttime, endtime))
    if not os.path.exists(filename):
        return None
    return np.load(filename)
3
Example 16
@standard_classification_loader(name)
def load(force_contiguous=True):
    data = np.load(_datafile)
    features = data['features']
    labels = np.array([label_names.index(lab) for lab in data['labels']])
    if force_contiguous:
        features = np.ascontiguousarray(features)
        labels = np.ascontiguousarray(labels)
    return features, labels
3
Example 17
Project: diagnose-heart Source File: utils.py
def load_params(model, fn):
    if 'npz' in fn:
        with np.load(fn) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        nn.layers.set_all_param_values(model, param_values)
    else:
        with open(fn, 'r') as re:
            import pickle
            nn.layers.set_all_param_values(model, pickle.load(re))
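The 'arr_0', 'arr_1', ... keys that Example 17 iterates over are what numpy.savez generates for positional (unnamed) arguments, which is how Lasagne-style parameter files are commonly written. A sketch of the saving side under that assumption (the parameter list here is a stand-in for something like lasagne.layers.get_all_param_values(model)):

import numpy as np

param_values = [np.zeros((128, 64)), np.zeros(64)]   # placeholder parameter arrays

# Positional arguments are stored under arr_0, arr_1, ... inside the .npz archive.
np.savez('model.npz', *param_values)

with np.load('model.npz') as f:
    restored = [f['arr_%d' % i] for i in range(len(f.files))]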
3
Example 18
def load_mnist(path):
    mnist = np.load(path)
    train_set_x = mnist['train_data']
    train_set_y = mnist['train_labels']
    test_set_x = mnist['test_data']
    test_set_y = mnist['test_labels']
    train_set_x, train_set_y = _shared_dataset((train_set_x, train_set_y))
    test_set_x, test_set_y = _shared_dataset((test_set_x, test_set_y))
    valid_set_x, valid_set_y = test_set_x, test_set_y
    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
3
Example 19
def load_params(path, params):
    pp = numpy.load(path)
    for kk, vv in iteritems(params):
        if kk not in pp:
            raise Warning('%s is not in the archive' % kk)
        params[kk] = pp[kk]
    return params
3
Example 20
Project: bctpy Source File: load_samples.py
def load_sample_group_fmri():
    f = np.load(mat_path('sample_group_fmri.npy'))
    import functools

    def compose(*functions):
        return functools.reduce(lambda f, g: lambda x: f(g(x)), functions)

    thresh_fun = functools.partial(bct.threshold_proportional, p=.5)
    return np.transpose(list(map(compose(bct.normalize, thresh_fun),
                                 (f[:, :, i] for i in range(f.shape[2])))),
                        (1, 2, 0))
3
Example 21
def __init__(self):
    self.images = {}
    self.training = True
    self.people = []
    self.svm = None
    if args.unknown:
        self.unknownImgs = np.load("./examples/web/unknown.npy")
3
Example 22
def load_params(path, params):
    pp = numpy.load(path)
    for kk, vv in params.iteritems():
        if kk not in pp:
            warnings.warn('%s is not in the archive' % kk)
            continue
        params[kk] = pp[kk]
    return params
3
Example 23
Project: rf_helicopter Source File: pytests.py
def test_world_loading():
    world = helicopter_world(file_name=os.path.join(os.getcwd(),
                                                    "Tests",
                                                    "Test_Track.npy"))
    loaded_track = np.load(os.path.join(os.getcwd(),
                                        "Tests",
                                        "Test_Track.npy"))
    assert loaded_track.shape == world.track.shape, \
        "Loading Track into World Failed"
3
Example 24
def _lazy_init(self):
    if self.invCovMatFile:
        self.norm = numpy.load(self.invCovMatFile)
    else:
        self.NoSqrt = True
        self.norm = create_inverse_covariance_norm(self.training_set.data_table)
    if self.centerFile:
        self.center = numpy.load(self.centerFile)
    else:
        self.center = average_vector(self.training_set.data_table)
3
Example 25
Project: tvb-library Source File: lookup_tables.py
@staticmethod
def populate_table(result, source_file):
    source_full_path = try_get_absolute_path("tvb_data.tables", source_file)
    zip_data = numpy.load(source_full_path)
    result.df = zip_data['df']
    result.xmin, result.xmax = zip_data['min_max']
    result.data = zip_data['f']
    return result
3
Example 26
def _init_network(self):
    """Define model and initialize weights."""
    self.network = Network(self.problem.layers)
    self.weights = Matrices(self.network.shapes)
    if self.load:
        loaded = np.load(self.load)
        assert loaded.shape == self.weights.shape, (
            'weights to load must match problem definition')
        self.weights.flat = loaded
    else:
        self.weights.flat = np.random.normal(
            self.problem.weight_mean, self.problem.weight_scale,
            len(self.weights.flat))
3
Example 27
Project: painters Source File: cnn_embedding.py
def get_embedded_train_val_split(layer, model_name=MODEL_NAME):
    assert layer in LAYER_RESULT_FUNCS.keys()
    model_path = join(MODELS_DIR, model_name)
    model_name_no_ext, _ = splitext(model_name)
    embedded_data_dir = join(
        DATA_DIR, 'embedding_{:s}'.format(model_name_no_ext))
    train_val_split_file = join(
        embedded_data_dir, 'train_val_split_{:s}.npz'.format(layer))
    if isfile(train_val_split_file):
        split = np.load(train_val_split_file)
        return split['arr_0'], split['arr_1'], \
            split['arr_2'], split['arr_3'], \
            split['arr_4'], split['arr_5']
    else:
        return _create_embedded_train_val_split(
            layer, model_path, train_val_split_file)
3
Example 28
def main():
    data = np.load('GTEx_reqnorm_float64.npy')
    data_means = data.mean(axis=1)
    data_stds = data.std(axis=1) + 1e-3
    data = (data - data_means.reshape((10463, 1))) / data_stds.reshape((10463, 1))
    X, Y = data[:943, :].transpose(), data[943:, :].transpose()
    np.save('GTEx_X_float64.npy', X)
    np.save('GTEx_Y_float64.npy', Y)
    np.save('GTEx_Y_0-4760_float64.npy', Y[:, 0:4760])
    np.save('GTEx_Y_4760-9520_float64.npy', Y[:, 4760:9520])
3
Example 29
def load_features(self):
    with np.load(self.input_path) as zdata:
        assert len(zdata.keys()) == 1
        image_data = zdata[zdata.keys()[0]]
    image_data -= self.mean
    image_data /= self.std
    channels, width, height = image_data.shape
    image_data = image_data.reshape((channels, width * height, 1))
    return image_data
3
Example 30
def __call__(self, fname, datadir=None):
    if datadir is None:
        datadir = self._default_path
    fpath = os.path.join(datadir, fname)
    try:
        return self._cache[fpath]
    except KeyError:
        pass
    d = np.load(fpath)
    ret = Bunch(d)
    self._cache[fpath] = ret
    return ret
3
Example 31
def fetch_dataset(url):
    datapath = download(url)
    fname = osp.basename(url)
    extension = osp.splitext(fname)[-1]
    assert extension in [".npz", ".pkl"]
    if extension == ".npz":
        return np.load(datapath)
    elif extension == ".pkl":
        with open(datapath, 'rb') as fin:
            return pickle.load(fin)
    else:
        raise NotImplementedError
3
Example 32
Project: eofs Source File: reference.py
def _retrieve_test_field(name):
    filename = os.path.join(_test_data_path(), '{!s}.npy'.format(name))
    try:
        field = np.load(filename)
    except IOError:
        field = None
    return field
3
Example 33
Project: illustration2vec Source File: chainer_i2v.py
def make_i2v_with_chainer(param_path, tag_path=None, threshold_path=None):
    # ignore UserWarnings from chainer
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        net = CaffeFunction(param_path)
    kwargs = {}
    if tag_path is not None:
        tags = json.loads(open(tag_path, 'r').read())
        assert(len(tags) == 1539)
        kwargs['tags'] = tags
    if threshold_path is not None:
        fscore_threshold = np.load(threshold_path)['threshold']
        kwargs['threshold'] = fscore_threshold
    return ChainerI2V(net, **kwargs)
3
Example 34
Project: theano-tutorial Source File: load.py
def _load_batch_cifar100(filename, dtype='float64'):
    """
    load a batch in the CIFAR-100 format
    """
    path = os.path.join(data_dir_cifar100, filename)
    batch = np.load(path)
    data = batch['data'] / 255.0
    labels = one_hot(batch['fine_labels'], n=100)
    return data.astype(dtype), labels.astype(dtype)
3
Example 35
def __init__(self, cal_data=None):
    if cal_data is None:
        npz_mask = '*.npz'
        self.cal_data = glob(npz_mask)[0]
    else:
        self.cal_data = cal_data
    with np.load(self.cal_data) as X:
        self.mtx, self.dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]
3
Example 36
def read_array(path, mmap_mode=None):
    """Read a .npy array."""
    file_ext = op.splitext(path)[1]
    if file_ext == '.npy':
        return np.load(path, mmap_mode=mmap_mode)
    raise NotImplementedError("The file extension `{}` ".format(file_ext) +
                              "is not currently supported.")
3
Example 37
def load_data(dir, num):
    """Load numpy data from the data directory.

    The files should be stored in ``data/{dir}`` and named
    ``0.npy, 1.npy, ... {num - 1}.npy``.

    Returns:
        list: A list of loaded data, such that ``list[i]`` contains the
        contents of ``i.npy``.
    """
    root = os.path.abspath(os.path.dirname(__file__))

    def get_path(i):
        return os.path.join(root, 'data', dir, str(i) + '.npy')

    return [np.load(get_path(i)) for i in range(num)]
3
Example 38
@classmethod
def load_dir(cls, directory):
    """
    Load an AssocSpace from a directory on disk.
    """
    u = np.load(os.path.join(directory, 'u.npy'), mmap_mode='r')
    sigma = np.load(os.path.join(directory, 'sigma.npy'))
    with open(os.path.join(directory, 'labels.txt'), 'rb') as fl:
        labels = LabelSet(fl.read().decode('utf-8').splitlines())
    # Load the spectrally-associated matrix if available
    try:
        assoc = np.load(os.path.join(directory, 'assoc.npy'), mmap_mode='r')
    except FileNotFoundError:
        assoc = None
    return cls(u, sigma, labels, assoc=assoc)
3
Example 39
def load(self, path):
    state = np.load(pjoin(path, type(self).__name__ + '.npz'))
    self.t.set_value(state["t"])
    self.std.set_value(state["std"])
    self._srng.rstate[:] = state['_srng_rstate']
    for state_update, saved_state in zip(self._srng.state_updates, state["_srng_state_updates"]):
        state_update[0].set_value(saved_state)
3
Example 40
def _load_file(self, itraj):
    filename = self._filenames[itraj]
    # self._logger.debug("opening file %s" % filename)
    if filename.endswith('.npy'):
        x = np.load(filename, mmap_mode=self.mmap_mode)
        arr = self._reshape(x)
    else:
        raise ValueError("given file '%s' is not a NumPy array. Make sure"
                         " it has a .npy extension" % filename)
    return arr
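Examples 1, 36, 38 and 40 pass mmap_mode so that large .npy files are memory-mapped instead of read fully into RAM; only the pages that are actually touched are pulled from disk. A standalone sketch with an illustrative file name:

import numpy as np

# Read-only memory map: the data stays on disk and is paged in on demand.
arr = np.load('trajectory.npy', mmap_mode='r')
window = np.array(arr[1000:2000])   # materialise only the slice that is needed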
3
Example 41
def _put(self, func, symbol, data, format):
    assert format == 'npy'
    start_time = time.time()
    try:
        data = np.load(StringIO(data))
    except StandardError:
        return self.request.write("-ERR wrong data format\r\n")
    end_time = time.time()
    parse_time = 1000.0 * (end_time - start_time)
    logging.info("proto parse: %.2fms", parse_time)
    if data != None:
        func(symbol, data)
    self.request.write("+OK\r\n")
3
Example 42
def __init__(self, *argv, **kwargs):
    self.lambda_max = None
    self.n_compacts = None
    filename = kwargs["filename"] if "filename" in kwargs else None
    if filename is not None:
        d = np.load(filename)
        arr_k = [k for k in list(d.keys()) if k.count("csr")]
        argv = [sparse.csr_matrix((d[k][0], d[k][1], d[k][2]),
                                  shape=d[k][3])
                for k in arr_k]
        for k in set(d.keys()) - set(arr_k):
            try:
                setattr(self, k, float(d[k]))
            except:
                pass
    for l in argv:
        self.append(l)
3
Example 43
def load(self, filename):
    ext = os.path.splitext(filename)[1]
    if ext == '.h5':
        self.load_h5(filename)
    else:
        try:
            new_params = np.load(filename)
        except IOError, e:
            warnings.warn('''Parameter file could not be loaded with numpy.load()!
                             Is the filename correct?\n %s''' % (e, ))
        if type(new_params) == np.ndarray:
            print "loading npy file"
            self.updateparams(new_params)
        elif type(new_params) == np.lib.npyio.NpzFile:
            print "loading npz file"
            self.updateparams_fromdict(new_params)
        else:
            warnings.warn('''Parameter file loaded, but variable type not
                             recognized. Need npz or ndarray.''', Warning)
3
Example 44
def setUp(self):
    GenericArraySourceTest.setUp(self)
    self.lena = np.load(os.path.join(volumina._testing.__path__[0], 'lena.npy'))
    self.raw = np.zeros((1, 512, 512, 1, 1))
    self.raw[0, :, :, 0, 0] = self.lena
    self.source = ArraySource(self.raw)
    self.samesource = ArraySource(self.raw)
    self.othersource = ArraySource(np.array(self.raw))
2
Example 45
Project: gensim Source File: utils.py
def _load_specials(self, fname, mmap, compress, subname):
    """
    Loads any attributes that were stored specially, and gives the same
    opportunity to recursively included SaveLoad instances.
    """
    mmap_error = lambda x, y: IOError(
        'Cannot mmap compressed object %s in file %s. ' % (x, y) +
        'Use `load(fname, mmap=None)` or uncompress files manually.')
    for attrib in getattr(self, '__recursive_saveloads', []):
        cfname = '.'.join((fname, attrib))
        logger.info("loading %s recursively from %s.* with mmap=%s" % (
            attrib, cfname, mmap))
        getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)
    for attrib in getattr(self, '__numpys', []):
        logger.info("loading %s from %s with mmap=%s" % (
            attrib, subname(fname, attrib), mmap))
        if compress:
            if mmap:
                raise mmap_error(attrib, subname(fname, attrib))
            val = numpy.load(subname(fname, attrib))['val']
        else:
            val = numpy.load(subname(fname, attrib), mmap_mode=mmap)
        setattr(self, attrib, val)
    for attrib in getattr(self, '__scipys', []):
        logger.info("loading %s from %s with mmap=%s" % (
            attrib, subname(fname, attrib), mmap))
        sparse = unpickle(subname(fname, attrib))
        if compress:
            if mmap:
                raise mmap_error(attrib, subname(fname, attrib))
            with numpy.load(subname(fname, attrib, 'sparse')) as f:
                sparse.data = f['data']
                sparse.indptr = f['indptr']
                sparse.indices = f['indices']
        else:
            sparse.data = numpy.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
            sparse.indptr = numpy.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
            sparse.indices = numpy.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)
        setattr(self, attrib, sparse)
    for attrib in getattr(self, '__ignoreds', []):
        logger.info("setting ignored attribute %s to None" % (attrib))
        setattr(self, attrib, None)
2
Example 46
def load_dict(self, opts):
    """
    Loading the dictionary that goes from indices to actual words
    """
    if opts['rolling_vocab']:
        if self.indx_word and '.pkl' in self.indx_word[-4:]:
            data_dict = pkl.load(open(self.indx_word, "r"))
            self.large2word_trgt = data_dict
            self.large2word_trgt[opts['null_sym_target']] = '<eol>'
            self.large2word_trgt[opts['unk_sym_target']] = opts['oov']
        elif self.indx_word and '.np' in self.indx_word[-4:]:
            self.large2word_trgt = numpy.load(self.indx_word)['unique_words']
        if self.indx_word_src and '.pkl' in self.indx_word_src[-4:]:
            data_dict = pkl.load(open(self.indx_word_src, "r"))
            self.large2word_src = data_dict
            self.large2word_src[opts['null_sym_source']] = '<eol>'
            self.large2word_src[opts['unk_sym_source']] = opts['oov']
        elif self.indx_word_src and '.np' in self.indx_word_src[-4:]:
            self.large2word_src = numpy.load(self.indx_word_src)['unique_words']
    else:
        if self.indx_word and '.pkl' in self.indx_word[-4:]:
            data_dict = pkl.load(open(self.indx_word, "r"))
            self.word_indxs = data_dict
            self.word_indxs[opts['null_sym_target']] = '<eol>'
            self.word_indxs[opts['unk_sym_target']] = opts['oov']
        elif self.indx_word and '.np' in self.indx_word[-4:]:
            self.word_indxs = numpy.load(self.indx_word)['unique_words']
        if self.indx_word_src and '.pkl' in self.indx_word_src[-4:]:
            data_dict = pkl.load(open(self.indx_word_src, "r"))
            self.word_indxs_src = data_dict
            self.word_indxs_src[opts['null_sym_source']] = '<eol>'
            self.word_indxs_src[opts['unk_sym_source']] = opts['oov']
        elif self.indx_word_src and '.np' in self.indx_word_src[-4:]:
            self.word_indxs_src = numpy.load(self.indx_word_src)['unique_words']
2
Example 47
Project: dl4mt-multi Source File: initialize_model.py
def main(config, ref_encs=None, ref_decs=None, ref_att=None,
         ref_enc_embs=None, ref_dec_embs=None):
    # Create Theano variables
    floatX = theano.config.floatX
    src_sel = tensor.matrix('src_selector', dtype=floatX)
    trg_sel = tensor.matrix('trg_selector', dtype=floatX)
    x = tensor.lmatrix('source')
    y = tensor.lmatrix('target')
    x_mask = tensor.matrix('source_mask')
    y_mask = tensor.matrix('target_mask')
    # for multi source - maximum is 5 for now
    xs = [tensor.lmatrix('source%d' % i) for i in range(5)]
    x_masks = [tensor.matrix('source%d_mask' % i) for i in range(5)]
    # Create encoder-decoder architecture, and initialize
    logger.info('Creating encoder-decoder')
    enc_ids, dec_ids = get_enc_dec_ids(config['cgs'])
    enc_dec = EncoderDecoder(
        encoder=MultiEncoder(enc_ids=enc_ids, **config),
        decoder=MultiDecoder(**config))
    enc_dec.build_models(x, x_mask, y, y_mask, src_sel, trg_sel,
                         xs=xs, x_masks=x_masks)
    # load reference encoder models
    r_encs = {}
    if ref_encs is not None:
        for eid, path in ref_encs.items():
            logger.info('... ref-enc[{}] loading [{}]'.format(eid, path))
            r_encs[eid] = dict(numpy.load(path))
    # load reference decoder models
    r_decs = {}
    if ref_decs is not None:
        for did, path in ref_decs.items():
            logger.info('... ref-dec[{}] loading [{}]'.format(did, path))
            r_decs[did] = dict(numpy.load(path))
    # load reference model for the shared components
    if ref_att is not None:
        logger.info('... ref-shared loading [{}]'.format(ref_att))
        r_att = dict(numpy.load(ref_att))
    num_params_set = 0
    params_set = {k: 0 for k in enc_dec.get_params().keys()}
    # set encoder parameters of target model
    for eid, rparams in r_encs.items():
        logger.info(' Setting encoder [{}] parameters ...'.format(eid))
        tparams = enc_dec.encoder.encoders[eid].tparams
        for pname, pval in tparams.items():
            set_tparam(tparams[pname], rparams[pname])
            params_set[pname] += 1
            num_params_set += 1
        set_tparam(enc_dec.encoder.tparams['ctx_embedder_%s_W' % eid],
                   rparams['ctx_embedder_%s_W' % eid])
        set_tparam(enc_dec.encoder.tparams['ctx_embedder_%s_b' % eid],
                   rparams['ctx_embedder_%s_b' % eid])
        params_set['ctx_embedder_%s_W' % eid] += 1
        params_set['ctx_embedder_%s_b' % eid] += 1
        num_params_set += 2
    # set decoder parameters of target model
    for did, rparams in r_decs.items():
        logger.info(' Setting decoder [{}] parameters ...'.format(did))
        tparams = enc_dec.decoder.decoders[did].tparams
        for pname, pval in tparams.items():
            set_tparam(tparams[pname], rparams[pname])
            params_set[pname] += 1
            num_params_set += 1
    # set shared component parameters of target model
    if ref_att is not None:
        logger.info(' Setting shared parameters ...')
        shared_enc, shared_params = enc_dec.decoder._get_shared_params()
        for pname in shared_params.keys():
            set_tparam(enc_dec.decoder.tparams[pname], r_att[pname])
            params_set[pname] += 1
            num_params_set += 1
    # set encoder embeddings
    if ref_enc_embs is not None:
        logger.info(' Setting encoder embeddings ...')
        for eid, path in ref_enc_embs.items():
            pname = 'Wemb_%s' % eid
            logger.info(' ... [{}]-[{}]'.format(did, pname))
            emb = numpy.load(path)[pname]
            set_tparam(enc_dec.encoder.tparams[pname], emb)
            params_set[pname] += 1
            num_params_set += 1
    # set decoder embeddings
    if ref_dec_embs is not None:
        logger.info(' Setting decoder embeddings ...')
        for did, path in ref_dec_embs.items():
            pname = 'Wemb_dec_%s' % did
            logger.info(' ... [{}]-[{}]'.format(did, pname))
            emb = numpy.load(path)[pname]
            set_tparam(enc_dec.decoder.tparams[pname], emb)
            params_set[pname] += 1
            num_params_set += 1
    logger.info(' Saving initialized params to [{}/.params.npz]'
                .format(config['saveto']))
    if not os.path.exists(config['saveto']):
        os.makedirs(config['saveto'])
    numpy.savez('{}/params.npz'.format(config['saveto']),
                **tparams_asdict(enc_dec.get_params()))
    logger.info(' Total number of params : [{}]'
                .format(len(enc_dec.get_params())))
    logger.info(' Total number of params set: [{}]'.format(num_params_set))
    logger.info(' Duplicates [{}]'.format(
        [k for k, v in params_set.items() if v > 1]))
    logger.info(' Unset (random) [{}]'.format(
        [k for k, v in params_set.items() if v == 0]))
    logger.info(' Set {}'.format(
        [k for k, v in params_set.items() if v > 0]))
0
Example 48
@staticmethod
def Load(filename_network, filename_grid, filename_similarity_measure,
         fileformat=None, *args, **kwds):
    """
    Return a ClimateNetwork object stored in files.

    Unified reading function for graphs. Relies on and partially extends
    the corresponding igraph function. Refer to igraph documentation for
    further details on the various reader methods for different formats.

    This method tries to identify the format of the graph given in
    the first parameter and calls the corresponding reader method.

    Existing node and link attributes/weights are also restored depending
    on the chosen file format. E.g., the formats GraphML and gzipped
    GraphML are able to store both node and link weights.

    The remaining arguments are passed to the reader method without
    any changes.

    :arg str filename_network: The name of the file where the Network
        object is to be stored.
    :arg str filename_grid: The name of the file where the Grid object is
        to be stored (including ending).
    :arg str filename_similarity_measure: The name of the file where the
        similarity measure matrix is to be stored.
    :arg str fileformat: the format of the file (if known in advance)
        ``None`` means auto-detection. Possible values are: ``"ncol"``
        (NCOL format), ``"lgl"`` (LGL format), ``"graphml"``,
        ``"graphmlz"`` (GraphML and gzipped GraphML format), ``"gml"`` (GML
        format), ``"net"``, ``"pajek"`` (Pajek format), ``"dimacs"``
        (DIMACS format), ``"edgelist"``, ``"edges"`` or ``"edge"`` (edge
        list), ``"adjacency"`` (adjacency matrix), ``"pickle"`` (Python
        pickled format).
    :return: :class:`ClimateNetwork` instance.
    """
    # Load Grid object
    grid = Grid.Load(filename_grid)

    # Load similarity measure
    similarity_measure = np.load(filename_similarity_measure)

    # Load to igraph Graph object
    graph = igraph.Graph.Read(f=filename_network, format=fileformat,
                              *args, **kwds)

    # Extract adjacency matrix
    A = np.array(graph.get_adjacency(type=2).data)

    # Extract node weights
    if "node_weight_nsi" in graph.vs.attribute_names():
        node_weights = np.array(
            graph.vs.get_attribute_values("node_weight_nsi"))
    else:
        node_weights = None

    # Create ClimateNetwork instance
    net = ClimateNetwork(grid=grid, similarity_measure=similarity_measure,
                         directed=graph.is_directed())
    net.adjacency = A
    net.node_weights = node_weights

    # Overwrite igraph Graph object in Network instance to restore link
    # attributes/weights
    net.graph = graph

    # Restore link attributes/weights
    net.clear_paths_cache()

    return net
0
Example 49
def main():
    archs = {
        'alex': alex.Alex,
        'alex_fp16': alex.AlexFp16,
        'googlenet': googlenet.GoogLeNet,
        'googlenetbn': googlenetbn.GoogLeNetBN,
        'googlenetbn_fp16': googlenetbn.GoogLeNetBNFp16,
        'nin': nin.NIN
    }

    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('train', help='Path to training image-label list file')
    parser.add_argument('val', help='Path to validation image-label list file')
    parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
                        help='Convnet architecture')
    parser.add_argument('--batchsize', '-B', type=int, default=32,
                        help='Learning minibatch size')
    parser.add_argument('--epoch', '-E', type=int, default=10,
                        help='Number of epochs to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--initmodel',
                        help='Initialize the model from given file')
    parser.add_argument('--loaderjob', '-j', type=int,
                        help='Number of parallel data loading processes')
    parser.add_argument('--mean', '-m', default='mean.npy',
                        help='Mean file (computed by compute_mean.py)')
    parser.add_argument('--resume', '-r', default='',
                        help='Initialize the trainer from given file')
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--root', '-R', default='.',
                        help='Root directory path of image files')
    parser.add_argument('--val_batchsize', '-b', type=int, default=250,
                        help='Validation minibatch size')
    parser.add_argument('--test', action='store_true')
    parser.set_defaults(test=False)
    args = parser.parse_args()

    # Initialize the model to train
    model = archs[args.arch]()
    if args.initmodel:
        print('Load model from', args.initmodel)
        chainer.serializers.load_npz(args.initmodel, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make the GPU current
        model.to_gpu()

    # Load the datasets and mean file
    mean = np.load(args.mean)
    train = PreprocessedDataset(args.train, args.root, mean, model.insize)
    val = PreprocessedDataset(args.val, args.root, mean, model.insize, False)
    # These iterators load the images with subprocesses running in parallel to
    # the training/validation.
    train_iter = chainer.iterators.MultiprocessIterator(
        train, args.batchsize, n_processes=args.loaderjob)
    val_iter = chainer.iterators.MultiprocessIterator(
        val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)

    # Set up an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)

    val_interval = (10 if args.test else 100000), 'iteration'
    log_interval = (10 if args.test else 1000), 'iteration'

    trainer.extend(TestModeEvaluator(val_iter, model, device=args.gpu),
                   trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    # Be careful to pass the interval directly to LogReport
    # (it determines when to emit log rather than when to read observations)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'lr'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
0
Example 50
Project: pyunicorn Source File: map_plots.py
def generate_multiple_map_plots_npy(self, map_names, map_scales,
                                    title_on=True, labels_on=True):
    """
    Method for very large datasets (RAM issues) and useful for PARALLEL
    code. Generates map plots from the datasets stored in the npy files
    and the list of titles. The data is sorted as parallel computation
    mixes it up. Stores the plots in the file indicated by filename in the
    current directory.
    """
    # Set resources
    resources = self.resources

    # Set plot title
    if title_on:
        resources.tiMainString = self.title

    for k in range(len(self.map_mult_data)):
        # Open a workstation for every map, only wks_type = "ps" allows
        # multiple workstations
        # Sort dataset, as parallel code will mix it
        self.map_mult_data[k].sort()

        # Define own levels
        resources.cnLevelSelectionMode = "ExplicitLevels"
        resources.cnLevels = map_scales[k]

        wks_type = "pdf"
        wks = Ngl.open_wks(wks_type, map_names[k], resources)

        #
        # Generate map plots
        #
        for ititle in self.map_mult_data[k]:
            # Set title
            if labels_on:
                resources.lbTitleString = ititle
            data = np.load(str(k) + "_" + ititle + ".npy")
            # Reshape for visualization on the sphere
            data.shape = (self.grid.grid_size()["lat"],
                          self.grid.grid_size()["lon"])
            # Generate map plot
            cmap = Ngl.contour_map(wks, data, resources)

        # Clear map
        del cmap
        Ngl.destroy(wks)

    # Clean up
    for file_name in glob.glob('*.npy'):
        os.remove(file_name)

    del resources
    Ngl.end()