Here are examples of the Python API numpy.__name__ taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
60 Examples
3
Source : util.py
with MIT License
from bei181
def is_a_numpy_array(obj):
"""Returns true if obj is a numpy array."""
return type(obj).__module__ == np.__name__
def count_parameters(also_print=True):
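For context, np.__name__ is simply the string 'numpy', so comparing type(obj).__module__ against it catches every numpy type (arrays and scalars alike) without enumerating dtypes. A minimal, self-contained sketch of the idiom, assuming only that numpy is importable as np:

import numpy as np

def is_numpy_type(obj):
    """Return True if obj is any numpy type (ndarray or numpy scalar)."""
    return type(obj).__module__ == np.__name__  # np.__name__ == 'numpy'

print(np.__name__)                       # 'numpy'
print(is_numpy_type(np.array([1, 2])))   # True
print(is_numpy_type(np.float64(3.0)))    # True  (numpy scalar)
print(is_numpy_type([1, 2]))             # False (plain Python list)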
3
Source : estimators.py
with MIT License
from encryptogroup
def to_native_type(data):
    """
    Converts data from numpy types to python types.
    This is important because numpy integers for example are limited to 64 bits, which may cause overflow errors when
    dealing with very big numbers.
    :param data: type
    :return: data cast to python type
    """
    if type(data).__module__ == np.__name__:
        return data.item()
    else:
        return data
def get_jackk_coeffs(f, m):
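The point of .item() above is that numpy scalars are fixed-width (e.g. 64-bit integers), while the Python types they convert to are not, and only the Python types are JSON-serializable. A small usage sketch of the same pattern (the values are illustrative):

import numpy as np

def to_native_type(data):
    """Cast a numpy scalar to the equivalent Python type; pass everything else through."""
    if type(data).__module__ == np.__name__:
        return data.item()
    return data

x = np.int64(2) ** 62
print(type(to_native_type(x)))       # <class 'int'> -- arbitrary precision from here on
print(to_native_type("unchanged"))   # non-numpy values are returned as-is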
3
Source : encoders.py
with GNU General Public License v3.0
from f0uriest
def default(self, obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return {'data': obj.tolist(), '__numpy__': True}
        else:
            return obj.item()
    return super().default(obj)
class PopulationEncoder(json.JSONEncoder):
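A self-contained version of the encoder pattern, wired into json.dumps via the cls argument. The class name and payload below are illustrative, not taken from the project above:

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    """Illustrative encoder: arrays become tagged lists, numpy scalars become Python scalars."""
    def default(self, obj):
        if type(obj).__module__ == np.__name__:
            if isinstance(obj, np.ndarray):
                return {'data': obj.tolist(), '__numpy__': True}
            return obj.item()
        return super().default(obj)

payload = {'weights': np.arange(3), 'bias': np.float32(0.5)}
print(json.dumps(payload, cls=NumpyEncoder))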
3
Source : encoders.py
with GNU General Public License v3.0
from f0uriest
def default(self, obj):
    if isinstance(obj, Truss):
        a = obj.__dict__
        for key, val in a.items():
            if type(val).__module__ == np.__name__:
                if isinstance(val, np.ndarray):
                    a[key] = {'data': val.tolist(), '__numpy__': True}
                else:
                    a[key] = val.item()
        return a
    return super().default(obj)
def numpy_decoder(dct):
3
Source : data_loader.py
with GNU General Public License v2.0
from fossology
def save_params(save_function):
    def wrapper(*args):
        self = args[0]
        params = args[1].get_params()
        # convert to native python datatypes and check for serializability
        for key, value in list(params.items()):
            if type(value).__module__ == np.__name__:
                params[key] = value.item()
            elif not isinstance(value, (int, float, bool, list, tuple, dict)):
                del params[key]
        filename = args[2] + '.json'
        with open(Path(self.path / ('params_' + filename)), 'w', encoding='utf8') as param_doc:
            json.dump(params, param_doc)
        return save_function(*args)
    return wrapper
@staticmethod
3
Source : controller.py
with MIT License
from GalBrandwine
def input_frame(self, input_frame_from_camera):
    """Setter with preprocessing. """
    try:
        # make sure input is np.ndarray
        assert type(input_frame_from_camera).__module__ == np.__name__
    except AssertionError as error:
        self.logger.exception(error)
        return
    self._input_frame = cv2.bilateralFilter(
        input_frame_from_camera, 5, 50, 100)  # smoothing filter
    self._input_frame = cv2.flip(input_frame_from_camera, 1)
    self._draw_roi()
def _draw_roi(self):
3
Source : IO.py
with GNU General Public License v3.0
from grebtsew
def ndarrayJsonDumps(obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError("Unknown type:", type(obj))
def save_to_file(file_path, data, show=True):
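The same check also works as the default= fallback of json.dumps, which is handy when subclassing JSONEncoder is overkill. A self-contained sketch (the function name and data are illustrative):

import json
import numpy as np

def ndarray_json_dumps(obj):
    """Fallback serializer for json.dumps: handles any numpy array or scalar."""
    if type(obj).__module__ == np.__name__:
        return obj.tolist() if isinstance(obj, np.ndarray) else obj.item()
    raise TypeError(f"Unknown type: {type(obj)}")

data = {'scores': np.linspace(0.0, 1.0, 3), 'n': np.int32(3)}
print(json.dumps(data, default=ndarray_json_dumps))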
3
Source : hyp_model.py
with Apache License 2.0
from hyperion-ml
def to_json(self, **kwargs):
    # Piece of code borrowed from keras
    def get_json_type(obj):
        # if obj is any numpy type
        if type(obj).__module__ == np.__name__:
            return obj.item()
        # if obj is a python 'type'
        if type(obj).__name__ == type.__name__:
            return obj.__name__
        raise TypeError("Not JSON Serializable:", obj)
    config = self.get_config()
    return json.dumps(config, default=get_json_type, **kwargs)
@staticmethod
3
Source : hyp_model.py
with Apache License 2.0
from jsalt2019-diadet
def to_json(self, **kwargs):
    # Piece of code borrowed from keras
    def get_json_type(obj):
        # if obj is any numpy type
        if type(obj).__module__ == np.__name__:
            return obj.item()
        # if obj is a python 'type'
        if type(obj).__name__ == type.__name__:
            return obj.__name__
        raise TypeError('Not JSON Serializable:', obj)
    config = self.get_config()
    return json.dumps(config, default=get_json_type, **kwargs)
@staticmethod
3
Source : __io.py
with Apache License 2.0
from matrix-profile-foundation
def JSONSerializer(obj):
    """
    Default JSON serializer to write numpy arrays and other non-supported
    data types.
    Borrowed from:
    https://stackoverflow.com/a/52604722
    """
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError('Unknown type:', type(obj))
def from_json(profile):
3
Source : train_torch_filter.py
with MIT License
from mbrossar
def prepare_filter(args, dataset):
    iekf = TORCHIEKF()
    # set dataset parameter
    iekf.filter_parameters = args.parameter_class()
    iekf.set_param_attr()
    if type(iekf.g).__module__ == np.__name__:
        iekf.g = torch.from_numpy(iekf.g).double()
    # load model
    if args.continue_training:
        iekf.load(args, dataset)
    iekf.train()
    # init u_loc and u_std
    iekf.get_normalize_u(dataset)
    return iekf
def prepare_loss_data(args, dataset):
3
Source : utils_torch_filter.py
with MIT License
from mbrossar
def prepare_filter(args, dataset):
    torch_iekf = TORCHIEKF()
    torch_iekf.load(args, dataset)
    torch_iekf = TORCHIEKF()
    # set dataset parameter
    torch_iekf.filter_parameters = args.parameter_class()
    torch_iekf.set_param_attr()
    if type(torch_iekf.g).__module__ == np.__name__:
        torch_iekf.g = torch.from_numpy(torch_iekf.g).double()
    # load model
    torch_iekf.load(args, dataset)
    torch_iekf.get_normalize_u(dataset)
    iekf = NUMPYIEKF(args.parameter_class)
    iekf.set_learned_covariance(torch_iekf)
    return iekf, torch_iekf
3
Source : bfm.py
with BSD 3-Clause "New" or "Revised" License
from mindslab-ai
def to(self, device):
    self.device = device
    for key, value in self.__dict__.items():
        if type(value).__module__ == np.__name__:
            setattr(self, key, torch.tensor(value).to(device))
def compute_shape(self, id_coeff, exp_coeff):
3
Source : common.py
with BSD 3-Clause "New" or "Revised" License
from naver
def matmul(A, B):
    if typename(A) == np.__name__:
        B = tonumpy(B)
        scores = np.dot(A, B.T)
    elif typename(B) == torch.__name__:
        scores = torch.matmul(A, B.t()).cpu().numpy()
    else:
        raise TypeError("matrices must be either numpy or torch type")
    return scores
def pool(x, pooling='mean', gemp=3):
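typename and tonumpy are not shown in this snippet; presumably typename returns the module that defines the value's type, which is how the numpy and torch branches are told apart. A sketch of that assumption, with the torch branch omitted so it runs with numpy alone:

import numpy as np

def typename(x):
    """Presumed helper: the module that defines type(x), e.g. 'numpy' or 'torch'."""
    return type(x).__module__

def matmul(A, B):
    # Dispatch on the backing library rather than on concrete classes.
    if typename(A) == np.__name__:
        return np.dot(A, np.asarray(B).T)
    raise TypeError("matrices must be either numpy or torch type")

print(matmul(np.eye(2), np.array([[1.0, 2.0], [3.0, 4.0]])))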
3
Source : domain_controller.py
with MIT License
from nazarimilad
def default(self, obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError('Unknown type:', type(obj))
def get_tables(self):
3
Source : _codec.py
with MIT License
from pyfar
def _is_numpy_type(obj):
    """ True if object is a Numpy-type.
    """
    return type(obj).__module__ == np.__name__
def _is_dtype(obj):
3
Source : conversion.py
with MIT License
from RomuloDrumond
def numpy_json_encoder(obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError(f'Unable to "jsonify" object of type: {type(obj)}')
3
Source : sk.py
with GNU General Public License v3.0
from Shao-Kui
def jsonDumpsDefault(obj):
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    raise TypeError('Unknown type:', type(obj))
'''
3
Source : trajectory.py
with MIT License
from Stanford-ILIAD
def __getitem__(self, idx: Union[int, List[int], np.array]):
    if isinstance(idx, list) or type(idx).__module__ == np.__name__:
        return TrajectorySet([self.trajectories[i] for i in idx])
    return self.trajectories[idx]
def __setitem__(self, idx: int, new_trajectory: Trajectory):
3
Source : pywordseg.py
with MIT License
from voidism
def sort_by(li, piv=2, unsort=False):
    if type(li[piv]).__module__ == np.__name__:
        return sort_numpy(li, piv, unsort)
    elif type(li[piv]).__module__ == torch.__name__:
        return sort_torch(li, piv, unsort)
    else:
        return sort_list(li, piv, unsort)
class W2V_Embedder():
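The module name makes a cheap three-way dispatch between numpy, torch and plain Python containers. A minimal sketch that exercises only the numpy and built-in branches, so it runs without torch installed:

import numpy as np

def backend_of(x):
    """'numpy', 'torch' or 'builtins', depending on what produced x."""
    return type(x).__module__

for value in (np.array([3, 1, 2]), [3, 1, 2]):
    if backend_of(value) == np.__name__:
        print("numpy :", np.sort(value))
    else:
        print("python:", sorted(value))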
0
Source : matlab_utils.py
with BSD 3-Clause "New" or "Revised" License
from aesrgan
def imresize(img, scale, antialiasing=True):
"""imresize function same as MATLAB.
It now only supports bicubic.
The same scale applies for both height and width.
Args:
img (Tensor | Numpy array):
Tensor: Input image with shape (c, h, w), [0, 1] range.
Numpy: Input image with shape (h, w, c), [0, 1] range.
scale (float): Scale factor. The same scale applies for both height
and width.
antialiasing (bool): Whether to apply anti-aliasing when downsampling.
Default: True.
Returns:
Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
"""
squeeze_flag = False
if type(img).__module__ == np.__name__: # numpy type
numpy_type = True
if img.ndim == 2:
img = img[:, :, None]
squeeze_flag = True
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
else:
numpy_type = False
if img.ndim == 2:
img = img.unsqueeze(0)
squeeze_flag = True
in_c, in_h, in_w = img.size()
out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale)
kernel_width = 4
kernel = 'cubic'
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale, kernel, kernel_width,
antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale, kernel, kernel_width,
antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
img_aug.narrow(1, sym_len_hs, in_h).copy_(img)
sym_patch = img[:, :sym_len_hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_he:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_c, out_h, in_w)
kernel_width = weights_h.size(1)
for i in range(out_h):
idx = int(indices_h[i][0])
for j in range(in_c):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_we:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_c, out_h, out_w)
kernel_width = weights_w.size(1)
for i in range(out_w):
idx = int(indices_w[i][0])
for j in range(in_c):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i])
if squeeze_flag:
out_2 = out_2.squeeze(0)
if numpy_type:
out_2 = out_2.numpy()
if not squeeze_flag:
out_2 = out_2.transpose(1, 2, 0)
return out_2
def rgb2ycbcr(img, y_only=False):
0
Source : model.py
with Apache License 2.0
from AstraZeneca
def _tensor(self, data):
    """Turns numpy arrays to torch tensors"""
    if type(data).__module__ == np.__name__:
        data = th.from_numpy(data)
    return data.to(self.device).float()
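The reverse direction of the JSON examples: anything detected as numpy is handed to torch.from_numpy before being moved to the target device. A hedged sketch of the pattern, assuming torch is installed (the function name and device default are illustrative):

import numpy as np
import torch  # assumed to be installed

def to_tensor(data, device="cpu"):
    """Convert numpy input to a float tensor on `device`; tensors pass straight through."""
    if type(data).__module__ == np.__name__:
        data = torch.from_numpy(data)
    return data.to(device).float()

print(to_tensor(np.zeros((2, 3))).dtype)  # torch.float32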
0
Source : __init__.py
with MIT License
from belosthomas
def extract(self, original_image, resized_image, with_images=True):
    """
    Extract all the lines from the given image
    :param image: A tensor image
    :return: The extracted lines, as pillow images, and their positions
    """
    if type(original_image).__module__ == np.__name__:
        original_image = torch.from_numpy(original_image).unsqueeze(0)
    if type(resized_image).__module__ == np.__name__:
        resized_image = torch.from_numpy(resized_image).unsqueeze(0)
    is_cuda = next(self.model.parameters()).is_cuda
    image = torch.autograd.Variable(resized_image).float()
    if is_cuda:
        image = image.cuda()
    else:
        image = image.cpu()
    image = self.loss.process_labels(image)
    result = self.model(torch.autograd.Variable(image))[0]
    lines, components = self.loss.ytrue_to_lines(original_image.cpu().numpy()[0], result.cpu().detach().numpy(), with_images)
    pillow_lines = [line for line, pos in lines]
    pos = [pos for line, pos in lines]
    return pillow_lines, pos, result, components
def output_image_bloc(self, image, lines, lwidth=5):
0
Source : image_scraper.py
with MIT License
from bigmpc
def download_manager(self, image_url, image_name=''):
    """ download manager of image
    Args:
        image_url (str): image url
        image_name (str): image name
    """
    # download image
    image = self.download_image(image_url)
    if self.config.CHECK_RATIO_AND_RESIZE:
        image = self.resize_recolor_reratio(image, image_name)
    # check return image as numpy
    if type(image).__module__ == np.__name__:
        self.save_image(image, image_name)
def scrap(self , verbose = True):
0
Source : visualize.py
with BSD 3-Clause Clear License
from braindynamicslab
def json_dump(obj, fp):
    """ Handles numpy data types.
    Reference
    ---------
    - [1] https://stackoverflow.com/questions/26646362/
    """
    def default(o):
        if type(o).__module__ == np.__name__:
            if isinstance(o, np.ndarray):
                return o.tolist()
            else:
                return o.item()
        return json.JSONEncoder().default(o)
    json.dump(obj, fp, default=default)
    return None
def http_server(port=None, host='localhost'):
0
Source : library_examples.py
with MIT License
from credmark
def run(self, input) -> LibrariesDto:
return LibrariesDto(
**{
"libraries": [
{
"name": pandas.__name__,
"version": pandas.__version__
},
{
"name": numpy.__name__,
"version": numpy.__version__
},
{
"name": matplotlib.__name__,
"version": matplotlib.__version__, # type: ignore
},
{
"name": scipy.__name__,
"version": scipy.__version__
},
{
"name": statsmodels.__name__,
"version": statsmodels.__version__
},
{
"name": xlrd.__name__,
"version": xlrd.__version__
},
{
"name": xlsxwriter.__name__,
"version": xlsxwriter.__version__
},
{
"name": BeautifulSoup.__name__,
"version": bs4.__version__ # type: ignore
},
{
"name": pyarrow.__name__,
"version": pyarrow.__version__
},
]
}
)
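Unlike the other examples, this one uses __name__ for reporting rather than type checking: every imported module exposes __name__, and most expose __version__. A stripped-down sketch of the same idea, limited to numpy so it stays self-contained:

import json
import numpy

libraries = [{"name": mod.__name__, "version": mod.__version__} for mod in (numpy,)]
print(json.dumps(libraries))  # e.g. [{"name": "numpy", "version": "1.26.4"}]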
0
Source : data_generator.py
with GNU General Public License v3.0
from frankkramer-lab
def __init__(self, sample_list, preprocessor, training=False,
validation=False, shuffle=False, iterations=None):
# Parse sample list
if isinstance(sample_list, list) : self.sample_list = sample_list.copy()
elif type(sample_list).__module__ == np.__name__ :
self.sample_list = sample_list.tolist()
else: raise ValueError("Sample list has to be a list or numpy array!")
# Create a working environment from the handed over variables
self.preprocessor = preprocessor
self.training = training
self.validation = validation
self.shuffle = shuffle
self.iterations = iterations
self.batch_queue = []
# If samples with subroutines should be preprocessed -> do it now
if preprocessor.prepare_subfunctions:
preprocessor.run_subfunctions(sample_list, training)
# If batches should be prepared before runtime -> do it now
if preprocessor.prepare_batches:
batches_count = preprocessor.run(sample_list, training, validation)
self.batchpointers = list(range(0, batches_count+1))
elif not training:
self.batch_queue = preprocessor.run(sample_list, False, False)
# Shuffle before starting
self.on_epoch_end()
# Return the next batch for associated index
def __getitem__(self, idx):
0
Source : controller.py
with MIT License
from GalBrandwine
def face_covered_frame(self, input_frame_with_faces):
    """Function to draw black rectangles over detected faces.
    This function removes any 'noise' and helps the detector detect the palm.
    :param input_frame_with_faces (np.ndarray): a frame with faces that need to be covered.
    """
    try:
        # make sure input is np.ndarray
        assert type(input_frame_with_faces).__module__ == np.__name__
    except AssertionError as error:
        self.logger.exception(error)
        return
    # Preparation
    self._preprocessed_input_frame = input_frame_with_faces.copy()
    gray = cv2.cvtColor(self._preprocessed_input_frame, cv2.COLOR_BGR2GRAY)
    faces = self._face_detector.detectMultiScale(gray, 1.3, 5)
    # Black rectangle over faces to remove skin noises.
    for (x, y, w, h) in faces:
        self._preprocessed_input_frame[y - self._face_padding_y:y + h + self._face_padding_y,
                                       x - self._face_padding_x:x + w + self._face_padding_x, :] = 0
class BackGroundRemover:
0
Source : network.py
with MIT License
from geomstats
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
# Arguments
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
# Returns
A JSON string.
"""
def get_json_type(obj):
# If obj is any numpy type
if type(obj).__module__ == np.__name__:
return obj.item()
# If obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
0
Source : saving.py
with MIT License
from geomstats
def save_model(model, filepath, overwrite=True, include_optimizer=True):
"""Save a model to a HDF5 file.
Note: Please also see
[How can I install HDF5 or h5py to save my models in Keras?](
/getting-started/faq/
#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)
in the FAQ for instructions on how to install `h5py`.
The saved model contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus the saved model can be reinstantiated in
the exact same state, without any of the code
used for model definition or training.
# Arguments
model: Keras model instance to be saved.
filepath: one of the following:
- string, path where to save the model, or
- h5py.File object where to save the model
overwrite: Whether we should overwrite any existing
model at the target location, or instead
ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
# Raises
ImportError: if h5py is not available.
"""
if h5py is None:
raise ImportError('`save_model` requires h5py.')
def get_json_type(obj):
"""Serialize any object to a JSON-serializable structure.
# Arguments
obj: the object to serialize
# Returns
JSON-serializable structure representing `obj`.
# Raises
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__,
'config': obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return {'type': type(obj),
'value': obj.tolist()}
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
from .. import __version__ as keras_version
if not isinstance(filepath, h5py.File):
# If file exists and should not be overwritten.
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
f = h5py.File(filepath, mode='w')
opened_new_file = True
else:
f = filepath
opened_new_file = False
try:
f.attrs['keras_version'] = str(keras_version).encode('utf8')
f.attrs['backend'] = K.backend().encode('utf8')
f.attrs['model_config'] = json.dumps({
'class_name': model.__class__.__name__,
'config': model.get_config()
}, default=get_json_type).encode('utf8')
model_weights_group = f.create_group('model_weights')
if legacy_models.needs_legacy_support(model):
model_layers = legacy_models.legacy_sequential_layers(model)
else:
model_layers = model.layers
save_weights_to_hdf5_group(model_weights_group, model_layers)
if include_optimizer and model.optimizer:
if isinstance(model.optimizer, optimizers.TFOptimizer):
warnings.warn(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
'as part of the model save file.'
'You will have to compile your model again '
'after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
f.attrs['training_config'] = json.dumps({
'optimizer_config': {
'class_name': model.optimizer.__class__.__name__,
'config': model.optimizer.get_config()
},
'loss': model.loss,
'metrics': model.metrics,
'sample_weight_mode': model.sample_weight_mode,
'loss_weights': model.loss_weights,
}, default=get_json_type).encode('utf8')
# Save optimizer weights.
symbolic_weights = getattr(model.optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = f.create_group(
'optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for i, (w, val) in enumerate(zip(symbolic_weights,
weight_values)):
# Default values of symbolic_weights is /variable
# for Theano and CNTK
if K.backend() == 'theano' or K.backend() == 'cntk':
if hasattr(w, 'name'):
if w.name.split('/')[-1] == 'variable':
name = str(w.name) + '_' + str(i)
else:
name = str(w.name)
else:
name = 'param_' + str(i)
else:
if hasattr(w, 'name') and w.name:
name = str(w.name)
else:
name = 'param_' + str(i)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs[
'weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name,
val.shape,
dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
f.flush()
finally:
if opened_new_file:
f.close()
def load_model(filepath, custom_objects=None, compile=True):
0
Source : serialization.py
with Apache License 2.0
from google-research
def _object_to_literal(to_serialize: Any, container_stack: List[Any]) -> Any:
"""Turns a supported object into a Python literal."""
if isinstance(to_serialize, (int, float, bool, str, bytes, type(None))):
return to_serialize
elif isinstance(to_serialize, tf.DType):
dtype_string = repr(to_serialize)
assert dtype_string.startswith('tf.')
dtype_string = dtype_string[len('tf.'):]
return {_KIND_KEY: _DTYPE_KIND,
'dtype': dtype_string}
elif isinstance(to_serialize, tf.Tensor):
tensor_content = to_serialize.numpy()
# Sometimes tensor_content is a numpy type, and sometimes it's a normal
# Python type.
if type(tensor_content).__module__ == np.__name__:
tensor_content = tensor_content.tolist()
return {_KIND_KEY: _TENSOR_KIND,
'content': tensor_content,
'dtype': _object_to_literal(to_serialize.dtype, container_stack)}
elif isinstance(to_serialize, tf.SparseTensor):
return {_KIND_KEY: _SPARSE_TENSOR_KIND,
'indices': _object_to_literal(to_serialize.indices,
container_stack),
'values': _object_to_literal(to_serialize.values, container_stack),
'dense_shape': _object_to_literal(to_serialize.dense_shape,
container_stack)}
elif isinstance(to_serialize, dict):
if any(to_serialize is seen for seen in container_stack):
raise ValueError('Cycle detected in object dependencies.')
container_stack.append(to_serialize)
result = {_object_to_literal(key, container_stack):
_object_to_literal(value, container_stack)
for key, value in to_serialize.items()}
container_stack.pop()
return {_KIND_KEY: _DICT_KIND,
'dict': result}
elif isinstance(to_serialize, (list, tuple, set)):
if any(to_serialize is seen for seen in container_stack):
raise ValueError('Cycle detected in object dependencies.')
container_stack.append(to_serialize)
generator = (_object_to_literal(x, container_stack) for x in to_serialize)
container_type = type(to_serialize)
result = container_type(generator)
container_stack.pop()
return result
else:
raise TypeError('Cannot convert object {} with type {} to a literal.'
.format(to_serialize, type(to_serialize)))
def _literal_to_object(literal: Any) -> Any:
0
Source : trainer.py
with Apache License 2.0
from HazyResearch
def train_model(self, model, payloads, **kwargs):
# NOTE: misses="insert" so we can log extra metadata (e.g. num_parameters)
# and eventually write to disk.
self.config = recursive_merge_dicts(self.config, kwargs, misses="insert")
self.task_names = [task_name for task_name in model.task_map]
self.payload_names = [payload.name for payload in payloads]
train_payloads = [p for p in payloads if p.split == "train"]
if not train_payloads:
msg = "At least one payload must have property payload.split=='train'"
raise Exception(msg)
# Calculate epoch statistics
# NOTE: We calculate approximate count size using batch_size * num_batches
self.batches_per_epoch = sum([len(p.data_loader) for p in train_payloads])
self.examples_per_epoch = sum(
[len(p.data_loader) * p.data_loader.batch_size for p in train_payloads]
)
if self.config["verbose"]:
print(f"Beginning train loop.")
print(
f"Expecting approximately {self.examples_per_epoch} examples total "
f"and {self.batches_per_epoch} batches per epoch from "
f"{len(train_payloads)} payload(s) in the train split."
)
# Check inputs
self._check_metrics()
# Set training components
self._set_writer()
self._set_logger()
self._set_checkpointer(model)
self._set_optimizer(model)
self._set_lr_scheduler(model) # TODO: Support more detailed training schedules
self._set_task_scheduler(model, payloads)
# Record config
if self.writer:
self.writer.write_config(self.config)
# Train the model
# TODO: Allow other ways to train besides 1 epoch of all datasets
model.train()
# Dict metrics_hist contains the most recently recorded value of all metrics
self.metrics_hist = {}
self._reset_losses()
for epoch in range(self.config["n_epochs"]):
progress_bar = self.config["progress_bar"] and self.config["verbose"]
t = tqdm(
enumerate(self.task_scheduler.get_batches(payloads, "train")),
total=self.batches_per_epoch,
disable=(not progress_bar),
)
for batch_num, (batch, payload_name, labels_to_tasks) in t:
# NOTE: actual batch_size may not equal config's target batch_size,
# for example due to orphan batches. We base batch size off of Y instead
# of X because we know Y will contain tensors, whereas X can be of any
# format the input_module accepts, including tuples of tensors, etc.
_, Ys = batch
batch_size = len(next(iter(Ys.values())))
batch_id = epoch * self.batches_per_epoch + batch_num
# Zero the parameter gradients
self.optimizer.zero_grad()
# Forward pass to calculate the average loss per example by task
# Counts stores the number of examples in each batch with labels by task
loss_dict, count_dict = model.calculate_loss(
*batch, payload_name, labels_to_tasks
)
# NOTE: If there were no "active" examples, loss_dict is empty
# Skip additional loss-based computation at this point
if not loss_dict:
continue
loss = sum(loss_dict.values())
if torch.isnan(loss):
msg = "Loss is NaN. Consider reducing learning rate."
raise Exception(msg)
# Backward pass to calculate gradients
# Loss is an average loss per example
if model.config["fp16"]:
self.optimizer.backward(loss)
else:
loss.backward()
# Clip gradient norm (not individual gradient magnitudes)
# max_grad_value = max([p.grad.abs().max().item() for p in model.parameters()])
if self.config["grad_clip"]:
torch.nn.utils.clip_grad_norm_(
model.parameters(), self.config["grad_clip"]
)
# Perform optimizer step
self.optimizer.step()
# Update loss
for loss_name in loss_dict:
if count_dict[loss_name]:
self.running_losses[loss_name] += (
loss_dict[loss_name].item() * count_dict[loss_name]
)
self.running_examples[loss_name] += count_dict[loss_name]
# Calculate metrics, log, and checkpoint as necessary
metrics_dict = self._execute_logging(model, payloads, batch_size)
# Confirm metrics being produced are in proper format
if epoch == 0 and batch_num == 0:
self._validate_metrics_dict(metrics_dict)
# Apply learning rate scheduler
self._update_lr_scheduler(model, batch_id)
# tqdm output
if len(model.task_map) == 1:
t.set_postfix(loss=metrics_dict["model/train/all/loss"])
else:
losses = {}
for key, val in metrics_dict.items():
if "loss" in key:
losses[key] = val
t.set_postfix(losses)
model.eval()
# Restore best model if applicable
if self.checkpointer and self.checkpointer.checkpoint_best:
# First do a final checkpoint at the end of training
metrics_dict = self._execute_logging(
model, payloads, batch_size, force_log=True
)
self.checkpointer.load_best_model(model=model)
# Copy best model to log directory
if self.writer:
path_to_best = os.path.join(
self.checkpointer.checkpoint_dir, "best_model.pth"
)
path_to_logs = self.writer.log_subdir
if os.path.isfile(path_to_best):
copy2(path_to_best, path_to_logs)
# Print final performance values
if self.config["verbose"]:
print("Finished training")
# Calculate metrics for all splits if test_split=None
test_split = self.config["metrics_config"]["test_split"]
metrics_dict = self.calculate_metrics(model, payloads, split=test_split)
if self.config["verbose"]:
pprint(metrics_dict)
# Clean up checkpoints
if self.checkpointer and self.config["checkpoint_cleanup"]:
print("Cleaning checkpoints")
self.checkpointer.clean_up()
# Write log if applicable
if self.writer:
# convert from numpy to python float
metrics_dict = recursive_transform(
metrics_dict, lambda x: type(x).__module__ == np.__name__, float
)
self.writer.write_metrics(metrics_dict)
self.writer.write_log()
self.writer.close()
# pickle and save the full model
full_model_path = os.path.join(self.writer.log_subdir, "model.pkl")
torch.save(model, full_model_path, pickle_module=dill)
print(f"Full model saved at {full_model_path}")
return metrics_dict
def _execute_logging(self, model, payloads, batch_size, force_log=False):
0
Source : models.py
with MIT License
from hello-sea
def save_model(model, filepath, overwrite=True, include_optimizer=True):
"""Save a model to a HDF5 file.
The saved model contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus the saved model can be reinstantiated in
the exact same state, without any of the code
used for model definition or training.
# Arguments
model: Keras model instance to be saved.
filepath: one of the following:
- string, path where to save the model, or
- h5py.File object where to save the model
overwrite: Whether we should overwrite any existing
model at the target location, or instead
ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
# Raises
ImportError: if h5py is not available.
"""
if h5py is None:
raise ImportError('`save_model` requires h5py.')
def get_json_type(obj):
"""Serialize any object to a JSON-serializable structure.
# Arguments
obj: the object to serialize
# Returns
JSON-serializable structure representing `obj`.
# Raises
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__,
'config': obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return {'type': type(obj),
'value': obj.tolist()}
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
from . import __version__ as keras_version
if not isinstance(filepath, h5py.File):
# If file exists and should not be overwritten.
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
f = h5py.File(filepath, mode='w')
opened_new_file = True
else:
f = filepath
opened_new_file = False
try:
f.attrs['keras_version'] = str(keras_version).encode('utf8')
f.attrs['backend'] = K.backend().encode('utf8')
f.attrs['model_config'] = json.dumps({
'class_name': model.__class__.__name__,
'config': model.get_config()
}, default=get_json_type).encode('utf8')
model_weights_group = f.create_group('model_weights')
if legacy_models.needs_legacy_support(model):
model_layers = legacy_models.legacy_sequential_layers(model)
else:
model_layers = model.layers
topology.save_weights_to_hdf5_group(model_weights_group, model_layers)
if include_optimizer and hasattr(model, 'optimizer'):
if isinstance(model.optimizer, optimizers.TFOptimizer):
warnings.warn(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
'as part of the model save file.'
'You will have to compile your model again '
'after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
f.attrs['training_config'] = json.dumps({
'optimizer_config': {
'class_name': model.optimizer.__class__.__name__,
'config': model.optimizer.get_config()
},
'loss': model.loss,
'metrics': model.metrics,
'sample_weight_mode': model.sample_weight_mode,
'loss_weights': model.loss_weights,
}, default=get_json_type).encode('utf8')
# Save optimizer weights.
symbolic_weights = getattr(model.optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = f.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for i, (w, val) in enumerate(zip(symbolic_weights,
weight_values)):
# Default values of symbolic_weights is /variable
# for Theano and CNTK
if K.backend() == 'theano' or K.backend() == 'cntk':
if hasattr(w, 'name'):
if w.name.split('/')[-1] == 'variable':
name = str(w.name) + '_' + str(i)
else:
name = str(w.name)
else:
name = 'param_' + str(i)
else:
if hasattr(w, 'name') and w.name:
name = str(w.name)
else:
name = 'param_' + str(i)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name,
val.shape,
dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
f.flush()
finally:
if opened_new_file:
f.close()
def load_model(filepath, custom_objects=None, compile=True):
0
Source : numpy_usm_shared.py
with Apache License 2.0
from IntelPython
def numba_register_lower_builtin():
todo = []
todo_builtin = []
todo_getattr = []
todo_array_member_func = []
for k, v in _overload_glue._registered.items():
func = k
for typs, impl in v._BIND_TYPES.items():
ig = (impl, func, typs)
dprint(
"Numpy lowered registry functions:",
impl,
func,
type(func),
typs,
)
# If it is a Numpy function...
if isinstance(func, ftype):
dprint("is ftype")
if func.__module__ == np.__name__:
dprint("is Numpy module")
# If we have overloaded that function in the usmarray module (always True right now)...
if func.__name__ in functions_list:
todo.append(ig)
if isinstance(func, bftype):
dprint("is bftype")
if func.__module__ == np.__name__:
dprint("is Numpy module")
# If we have overloaded that function in the usmarray module (always True right now)...
if func.__name__ in functions_list:
todo.append(ig)
if isinstance(func, str) and func.startswith("array."):
todo_array_member_func.append(ig)
# For all Numpy identifiers that have been registered for typing in Numba...
# this registry contains functions, getattrs, setattrs, casts and constants...
for ig in lower_registry.functions:
impl, func, types = ig
dprint(
"Numpy lowered registry functions:", impl, func, type(func), types
)
# If it is a Numpy function...
if isinstance(func, ftype):
dprint("is ftype")
if func.__module__ == np.__name__:
dprint("is Numpy module")
# If we have overloaded that function in the usmarray module (always True right now)...
if func.__name__ in functions_list:
todo.append(ig)
if isinstance(func, bftype):
dprint("is bftype")
if func.__module__ == np.__name__:
dprint("is Numpy module")
# If we have overloaded that function in the usmarray module (always True right now)...
if func.__name__ in functions_list:
todo.append(ig)
if isinstance(func, str) and func.startswith("array."):
todo_array_member_func.append(ig)
for lg in lower_registry.getattrs:
func, attr, types = lg
dprint("Numpy lowered registry getattrs:", func, attr, types)
types_with_usmarray = types_replace_array(types)
if UsmSharedArrayType in types_with_usmarray:
dprint(
"lower_getattr:",
func,
type(func),
attr,
type(attr),
types,
type(types),
)
todo_getattr.append((func, attr, types_with_usmarray))
for lg in todo_getattr:
lower_registry.getattrs.append(lg)
for impl, func, types in todo + todo_builtin:
try:
usmarray_func = eval(
"dpctl.tensor.numpy_usm_shared." + func.__name__
)
except:
dprint("failed to eval", func.__name__)
continue
dprint(
"need to re-register lowerer for usmarray",
impl,
func,
types,
usmarray_func,
)
new_impl = copy_func_for_usmarray(impl, nus)
lower_registry.functions.append((new_impl, usmarray_func, types))
for impl, func, types in todo_array_member_func:
types_with_usmarray = types_replace_array(types)
usmarray_func = "usm" + func
dprint(
"Registering lowerer for", impl, usmarray_func, types_with_usmarray
)
new_impl = copy_func_for_usmarray(impl, nus)
lower_registry.functions.append(
(new_impl, usmarray_func, types_with_usmarray)
)
def argspec_to_string(argspec):
0
Source : io.py
with Apache License 2.0
from jgraving
def get_json_type(obj):
"""Serialize any object to a JSON-serializable structure.
# Arguments
obj: the object to serialize
# Returns
JSON-serializable structure representing `obj`.
# Raises
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, "get_config"):
return {"class_name": obj.__class__.__name__, "config": obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return {"type": type(obj), "value": obj.tolist()}
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError("Not JSON Serializable:", obj)
0
Source : imgproc.py
with Apache License 2.0
from Lornatang
def imresize(image: Any, scale_factor: float, antialiasing: bool = True) -> Any:
"""Implementation of `imresize` function in Matlab under Python language.
Args:
image: The input image.
scale_factor (float): Scale factor. The same scale applies for both height and width.
antialiasing (bool): Whether to apply antialiasing when down-sampling operations.
Caution: Bicubic down-sampling in `PIL` uses antialiasing by default. Default: ``True``.
Returns:
np.ndarray: Output image with shape (c, h, w), [0, 1] range, w/o round.
"""
squeeze_flag = False
if type(image).__module__ == np.__name__: # numpy type
numpy_type = True
if image.ndim == 2:
image = image[:, :, None]
squeeze_flag = True
image = torch.from_numpy(image.transpose(2, 0, 1)).float()
else:
numpy_type = False
if image.ndim == 2:
image = image.unsqueeze(0)
squeeze_flag = True
in_c, in_h, in_w = image.size()
out_h, out_w = math.ceil(in_h * scale_factor), math.ceil(in_w * scale_factor)
kernel_width = 4
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale_factor, kernel_width, antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale_factor, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
img_aug.narrow(1, sym_len_hs, in_h).copy_(image)
sym_patch = image[:, :sym_len_hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)
sym_patch = image[:, -sym_len_he:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_c, out_h, in_w)
kernel_width = weights_h.size(1)
for i in range(out_h):
idx = int(indices_h[i][0])
for j in range(in_c):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_we:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_c, out_h, out_w)
kernel_width = weights_w.size(1)
for i in range(out_w):
idx = int(indices_w[i][0])
for j in range(in_c):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i])
if squeeze_flag:
out_2 = out_2.squeeze(0)
if numpy_type:
out_2 = out_2.numpy()
if not squeeze_flag:
out_2 = out_2.transpose(1, 2, 0)
return out_2
# Code reference `https://github.com/xinntao/BasicSR/blob/master/basicsr/utils/matlab_functions.py`
def rgb2ycbcr(image: np.ndarray, use_y_channel: bool = False) -> np.ndarray:
0
Source : data_utils.py
with Apache License 2.0
from Lornatang
def imresize(image: Any, scale_factor: float, antialiasing: bool = True) -> Any:
"""Implementation of `imresize` function in Matlab under Python language.
Args:
image: The input image.
scale_factor (float): Scale factor. The same scale applies for both height and width.
antialiasing (bool): Whether to apply antialiasing when down-sampling operations.
Caution: Bicubic down-sampling in `PIL` uses antialiasing by default. Default: ``True``.
Returns:
np.ndarray: Output image with shape (c, h, w), [0, 1] range, w/o round.
"""
squeeze_flag = False
if type(image).__module__ == np.__name__: # numpy type
numpy_type = True
if image.ndim == 2:
image = image[:, :, None]
squeeze_flag = True
image = torch.from_numpy(image.transpose(2, 0, 1)).float()
else:
numpy_type = False
if image.ndim == 2:
image = image.unsqueeze(0)
squeeze_flag = True
in_c, in_h, in_w = image.size()
out_h, out_w = math.ceil(in_h * scale_factor), math.ceil(in_w * scale_factor)
kernel_width = 4
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(in_h, out_h, scale_factor, kernel_width, antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(in_w, out_w, scale_factor, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
img_aug.narrow(1, sym_len_hs, in_h).copy_(image)
sym_patch = image[:, :sym_len_hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)
sym_patch = image[:, -sym_len_he:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_c, out_h, in_w)
kernel_width = weights_h.size(1)
for i in range(out_h):
idx = int(indices_h[i][0])
for j in range(in_c):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_h[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_we:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_c, out_h, out_w)
kernel_width = weights_w.size(1)
for i in range(out_w):
idx = int(indices_w[i][0])
for j in range(in_c):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_w[i])
if squeeze_flag:
out_2 = out_2.squeeze(0)
if numpy_type:
out_2 = out_2.numpy()
if not squeeze_flag:
out_2 = out_2.transpose(1, 2, 0)
return out_2
0
Source : metrics.py
with Apache License 2.0
from mcindoe
def is_numpy(x):
    return type(x).__module__ == np.__name__
def squared_euclidean_distance(a, b):
0
Source : constraint.py
with BSD 3-Clause "New" or "Revised" License
from montefiore-ai
def highest_density_level(pdf, alpha, bias=0.0, min_epsilon=10e-17, region=False):
# Check if a proper bias has been specified.
if bias >= alpha:
raise ValueError("The bias cannot be larger or equal to the specified alpha level.")
# Detect numpy type
if type(pdf).__module__ != np.__name__:
pdf = pdf.cpu().clone().numpy()
else:
pdf = np.array(pdf)
total_pdf = pdf.sum()
pdf /= total_pdf
# Compute highest density level and the corresponding mask
n = len(pdf)
optimal_level = pdf.max().item()
epsilon = 10e-02
while epsilon >= min_epsilon:
area = float(0)
while area <= (alpha + bias):
# Compute the integral
m = (pdf >= optimal_level).astype(np.float32)
area = np.sum(m * pdf)
# Compute the error and apply gradient descent
optimal_level -= epsilon
optimal_level += 2 * epsilon
epsilon /= 10
optimal_level *= total_pdf
if region:
return optimal_level, torch.from_numpy(m)
else:
return optimal_level
@torch.no_grad()
0
Source : kits19.py
with MIT License
from nitsaick
def vis_transform(self, data):
cmap = self.get_colormap()
if 'image' in data.keys() and data['image'] is not None:
imgs = data['image']
if type(imgs).__module__ != np.__name__:
imgs = imgs.cpu().detach().numpy()
data['image'] = imgs
if 'label' in data.keys() and data['label'] is not None and data['label'].shape[-1] != 0:
labels = data['label']
if type(labels).__module__ != np.__name__:
labels = labels.cpu().detach().numpy()
labels = cmap[labels]
labels = labels.transpose((0, 3, 1, 2))
labels = labels / 255
data['label'] = labels
if 'predict' in data.keys() and data['predict'] is not None:
preds = data['predict']
if type(preds).__module__ != np.__name__:
preds = preds.cpu().detach().numpy()
if preds.shape[1] == self.num_classes:
preds = preds.argmax(axis=1)
preds = cmap[preds]
preds = preds.transpose((0, 3, 1, 2))
preds = preds / 255
data['predict'] = preds
return data
def _default_transform(self, data):
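Several of the later examples invert the test: type(x).__module__ != np.__name__ means "not numpy yet", typically a torch tensor that still has to be pulled to the CPU and converted. A minimal sketch of that pattern; only the already-numpy path executes here, so it runs without torch:

import numpy as np

def as_numpy(x):
    """If x is not already a numpy object, ask it to convert itself (torch-style API assumed)."""
    if type(x).__module__ != np.__name__:
        x = x.cpu().detach().numpy()  # e.g. a torch tensor
    return x

print(as_numpy(np.ones(2)))  # already numpy: returned unchanged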
0
Source : _campaign_planning_tool.py
with BSD 3-Clause "New" or "Revised" License
from niva83
def __check_lidar_position(lidar_position):
"""
Validates a lidar position
Parameters
----------
lidar_position : ndarray
nD array containing data with `float` or `int` type
corresponding to x, y and z coordinates of a lidar.
nD array data are expressed in meters.
Returns
-------
True / False
See also
--------
add_lidar_instance() : adds lidar to the lidar dictionary
"""
if(type(lidar_position).__module__ == np.__name__):
if (len(lidar_position.shape) == 1 and lidar_position.shape[0] == 3):
return True
else:
print('Wrong dimensions!\nLidar position is described by 3 parameters:\n(1)Easting\n(2)Northing\n(3)Height!')
print('Lidar position was not added!')
return False
else:
print('Input is not numpy array!')
print('Lidar position was not added!')
return False
def add_measurement_instances(self, points_id, points):
0
Source : topology.py
with MIT License
from PacktPublishing
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
def get_json_type(obj):
# If obj is any numpy type
if type(obj).__module__ == np.__name__:
return obj.item()
# If obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
0
Source : models.py
with MIT License
from PacktPublishing
def save_model(model, filepath, overwrite=True, include_optimizer=True):
"""Save a model to a HDF5 file.
The saved model contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus the saved model can be reinstantiated in
the exact same state, without any of the code
used for model definition or training.
Arguments:
model: Keras model instance to be saved.
filepath: String, path where to save the model.
overwrite: Whether we should overwrite any existing
model at the target location, or instead
ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
Raises:
ImportError: if h5py is not available.
"""
if h5py is None:
raise ImportError('`save_model` requires h5py.')
def get_json_type(obj):
"""Serialize any object to a JSON-serializable structure.
Arguments:
obj: the object to serialize
Returns:
JSON-serializable structure representing `obj`.
Raises:
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {'class_name': obj.__class__.__name__, 'config': obj.get_config()}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return {'type': type(obj), 'value': obj.tolist()}
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
from tensorflow.python.keras._impl.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
# If file exists and should not be overwritten.
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
with h5py.File(filepath, mode='w') as f:
f.attrs['keras_version'] = str(keras_version).encode('utf8')
f.attrs['backend'] = K.backend().encode('utf8')
f.attrs['model_config'] = json.dumps(
{
'class_name': model.__class__.__name__,
'config': model.get_config()
},
default=get_json_type).encode('utf8')
model_weights_group = f.create_group('model_weights')
model_layers = model.layers
topology.save_weights_to_hdf5_group(model_weights_group, model_layers)
if include_optimizer and hasattr(model, 'optimizer'):
if isinstance(model.optimizer, optimizers.TFOptimizer):
logging.warning(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
'as part of the model save file.'
'You will have to compile your model again after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
f.attrs['training_config'] = json.dumps(
{
'optimizer_config': {
'class_name': model.optimizer.__class__.__name__,
'config': model.optimizer.get_config()
},
'loss': model.loss,
'metrics': model.metrics,
'sample_weight_mode': model.sample_weight_mode,
'loss_weights': model.loss_weights,
},
default=get_json_type).encode('utf8')
# Save optimizer weights.
symbolic_weights = getattr(model.optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = f.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for w, val in zip(symbolic_weights, weight_values):
name = str(w.name)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
f.flush()
def load_model(filepath, custom_objects=None, compile=True): # pylint: disable=redefined-builtin
0
Source : network.py
with MIT License
from Relph1119
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
# Arguments
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
# Returns
A JSON string.
"""
def get_json_type(obj):
# If obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
# If obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
raise TypeError('Not JSON Serializable:', obj)
model_config = self._updated_config()
return json.dumps(model_config, default=get_json_type, **kwargs)
def to_yaml(self, **kwargs):
0
Source : saving.py
with MIT License
from Relph1119
def _serialize_model(model, f, include_optimizer=True):
    """Model serialization logic.

    This method is used for both writing to HDF5 file/group,
    as well as pickling. This is achieved via a
    `keras.utils.hdf5_utils.H5Dict` object, which can wrap HDF5
    files, groups and dicts with a common API.

    # Arguments
        model: Keras model instance to be serialized.
        f: keras.utils.io_utils.H5Dict instance.
        include_optimizer: If True, serialize optimizer's state together.
    """
    def get_json_type(obj):
        """Serialize any object to a JSON-serializable structure.

        # Arguments
            obj: the object to serialize

        # Returns
            JSON-serializable structure representing `obj`.

        # Raises
            TypeError: if `obj` cannot be serialized.
        """
        # if obj is a serializable Keras class instance
        # e.g. optimizer, layer
        if hasattr(obj, 'get_config'):
            return {'class_name': obj.__class__.__name__,
                    'config': obj.get_config()}

        # if obj is any numpy type
        if type(obj).__module__ == np.__name__:
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            else:
                return obj.item()

        # misc functions (e.g. loss function)
        if callable(obj):
            return obj.__name__

        # if obj is a python 'type'
        if type(obj).__name__ == type.__name__:
            return obj.__name__

        raise TypeError('Not JSON Serializable: %s' % (obj,))

    from .. import __version__ as keras_version

    f['keras_version'] = str(keras_version).encode('utf8')
    f['backend'] = K.backend().encode('utf8')

    model_config = {}
    model_config['class_name'] = model.__class__.__name__
    model_config['config'] = model.get_config()
    model_config = json.dumps(model_config, default=get_json_type)
    model_config = model_config.encode('utf-8')
    f['model_config'] = model_config

    model_weights_group = f['model_weights']
    model_layers = model.layers
    model_weights_group['layer_names'] = [layer.name.encode('utf8')
                                          for layer in model_layers]
    model_weights_group['backend'] = K.backend().encode('utf8')
    model_weights_group['keras_version'] = str(keras_version).encode('utf8')

    for layer in model_layers:
        layer_group = model_weights_group[layer.name]
        symbolic_weights = layer.weights
        weight_values = K.batch_get_value(symbolic_weights)
        weight_names = []
        for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
            if hasattr(w, 'name') and w.name:
                name = str(w.name)
            else:
                name = 'param_' + str(i)
            if name in weight_names:
                idx = 2
                unique_name = name + '_1'
                while unique_name in weight_names:
                    unique_name = name + '_' + str(idx)
                    idx += 1
                name = unique_name
            weight_names.append(name.encode('utf8'))
        layer_group['weight_names'] = weight_names
        for name, val in zip(weight_names, weight_values):
            layer_group[name] = val

    if include_optimizer and model.optimizer:
        if isinstance(model.optimizer, optimizers.TFOptimizer):
            warnings.warn(
                'TensorFlow optimizers do not '
                'make it possible to access '
                'optimizer attributes or optimizer state '
                'after instantiation. '
                'As a result, we cannot save the optimizer '
                'as part of the model save file. '
                'You will have to compile your model again '
                'after loading it. '
                'Prefer using a Keras optimizer instead '
                '(see keras.io/optimizers).')
        else:
            f['training_config'] = json.dumps({
                'optimizer_config': {
                    'class_name': model.optimizer.__class__.__name__,
                    'config': model.optimizer.get_config()
                },
                'loss': model.loss,
                'metrics': model.metrics,
                'sample_weight_mode': model.sample_weight_mode,
                'loss_weights': model.loss_weights,
            }, default=get_json_type).encode('utf8')

            symbolic_weights = getattr(model.optimizer, 'weights')
            if symbolic_weights:
                optimizer_weights_group = f['optimizer_weights']
                weight_values = K.batch_get_value(symbolic_weights)
                weight_names = []
                for i, (w, val) in enumerate(zip(symbolic_weights,
                                                 weight_values)):
                    # Default values of symbolic_weights is /variable
                    # for Theano and CNTK
                    if K.backend() == 'theano' or K.backend() == 'cntk':
                        if hasattr(w, 'name'):
                            if w.name.split('/')[-1] == 'variable':
                                name = str(w.name) + '_' + str(i)
                            else:
                                name = str(w.name)
                        else:
                            name = 'param_' + str(i)
                    else:
                        if hasattr(w, 'name') and w.name:
                            name = str(w.name)
                        else:
                            name = 'param_' + str(i)
                    if name in weight_names:
                        idx = 2
                        unique_name = name + '_1'
                        while unique_name in weight_names:
                            unique_name = name + '_' + str(idx)
                            idx += 1
                        name = unique_name
                    weight_names.append(name.encode('utf8'))
                optimizer_weights_group['weight_names'] = weight_names
                for name, val in zip(weight_names, weight_values):
                    optimizer_weights_group[name] = val


def _deserialize_model(f, custom_objects=None, compile=True):
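Besides the numpy handling, `_serialize_model` gives duplicate weight names unique `_1`, `_2`, ... suffixes before writing them. A small sketch that isolates just that deduplication loop (the helper name is hypothetical, not part of Keras):

# Hypothetical helper that mirrors the name-deduplication loop above.
def dedupe(names):
    unique = []
    for name in names:
        if name in unique:
            idx = 2
            candidate = name + '_1'
            while candidate in unique:
                candidate = name + '_' + str(idx)
                idx += 1
            name = candidate
        unique.append(name)
    return unique

print(dedupe(['kernel', 'bias', 'kernel', 'kernel']))
# -> ['kernel', 'bias', 'kernel_1', 'kernel_2']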
0
Source : io_utils.py
with MIT License
from Relph1119
def __setitem__(self, attr, val):
    if self.read_only:
        raise ValueError('Cannot set item in read only mode.')
    is_np = type(val).__module__ == np.__name__

    if isinstance(self.data, dict):
        if isinstance(attr, bytes):
            attr = attr.decode('utf-8')
        if is_np:
            self.data[attr] = pickle.dumps(val)
            # We have to remember to unpickle in __getitem__
            self.data['_{}_pickled'.format(attr)] = True
        else:
            self.data[attr] = val
        return

    if isinstance(self.data, h5py.Group) and attr in self.data:
        raise KeyError('Cannot set attribute. '
                       'Group with name "{}" exists.'.format(attr))

    if is_np:
        dataset = self.data.create_dataset(attr, val.shape, dtype=val.dtype)
        if not val.shape:
            # scalar
            dataset[()] = val
        else:
            dataset[:] = val
    elif isinstance(val, list):
        # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
        # because in that case even chunking the array would not make the
        # saving possible.
        bad_attributes = [x for x in val if len(x) > HDF5_OBJECT_HEADER_LIMIT]

        # Expecting this to never be true.
        if len(bad_attributes) > 0:
            raise RuntimeError('The following attributes cannot be saved to '
                               'HDF5 file because they are larger than '
                               '%d bytes: %s' % (HDF5_OBJECT_HEADER_LIMIT,
                                                 ', '.join(bad_attributes)))

        if val and sys.version_info[0] == 3 and isinstance(val[0], str):
            # convert to bytes
            val = [x.encode('utf-8') for x in val]

        data_npy = np.asarray(val)

        num_chunks = 1
        chunked_data = np.array_split(data_npy, num_chunks)

        # This will never loop forever thanks to the test above.
        is_too_big = lambda x: x.nbytes > HDF5_OBJECT_HEADER_LIMIT
        while any(map(is_too_big, chunked_data)):
            num_chunks += 1
            chunked_data = np.array_split(data_npy, num_chunks)

        if num_chunks > 1:
            for chunk_id, chunk_data in enumerate(chunked_data):
                self.data.attrs['%s%d' % (attr, chunk_id)] = chunk_data
        else:
            self.data.attrs[attr] = val
    else:
        self.data.attrs[attr] = val


def __getitem__(self, attr):
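When `H5Dict` wraps a plain dict instead of an HDF5 group, numpy values are pickled on write and flagged so the read path knows to unpickle them. A simplified sketch of that dict-backed branch (standalone functions here, not the actual class):

# Simplified sketch of the dict-backed branch: pickle numpy values on write,
# flag them, and unpickle on read.
import pickle
import numpy as np

def dict_set(store, attr, val):
    if type(val).__module__ == np.__name__:
        store[attr] = pickle.dumps(val)
        store['_{}_pickled'.format(attr)] = True   # remembered for reads
    else:
        store[attr] = val

def dict_get(store, attr):
    val = store[attr]
    if '_{}_pickled'.format(attr) in store:
        val = pickle.loads(val)
    return val

store = {}
dict_set(store, 'weights', np.ones(4))
print(dict_get(store, 'weights'))   # -> [1. 1. 1. 1.]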
0
Source : io_utils.py
with MIT License
from Relph1119
def __getitem__(self, attr):
    if isinstance(self.data, dict):
        if isinstance(attr, bytes):
            attr = attr.decode('utf-8')
        if attr in self.data:
            val = self.data[attr]
            if isinstance(val, dict) and val.get('_is_group'):
                val = H5Dict(val)
            elif '_{}_pickled'.format(attr) in self.data:
                val = pickle.loads(val)
            return val
        else:
            if self.read_only:
                raise ValueError('Cannot create group in read only mode.')
            val = {'_is_group': True}
            self.data[attr] = val
            return H5Dict(val)

    if attr in self.data.attrs:
        val = self.data.attrs[attr]
        if type(val).__module__ == np.__name__:
            if val.dtype.type == np.string_:
                val = val.tolist()
    elif attr in self.data:
        val = self.data[attr]
        if isinstance(val, h5py.Dataset):
            val = np.asarray(val)
        else:
            val = H5Dict(val)
    else:
        # could be chunked
        chunk_attr = '%s%d' % (attr, 0)
        is_chunked = chunk_attr in self.data.attrs
        if is_chunked:
            val = []
            chunk_id = 0
            while chunk_attr in self.data.attrs:
                chunk = self.data.attrs[chunk_attr]
                val.extend([x.decode('utf8') for x in chunk])
                chunk_id += 1
                chunk_attr = '%s%d' % (attr, chunk_id)
        else:
            if self.read_only:
                raise ValueError('Cannot create group in read only mode.')
            val = H5Dict(self.data.create_group(attr))
    return val


def __len__(self):
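The final branch reassembles string lists that `__setitem__` had to split across `attr0`, `attr1`, ... attributes to stay under `HDF5_OBJECT_HEADER_LIMIT`. A plain-dict sketch of that reassembly (attribute names and contents are illustrative):

# Sketch of the chunk-reassembly branch, using a plain dict in place of
# h5py attributes.
import numpy as np

attrs = {'layer_names0': np.array([b'conv1', b'conv2']),
         'layer_names1': np.array([b'dense1'])}

def read_chunked(attrs, attr):
    val, chunk_id = [], 0
    chunk_attr = '%s%d' % (attr, chunk_id)
    while chunk_attr in attrs:
        val.extend(x.decode('utf8') for x in attrs[chunk_attr])
        chunk_id += 1
        chunk_attr = '%s%d' % (attr, chunk_id)
    return val

print(read_chunked(attrs, 'layer_names'))   # -> ['conv1', 'conv2', 'dense1']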
0
Source : sequence.py
with MIT License
from Relph1119
def get_config(self):
    '''Returns the TimeseriesGenerator configuration as Python dictionary.

    # Returns
        A Python dictionary with the TimeseriesGenerator configuration.
    '''
    data = self.data
    if type(self.data).__module__ == np.__name__:
        data = self.data.tolist()
    try:
        json_data = json.dumps(data)
    except:
        raise TypeError('Data not JSON Serializable:', data)

    targets = self.targets
    if type(self.targets).__module__ == np.__name__:
        targets = self.targets.tolist()
    try:
        json_targets = json.dumps(targets)
    except:
        raise TypeError('Targets not JSON Serializable:', targets)

    return {
        'data': json_data,
        'targets': json_targets,
        'length': self.length,
        'sampling_rate': self.sampling_rate,
        'stride': self.stride,
        'start_index': self.start_index,
        'end_index': self.end_index,
        'shuffle': self.shuffle,
        'reverse': self.reverse,
        'batch_size': self.batch_size
    }


def to_json(self, **kwargs):
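The same pattern, reduced to its core: call `.tolist()` only when the value comes from numpy, then confirm the result is JSON serializable. A hedged, standalone sketch (the helper name is made up):

# Hypothetical helper mirroring the data/targets handling above.
import json
import numpy as np

def jsonable(value, label):
    # .tolist() only for numpy objects; plain lists pass straight through
    if type(value).__module__ == np.__name__:
        value = value.tolist()
    try:
        return json.dumps(value)
    except TypeError:
        raise TypeError('{} not JSON Serializable:'.format(label), value)

print(jsonable(np.arange(5), 'Data'))      # -> [0, 1, 2, 3, 4]
print(jsonable([10, 20, 30], 'Targets'))   # -> [10, 20, 30]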
0
Source : gpt2_client.py
with MIT License
from rish-16
def decode_seq(self, encodings):
    # converting numpy array to list
    if type(encodings).__module__ == np.__name__:
        encodings = encodings.tolist()

    models_dir = os.path.expanduser(os.path.expandvars(self.save_dir))
    enc = get_encoder(self.model_name, self.save_dir)
    hparams = default_hparams()

    with open(os.path.join(self.save_dir, self.model_name, 'hparams.json')) as f:
        data = json.load(f)
        hparams.override_from_dict(data)

    length = hparams.n_ctx

    with tf.Session(graph=tf.Graph()) as sess:
        batch_size = 1
        temperature = 1
        top_k = 40

        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(None)
        tf.set_random_seed(None)

        output = sample_sequence(
            hparams=hparams,
            length=length,
            start_token=enc.encoder['<|endoftext|>'],
            batch_size=batch_size,
            temperature=temperature,
            top_k=top_k
        )

        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(self.save_dir, self.model_name))
        saver.restore(sess, ckpt)

        sequences = enc.decode(encodings)
        return sequences


@lru_cache()
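Here the module check only normalizes the token ids to plain Python integers before decoding. A tiny sketch of that normalization on its own, with arbitrary example ids and no GPT-2 dependencies:

# Sketch: token ids arriving as a numpy array become plain ints before they
# are handed to a decoder (decoder omitted here).
import numpy as np

encodings = np.array([464, 3290, 318])   # arbitrary example token ids
if type(encodings).__module__ == np.__name__:
    encodings = encodings.tolist()        # np.int64 elements become plain ints

assert all(isinstance(token, int) for token in encodings)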
0
Source : json_encoder.py
with Apache License 2.0
from SaltieRL
def default(self, o):
    # it cannot normally serialize np.int64 and possibly other np data types
    if type(o).__module__ == np.__name__:
        return o.item()
    return super(CarballJsonEncoder, self).default(o)
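Such an encoder is passed to `json.dumps` via the `cls=` argument. A hedged usage sketch with a similarly structured encoder (the class name and data below are hypothetical, not from SaltieRL):

# Hypothetical encoder with the same structure, used through `cls=`.
import json
import numpy as np

class NumpyItemEncoder(json.JSONEncoder):
    def default(self, o):
        if type(o).__module__ == np.__name__:
            return o.item()   # np.int64 -> int, np.float32 -> float
        return super(NumpyItemEncoder, self).default(o)

stats = {'goals': np.int64(3), 'boost': np.float32(87.5)}
print(json.dumps(stats, cls=NumpyItemEncoder))   # -> {"goals": 3, "boost": 87.5}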