Here are examples of the Python API numpy.frombuffer, taken from open source projects.
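Before the project examples, a minimal sketch of the API itself: np.frombuffer reinterprets an existing bytes-like object as a one-dimensional array without copying. Because a bytes object is immutable, the resulting array is read-only; call .copy() when you need to write to it. The dtype, count, and offset arguments behave as in the examples below.

import numpy as np

buf = np.arange(8, dtype='<i4').tobytes()               # 32 bytes of little-endian int32
a = np.frombuffer(buf, dtype='<i4')                     # all 8 values, zero-copy, read-only
b = np.frombuffer(buf, dtype='<i4', count=2, offset=8)  # array([2, 3], dtype=int32)
writable = a.copy()                                     # detach from the buffer to allow writes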
50 Examples
Example 1
Project: pims Source File: bioformats.py
def _jbytearr_stringbuffer(arr, dtype):
# see https://github.com/originell/jpype/issues/71 and
# https://github.com/originell/jpype/pull/73
Jstr = jpype.java.lang.String(arr, 'ISO-8859-1').toString().encode('UTF-16LE')
bytearr = np.array(np.frombuffer(Jstr, dtype='<u2'), dtype=np.byte)
return np.frombuffer(bytearr, dtype=dtype)
Example 2
def read_value(self, dtype='uint64', count=1, advance=True):
"""
Read one or more scalars of the indicated dtype. Count specifies the number of
scalars to be read in.
"""
data = np.frombuffer(self._blob, dtype=dtype, count=count, offset=self.pos)
if advance:
# advance by the number of bytes just read (equivalent to data.nbytes)
self._pos += data.dtype.itemsize * data.size
if count == 1:
data = data[0]
return data
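The cursor arithmetic is the key detail here. A sketch of the same pattern outside the class, with illustrative variable names:

import numpy as np

blob = np.arange(4, dtype='uint64').tobytes()
pos = 0
data = np.frombuffer(blob, dtype='uint64', count=2, offset=pos)
pos += data.dtype.itemsize * data.size  # advance by the bytes consumed; pos is now 16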
Example 3
def _get_data(self, index):
# Return the data and meta data for the given index
if index >= self._length:
raise IndexError('Image index %i >= %i' % (index, self._length))
# Read all bytes
if self._data is None:
self._data = self._fp.read()
# Put in a numpy array
im = np.frombuffer(self._data, 'uint8')
im.shape = len(im), 1
# Return array and dummy meta data
return im, {}
Example 4
def get_tile(f, num_points):
"""
Read the next tile from a Sparky file object.
Parameters
----------
f : file object
Open file object pointing to a Sparky file.
num_points : int
Number of points in the tile.
Returns
-------
tile : ndarray
Tile of NMR data. Data is returned as a 1D array.
"""
bsize = num_points * 4 # size in bytes
return np.frombuffer(f.read(bsize), dtype='>f4')
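Sparky stores its points as big-endian 32-bit floats, hence the '>f4' dtype. A self-contained round trip, using io.BytesIO as a stand-in for a real Sparky file:

import io
import numpy as np

tile = np.linspace(0, 1, 16, dtype='>f4')  # fabricated tile data
f = io.BytesIO(tile.tobytes())
out = get_tile(f, num_points=16)
assert np.array_equal(out, tile)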
Example 5
def _frombuffer(ptr, frames, channels, dtype):
"""Create NumPy array from a pointer to some memory."""
framesize = channels * dtype.itemsize
data = np.frombuffer(ffi.buffer(ptr, frames * framesize), dtype=dtype)
data.shape = -1, channels
return data
Example 6
Project: async-deep-rl Source File: actor_learner.py
def sync_net_with_shared_memory(self, dest_net, shared_mem_vars):
feed_dict = {}
offset = 0
params = np.frombuffer(shared_mem_vars.vars,
ctypes.c_float)
for i in xrange(len(dest_net.params)):
shape = shared_mem_vars.var_shapes[i]
size = np.prod(shape)
feed_dict[dest_net.params_ph[i]] = \
params[offset:offset+size].reshape(shape)
offset += size
self.session.run(dest_net.sync_with_shared_memory,
feed_dict=feed_dict)
Example 7
def _toArray(args):
shape, text = args
byts = base64decode(text.encode('utf-8'))
ar = np.frombuffer(byts, dtype='int8')
ar.shape = shape
return ar
Example 8
def read_in_blocks(self, block_size=1024):
"""Lazy function (generator) that reads a binary file in chunks.
Default chunk size is 1k.
Data-type and byte-order of the returned data are the object's same.
"""
with open(self.filename, 'rb') as f:
chunk_size = block_size * self.dtype.itemsize
for data in futils.read_in_chunks(f, chunk_size):
yield np.frombuffer(data, dtype=self.dtype)
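A sketch of how such a generator might be consumed; the read_in_chunks helper below is a hypothetical stand-in for the futils.read_in_chunks referenced above, and the file path is illustrative:

import numpy as np

def read_in_chunks(f, chunk_size):
    # stand-in: yield successive chunk_size-byte blocks until EOF
    while True:
        block = f.read(chunk_size)
        if not block:
            break
        yield block

with open('data.bin', 'rb') as f:
    for block in read_in_chunks(f, 1024 * np.dtype('float32').itemsize):
        chunk = np.frombuffer(block, dtype='float32')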
Example 9
def _to_array(args):
shape, text = args
byts = base64decode(text.encode('utf-8'))
ar = np.frombuffer(byts, dtype='int8')
ar.shape = shape
return ar
Example 10
Project: pyzmq Source File: serialsocket.py
def recv_array(self, flags=0, copy=True, track=False):
"""recv a numpy array"""
md = self.recv_json(flags=flags)
msg = self.recv(flags=flags, copy=copy, track=track)
A = numpy.frombuffer(msg, dtype=md['dtype'])
return A.reshape(md['shape'])
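The matching sender is the other half of this pattern. A sketch following the send_array recipe from the pyzmq documentation (not shown in this example's source):

import zmq

def send_array(socket, A, flags=0, copy=True, track=False):
    """send a numpy array with metadata"""
    md = dict(dtype=str(A.dtype), shape=A.shape)
    socket.send_json(md, flags | zmq.SNDMORE)
    return socket.send(A, flags, copy=copy, track=track)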
Example 11
Project: tensorpack Source File: viz.py
def pyplot2img(plt):
buf = io.BytesIO()
plt.axis('off')
plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
buf.seek(0)
rawbuf = np.frombuffer(buf.getvalue(), dtype='uint8')
im = cv2.imdecode(rawbuf, cv2.IMREAD_COLOR)
buf.close()
return im
Example 12
Project: nansat Source File: mosaic.py
def mparray2ndarray(sharedArray, shape, dtype='float32'):
''' convert shared multiprocessing Array to numpy ndarray '''
# get access to shared array and convert to numpy ndarray
sharedNDArray = np.frombuffer(sharedArray.get_obj(), dtype=dtype)
# change shape to match bands
sharedNDArray.shape = shape
return sharedNDArray
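A usage sketch; note that get_obj() exists only on the lock-wrapped multiprocessing.Array, not on RawArray:

import multiprocessing as mp
import numpy as np

shared = mp.Array('f', 12)                    # 12 c_float slots, lock-wrapped
view = mparray2ndarray(shared, shape=(3, 4))  # zero-copy view, dtype float32
view[:] = 1.0                                 # writes land in the shared buffer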
Example 13
def __init__(self, array, mem, offset):
self.shape = array.shape
self.dtype = array.dtype
self.nbytes = array.nbytes
self.size = array.size
self.offset = offset
total = self.offset + self.nbytes
if total > len(mem):
raise ValueError(
'Shared memory size is too small. expect:{}, actual:{}'.format(
total, len(mem)))
target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
target[...] = array.ravel()
Example 14
Project: automl-phase-2 Source File: experiments.py
def __init__(self, time_budget):
super(SimpleManager, self).__init__()
self.time_budget = time_budget
self.actioncounter = 0
self.data = dict(shared_value=(np.frombuffer(Array('d', [20]).get_obj())))
self.child_cpu = defaultdict(lambda: 0)
self.child_private_memory = defaultdict(lambda: 0)
self.communication_sleep = 0 # next action has a sleep in it anyway
self.learner_preference = None
self.save_file = constants.SAVE_DIR + '/managerV1.pk'
self.name = 'm'
Example 15
@staticmethod
def _convert(signal, sample_width):
return numpy.array(numpy.frombuffer(signal, dtype=AudioEnergyValidator._formats[sample_width]), dtype=numpy.float64)
Example 16
Project: nmrglue Source File: sparky.py
def get_tilen(f, n_tile, tw_tuple):
"""
Read a tile from a Sparky file object.
Parameters
----------
f : file object
Open file object pointing to a Sparky file.
n_tile : int
Tile number to read
tw_tuple : tuple of ints
Tile size
Returns
-------
tile : ndarray
Tile of NMR data. Data is returned as a 1D array.
Notes
-----
The current file position is lost. Store it before calling this function
if the position is needed later.
"""
# determine the size of the tile in bytes
tsize = 4
for i in tw_tuple:
tsize = tsize * i
# seek to the beginning of the tile
f.seek(int(180 + 128 * len(tw_tuple) + n_tile * tsize))
return np.frombuffer(f.read(tsize), dtype='>f4')
Example 17
Project: pyfive Source File: low_level.py
def unpack_attribute(self, offset):
""" Return the attribute name and value. """
# read in the attribute message header
# See section IV.A.2.m. The Attribute Message for details
version = struct.unpack_from('<B', self.msg_data, offset)[0]
if version == 1:
attr_dict = _unpack_struct_from(
ATTR_MSG_HEADER_V1, self.msg_data, offset)
assert attr_dict['version'] == 1
offset += ATTR_MSG_HEADER_V1_SIZE
padding_multiple = 8
elif version == 3:
attr_dict = _unpack_struct_from(
ATTR_MSG_HEADER_V3, self.msg_data, offset)
assert attr_dict['version'] == 3
offset += ATTR_MSG_HEADER_V3_SIZE
padding_multiple = 1 # no padding
else:
raise NotImplementedError(
"unsupported attribute message version: %i" % (version))
# read in the attribute name
name_size = attr_dict['name_size']
name = self.msg_data[offset:offset+name_size]
name = name.strip(b'\x00').decode('utf-8')
offset += _padded_size(name_size, padding_multiple)
# read in the datatype information
try:
dtype = determine_dtype(self.msg_data, offset)
except NotImplementedError:
warnings.warn(
'Attribute %s type not implemented, set to None.' % (name, ))
return name, None
offset += _padded_size(attr_dict['datatype_size'], padding_multiple)
# read in the dataspace information
offset += _padded_size(attr_dict['dataspace_size'], padding_multiple)
# read in the value
if isinstance(dtype, tuple):
dtype_class = dtype[0]
if dtype_class == 'VLEN_STRING':
value = self._vlen_attr_value(offset, dtype)
elif dtype_class == 'REFERENCE':
address = struct.unpack_from(
'<Q', self.msg_data, offset=offset)[0]
value = Reference(address)
else:
raise NotImplementedError
else:
value = np.frombuffer(
self.msg_data, dtype=dtype, count=1, offset=offset)[0]
return name, value
Example 18
def read(self, frames, raw=False):
"""Read samples from an input stream.
The function does not return until the required number of
frames has been read. This may involve waiting for the
operating system to supply the data.
If raw data is requested, the raw cffi data buffer is
returned. Otherwise, a numpy array of the appropriate dtype
with one column per channel is returned.
"""
channels, _ = _split(self.channels)
dtype, _ = _split(self.dtype)
data = ffi.new("signed char[]", channels * dtype.itemsize * frames)
self._handle_error(_pa.Pa_ReadStream(self._stream, data, frames))
if not raw:
data = np.frombuffer(ffi.buffer(data), dtype=dtype)
data.shape = frames, channels
return data
Example 19
Project: VIP Source File: iuwt.py
def mp_a_trous(C0, wavelet_filter, scale, core_count):
"""
This is a reimplementation of the a trous filter which makes use of multiprocessing. In particular,
it divides the input array of dimensions NxN into M smaller arrays of dimensions (N/M)xN, where M is the
number of cores which are to be used.
INPUTS:
C0 (no default): The current array which is to be decomposed.
wavelet_filter (no default): The filter-bank which is applied to the components of the transform.
scale (no default): The scale at which decomposition is to be carried out.
core_count (no default): The number of CPU cores over which the task should be divided.
OUTPUTS:
shared_array The decomposed array.
"""
# Creates an array which may be accessed by multiple processes.
shared_array_base = mp.Array(ctypes.c_float, C0.shape[0]**2, lock=False)
shared_array = np.frombuffer(shared_array_base, dtype=ctypes.c_float)
shared_array = shared_array.reshape(C0.shape)
shared_array[:,:] = C0
# Division of the problem and allocation of processes to cores.
processes = []
for i in range(core_count):
process = mp.Process(target = mp_a_trous_kernel, args = (shared_array, wavelet_filter, scale, i,
C0.shape[0]//core_count, 'row',))
process.start()
processes.append(process)
for i in processes:
i.join()
processes = []
for i in range(core_count):
process = mp.Process(target = mp_a_trous_kernel, args = (shared_array, wavelet_filter, scale, i,
C0.shape[1]//core_count, 'col',))
process.start()
processes.append(process)
for i in processes:
i.join()
return shared_array
Example 20
Project: K3D-jupyter Source File: objects.py
def _to_ndarray(data, dtype=numpy.float32):
return numpy.frombuffer(base64.b64decode(data), dtype=dtype)
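The encoding side of the round trip is symmetric. A sketch (the encoder here is an assumption for illustration, not part of K3D's source):

import base64
import numpy

def _from_ndarray(arr):
    # hypothetical counterpart: serialize an array to a base64 string
    return base64.b64encode(arr.astype(numpy.float32).tobytes()).decode('ascii')

data = _from_ndarray(numpy.arange(3, dtype=numpy.float32))
restored = _to_ndarray(data)  # array([0., 1., 2.], dtype=float32)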
Example 21
Project: artiq Source File: pyon.py
def _nparray(shape, dtype, data):
a = numpy.frombuffer(base64.b64decode(data), dtype=dtype)
a = a.copy()
return a.reshape(shape)
Example 22
Project: vapory Source File: io.py
def ppm_to_numpy(filename=None, buffer=None, byteorder='>'):
"""Return image data from a raw PGM/PPM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
if not numpy_found:
raise IOError("Function ppm_to_numpy requires numpy installed.")
if buffer is None:
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
    rb"(^P\d\s(?:\s*#.*[\r\n])*"
    rb"(\d+)\s(?:\s*#.*[\r\n])*"
    rb"(\d+)\s(?:\s*#.*[\r\n])*"
    rb"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PPM/PGM file: '%s'" % filename)
cols_per_pixels = 1 if header.startswith(b"P5") else 3
dtype = 'uint8' if int(maxval) < 256 else byteorder+'uint16'
arr = numpy.frombuffer(buffer, dtype=dtype,
                       count=int(width)*int(height)*cols_per_pixels,
                       offset=len(header))
return arr.reshape((int(height), int(width), cols_per_pixels))
Example 23
def load_all(self):
if self.hdr[H.NB_FIBERS] == 0:
return []
with open(self.filename, 'rb') as f:
f.seek(self.offset)
buff = f.read()
buff = buff[:-2 * 3 * self.dtype.itemsize]
pts = np.frombuffer(buff, dtype=self.dtype) # Convert binary to float
# Convert big endian to little endian
if self.dtype != '<f4':
pts = pts.astype('<f4')
pts = pts.reshape([-1, 3])
idxNaN = np.arange(len(pts))[np.isnan(pts[:, 0])]
pts = pts[np.isfinite(pts[:, 0])]
idxNaN -= np.arange(len(idxNaN))
streamlines = np.split(pts, idxNaN)
return [np.dot(c_[s, np.ones(len(s), dtype='<f4')], self.invM)[:, :-1] for s in streamlines if s.shape[0] > 0]
Example 24
def _load(self):
f = open(self.filename, 'rb')
#####
# Read header
###
self.hdr[H.MAGIC_NUMBER] = f.read(6)
self.hdr[H.DIMENSIONS] = np.frombuffer(f.read(6), dtype='<i2')
self.hdr[H.VOXEL_SIZES] = np.frombuffer(f.read(12), dtype='<f4')
self.hdr[H.ORIGIN] = np.frombuffer(f.read(12), dtype='<f4')
self.hdr[H.NB_SCALARS_BY_POINT] = np.frombuffer(f.read(2), dtype='<i2')[0]
self.hdr['scalar_name'] = [f.read(20) for i in range(10)]
self.hdr[H.NB_PROPERTIES_BY_TRACT] = np.frombuffer(f.read(2), dtype='<i2')[0]
self.hdr['property_name'] = [f.read(20) for i in range(10)]
self.hdr[H.VOXEL_TO_WORLD] = np.frombuffer(f.read(64), dtype='<f4').reshape(4, 4)
self.hdr[H.WORLD_ORDER] = "RAS"
# Skip reserved bytes
f.seek(444, os.SEEK_CUR)
self.hdr[H.VOXEL_ORDER] = f.read(4)
self.hdr["pad2"] = f.read(4)
self.hdr["image_orientation_patient"] = np.frombuffer(f.read(24), dtype='<f4')
self.hdr["pad1"] = f.read(2)
self.hdr["invert_x"] = f.read(1) == '\x01'
self.hdr["invert_y"] = f.read(1) == '\x01'
self.hdr["invert_z"] = f.read(1) == '\x01'
self.hdr["swap_xy"] = f.read(1) == '\x01'
self.hdr["swap_yz"] = f.read(1) == '\x01'
self.hdr["swap_zx"] = f.read(1) == '\x01'
self.hdr[H.NB_FIBERS] = np.frombuffer(f.read(4), dtype='<i4')
self.hdr["version"] = np.frombuffer(f.read(4), dtype='<i4')
self.hdr["hdr_size"] = np.frombuffer(f.read(4), dtype='<i4')
# Check if little or big endian
self.hdr[H.ENDIAN] = '<'
if self.hdr["hdr_size"] != self.OFFSET:
self.hdr[H.ENDIAN] = '>'
self.hdr[H.NB_FIBERS] = self.hdr[H.NB_FIBERS].astype('>i4')
self.hdr["version"] = self.hdr["version"].astype('>i4')
self.hdr["hdr_size"] = self.hdr["hdr_size"].astype('>i4')
nb_fibers = 0
self.hdr[H.NB_POINTS] = 0
#Either verify the number of streamlines specified in the header is correct or
# count the actual number of streamlines in case it is not specified in the header.
remainingBytes = os.path.getsize(self.filename) - self.OFFSET
while remainingBytes > 0:
# Read points
nbPoints = readBinaryBytes(f, 1, np.dtype(self.hdr[H.ENDIAN] + "i4"))[0]
self.hdr[H.NB_POINTS] += nbPoints
# This seek is used to go to the next points number indication in the file.
f.seek((nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT])
+ self.hdr[H.NB_PROPERTIES_BY_TRACT]) * 4, 1) # Relative seek
remainingBytes -= (nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT])
+ self.hdr[H.NB_PROPERTIES_BY_TRACT]) * 4 + 4
nb_fibers += 1
if self.hdr[H.NB_FIBERS] != nb_fibers:
logging.warn(('The number of streamlines specified in header ({0}) does not match '
'the actual number of streamlines contained in this file ({1}). '
'The latter will be used.').format(self.hdr[H.NB_FIBERS], nb_fibers))
self.hdr[H.NB_FIBERS] = nb_fibers
f.close()
Example 25
def unpack(self, mem):
ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
ret = ret.reshape(self.shape).copy()
return ret
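This unpack is the read-side counterpart of Example 13's writer: the same (dtype, size, offset) triple recovers the flattened data and reshapes a private copy. A round-trip sketch, assuming both methods sit on one class, called _Packed here (the real class name is not shown):

import multiprocessing as mp
import numpy

mem = mp.RawArray('b', 1024)                  # shared, writable byte buffer
src = numpy.arange(6, dtype='float64').reshape(2, 3)
packed = _Packed(src, mem, offset=0)          # Example 13: copies src into mem
restored = packed.unpack(mem)                 # equal to src, but a private copy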
Example 26
def read_mnist_labels(filename):
"""Read MNIST labels from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read labels.
Returns
-------
labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with gzip.open(filename, 'rb') as f:
magic, _ = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError("Wrong magic number reading MNIST label file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape(array.size, 1)
return array
Example 27
Project: AWS-Lambda-ML-Microservice-Skeleton Source File: sas_xport.py
@Appender(_read_method_doc)
def read(self, nrows=None):
if nrows is None:
nrows = self.nobs
read_lines = min(nrows, self.nobs - self._lines_read)
read_len = read_lines * self.record_length
if read_len <= 0:
raise StopIteration
raw = self.filepath_or_buffer.read(read_len)
data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
df = pd.DataFrame(index=range(read_lines))
for j, x in enumerate(self.columns):
vec = data['s%d' % j]
ntype = self.fields[j]['ntype']
if ntype == "numeric":
vec = _handle_truncated_float_vec(
vec, self.fields[j]['field_length'])
miss = self._missing_double(vec)
v = _parse_float_vec(vec)
v[miss] = np.nan
elif self.fields[j]['ntype'] == 'char':
v = [y.rstrip() for y in vec]
if compat.PY3:
if self._encoding is not None:
v = [y.decode(self._encoding) for y in v]
df[x] = v
if self._index is None:
df.index = range(self._lines_read, self._lines_read + read_lines)
else:
df = df.set_index(self._index)
self._lines_read += read_lines
return df
Example 28
Project: BlenderPanda Source File: vbo.py
def mapVBO( vbo, access=0x88BA ): # GL_READ_WRITE
"""Map the given buffer into a numpy array...
Method taken from:
http://www.mail-archive.com/[email protected]/msg01161.html
This should be considered an *experimental* API,
it is not guaranteed to be available in future revisions
of this library!
Simplification to use ctypes cast from comment by 'sashimi' on my blog...
"""
from numpy import frombuffer
vp = vbo.implementation.glMapBuffer( vbo.target, access )
# TODO: obviously this is not the right way to do this should allow each format
# handler to convert the pointer in their own way...
vp_array = ctypes.cast(vp, ctypes.POINTER(ctypes.c_byte*vbo.size) )
# Note: we could have returned the raw ctypes.c_byte array instead...
array = frombuffer( vp_array, 'B' )
_cleaners[vbo] = weakref.ref( array, _cleaner( vbo ))
return array
Example 29
Project: async-deep-rl Source File: actor_learner.py
def apply_gradients_to_shared_memory_vars(self, grads):
#Flatten grads
offset = 0
for g in grads:
self.flat_grads[offset:offset + g.size] = g.reshape(-1)
offset += g.size
g = self.flat_grads
if self.optimizer_type == "adam" and self.optimizer_mode == "shared":
p = np.frombuffer(self.learning_vars.vars, ctypes.c_float)
p_size = self.learning_vars.size
m = np.frombuffer(self.opt_st.ms, ctypes.c_float)
v = np.frombuffer(self.opt_st.vs, ctypes.c_float)
T = self.global_step.value()
self.opt_st.lr.value = 1.0 * self.opt_st.lr.value * (1 - self.b2**T)**0.5 / (1 - self.b1**T)
apply_grads_adam(m, v, g, p, p_size, self.opt_st.lr.value, self.b1, self.b2, self.e)
else: #local or shared rmsprop/momentum
lr = self.decay_lr()
if (self.optimizer_mode == "local"):
m = self.opt_st
else: #shared
m = np.frombuffer(self.opt_st.vars, ctypes.c_float)
p = np.frombuffer(self.learning_vars.vars, ctypes.c_float)
p_size = self.learning_vars.size
_type = 0 if self.optimizer_type == "momentum" else 1
#print "BEFORE", "RMSPROP m", m[0], "GRAD", g[0], self.flat_grads[0], self.flat_grads2[0]
apply_grads_mom_rmsprop(m, g, p, p_size, _type, lr, self.alpha, self.e)
Example 30
Project: python-matlab-bridge Source File: pymatbridge.py
def decode_arr(data):
"""Extract a numpy array from a base64 buffer"""
data = data.encode('utf-8')
return frombuffer(base64.b64decode(data), float64)
Example 31
def get_data(f):
"""
Read all data from sparky file object.
"""
return np.frombuffer(f.read(), dtype='>f4')
Example 32
Project: siphon Source File: ncstream.py
def read_ncstream_data(fobj):
'Handle reading an NcStream v1 data block from a file-like object'
data = read_proto_object(fobj, stream.Data)
if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
log.debug('Reading string/opaque/vlen')
num_obj = read_var_int(fobj)
log.debug('Num objects: %d', num_obj)
blocks = [read_block(fobj) for _ in range(num_obj)]
if data.dataType == stream.STRING:
blocks = [b.decode('utf-8', errors='ignore') for b in blocks]
# Again endian isn't coded properly
dt = data_type_to_numpy(data.dataType).newbyteorder('>')
if data.vdata:
return np.array([np.frombuffer(b, dtype=dt) for b in blocks])
else:
return np.array(blocks, dtype=dt)
elif data.dataType in _dtypeLookup:
log.debug('Reading array data')
bin_data = read_block(fobj)
log.debug('Binary data: %s', bin_data)
# Hard code to big endian for now since it's not encoded correctly
dt = data_type_to_numpy(data.dataType).newbyteorder('>')
# Handle decompressing the bytes
if data.compress == stream.DEFLATE:
bin_data = zlib.decompress(bin_data)
assert len(bin_data) == data.uncompressedSize
elif data.compress != stream.NONE:
raise NotImplementedError('Compression type {0} not implemented!'.format(
data.compress))
# Turn bytes into an array
return reshape_array(data, np.frombuffer(bin_data, dtype=dt))
elif data.dataType == stream.STRUCTURE:
sd = read_proto_object(fobj, stream.StructureData)
# Make a datatype appropriate to the rows of struct
endian = '>' if data.bigend else '<'
dt = np.dtype([(endian, np.void, sd.rowLength)])
# Turn bytes into an array
return reshape_array(data, np.frombuffer(sd.data, dtype=dt))
elif data.dataType == stream.SEQUENCE:
log.debug('Reading sequence')
blocks = []
magic = read_magic(fobj)
while magic != MAGIC_VEND:
if magic == MAGIC_VDATA:
log.error('Bad magic for struct/seq data!')
blocks.append(read_proto_object(fobj, stream.StructureData))
magic = read_magic(fobj)
return data, blocks
else:
raise NotImplementedError("Don't know how to handle data type: {0}".format(
data.dataType))
Example 33
Project: imutils Source File: encodings.py
def base64_decode_array(a, dtype):
# decode and return the array; base64.decodestring was removed in
# Python 3.9, so use the modern alias decodebytes
return np.frombuffer(base64.decodebytes(a), dtype=dtype)
Example 34
def read_mnist_images(filename, dtype=None):
"""Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0.
"""
with gzip.open(filename, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError("Wrong magic number reading MNIST image file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape((number, 1, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
if dtype.kind == 'b':
# If the user wants Booleans, threshold at half the range.
array = array >= 128
elif dtype.kind == 'f':
# Otherwise, just convert.
array = array.astype(dtype)
array /= 255.
else:
raise ValueError("Unknown dtype to convert MNIST to")
return array
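A usage sketch, assuming the conventional gzipped MNIST file names (paths are illustrative):

images = read_mnist_images('train-images-idx3-ubyte.gz', dtype='float32')
labels = read_mnist_labels('train-labels-idx1-ubyte.gz')
assert images.shape[0] == labels.shape[0]  # one label per image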
Example 35
Project: LASIF Source File: visualization.py
def plot_raydensity(map_object, station_events, domain):
"""
Create a ray-density plot for all events and all stations.
This function is potentially expensive and will use all CPUs available.
Does require geographiclib to be installed.
"""
import ctypes as C
from lasif import rotations
from lasif.domain import RectangularSphericalSection
from lasif.tools.great_circle_binner import GreatCircleBinner
from lasif.utils import Point
import multiprocessing
import progressbar
from scipy.stats import scoreatpercentile
if not isinstance(domain, RectangularSphericalSection):
raise NotImplementedError(
"Raydensity currently only implemented for rectangular domains. "
"Should be easy to implement for other domains. Let me know.")
# Merge everything so that a list with coordinate pairs is created. This
# list is then distributed among all processors.
station_event_list = []
for event, stations in station_events:
if domain.rotation_angle_in_degree:
# Rotate point to the non-rotated domain.
e_point = Point(*rotations.rotate_lat_lon(
event["latitude"], event["longitude"], domain.rotation_axis,
-1.0 * domain.rotation_angle_in_degree))
else:
e_point = Point(event["latitude"], event["longitude"])
for station in stations.itervalues():
# Rotate point to the non-rotated domain if necessary.
if domain.rotation_angle_in_degree:
p = Point(*rotations.rotate_lat_lon(
station["latitude"], station["longitude"],
domain.rotation_axis,
-1.0 * domain.rotation_angle_in_degree))
else:
p = Point(station["latitude"], station["longitude"])
station_event_list.append((e_point, p))
circle_count = len(station_event_list)
# The granularity of the latitude/longitude discretization for the
# raypaths. Attempt to get a somewhat meaningful result in any case.
lat_lng_count = 1000
if circle_count < 1000:
lat_lng_count = 1000
if circle_count < 10000:
lat_lng_count = 2000
else:
lat_lng_count = 3000
cpu_count = multiprocessing.cpu_count()
def to_numpy(raw_array, dtype, shape):
data = np.frombuffer(raw_array.get_obj())
data.dtype = dtype
return data.reshape(shape)
print "\nLaunching %i greatcircle calculations on %i CPUs..." % \
(circle_count, cpu_count)
widgets = ["Progress: ", progressbar.Percentage(),
progressbar.Bar(), "", progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets,
maxval=circle_count).start()
def great_circle_binning(sta_evs, bin_data_buffer, bin_data_shape,
lock, counter):
new_bins = GreatCircleBinner(
domain.min_latitude, domain.max_latitude,
lat_lng_count, domain.min_longitude,
domain.max_longitude, lat_lng_count)
for event, station in sta_evs:
with lock:
counter.value += 1
if not counter.value % 25:
pbar.update(counter.value)
new_bins.add_greatcircle(event, station)
bin_data = to_numpy(bin_data_buffer, np.uint32, bin_data_shape)
with bin_data_buffer.get_lock():
bin_data += new_bins.bins
# Split the data in cpu_count parts.
def chunk(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
chunks = chunk(station_event_list, cpu_count)
# One instance that collects everything.
collected_bins = GreatCircleBinner(
domain.min_latitude, domain.max_latitude,
lat_lng_count, domain.min_longitude,
domain.max_longitude, lat_lng_count)
# Use a multiprocessing shared memory array and map it to a numpy view.
collected_bins_data = multiprocessing.Array(C.c_uint32,
collected_bins.bins.size)
collected_bins.bins = to_numpy(collected_bins_data, np.uint32,
collected_bins.bins.shape)
# Create, launch and join one process per CPU. Use a shared value as a
# counter and a lock to avoid race conditions.
processes = []
lock = multiprocessing.Lock()
counter = multiprocessing.Value("i", 0)
for _i in xrange(cpu_count):
processes.append(multiprocessing.Process(
target=great_circle_binning, args=(chunks[_i], collected_bins_data,
collected_bins.bins.shape, lock,
counter)))
for process in processes:
process.start()
for process in processes:
process.join()
pbar.finish()
stations = chain.from_iterable((
_i[1].values() for _i in station_events if _i[1]))
# Remove duplicates
stations = [(_i["latitude"], _i["longitude"]) for _i in stations]
stations = set(stations)
title = "%i Events, %i unique raypaths, "\
"%i unique stations" % (len(station_events), circle_count,
len(stations))
plt.title(title, size="xx-large")
data = collected_bins.bins.transpose()
if data.max() >= 10:
data = np.log10(np.clip(data, a_min=0.5, a_max=data.max()))
data[data >= 0.0] += 0.1
data[data < 0.0] = 0.0
max_val = scoreatpercentile(data.ravel(), 99)
else:
max_val = data.max()
cmap = cm.get_cmap("gist_heat")
cmap._init()
cmap._lut[:120, -1] = np.linspace(0, 1.0, 120) ** 2
# Slightly change the appearance of the map so it suits the rays.
map_object.fillcontinents(color='#dddddd', lake_color='#dddddd', zorder=0)
lngs, lats = collected_bins.coordinates
# Rotate back if necessary!
if domain.rotation_angle_in_degree:
for lat, lng in zip(lats, lngs):
lat[:], lng[:] = rotations.rotate_lat_lon(
lat, lng, domain.rotation_axis,
domain.rotation_angle_in_degree)
ln, la = map_object(lngs, lats)
map_object.pcolormesh(ln, la, data, cmap=cmap, vmin=0, vmax=max_val)
# Draw the coastlines so they appear over the rays. Otherwise things are
# sometimes hard to see.
map_object.drawcoastlines()
map_object.drawcountries(linewidth=0.2)
Example 36
def readData(self,chan=1):
"""Read scope channel and return numpy array"""
rawdata = self._readRawData(chan)
return np.frombuffer(rawdata, dtype='h', offset=WAV_PREAMBLE_LENGTH)
Example 37
def array(self, shape, dtype):
dtype = numpy_support.as_dtype(dtype)
# Dynamic shared memory is requested with size 0 - this all shares the
# same underlying memory
if shape == 0:
# Count must be the maximum number of whole elements that fit in the
# buffer (Numpy complains if the buffer is not a multiple of the
# element size)
count = self._dynshared_size // dtype.itemsize
return np.frombuffer(self._dynshared.data, dtype=dtype, count=count)
# Otherwise, identify allocations by source file and line number
# We pass the reference frame explicitly to work around
# http://bugs.python.org/issue25108
stack = traceback.extract_stack(sys._getframe())
caller = stack[-2][0:2]
res = self._allocations.get(caller)
if res is None:
res = np.empty(shape, dtype)
self._allocations[caller] = res
return res
Example 38
Project: artiq Source File: pyon.py
def _npscalar(ty, data):
return numpy.frombuffer(base64.b64decode(data), dtype=ty)[0]
Example 39
Project: tractconverter Source File: trk.py
def readBinaryBytes(f, nbBytes, dtype):
buff = f.read(nbBytes * dtype.itemsize)
return np.frombuffer(buff, dtype=dtype)
Example 40
Project: pyusbtmc Source File: realtime_chart.py
def getChannelData(channel):
if (channel==1):
channelName = "CHAN1"
elif (channel==2):
channelName = "CHAN2"
else:
print "Invalid channel!"
return
# Grab the data from channel 1
test.write(":WAV:POIN:MODE NOR")
test.write(":WAV:DATA? " + channelName)
rawdata = test.read(9000)
data = numpy.frombuffer(rawdata, 'B')
# Get the voltage scale
test.write(":" + channelName + ":SCAL?")
voltscale = float(test.read(20))
# And the voltage offset
test.write(":" + channelName + ":OFFS?")
voltoffset = float(test.read(20))
# Walk through the data, and map it to actual voltages
# First invert the data (ya rly)
data = data * -1 + 255
# Now, we know from experimentation that the scope display range is actually
# 30-229. So shift by 130 - the voltage offset in counts, then scale to
# get the actual voltage.
data = (data - 130.0 - voltoffset/voltscale*25) / 25 * voltscale
# Get the timescale
test.write(":TIM:SCAL?")
timescale = float(test.read(20))
# Get the timescale offset
test.write(":TIM:OFFS?")
timeoffset = float(test.read(20))
# Now, generate a time axis. The scope display range is 0-600, with 300 being
# time zero.
time = numpy.arange(-300.0/50*timescale, 300.0/50*timescale, timescale/50.0)
# If we generated too many points due to overflow, crop the length of time.
if (time.size > data.size):
time = time[0:600:1]
# See if we should use a different time axis
# if (time[599] < 1e-3):
# time = time * 1e6
# tUnit = "uS"
# elif (time[599] < 1):
# time = time * 1e3
# tUnit = "mS"
# else:
# tUnit = "S"
return [time, data]
Example 41
def loads(data):
''' Load signal from memory buffer. '''
x = np.frombuffer(data, dtype='int16')
x = x / scaling
return x
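A sketch of the inverse operation, assuming the module-level scaling constant used by loads():

import numpy as np

def dumps(x):
    # hypothetical counterpart: quantize the signal back to int16 bytes
    return (np.asarray(x) * scaling).astype('int16').tobytes()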
Example 42
Project: pims Source File: cine.py
def _get_frame(self, number):
with FileLocker(self.file_lock):
# get basic information about the frame we want
image_start = self.image_locations[number]
annotation_size = self.unpack(DWORD, image_start)
# this is not used, but is needed to advance the point in the file
annotation = self.unpack('%db' % (annotation_size - 8))
image_size = self.unpack(DWORD)
cfa = self.cfa
compression = self.compression
# sort out data type looking at the cached version
data_type = self._data_type
# actual bit per pixel
actual_bits = image_size * 8 // (self._pixel_count)
# so this seems wrong, as 10 or 12 bits won't fit in 'u1'
# but I (TAC) may not understand and don't have a packed file
# (which the docs seem to imply don't exist) to test on so
# I am leaving it. good luck.
if actual_bits in (10, 12):
data_type = 'u1'
# move the file to the right point in the file
self.f.seek(image_start + annotation_size)
# suck the data out of the file and shove into linear
# numpy array
frame = frombuffer(self.f.read(image_size), data_type)
# if mono-camera
if cfa == CFA_NONE:
if compression != 0:
raise ValueError("Can not deal with compressed files\n" +
"compression level: " +
"{}".format(compression))
# we are working with a monochrome camera
# un-pack packed data
if (actual_bits == 10):
frame = _ten2sixteen(frame)
elif (actual_bits == 12):
frame = _twelve2sixteen(frame)
elif (actual_bits % 8):
raise ValueError('Data should be byte aligned, ' +
'or 10 or 12 bit packed (appears to be' +
' %dbits/pixel?!)' % actual_bits)
# re-shape to an array
# flip the rows
# and the cast to proper type
frame = frame.reshape(self._height,
self._width)[::-1].astype(self._dtype)
if actual_bits in (10, 12):
frame = frame[::-1, :]
# Don't know why it works this way, but it does...
# else, some sort of color layout
else:
if compression == 0:
# and re-order so color is RGB (naively saves as BGR)
frame = frame.reshape(self._height,
self._width,
3)[::-1, :, ::-1].astype(self._dtype)
elif compression == 2:
raise ValueError("Can not process un-interpolated movies")
else:
raise ValueError("Should never hit this, " +
"you have an un-docuemented file\n" +
"compression level: " +
"{}".format(compression))
return frame
Example 43
def tst_basic(self,buffer,expected,kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs),expected)
Example 44
def _readWaveform(self, chan=1):
'''Read full waveform and header from the scope to produced a scaled waveform'''
buf = self._readRawWaveform(chan)
n_desc = unpackLong(buf,36)
n_data_bytes = unpackLong(buf,60)
name = buf[76:92]
n_data = unpackLong(buf,116)
#first_valid = unpackLong(buf,124)
#last_valid = unpackLong(buf,128)
vert_gain = unpackFloat(buf,156)
vert_offset = unpackFloat(buf,160)
horiz_interval= unpackFloat(buf,176)
horiz_offset = unpackDouble(buf,180)
data = np.frombuffer(buf, dtype='>h', count = n_data - 2, offset=n_desc)
self.dt = horiz_interval
self.t0 = horiz_offset
self.size = n_data - 2
return (data, vert_gain, vert_offset)
Example 45
def get_frame_2D(self, **coords):
"""Actual reader, returns image as 2D numpy array and metadata as
dict.
"""
_coords = {'t': 0, 'c': 0, 'z': 0}
_coords.update(coords)
if self.isRGB:
_coords['c'] = 0
j = self.rdr.getIndex(int(_coords['z']), int(_coords['c']),
int(_coords['t']))
if self.read_mode == 'jpype':
im = np.frombuffer(self.rdr.openBytes(j)[:],
dtype=self._pixel_type)
elif self.read_mode == 'stringbuffer':
im = self._jbytearr_stringbuffer(self.rdr.openBytes(j))
elif self.read_mode == 'javacasting':
im = self._jbytearr_javacasting(self.rdr.openBytes(j))
im.shape = self._frame_shape_2D
im = im.astype(self._pixel_type, copy=False)
metadata = {'frame': j,
'series': self._series}
if self.colors is not None:
metadata['colors'] = self.colors
if self.calibration is not None:
metadata['mpp'] = self.calibration
if self.calibrationZ is not None:
metadata['mppZ'] = self.calibrationZ
metadata.update(coords)
for key, method in self.frame_metadata.items():
metadata[key] = getattr(self.metadata, method)(self._series, j)
return Frame(im, metadata=metadata)
Example 46
Project: reverse-geocoder Source File: cKDTree_MP.py
def shmem_as_nparray(shmem_array):
"""
Function that converts a shared memory array (multiprocessing.Array) to a numpy array
"""
return np.frombuffer(shmem_array.get_obj())
Example 47
@property
def corpus(self):
if self._read_globals:
return np.frombuffer(_corpus, self.dtype)
return self._corpus_local
Example 48
def getData(self):
self.rigolScope.write(":WAV:POIN:MODE NOR")
self.rigolScope.write(":WAV:DATA? " + self.channelName)
rawdata = self.rigolScope.read(9000)
data = numpy.frombuffer(rawdata, 'B')
# Walk through the data, and map it to actual voltages
# First invert the data
data = data * -1 + 255
voltscale = self.getVoltageScale();
voltoffset = self.getVoltageOffset();
print "Offset: ", voltoffset/voltscale*25
# Now, we know from experimentation that the scope display range is actually
# 30-229. So shift by 130 - the voltage offset in counts, then scale to
# get the actual voltage.
data = (data - 130.0 - voltoffset/voltscale*25) / 25 * voltscale
#Sets the voltage offset
data = data + voltoffset/voltscale
data = data[0:600:1]
return data
Example 49
Project: thunder Source File: readers.py
def frombinary(path, ext='bin', conf='conf.json', dtype=None, shape=None, skip=0, index=None, labels=None, engine=None, credentials=None):
"""
Load series data from flat binary files.
Parameters
----------
path : string URI or local filesystem path
Directory to load from, can be a URI string with scheme
(e.g. 'file://', 's3n://', or 'gs://'), or a single file,
or a directory, or a directory with a single wildcard character.
ext : str, optional, default = 'bin'
Optional file extension specifier.
conf : str, optional, default = 'conf.json'
Name of conf file with type and size information.
dtype : dtype or dtype specifier, default 'float64'
Numerical type to use for data after converting from text.
shape : tuple or list, optional, default = None
Shape of data if known, will be inferred otherwise.
skip : int, optional, default = 0
Number of items in each record to skip.
index : array, optional, default = None
Index for records, if not provided will use (0, 1, ...)
labels : array, optional, default = None
Labels for records. If provided, should have shape of shape[:-1].
engine : object, default = None
Computational engine (e.g. a SparkContext for Spark)
credentials : dict, default = None
Credentials for remote storage (e.g. S3) in the form {access: ***, secret: ***}
"""
shape, dtype = _binaryconfig(path, conf, dtype, shape, credentials)
from thunder.readers import normalize_scheme, get_parallel_reader
path = normalize_scheme(path, ext)
from numpy import dtype as dtype_func
nelements = shape[-1] + skip
recordsize = dtype_func(dtype).itemsize * nelements
if spark and isinstance(engine, spark):
lines = engine.binaryRecords(path, recordsize)
raw = lines.map(lambda x: frombuffer(buffer(x), offset=0, count=nelements, dtype=dtype)[skip:])
def switch(record):
ary, idx = record
return (idx,), ary
rdd = raw.zipWithIndex().map(switch)
if shape and len(shape) > 2:
expand = lambda k: unravel_index(k[0], shape[0:-1])
rdd = rdd.map(lambda kv: (expand(kv[0]), kv[1]))
if not index:
index = arange(shape[-1])
return fromrdd(rdd, dtype=dtype, shape=shape, index=index, ordered=True)
else:
reader = get_parallel_reader(path)(engine, credentials=credentials)
data = reader.read(path, ext=ext)
values = []
for record in data:
buf = record[1]
offset = 0
while offset < len(buf):
v = frombuffer(buffer(buf), offset=offset, count=nelements, dtype=dtype)
values.append(v[skip:])
offset += recordsize
if not len(values) == prod(shape[0:-1]):
raise ValueError('Unexpected shape, got %g records but expected %g'
% (len(values), prod(shape[0:-1])))
values = asarray(values, dtype=dtype)
if shape:
values = values.reshape(shape)
return fromarray(values, index=index, labels=labels)
Example 50
def load_array(self, data, array_name):
# Array 'double' is the same as python 'float' (the default for numpy arrays), illogically
self.data[array_name] = np.frombuffer(Array('d', data.ravel()).get_obj())
self.data[array_name].shape = data.shape # will raise an error if array cannot be reshaped without copying
logger.info("%s: Loaded array %s", self.name, array_name)