numpy.frombuffer

Here are examples of the Python API numpy.frombuffer taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

571 Examples
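
Before the project examples, here is a minimal, self-contained sketch of the call itself (the byte string and variable names are illustrative): np.frombuffer reinterprets an existing bytes-like object as a one-dimensional array without copying, which is why the examples below routinely chain .reshape(...) onto it and call .copy() when a writable result is needed.

import numpy as np

# Illustrative data: four little-endian uint16 values packed as raw bytes.
raw = b"\x01\x00\x02\x00\x03\x00\x04\x00"

# Reinterpret the buffer without copying. The array shares memory with the
# immutable bytes object, so it is read-only.
arr = np.frombuffer(raw, dtype="<u2")
print(arr)  # [1 2 3 4]

# Copy to obtain a writable array, as several of the examples below do.
writable = arr.copy()
writable[0] = 99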

3 Source : alpha.py
with MIT License
from 36base

def load_image(path: str, flag=cv2.IMREAD_COLOR):
    with open(path, "rb") as f:
        data = np.frombuffer(f.read(), dtype="uint8")
    return cv2.imdecode(data, flag)


def save_image(img: np.ndarray, path: str):

3 Source : main.py
with GNU General Public License v3.0
from 4ndr3aR

	def frame_from_buf(self):
		w, h = self.resolution
		frame = np.frombuffer(self._camera._buffer.tostring(), 'uint8').reshape((h + h // 2, w))	# TODO: try to understand why the frame size has nothing to do with the original resolution
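		# NV21 packs a full-resolution Y plane (h rows) followed by a half-resolution interleaved VU plane (h // 2 rows), so the buffer reshapes to (h + h // 2, w) rather than (h, w); see the TODO above.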
		print(f'{frame.shape = }')
		frame_bgr = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_NV21)
		return frame_bgr

	def frame_to_screen(self, frame):

3 Source : chunk_compressed_chunk.py
with Mozilla Public License 2.0
from activeloopai

    def read_sample(self, local_index: int, cast: bool = True, copy: bool = False):
        partial_sample_tile = self._get_partial_sample_tile(as_bytes=False)
        if partial_sample_tile is not None:
            return partial_sample_tile
        if self.is_image_compression:
            return self.decompressed_samples[local_index]  # type: ignore

        shape = self.shapes_encoder[local_index]
        decompressed = memoryview(self.decompressed_bytes)  # type: ignore
        if not self.byte_positions_encoder.is_empty():
            sb, eb = self.byte_positions_encoder[local_index]
            decompressed = decompressed[sb:eb]
        if self.is_text_like:
            return bytes_to_text(decompressed, self.htype)
        return np.frombuffer(decompressed, dtype=self.dtype).reshape(shape)

    def update_sample(self, local_index: int, new_sample: InputSample):

3 Source : uncompressed_chunk.py
with Mozilla Public License 2.0
from activeloopai

    def read_sample(self, local_index: int, cast: bool = True, copy: bool = False):
        partial_sample_tile = self._get_partial_sample_tile()
        if partial_sample_tile is not None:
            return partial_sample_tile
        buffer = self.memoryview_data
        shape = self.shapes_encoder[local_index]
        if not self.byte_positions_encoder.is_empty():
            sb, eb = self.byte_positions_encoder[local_index]
            buffer = buffer[sb:eb]
        if self.is_text_like:
            buffer = bytes(buffer)
            return bytes_to_text(buffer, self.htype)
        buffer = bytes(buffer) if copy else buffer
        return np.frombuffer(buffer, dtype=self.dtype).reshape(shape)

    def update_sample(self, local_index: int, sample: InputSample):

3 Source : serialize.py
with Mozilla Public License 2.0
from activeloopai

def deserialize_sequence_encoder(
    byts: Union[bytes, memoryview]
) -> Tuple[str, np.ndarray]:
    byts = memoryview(byts)
    len_version = byts[0]
    version = str(byts[1 : 1 + len_version], "ascii")
    enc = (
        np.frombuffer(byts[1 + len_version :], dtype=hub.constants.ENCODING_DTYPE)
        .reshape(-1, 3)
        .copy()
    )
    return version, enc


def check_sample_shape(shape, num_dims):

3 Source : json.py
with Mozilla Public License 2.0
from activeloopai

    def object_hook(self, obj):
        hub_custom_type = obj.get("_hub_custom_type")
        if hub_custom_type == "ndarray":
            return np.frombuffer(
                base64.b64decode(obj["data"]), dtype=obj["dtype"]
            ).reshape(obj["shape"])
        elif hub_custom_type == "Sample":
            return Sample(
                buffer=base64.b64decode(obj["data"]), compression=obj["compression"]
            )
        return obj

3 Source : shmem_vec_env.py
with MIT License
from AcutronicRobotics

    def _decode_obses(self, obs):
        result = {}
        for k in self.obs_keys:

            bufs = [b[k] for b in self.obs_bufs]
            o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs]
            result[k] = np.array(o)
        return dict_to_obs(result)


def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):

3 Source : test_regression.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_string_argsort_with_zeros(self):
        # Check argsort for strings containing zeros.
        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
        assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
        assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))

    def test_string_sort_with_zeros(self):

3 Source : test_regression.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_string_sort_with_zeros(self):
        # Check sort for strings containing zeros.
        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
        y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
        assert_array_equal(np.sort(x, kind="q"), y)

    def test_copy_detection_zero_dim(self):

3 Source : netcdf.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _read(self):
        # Check magic bytes and version
        magic = self.fp.read(3)
        if not magic == b'CDF':
            raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                            self.filename)
        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]

        # Read file headers and set data.
        self._read_numrecs()
        self._read_dim_array()
        self._read_gatt_array()
        self._read_var_array()

    def _read_numrecs(self):

3 Source : aucell.py
with GNU General Public License v3.0
from aertslab

def _enrichment(shared_ro_memory_array, modules, genes, cells, auc_threshold, auc_mtx, offset):
    # The rankings dataframe is properly reconstructed (checked this).
    df_rnk = pd.DataFrame(
        data=np.frombuffer(shared_ro_memory_array, dtype=DTYPE).reshape(len(cells), len(genes)),
        columns=genes,
        index=cells,
    )
    # To avoid additional memory burden, the resulting AUCs are immediately stored in the output sync. array.
    result_mtx = np.frombuffer(auc_mtx.get_obj(), dtype='d')
    inc = len(cells)
    for idx, module in enumerate(modules):
        result_mtx[offset + (idx * inc) : offset + ((idx + 1) * inc)] = enrichment4cells(
            df_rnk, module, auc_threshold
        ).values.flatten(order="C")


def aucell4r(

3 Source : frame_stack.py
with MIT License
from AI4Finance-Foundation

    def __array__(self, dtype=None):
        if self.lz4_compress:
            from lz4.block import decompress
            frames = [np.frombuffer(decompress(frame), dtype=self.dtype).reshape(self.shape) for frame in self._frames]
        else:
            frames = self._frames
        out = np.stack(frames, axis=0)
        if dtype is not None:
            out = out.astype(dtype)
        return out

    def __len__(self):

3 Source : IMAQ.py
with MIT License
from AlexShkarin

    def _parse_buffer(self, buffer, dim=None, bpp=None, nframes=1):
        r,c=dim or self._get_data_dimensions_rc()
        bpp=bpp or self.get_int_value("BYTESPERPIXEL",1)
        if len(buffer)!=nframes*r*c*bpp:
            raise ValueError("wrong buffer size: expected {}x{}x{}x{}={}, got {}".format(nframes,r,c,bpp,nframes*r*c*bpp,len(buffer)))
        dt="  <  u{}".format(bpp)
        return np.frombuffer(buffer,dtype=dt).reshape((nframes,r,c))
    def _read_multiple_images_raw(self, rng=None, peek=False, missing_frame="skip"):

3 Source : PCO_SC2.py
with MIT License
from AlexShkarin

    def _read_next_buffer(self, npx=None):
        if self._buffers is None or self._next_read_buffer>=self._next_wait_buffer:
            return None
        buff=self._buffers[self._next_read_buffer%len(self._buffers)]
        if npx is None:
            npx=len(buff.buff)//2
        frame=np.frombuffer(buff.buff,dtype="<u2",count=npx).copy()
        metadata=buff.buff[-buff.metadata_size:] if buff.metadata_size>0 else None
        self._next_read_buffer+=1
        return frame,metadata
    def _wait_for_next_buffer(self, timeout=None):

3 Source : strpack.py
with MIT License
from AlexShkarin

def unpack_numpy_u12bit(buffer, byteorder="<", count=-1):
    # Every 3 packed bytes encode two 12-bit values.
    u8count=count*3//2 if count>0 else -1
    data=np.frombuffer(buffer,dtype="u1",count=u8count)
    fst_uint8,mid_uint8,lst_uint8=np.reshape(data,(len(data)//3,3)).astype(np.uint16).T
    if byteorder==">":
        fst_uint12=(fst_uint8<<4)+(mid_uint8>>4)
        snd_uint12=((mid_uint8%16)<<8)+lst_uint8
    else:
        fst_uint12=fst_uint8+((mid_uint8>>4)<<8)
        snd_uint12=(mid_uint8%16)+(lst_uint8<<4)
    return np.concatenate((fst_uint12[:,None],snd_uint12[:,None]),axis=1).flatten()

3 Source : metro.py
with MIT License
from amspector100

    def _key2bool(self, key):
        """
        Takes a key from dp dicts
        and turns it back into a boolean array.
        """
        return np.frombuffer(key, dtype=bool)

    def _create_Xtemp(self, x_flags, j):

3 Source : client.py
with MIT License
from APPFL

    def get_tensor_record(self, name, round_number):
        request = TensorRequest(
            header=self.header, name=name, round_number=round_number
        )
        self.logger.debug(f"[Client ID: {self.client_id: 03}] Requested Tensor record (name,round)=(%s,%d)", name, round_number)
        start = time.time()
        response = self.stub.GetTensorRecord(request, metadata=self.metadata)
        end = time.time()
        self.logger.debug(f"[Client ID: {self.client_id: 03}] Received Tensor record (name,round)=(%s,%d)", name, round_number)
        if round_number > 1:
            self.time_get_tensor += end - start
        shape = tuple(response.data_shape)                
        flat = np.frombuffer(response.data_bytes, dtype=eval(response.data_dtype))        
        nparray = np.reshape(flat, newshape=shape, order="C")
 
        return nparray

    def get_weight(self, training_size):

3 Source : indexed_dataset.py
with Apache License 2.0
from Ascend

    def __getitem__(self, i):
        ptr, size = self._index[i]
        np_array = np.frombuffer(
            self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
        )
        if self._index.dtype != np.int32:
            np_array = np_array.astype(np.int32)

        return torch.from_numpy(np_array)

    @property

3 Source : run_demo_server.py
with Apache License 2.0
from Ascend

def index_post():
    global predictor
    import io
    bio = io.BytesIO()
    request.files['image'].save(bio)
    img = cv2.imdecode(np.frombuffer(bio.getvalue(), dtype='uint8'), 1)
    rst = get_predictor(checkpoint_path)(img)

    save_result(img, rst)
    return render_template('index.html', session_id=rst['session_id'])


def main():

3 Source : plot.py
with Apache License 2.0
from ashutosh1919

def plot2opencv(plt):
    """Convert a pyplot instance to image"""

    buf = _io.BytesIO()
    plt.axis('off')
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    buf.seek(0)
    rawbuf = np.frombuffer(buf.getvalue(), dtype='uint8')
    im = cv2.imdecode(rawbuf, cv2.IMREAD_COLOR)
    buf.close()
    return im


@pil_only

3 Source : indexed_dataset.py
with MIT License
from aws-health-ai

    def __getitem__(self, i):
        ptr, size = self._index[i]
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        if self._index.dtype != np.int64:
            np_array = np_array.astype(np.int64)

        return torch.from_numpy(np_array)

    @property

3 Source : fasta2dic.py
with GNU General Public License v3.0
from balabanmetin

def fasta2dic(ref_fp, prot_flag, mask_flag):
    refs = {}
    with open(ref_fp) as f:
        mask_translation = str.maketrans('abcdefghijklmnopqrstuvwxyz', '-' * 26)

        if prot_flag:
            invalid_translation = str.maketrans('BJOUXZ', '-' * 6)
        else:
            invalid_translation = str.maketrans('BDEFHIJKLMNOPQRSUVWXYZ', '-' * 22)

        def makeupper(s):
            if mask_flag:
                return s.translate(mask_translation)
            else:
                return s.upper()

        for name, seq, qual in readfq(f):
            refs[name] = np.frombuffer(makeupper(seq).translate(invalid_translation).encode(), dtype='S1')
    return refs

3 Source : thread_iterator.py
with GNU General Public License v3.0
from Bartzi

    def __init__(self, array, mem, offset):
        self.shape = array.shape
        self.dtype = array.dtype
        self.nbytes = array.nbytes
        self.size = array.size
        self.offset = offset
        total = self.offset + self.nbytes
        if total > len(mem):
            raise ValueError(
                'Shared memory size is too small. expect:{}, actual:{}'.format(
                    total, len(mem)))
        target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
        target[...] = array.ravel()

    def unpack(self, mem):

3 Source : thread_iterator.py
with GNU General Public License v3.0
from Bartzi

    def unpack(self, mem):
        ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
        ret = ret.reshape(self.shape).copy()
        return ret


def _measure(data):

3 Source : sensors.py
with MIT License
from BeamNG

    def decode_image(self, buffer, width, height, channels, dtype=np.uint8):
        img_d = base64.b64decode(buffer)
        img_d = np.frombuffer(img_d, dtype=dtype)
        if channels > 1:
            img_d = img_d.reshape(height, width, channels)
        else:
            img_d = img_d.reshape(height, width)
        return Image.fromarray(img_d)

    def decode_b64_response(self, resp):

3 Source : libtest.py
with MIT License
from beefoo

def buf_to_float(x, n_bytes=2, dtype=np.float32):
    # Invert the scale of the data
    scale = 1./float(1 << ((8 * n_bytes) - 1))

    # Construct the format string
    fmt = '<i{:d}'.format(n_bytes)

    # Rescale and format the data buffer
    return scale * np.frombuffer(x, fmt).astype(dtype)

def fix_length(data, size, axis=-1, **kwargs):

3 Source : __init__.py
with MIT License
from bennomeier

    def unpack(self, pos, formatSpecifier, length):
        """ a wrapper that reads binary data
        in a given position in the file, with correct endianness, and returns the parsed
        data as a tuple, according to the format specifier. """
        start = pos + self.posWAVEDESC
        x = np.frombuffer(self.data[start:start + length], self.endianness + formatSpecifier, count=1)[0]
        return x

    def parseString(self, pos, length=16):

3 Source : tf.py
with Apache License 2.0
from blue-oil

    def get_data(self):
        """Get data in numpy format."""
        if self.is_placeholder:
            raise ValueError(
                f'{self.name} is a placeholder, which doesn\'t have any data...')

        # convert tensor content to numpy
        if self.tensor.tensor_content:
            dtype = type(self)._TF_TO_NP[self.tensor.dtype]
            return np.frombuffer(self.tensor.tensor_content, dtype=dtype).copy().reshape(self.get_shape())
        else:
            dtype = type(self)._TF_TO_NP[self.tensor.dtype]
            if self.tensor.dtype == 3:
                return np.asarray(self.tensor.int_val, dtype=dtype).reshape(self.get_shape())
            if self.tensor.dtype == 1:
                return np.asarray(self.tensor.float_val, dtype=dtype).reshape(self.get_shape())

    def get_shape(self) -> List[str]:

3 Source : M4i.py
with GNU General Public License v3.0
from BraiNEdarwin

    def _transfer_buffer_numpy(self, memsize, numch):
        """ Transfer buffer to numpy array """
        # setup software buffer
        buffer_size = ct.c_int16 * memsize * numch
        data_buffer = (buffer_size)()
        data_pointer = ct.cast(data_buffer, ct.c_void_p)

        # data acquisition
        self._def_transfer64bit(
            pyspcm.SPCM_BUF_DATA, pyspcm.SPCM_DIR_CARDTOPC, 0, data_pointer, 0, 2 * memsize * numch)
        self.general_command(pyspcm.M2CMD_DATA_STARTDMA |
                             pyspcm.M2CMD_DATA_WAITDMA)

        # convert buffer to numpy array
        data = ct.cast(data_pointer, ct.POINTER(buffer_size))
        output = np.frombuffer(data.contents, dtype=ct.c_int16)
        return output

    def retrieve_data(self, trace):

3 Source : features.py
with Apache License 2.0
from BreastGAN

def example_to_numpy(example, feature_name, dtype, shape):
    check_feature(example, feature_name)
    arr_string = example.features.feature[feature_name].bytes_list.value[0]
    arr_1d = np.frombuffer(arr_string, dtype=dtype)
    return np.reshape(arr_1d, shape)


def numpy_to_feature(arr, dtype):

3 Source : io.py
with Apache License 2.0
from bsc-wdc

def _read_from_buffer(data, dtype, shape, block_size, out_blocks):
    arr = np.frombuffer(data, dtype=dtype)
    arr = arr.reshape((-1, shape))

    for i in range(len(out_blocks)):
        out_blocks[i] = arr[:, i * block_size:(i + 1) * block_size]


@constraint(computing_units="${ComputingUnits}")

3 Source : kaldi_io.py
with Apache License 2.0
from by2101

def _read_vec_flt_binary(fd):
  header = fd.read(3).decode()
  if header == 'FV ' : sample_size = 4 # floats
  elif header == 'DV ' : sample_size = 8 # doubles
  else : raise UnknownVectorHeader("The header contained '%s'" % header)
  assert (sample_size > 0)
  # Dimension,
  assert (fd.read(1).decode() == '\4'); # int-size
  vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
  # Read whole vector,
  buf = fd.read(vec_size * sample_size)
  if sample_size == 4 : ans = np.frombuffer(buf, dtype='float32')
  elif sample_size == 8 : ans = np.frombuffer(buf, dtype='float64')
  else : raise BadSampleSize
  return ans


# Writing,
def write_vec_flt(file_or_fd, v, key=''):

3 Source : pickler.py
with Apache License 2.0
from bytedance

    def _custom_c2s_loads(cls, func, args):
        keys = marshal.loads(args[b'keys'])
        cursors = marshal.loads(args[b'cursors'])
        samples = {}
        for k in keys:
            _k = k.encode()
            shape, dtype = pickle.loads(args[_k + b'_info'])
            array = np.frombuffer(args[_k + b'_bytes'], dtype=dtype)
            array = array.reshape(shape)
            samples[k] = array
        args = keys, cursors, samples
        return args, {}


class ReadHeadPickler(Pickler):

3 Source : pickler.py
with Apache License 2.0
from bytedance

    def _custom_s2c_loads(cls, func, returns):
        keys = marshal.loads(returns[b'keys'])
        samples = {}
        for k in keys:
            _k = k.encode()
            shape, dtype = pickle.loads(returns[_k + b'_info'])
            array = np.frombuffer(returns[_k + b'_bytes'], dtype=dtype)
            array = array.reshape(shape)
            samples[k] = array
        return samples

3 Source : tcp_server_client.py
with MIT License
from CEMES-CNRS

    def get_scalar(self):
        """

        Parameters
        ----------
        socket

        Returns
        -------

        """
        data_type = self.get_string()
        data_len = self.get_int()
        data_bytes = self.check_received_length(data_len)

        data = np.frombuffer(data_bytes, dtype=data_type)[0]
        return data

    def get_array(self):

3 Source : tcp_server_client.py
with MIT License
from CEMES-CNRS

    def get_array(self):
        """get 1D or 2D arrays"""
        data_type = self.get_string()
        data_len = self.get_int()
        shape_len = self.get_int()
        shape = []
        for ind in range(shape_len):
            shape.append(self.get_int())
        data_bytes = self.check_received_length(data_len)
        data = np.frombuffer(data_bytes, dtype=data_type)
        data = data.reshape(tuple(shape))
        return data

    def send_array(self, data_array):

3 Source : htmm.py
with MIT License
from Charleo85

    def infer(self, iters=None):
        if iters is None: iters = self.iters_
        shared_arr = None
        if self.num_workers_ > 1:
            shared_arr = RawArray('d', self.p_dwzpsi_.flatten())
            tmp = np.frombuffer(shared_arr)
            self.p_dwzpsi_ = tmp.reshape(self.p_dwzpsi_shape_)

        for epoch in tqdm(range(iters)):
            self.e_step(shared_arr)
            self.m_step()
            tqdm.write("iteration: %d, loglikelihood: %f" % (epoch, self.loglik_))

        if self.num_workers_ > 1:
            self.p_dwzpsi_ = np.copy(self.p_dwzpsi_)


    def map_topic_estimate(self, idx):

3 Source : htmm.py
with MIT License
from Charleo85

    def e_step_chunk(self, pid, q, shared_arr):
        chunk_len = len(self.docs_) // (self.num_workers_ - 1)
        start_idx = pid * chunk_len
        end_idx = min(len(self.docs_), (pid+1) * chunk_len)

        tmp = np.frombuffer(shared_arr)
        p_dwzpsi_ptr = tmp.reshape(self.p_dwzpsi_shape_)

        ret = 0.0
        for d in range(start_idx, end_idx):
            ret += self.e_step_in_single_doc(d, p_dwzpsi_ptr)
        q.put(ret)


    def e_step_in_single_doc(self, idx, p_dwzpsi_ptr):

3 Source : htmm.py
with MIT License
from Charleo85

    def czw_chunk(self, pid, shared_arr):
        chunk_len = len(self.docs_) // (self.num_workers_ - 1)
        start_idx = pid * chunk_len
        end_idx = min(len(self.docs_), (pid+1) * chunk_len)

        tmp = np.frombuffer(shared_arr.get_obj())
        czw = tmp.reshape((self.topics_, self.words_))

        for d in range(start_idx, end_idx):
            for i in range(self.docs_[d].num_sentences):
                sen = self.docs_[d].sentence_list[i]
                for w in sen.word_list:
                    for z in range(self.topics_):
                        czw[z, w] += self.p_dwzpsi_[d][i][z] + self.p_dwzpsi_[d][i][z+self.topics_]


    def find_theta(self):

3 Source : indexed_dataset.py
with MIT License
from chenyangh

    def __getitem__(self, i):
        ptr, size = self._index[i]
        np_array = np.frombuffer(
            self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
        )
        if self._index.dtype != np.int64:
            np_array = np_array.astype(np.int64)

        return torch.from_numpy(np_array)

    @property

3 Source : decoder.py
with MIT License
from chris-hld

def _init_shared_array(shared_array_base, shared_array_shape):
    """Make 'shared_array' available to child processes."""
    global shared_array
    shared_array = np.frombuffer(shared_array_base.get_obj())
    shared_array = shared_array.reshape(shared_array_shape)
# <-- Parallel worker stuff

3 Source : pyro_dataset.py
with GNU General Public License v3.0
from christianpayer

    def queue_data_to_numpy_data(self, queue_data):
        """
        Convert queue data that was serialized through pyro to numpy data.
        :param queue_data: Queue data.
        :return: Numpy array.
        """
        if self.compression_type == 'lz4':
            return np.frombuffer(lz4.frame.decompress(queue_data[0]), dtype=queue_data[2]).reshape(queue_data[1])
        elif self.compression_type == 'zfp':
            return zfpy.decompress_numpy(queue_data)
        return np.frombuffer(queue_data[0], dtype=queue_data[2]).reshape(queue_data[1])

    def queue_entry_to_dataset_entry(self, queue_entry):

3 Source : kaldi_io.py
with GNU Lesser General Public License v3.0
from Ckst123

def _read_subvec_int_binary(fd, start, length):
    assert (fd.read(1).decode() == '\4')  # int-size
    vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]  # vector dim
    assert start + length <= vec_size
    if start > 0:
        fd.seek(start * 5, 1)
    # Elements from int32 vector are stored in tuples: (sizeof(int32), value),
    vec = np.frombuffer(fd.read(length * 5), dtype=[('size', 'int8'), ('value', 'int32')], count=length)
    assert (vec[0]['size'] == 4)  # int32 size,
    ans = vec[:]['value']  # values are in 2nd column,
    return ans

# Writing,
def write_vec_int(file_or_fd, v, key=''):

3 Source : filter.py
with GNU Affero General Public License v3.0
from cleanlab

def _to_np_array(mp_arr, dtype="int32", shape=None):  # pragma: no cover
    """multipropecessing Helper function to convert a multiprocessing
    RawArray to a numpy array."""
    arr = np.frombuffer(mp_arr, dtype=dtype)
    if shape is None:
        return arr
    return arr.reshape(shape)


def _init(

3 Source : pipeline.py
with BSD 3-Clause "New" or "Revised" License
from cornell-zhang

    def recv_array(self, flags=0, copy=True, track=False):
        """recv a numpy array"""
        md = self.recv_json(flags=flags)
        msg = self.recv(flags=flags, copy=copy, track=track)
        A = numpy.frombuffer(msg, dtype=md['dtype'])
        return A.reshape(md['shape'])


class SerializingContext(zmq.Context):

3 Source : test_detection_utils.py
with Apache License 2.0
from CVMI-Lab

    def test_opencv_exif_orientation(self):
        import cv2

        URL = "detectron2://assets/Landscape_5.jpg"
        with PathManager.open(URL, "rb") as f:
            img = cv2.imdecode(np.frombuffer(f.read(), dtype="uint8"), cv2.IMREAD_COLOR)
        self.assertEqual(img.dtype, np.uint8)
        self.assertEqual(img.shape, (1200, 1800, 3))


if __name__ == "__main__":

3 Source : level3.py
with GNU General Public License v3.0
from CyanideCN

    def _rhi(self):
        azi = np.frombuffer(self.buf.read(4), "f4")[0]
        top = np.frombuffer(self.buf.read(4), "f4")[0]
        bot = np.frombuffer(self.buf.read(4), "f4")[0]
        self.params["azimuth"] = azi
        self.params["top"] = top
        self.params["bottom"] = bot


def get_product_param(ptype: int, param_bytes: bytes) -> dict:

3 Source : test_real_transforms.py
with Apache License 2.0
from dashanji

def is_longdouble_binary_compatible():
    try:
        one = np.frombuffer(
            b'\x00\x00\x00\x00\x00\x00\x00\x80\xff\x3f\x00\x00\x00\x00\x00\x00',
            dtype='<f16')
        return one == np.longfloat(1.)
    except TypeError:
        return False


def get_reference_data():

3 Source : _fortran.py
with Apache License 2.0
from dashanji

    def _read_size(self, eof_ok=False):
        n = self._header_dtype.itemsize
        b = self._fp.read(n)
        if (not b) and eof_ok:
            raise FortranEOFError("End of file occurred at end of record")
        elif len(b) < n:
            raise FortranFormattingError(
                "End of file in the middle of the record size")
        return int(np.frombuffer(b, dtype=self._header_dtype, count=1))

    def write_record(self, *items):

3 Source : bento.py
with MIT License
from databento

    def _replay_bin(self, callback: Callable[[Any], None]) -> None:
        dtype = BIN_RECORD_MAP[self._schema]
        reader: BinaryIO = self.reader(decompress=True)
        while True:
            raw: bytes = reader.read(self.struct_size)
            record = np.frombuffer(raw, dtype=dtype)
            if not record:
                break
            callback(record[0])

    def _replay_csv_or_json(self, callback: Callable[[Any], None]) -> None:
