numpy.uint8

Here are examples of the Python API numpy.uint8, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples

Example 1

Project: peppy
Source File: filter.py
View license
    def scaleChunk(self, raw, minval, maxval, u1, u2, v1, v2, output):
        """Rescale the window raw[u1:u2, v1:v2] from [minval, maxval] to
        0-255 and store it as uint8 into the same window of *output*.
        """
        # NOTE(review): assumes self.dprint returns a truthy value, so the
        # assert is only a log hook (stripped under -O) -- confirm dprint.
        assert self.dprint("processing chunk [%d:%d, %d:%d], min=%d max=%d" % (u1, u2, v1, v2, minval, maxval))
        if minval == maxval:
            # Flat data: subtracting minval yields all zeros and sidesteps
            # the divide-by-zero in the general branch below.
            output[u1:u2, v1:v2] = (raw[u1:u2, v1:v2] - minval).astype(numpy.uint8)
        else:
            #gray=((raw-minval)*(255.0/(maxval-minval))).astype(numpy.uint8)
            # Shift to zero, then scale so (maxval - minval) maps to 255.0.
            temp1 = raw[u1:u2, v1:v2] - minval
            temp2 = temp1 * (255.0/(maxval-minval))
            output[u1:u2, v1:v2] = temp2.astype(numpy.uint8)

Example 2

Project: python-seabreeze
Source File: spectrometer.py
View license
    @convert_exceptions("Error while reading raw spectrum")
    def get_formatted_spectrum(self, out):
        """Read a raw spectrum, fix its byte order, and scale it into *out*.

        Returns the pixel count (kept for API compatibility).
        """
        raw = numpy.empty((self._RAW_SPECTRUM_LEN), dtype=numpy.uint8)
        self.get_unformatted_spectrum(raw)
        # The byte order is different for some models: within each 128-byte
        # packet, output byte pairs draw one byte from each 64-byte half.
        idx = [(i//2) % 64 + (i % 2)*64 + (i//128)*128
               for i in range(self._RAW_SPECTRUM_LEN - 1)]
        # Mask the high nibble of every second byte -- it is not guaranteed
        # to be pulled low by the device.  (Removed the dead numpy.empty
        # pre-allocation of tsorted that was immediately overwritten.)
        tsorted = raw[idx] & numpy.array((0xFF, 0x0F) * (self._PIXELS),
                                         dtype=numpy.uint8)
        # Parse little-endian uint16 pixels, then widen to double.
        ret = numpy.array(struct.unpack("<" + "H" * self._PIXELS, tsorted),
                          dtype=numpy.double)
        # sorted and parsed
        out[:] = ret * self._NORMALIZATION_VALUE
        return self._PIXELS  # compatibility

Example 3

Project: python-seabreeze
Source File: spectrometer.py
View license
    @convert_exceptions("Error while reading raw spectrum")
    def get_formatted_spectrum(self, out):
        """Read a raw spectrum, fix its byte order, and scale it into *out*.

        Returns the pixel count (kept for API compatibility).
        """
        raw = numpy.empty((self._RAW_SPECTRUM_LEN), dtype=numpy.uint8)
        self.get_unformatted_spectrum(raw)
        # The byte order is different for some models: within each 128-byte
        # packet, output byte pairs draw one byte from each 64-byte half.
        idx = [(i//2) % 64 + (i % 2)*64 + (i//128)*128
               for i in range(self._RAW_SPECTRUM_LEN - 1)]
        # Mask the high nibble of every second byte -- it is not guaranteed
        # to be pulled low by the device.  (Removed the dead numpy.empty
        # pre-allocation of tsorted that was immediately overwritten.)
        tsorted = raw[idx] & numpy.array((0xFF, 0x0F) * (self._PIXELS),
                                         dtype=numpy.uint8)
        # Parse little-endian uint16 pixels, then widen to double.
        ret = numpy.array(struct.unpack("<" + "H" * self._PIXELS, tsorted),
                          dtype=numpy.double)
        # sorted and parsed
        out[:] = ret * self._NORMALIZATION_VALUE
        return self._PIXELS  # compatibility

Example 4

Project: pylibtiff
Source File: test_lzw.py
View license
def test_encode():
    """LZW round trip: c_decode(c_encode(x)) must reproduce x exactly."""
    sample_arrays = [
        numpy.array([7, 7, 7, 8, 8, 7, 7, 6, 6], numpy.uint8),
        numpy.array(list(range(400000)), numpy.uint8),
        numpy.array([1, 3, 7, 15, 31, 63], numpy.uint8),
    ]
    for original in sample_arrays:
        encoded = c_encode(original)
        decoded = c_decode(encoded, original.nbytes)
        # Both size and content must survive the round trip.
        assert decoded.nbytes == original.nbytes and (decoded == original).all(), repr((decoded, original))

Example 5

Project: DQN-chainer
Source File: dqn_agent.py
View license
    def reset_state(self, observation):
        """Begin a new episode: remember the observation, zero the frame stack."""
        # Preprocess the raw observation down to the 84x84 input frame.
        frame = self.scale_image(observation)
        # Remember it so the next step can compute deltas against it.
        self.last_observation = frame

        # Fresh history stack of zero frames, with the new frame in slot 0.
        self.state = np.zeros((self.dqn.n_history, 84, 84), dtype=np.uint8)
        self.state[0] = frame

Example 6

Project: SnapSudoku
Source File: digit.py
View license
    def buildDigit(self):
        """Isolate the digit glyph: flood-fill connected components seeded
        from the centre of the cell, keep only the largest one, binarise to
        0/255 and store the result in self.digit as a uint8 array.

        NOTE(review): relies on Python 2 semantics (xrange, integer '/'
        division in the bounds below) -- confirm target interpreter.
        """
        componentId = 0
        # Only seed flood fills from the central half of the cell, where the
        # digit stroke is expected to lie (skips border noise).
        A, C = self.H / 4, 3 * self.H / 4 + 1
        B, D = self.W / 4, 3 * self.W / 4 + 1
        for i in xrange(A, C):
            for j in xrange(B, D):
                if not self.visited[i][j]:
                    self.bfs(i, j, componentId)
                    componentId += 1
        # self.digit cells hold component ids (or None); tally each id's size.
        componentSizes = [0 for _ in xrange(componentId)]
        for row in self.digit:
            for cell in row:
                if cell is not None:
                    componentSizes[cell] += 1
        largest = componentSizes.index(max(componentSizes))
        # Keep the largest component as white (255); everything else black (0).
        for i in xrange(self.H):
            for j in xrange(self.W):
                self.digit[i][j] = 255 if self.digit[i][j] == largest else 0
        self.digit = np.asarray(self.digit, dtype=np.uint8)

Example 7

Project: Yeppp
Source File: test_add_unittest.py
View license
    def test_add_V8uV8u_V16u(self):
        """yepCore_Add_V8uV8u_V16u must match elementwise uint8 + uint8 -> uint16."""
        lhs = self.a.astype(numpy.uint8)
        rhs = self.b.astype(numpy.uint8)
        result = numpy.empty([self.n]).astype(numpy.uint16)

        # Hand the raw buffers to the C kernel.
        lhs_ptr = lhs.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8))
        rhs_ptr = rhs.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8))
        result_ptr = result.ctypes.data_as(ctypes.POINTER(ctypes.c_uint16))

        yepCore_Add_V8uV8u_V16u(lhs_ptr, rhs_ptr, result_ptr, self.n)

        for i in range(self.n):
            self.assertEqual(lhs[i] + rhs[i], result[i])

Example 8

Project: deep-pwning
Source File: utils.py
View license
def extract_labels(filename, num_images):
    """Extract the labels into a vector of int64 label IDs."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        # The first 8 bytes are the header (magic number + item count).
        bytestream.read(8)
        raw = bytestream.read(1 * num_images)
    # One byte per label; widen to int64 for downstream use.
    return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)

Example 9

Project: abstract_rendering
Source File: categories.py
View license
    def shade(self, grid):
        """Return an RGBA uint8 grid: self.above where category self.cat's
        share of the cell total reaches self.cutoff, self.below elsewhere,
        and self.background where the cell total is zero.
        """
        (height, width, depth) = grid.shape
        outgrid = np.empty((height, width, 4), dtype=np.uint8)

        # Per-cell totals over all categories (float so the ratio is exact).
        sums = ToCounts.shade(grid, dtype=np.float32)
        maskbg = (sums == 0)
        # Divides by zero where sums == 0; those cells are repaired by the
        # background write at the end.
        mask = (grid[:, :, self.cat]/sums) >= self.cutoff

        outgrid[mask] = self.above
        outgrid[~mask] = self.below
        # Applied last so background wins over above/below.
        outgrid[maskbg] = self.background
        return outgrid

Example 10

Project: landsat-util
Source File: ndvi.py
View license
    def write_band(self, output_band, output_file, image_data):
        """Write *output_band* as a single-band uint8 GeoTIFF at *output_file*
        and return the path.

        image_data supplies 'shape' (rows, cols) and 'dst_transform'; pixel
        value 0 is recorded as the nodata value.
        """

        # from http://publiclab.org/notes/cfastie/08-26-2014/new-ndvi-colormap
        with rasterio.open(output_file, 'w', driver='GTiff',
                           width=image_data['shape'][1],
                           height=image_data['shape'][0],
                           count=1,
                           dtype=numpy.uint8,
                           nodata=0,
                           transform=image_data['dst_transform'],
                           crs=self.dst_crs) as output:

            output.write_band(1, output_band)

            self.output("Writing to file", normal=True, color='green', indent=1)
        return output_file

Example 11

Project: prettytensor
Source File: data_utils.py
View license
def mnist_extract_labels(filename, num_images):
  """Extract the labels into a 1-hot matrix [image index, label index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    bytestream.read(8)  # skip the 8-byte header
    labels = np.frombuffer(bytestream.read(num_images), dtype=np.uint8)
  # Dense 1-hot: row i has a 1.0 in column labels[i], zeros elsewhere.
  return (labels[:, None] == np.arange(10)).astype(np.float32)

Example 12

View license
def extract_images(filename):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        # 2051 is the magic number of the MNIST image file format.
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' %
                (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        pixel_bytes = bytestream.read(rows * cols * num_images)
    flat = numpy.frombuffer(pixel_bytes, dtype=numpy.uint8)
    # One trailing depth axis so downstream code sees (index, y, x, depth).
    return flat.reshape(num_images, rows, cols, 1)

Example 13

Project: hyperspy
Source File: test_tiff.py
View license
def test_saving_with_custom_tag():
    """A custom TIFF extratag must survive a save/load round trip."""
    signal = hs.signals.Signal2D(
        np.arange(10 * 15, dtype=np.uint8).reshape((10, 15)))
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'test_saving_with_custom_tag.tif')
        # (code, dtype, count, value, writeonce)
        extratag = [(65000, 's', 1, "Random metadata", False)]
        signal.save(fname, extratags=extratag, overwrite=True)
        reloaded = hs.load(fname)
        nt.assert_equal(reloaded.original_metadata['Number_65000'],
                        b"Random metadata")

Example 14

View license
@timeout(30)
def fetch_cvimage_from_url(url, maxsize=10 * 1024 * 1024):
    """Download the image at *url* and decode it into an OpenCV image.

    Raises ValueError when the response body exceeds *maxsize* bytes.
    """
    req = requests.get(url, timeout=5, stream=True)
    # iter_content yields bytes: accumulate into b'' (the original started
    # from '' (str), which only worked on Python 2 where str == bytes).
    content = b''
    for chunk in req.iter_content(2048):
        content += chunk
        if len(content) > maxsize:
            req.close()
            raise ValueError('Response too large')
    img_array = np.asarray(bytearray(content), dtype=np.uint8)
    # NOTE(review): cv2.CV_LOAD_IMAGE_COLOR is the OpenCV 2.x constant name
    # (cv2.IMREAD_COLOR from 3.x on) -- confirm the cv2 version in use.
    cv2_img_flag = cv2.CV_LOAD_IMAGE_COLOR
    image = cv2.imdecode(img_array, cv2_img_flag)
    return image

Example 15

Project: ipython_extensions
Source File: pil_display.py
View license
def display_image_array(a):
    """If an array looks like RGB[A] data, display it as an image."""
    import numpy as np
    looks_like_rgb = (
        len(a.shape) == 3
        and a.shape[2] in (3, 4)
        and a.dtype == np.uint8
    )
    if not looks_like_rgb:
        # Not image-like: decline to handle (implicit None).
        return
    # Halve the displayed width (e.g. for hi-dpi screenshots).
    metadata = {'width': a.shape[1] // 2}
    return (array2imgdata_pil(a), metadata)

Example 16

Project: conta-bolas
Source File: new_approach.py
View license
    def single_hsv2bgr(self, hsv_color):
        """Convert a single HSV color to a BGR tuple of plain Python ints."""
        # Stage the color as a 1x1 three-channel image so cv2 can convert it.
        one_pixel = np.zeros((1, 1, 3), np.uint8)
        one_pixel[0] = (hsv_color)
        one_pixel = cv2.cvtColor(one_pixel, cv2.COLOR_HSV2BGR)
        blue, green, red = one_pixel[0][0]
        # .item() unboxes the numpy scalars into native Python ints.
        return (blue.item(), green.item(), red.item())

Example 17

Project: deep-learning-from-scratch
Source File: mnist.py
View license
def _load_label(file_name):
    """Load the gzipped MNIST label file *file_name* (under dataset_dir) as uint8."""
    file_path = dataset_dir + "/" + file_name

    print("Converting " + file_name + " to NumPy Array ...")
    with gzip.open(file_path, 'rb') as stream:
        # Labels start after the 8-byte header (magic number + count).
        labels = np.frombuffer(stream.read(), np.uint8, offset=8)
    print("Done")

    return labels

Example 18

Project: chainer
Source File: cifar.py
View license
def _retrieve_cifar(name):
    """Return the CIFAR arrays for *name*, downloading and caching as .npz.

    *name* is the archive basename (presumably 'cifar-10' -- the five
    10000-row batches below match the CIFAR-10 layout; verify for other
    names).  Returns a dict with train_x (50000, 3072) uint8, train_y
    (50000,) uint8, test_x and test_y.
    """
    root = download.get_dataset_directory('pfnet/chainer/cifar')
    path = os.path.join(root, '{}.npz'.format(name))
    url = 'https://www.cs.toronto.edu/~kriz/{}-python.tar.gz'.format(name)

    def creator(path):
        # Runs only on cache miss: fetch the tarball and repack it as npz.
        archive_path = download.cached_download(url)

        # Five training batches of 10000 rows, 32*32*3 = 3072 bytes each.
        train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
        train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
        test_y = numpy.empty(10000, dtype=numpy.uint8)

        dir_name = '{}-batches-py'.format(name)

        with tarfile.open(archive_path, 'r:gz') as archive:
            # training set
            for i in range(5):
                file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
                d = _pickle_load(archive.extractfile(file_name))
                train_x[i] = d['data']
                train_y[i] = d['labels']

            # test set
            file_name = '{}/test_batch'.format(dir_name)
            d = _pickle_load(archive.extractfile(file_name))
            test_x = d['data']  # taken as-is; dtype comes from the pickle
            test_y[...] = d['labels']  # copy to array

        # Collapse the five batches into single flat arrays.
        train_x = train_x.reshape(50000, 3072)
        train_y = train_y.reshape(50000)

        numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
                               test_x=test_x, test_y=test_y)
        return {'train_x': train_x, 'train_y': train_y,
                'test_x': test_x, 'test_y': test_y}

    return download.cache_or_load_file(path, creator, numpy.load)

Example 19

Project: cupy
Source File: cifar.py
View license
def _retrieve_cifar(name):
    """Return the CIFAR arrays for *name*, downloading and caching as .npz.

    *name* is the archive basename (presumably 'cifar-10' -- the five
    10000-row batches below match the CIFAR-10 layout; verify for other
    names).  Returns a dict with train_x (50000, 3072) uint8, train_y
    (50000,) uint8, test_x and test_y.
    """
    root = download.get_dataset_directory('pfnet/chainer/cifar')
    path = os.path.join(root, '{}.npz'.format(name))
    url = 'https://www.cs.toronto.edu/~kriz/{}-python.tar.gz'.format(name)

    def creator(path):
        # Runs only on cache miss: fetch the tarball and repack it as npz.
        archive_path = download.cached_download(url)

        # Five training batches of 10000 rows, 32*32*3 = 3072 bytes each.
        train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
        train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
        test_y = numpy.empty(10000, dtype=numpy.uint8)

        dir_name = '{}-batches-py'.format(name)

        with tarfile.open(archive_path, 'r:gz') as archive:
            # training set
            for i in range(5):
                file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
                d = _pickle_load(archive.extractfile(file_name))
                train_x[i] = d['data']
                train_y[i] = d['labels']

            # test set
            file_name = '{}/test_batch'.format(dir_name)
            d = _pickle_load(archive.extractfile(file_name))
            test_x = d['data']  # taken as-is; dtype comes from the pickle
            test_y[...] = d['labels']  # copy to array

        # Collapse the five batches into single flat arrays.
        train_x = train_x.reshape(50000, 3072)
        train_y = train_y.reshape(50000)

        numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
                               test_x=test_x, test_y=test_y)
        return {'train_x': train_x, 'train_y': train_y,
                'test_x': test_x, 'test_y': test_y}

    return download.cache_or_load_file(path, creator, numpy.load)

Example 20

Project: PredNet
Source File: main.py
View license
def write_image(image, path):
    """Save a float (channel, height, width) image scaled by 255 to *path*.

    The caller's array is left untouched (the previous in-place `image *= 255`
    silently clobbered the input).
    """
    # Scale, move channels last, and quantize -- all on a fresh array.
    pixels = (image * 255).transpose(1, 2, 0).astype(np.uint8)
    result = Image.fromarray(pixels)
    result.save(path)

Example 21

Project: attention-lvcsr
Source File: cifar100.py
View license
@check_exists(required_files=[DISTRIBUTION_FILE])
def convert_cifar100(directory, output_directory,
                     output_filename='cifar100.hdf5'):
    """Converts the CIFAR-100 dataset to HDF5.

    Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
    'cifar100.hdf5'.

    This method assumes the existence of the following file:
    `cifar-100-python.tar.gz`

    Parameters
    ----------
    directory : str
        Directory in which the required input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'cifar100.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode="w")
    input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
    tar_file = tarfile.open(input_file, 'r:gz')

    file = tar_file.extractfile('cifar-100-python/train')
    try:
        if six.PY3:
            # latin1 lets Python 3 unpickle the Python 2 byte strings.
            train = cPickle.load(file, encoding='latin1')
        else:
            train = cPickle.load(file)
    finally:
        file.close()

    # Each row is 3072 bytes -> (channel, height, width) = (3, 32, 32).
    train_features = train['data'].reshape(train['data'].shape[0],
                                           3, 32, 32)
    train_coarse_labels = numpy.array(train['coarse_labels'],
                                      dtype=numpy.uint8)
    train_fine_labels = numpy.array(train['fine_labels'],
                                    dtype=numpy.uint8)

    file = tar_file.extractfile('cifar-100-python/test')
    try:
        if six.PY3:
            test = cPickle.load(file, encoding='latin1')
        else:
            test = cPickle.load(file)
    finally:
        file.close()

    test_features = test['data'].reshape(test['data'].shape[0],
                                         3, 32, 32)
    test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
    test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)

    # Labels are reshaped to column vectors so each example has shape (1,).
    data = (('train', 'features', train_features),
            ('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
            ('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
            ('test', 'features', test_features),
            ('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
            ('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
    fill_hdf5_file(h5file, data)
    # Axis labels consumed by fuel's dataset wrappers.
    h5file['features'].dims[0].label = 'batch'
    h5file['features'].dims[1].label = 'channel'
    h5file['features'].dims[2].label = 'height'
    h5file['features'].dims[3].label = 'width'
    h5file['coarse_labels'].dims[0].label = 'batch'
    h5file['coarse_labels'].dims[1].label = 'index'
    h5file['fine_labels'].dims[0].label = 'batch'
    h5file['fine_labels'].dims[1].label = 'index'

    h5file.flush()
    h5file.close()

    return (output_path,)

Example 22

Project: attention-lvcsr
Source File: test_cifar100.py
View license
def test_cifar100():
    """Smoke-test the CIFAR100 dataset: shapes, dtypes, default-stream
    scaling, and rejection of unknown splits."""
    train = CIFAR100(('train',), load_in_memory=False)
    assert train.num_examples == 50000
    handle = train.open()
    coarse_labels, features, fine_labels = train.get_data(handle,
                                                          slice(49990, 50000))

    assert features.shape == (10, 3, 32, 32)
    assert coarse_labels.shape == (10, 1)
    assert fine_labels.shape == (10, 1)
    train.close(handle)

    test = CIFAR100(('test',), load_in_memory=False)
    handle = test.open()
    coarse_labels, features, fine_labels = test.get_data(handle,
                                                         slice(0, 10))

    assert features.shape == (10, 3, 32, 32)
    assert coarse_labels.shape == (10, 1)
    assert fine_labels.shape == (10, 1)

    # Raw storage is uint8 for every source.
    assert features.dtype == numpy.uint8
    assert coarse_labels.dtype == numpy.uint8
    assert fine_labels.dtype == numpy.uint8

    test.close(handle)

    # The default stream should rescale features to floats in [0, 1].
    stream = DataStream.default_stream(
        test, iteration_scheme=SequentialScheme(10, 10))
    data = next(stream.get_epoch_iterator())[1]

    assert data.min() >= 0.0 and data.max() <= 1.0
    assert data.dtype == config.floatX

    # Only 'train' and 'test' splits exist.
    assert_raises(ValueError, CIFAR100, ('valid',))

Example 23

View license
def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels

Example 24

Project: peregrine
Source File: message_lnav.py
View license
  def __init__(self, prn, tow0=1, n_msg=0, n_prefixBits=50):
    '''
    Constructs message object.

    Parameters
    ----------
    prn : int
      Satellite PRN
    tow0 : int
      Time of week in 6-second units for the first message
    n_msg : int, optional
      Number of messages to pre-generate for output
    n_prefixBits : int, optional
      Number of bits to issue before the first message
    '''
    super(Message, self).__init__()
    self.prn = prn
    self.n_prefixBits = n_prefixBits
    self.n_msg0 = n_msg
    self.tow0 = tow0
    self.messageCount = 0
    self.messageLen = n_prefixBits
    self.nextTow = tow0
    self.nextMsgId = 0
    # One uint8 per bit for the prefix preceding the first message.
    self.messageBits = numpy.zeros(n_prefixBits, dtype=numpy.uint8)
    # NOTE(review): integer division under Python 2; on Python 3 this yields
    # a float and numpy.zeros/range below would fail -- confirm target version.
    words = (n_prefixBits + 29) / 30
    if words:
      # Build whole 30-bit words of alternating 0/1 filler, then fix up parity.
      tmp = numpy.zeros(words * 30, dtype=numpy.uint8)
      tmp[1::2] = 1
      if words > 1:
        # Windows overlap by two bits -- presumably each word's parity depends
        # on the trailing two bits of the previous word (verify against the
        # GPS LNAV parity algorithm).
        self.updateParity(tmp[0:30])
        for i in range(1, words - 1):
          self.updateParity(tmp[i * 30 - 2: i * 30 + 30])
        self.updateParity(tmp[words * 30 - 32: words * 30], True)
      else:
        self.updateParity(tmp[0: 30], True)
      # Keep only the trailing n_prefixBits bits of the generated words.
      self.messageBits[:] = tmp[-n_prefixBits:]
    # Scratch buffers for byte/word packing (a32 is big-endian uint32).
    self.a8 = numpy.ndarray(1, dtype=numpy.uint8)
    self.a32 = numpy.ndarray(1, dtype=numpy.dtype('>u4'))
    self.addMessages(n_msg)

Example 25

Project: ilastik-0.5
Source File: segmentorWSit.py
View license
        def setupWeights(self, weights):
            """Store *weights* as uint8 and (re)build the vigra watershed
            segmentor plus its basin and border volumes from them.
            """
            self.weights = weights
            #self.weights = numpy.average(weights, axis = 3).astype(numpy.uint8)#.swapaxes(0,2).view(vigra.ScalarVolume)#
            # The segmentor below works on 8-bit data, so coerce anything else.
            if weights.dtype != numpy.uint8:
                print "converting weights to uint8"
                self.weights = weights.astype(numpy.uint8)

#            self.weights = numpy.zeros(weights.shape[0:-1], 'uint8')
#            self.weights[:] = 3
#            self.weights[:,:,0::4] = 10
#            self.weights[:,0::4,:] = 10
#            self.weights[0::4,:,:] = 10
#            self.weights = self.weights

            #self.ws = vigra.tws.IncrementalWS(self.weights, 0)
            print "Incoming weights :", self.weights.dtype, self.weights.shape

            # Drop any previous segmentation state before rebuilding.
            if hasattr(self, "segmentor"):
                del self.segmentor
                del self.volumeBasins


            if self.edgeWeights == "Difference":
                useDifference = True
            else:
                useDifference = False

            #print self.dontUseSuperVoxels
            # NOTE(review): 0 and 255 are presumably the weight value range and
            # 2048 a supervoxel parameter -- verify against the vigra.svs API.
            self.segmentor = vigra.svs.segmentor(self.weights, useDifference, 0, 255, 2048, self.dontUseSuperVoxels)


            self.getBasins()
            # Append a singleton trailing (channel) axis.
            self.volumeBasins.shape = self.volumeBasins.shape + (1,)

            self.borders = self.segmentor.getBorderVolume()
            self.borders.shape = self.borders.shape + (1,)

Example 26

Project: MCEdit-Unified
Source File: nbt_test.py
View license
    @staticmethod
    def testCreate():
        """Create an indev level in memory and return its root TAG_Compound."""

        # The root of an NBT file is always a TAG_Compound.
        level = nbt.TAG_Compound(name="MinecraftLevel")

        # Subtags of a TAG_Compound are automatically named when you use the [] operator.
        level["About"] = nbt.TAG_Compound()
        level["About"]["Author"] = nbt.TAG_String("codewarrior")
        level["About"]["CreatedOn"] = nbt.TAG_Long(time.time())

        level["Environment"] = nbt.TAG_Compound()
        level["Environment"]["SkyBrightness"] = nbt.TAG_Byte(16)
        level["Environment"]["SurroundingWaterHeight"] = nbt.TAG_Short(32)
        level["Environment"]["FogColor"] = nbt.TAG_Int(0xcccccc)

        entity = nbt.TAG_Compound()
        entity["id"] = nbt.TAG_String("Creeper")
        entity["Pos"] = nbt.TAG_List([nbt.TAG_Float(d) for d in (32.5, 64.0, 33.3)])

        level["Entities"] = nbt.TAG_List([entity])

        # You can also create and name a tag before adding it to the compound.
        spawn = nbt.TAG_List((nbt.TAG_Short(100), nbt.TAG_Short(45), nbt.TAG_Short(55)))
        spawn.name = "Spawn"

        mapTag = nbt.TAG_Compound()
        mapTag.add(spawn)
        mapTag.name = "Map"
        level.add(mapTag)

        # mapTag2 only demonstrates the list-constructor form; it is never added to the level.
        mapTag2 = nbt.TAG_Compound([spawn])
        mapTag2.name = "Map"

        # I think it looks more familiar with [] syntax.

        l, w, h = 128, 128, 128
        mapTag["Height"] = nbt.TAG_Short(h)  # y dimension
        mapTag["Length"] = nbt.TAG_Short(l)  # z dimension
        mapTag["Width"] = nbt.TAG_Short(w)  # x dimension

        # Byte arrays are stored as numpy.uint8 arrays.

        mapTag["Blocks"] = nbt.TAG_Byte_Array()
        mapTag["Blocks"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)  # create lots of air!

        # The blocks array is indexed (y,z,x) for indev levels, so reshape the blocks
        mapTag["Blocks"].value.shape = (h, l, w)

        # Replace the bottom layer of the indev level with wood
        mapTag["Blocks"].value[0, :, :] = 5

        # This is a great way to learn the power of numpy array slicing and indexing.

        mapTag["Data"] = nbt.TAG_Byte_Array()
        mapTag["Data"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)

        # Save a few more tag types for completeness

        level["ShortArray"] = nbt.TAG_Short_Array(numpy.zeros((16, 16), dtype='uint16'))
        level["IntArray"] = nbt.TAG_Int_Array(numpy.zeros((16, 16), dtype='uint32'))
        level["Float"] = nbt.TAG_Float(0.3)

        return level

Example 27

Project: MCEdit-Unified
Source File: nbt_test.py
View license
    @staticmethod
    def testCreate():
        """Create an indev level in memory and return its root TAG_Compound."""

        # The root of an NBT file is always a TAG_Compound.
        level = nbt.TAG_Compound(name="MinecraftLevel")

        # Subtags of a TAG_Compound are automatically named when you use the [] operator.
        level["About"] = nbt.TAG_Compound()
        level["About"]["Author"] = nbt.TAG_String("codewarrior")
        level["About"]["CreatedOn"] = nbt.TAG_Long(time.time())

        level["Environment"] = nbt.TAG_Compound()
        level["Environment"]["SkyBrightness"] = nbt.TAG_Byte(16)
        level["Environment"]["SurroundingWaterHeight"] = nbt.TAG_Short(32)
        level["Environment"]["FogColor"] = nbt.TAG_Int(0xcccccc)

        entity = nbt.TAG_Compound()
        entity["id"] = nbt.TAG_String("Creeper")
        entity["Pos"] = nbt.TAG_List([nbt.TAG_Float(d) for d in (32.5, 64.0, 33.3)])

        level["Entities"] = nbt.TAG_List([entity])

        # You can also create and name a tag before adding it to the compound.
        spawn = nbt.TAG_List((nbt.TAG_Short(100), nbt.TAG_Short(45), nbt.TAG_Short(55)))
        spawn.name = "Spawn"

        mapTag = nbt.TAG_Compound()
        mapTag.add(spawn)
        mapTag.name = "Map"
        level.add(mapTag)

        # mapTag2 only demonstrates the list-constructor form; it is never added to the level.
        mapTag2 = nbt.TAG_Compound([spawn])
        mapTag2.name = "Map"

        # I think it looks more familiar with [] syntax.

        l, w, h = 128, 128, 128
        mapTag["Height"] = nbt.TAG_Short(h)  # y dimension
        mapTag["Length"] = nbt.TAG_Short(l)  # z dimension
        mapTag["Width"] = nbt.TAG_Short(w)  # x dimension

        # Byte arrays are stored as numpy.uint8 arrays.

        mapTag["Blocks"] = nbt.TAG_Byte_Array()
        mapTag["Blocks"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)  # create lots of air!

        # The blocks array is indexed (y,z,x) for indev levels, so reshape the blocks
        mapTag["Blocks"].value.shape = (h, l, w)

        # Replace the bottom layer of the indev level with wood
        mapTag["Blocks"].value[0, :, :] = 5

        # This is a great way to learn the power of numpy array slicing and indexing.

        mapTag["Data"] = nbt.TAG_Byte_Array()
        mapTag["Data"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)

        # Save a few more tag types for completeness

        level["ShortArray"] = nbt.TAG_Short_Array(numpy.zeros((16, 16), dtype='uint16'))
        level["IntArray"] = nbt.TAG_Int_Array(numpy.zeros((16, 16), dtype='uint32'))
        level["Float"] = nbt.TAG_Float(0.3)

        return level

Example 28

Project: pymclevel
Source File: nbt_test.py
View license
    def testCreate(self):
        """Create an indev level in memory and return its root TAG_Compound."""

        # The root of an NBT file is always a TAG_Compound.
        level = nbt.TAG_Compound(name="MinecraftLevel")

        # Subtags of a TAG_Compound are automatically named when you use the [] operator.
        level["About"] = nbt.TAG_Compound()
        level["About"]["Author"] = nbt.TAG_String("codewarrior")
        level["About"]["CreatedOn"] = nbt.TAG_Long(time.time())

        level["Environment"] = nbt.TAG_Compound()
        level["Environment"]["SkyBrightness"] = nbt.TAG_Byte(16)
        level["Environment"]["SurroundingWaterHeight"] = nbt.TAG_Short(32)
        level["Environment"]["FogColor"] = nbt.TAG_Int(0xcccccc)

        entity = nbt.TAG_Compound()
        entity["id"] = nbt.TAG_String("Creeper")
        entity["Pos"] = nbt.TAG_List([nbt.TAG_Float(d) for d in (32.5, 64.0, 33.3)])

        level["Entities"] = nbt.TAG_List([entity])

        # You can also create and name a tag before adding it to the compound.
        spawn = nbt.TAG_List((nbt.TAG_Short(100), nbt.TAG_Short(45), nbt.TAG_Short(55)))
        spawn.name = "Spawn"

        mapTag = nbt.TAG_Compound()
        mapTag.add(spawn)
        mapTag.name = "Map"
        level.add(mapTag)

        # mapTag2 only demonstrates the list-constructor form; it is never added to the level.
        mapTag2 = nbt.TAG_Compound([spawn])
        mapTag2.name = "Map"

        # I think it looks more familiar with [] syntax.

        l, w, h = 128, 128, 128
        mapTag["Height"] = nbt.TAG_Short(h)  # y dimension
        mapTag["Length"] = nbt.TAG_Short(l)  # z dimension
        mapTag["Width"] = nbt.TAG_Short(w)  # x dimension

        # Byte arrays are stored as numpy.uint8 arrays.

        mapTag["Blocks"] = nbt.TAG_Byte_Array()
        mapTag["Blocks"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)  # create lots of air!

        # The blocks array is indexed (y,z,x) for indev levels, so reshape the blocks
        mapTag["Blocks"].value.shape = (h, l, w)

        # Replace the bottom layer of the indev level with wood
        mapTag["Blocks"].value[0, :, :] = 5

        # This is a great way to learn the power of numpy array slicing and indexing.

        mapTag["Data"] = nbt.TAG_Byte_Array()
        mapTag["Data"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)

        # Save a few more tag types for completeness

        level["ShortArray"] = nbt.TAG_Short_Array(numpy.zeros((16, 16), dtype='uint16'))
        level["IntArray"] = nbt.TAG_Int_Array(numpy.zeros((16, 16), dtype='uint32'))
        level["Float"] = nbt.TAG_Float(0.3)

        return level

Example 29

Project: pymclevel
Source File: nbt_test.py
View license
    def testCreate(self):
        """Create an indev level in memory and return its root TAG_Compound."""

        # The root of an NBT file is always a TAG_Compound.
        level = nbt.TAG_Compound(name="MinecraftLevel")

        # Subtags of a TAG_Compound are automatically named when you use the [] operator.
        level["About"] = nbt.TAG_Compound()
        level["About"]["Author"] = nbt.TAG_String("codewarrior")
        level["About"]["CreatedOn"] = nbt.TAG_Long(time.time())

        level["Environment"] = nbt.TAG_Compound()
        level["Environment"]["SkyBrightness"] = nbt.TAG_Byte(16)
        level["Environment"]["SurroundingWaterHeight"] = nbt.TAG_Short(32)
        level["Environment"]["FogColor"] = nbt.TAG_Int(0xcccccc)

        entity = nbt.TAG_Compound()
        entity["id"] = nbt.TAG_String("Creeper")
        entity["Pos"] = nbt.TAG_List([nbt.TAG_Float(d) for d in (32.5, 64.0, 33.3)])

        level["Entities"] = nbt.TAG_List([entity])

        # You can also create and name a tag before adding it to the compound.
        spawn = nbt.TAG_List((nbt.TAG_Short(100), nbt.TAG_Short(45), nbt.TAG_Short(55)))
        spawn.name = "Spawn"

        mapTag = nbt.TAG_Compound()
        mapTag.add(spawn)
        mapTag.name = "Map"
        level.add(mapTag)

        # mapTag2 only demonstrates the list-constructor form; it is never added to the level.
        mapTag2 = nbt.TAG_Compound([spawn])
        mapTag2.name = "Map"

        # I think it looks more familiar with [] syntax.

        l, w, h = 128, 128, 128
        mapTag["Height"] = nbt.TAG_Short(h)  # y dimension
        mapTag["Length"] = nbt.TAG_Short(l)  # z dimension
        mapTag["Width"] = nbt.TAG_Short(w)  # x dimension

        # Byte arrays are stored as numpy.uint8 arrays.

        mapTag["Blocks"] = nbt.TAG_Byte_Array()
        mapTag["Blocks"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)  # create lots of air!

        # The blocks array is indexed (y,z,x) for indev levels, so reshape the blocks
        mapTag["Blocks"].value.shape = (h, l, w)

        # Replace the bottom layer of the indev level with wood
        mapTag["Blocks"].value[0, :, :] = 5

        # This is a great way to learn the power of numpy array slicing and indexing.

        mapTag["Data"] = nbt.TAG_Byte_Array()
        mapTag["Data"].value = numpy.zeros(l * w * h, dtype=numpy.uint8)

        # Save a few more tag types for completeness

        level["ShortArray"] = nbt.TAG_Short_Array(numpy.zeros((16, 16), dtype='uint16'))
        level["IntArray"] = nbt.TAG_Int_Array(numpy.zeros((16, 16), dtype='uint32'))
        level["Float"] = nbt.TAG_Float(0.3)

        return level

Example 30

Project: Cerebrum
Source File: weaver.py
View license
	@staticmethod
	def start():
		pairs = MapperUtil.get_allpairs() # Get pairs starting from 0th line
		if not pairs:
			print ("No pairs found.")
			sys.exit()

		p = pyaudio.PyAudio() # Create a PyAudio session

		# Create a stream
		stream = p.open(format=FORMAT,
						channels=CHANNELS,
						rate=RATE,
						output=True)

		#H2V_cursor = NeuralNetUtil.get_neurons("H2V")
		elmH2V = None

		# Loop over the pairs coming from CROSSMODAL
		for pair in pairs:
			   #time.sleep(0.5) # Wait 0.5 seconds to prevent aggressive loop
			   print pair

			   if pair['direction'] == "H2V":
				   print "____________________________________________________________\n"
				   print pair['timestamp1']

				   hearing_memory = HearingMemoryUtil.get_memory(pair['timestamp1'])
				   hearing_memory = hearing_memory.next()['data']
				   #print hearing_memory.next()['data']
				   #chunky_array = numpy.fromstring(hearing_memory.next()['data'], 'int16')
				   #print chunky_array
				   stream.write(hearing_memory)

				   numpy_audio = numpy.fromstring(hearing_memory, numpy.uint8)
				   #print numpy_audio
				   print "Audio: ",numpy_audio.shape
				   #print numpy.transpose(numpy_audio.reshape((numpy_audio.shape[0],1))).shape


				   vision_memory = VisionMemoryUtil.get_memory(pair['timestamp2'])
				   vision_memory = vision_memory.next()

				   frame_amodal = numpy.fromstring(vision_memory['amodal'], numpy.uint8)
				   print "Frame Threshold: ",frame_amodal.shape
				   cv2.imshow("Frame Threshhold", frame_amodal.reshape(360,640))
				   cv2.moveWindow("Frame Threshhold",50,100)

				   frame_color = numpy.fromstring(vision_memory['color'], numpy.uint8)
				   print "Frame Delta Colored: ",frame_color.shape
				   cv2.imshow("Frame Delta Colored", frame_color.reshape(360,640,3))
				   cv2.moveWindow("Frame Delta Colored",1200,100)
				   key = cv2.waitKey(500) & 0xFF
				   #time.sleep(2.0)

				   modulo = numpy_audio.shape[0] % RATE
				   numpy_audio = numpy_audio[:-modulo]
				   for one_second in numpy.array_split(numpy_audio, int(numpy_audio.shape[0] / RATE)):
					   X = numpy.transpose(one_second.reshape((one_second.shape[0],1)))
					   T = numpy.transpose(frame_amodal.reshape((frame_amodal.shape[0],1)))
					   X = X.astype(numpy.float32, copy=False)
					   T = T.astype(numpy.float32, copy=False)
					   X[0] = X[0] / X[0].max()
					   T[0] = T[0] / T[0].max()
					   print X.shape
					   print T.shape
					   if elmH2V is None:
						   elmH2V = HPELM(X.shape[1],T.shape[1])
						   if os.path.exists(os.path.expanduser("~/CerebralCortexH2V.pkl")):
							   #elmH2V.nnet.neurons = H2V_cursor.next()['neurons']
							   elmH2V.load(os.path.expanduser("~/CerebralCortexH2V.pkl"))
						   else:
							   elmH2V.add_neurons(100, "sigm")
					   elmH2V.train(X, T, "LOO")
					   print elmH2V.predict(X)
					   cv2.imshow(">>>PREDICTION<<<", numpy.transpose(elmH2V.predict(X)).reshape(360,640))
					   cv2.moveWindow(">>>PREDICTION<<<",50,550)

		print elmH2V.nnet.neurons
		elmH2V.save(os.path.expanduser("~/CerebralCortexH2V.pkl"))

Example 31

Project: Cerebrum
Source File: weaver.py
View license
	@staticmethod
	def start():
		pairs = MapperUtil.get_allpairs() # Get pairs starting from 0th line
		if not pairs:
			print ("No pairs found.")
			sys.exit()

		p = pyaudio.PyAudio() # Create a PyAudio session

		# Create a stream
		stream = p.open(format=FORMAT,
						channels=CHANNELS,
						rate=RATE,
						output=True)

		#H2V_cursor = NeuralNetUtil.get_neurons("H2V")
		elmH2V = None

		# Loop over the pairs coming from CROSSMODAL
		for pair in pairs:
			   #time.sleep(0.5) # Wait 0.5 seconds to prevent aggressive loop
			   print pair

			   if pair['direction'] == "H2V":
				   print "____________________________________________________________\n"
				   print pair['timestamp1']

				   hearing_memory = HearingMemoryUtil.get_memory(pair['timestamp1'])
				   hearing_memory = hearing_memory.next()['data']
				   #print hearing_memory.next()['data']
				   #chunky_array = numpy.fromstring(hearing_memory.next()['data'], 'int16')
				   #print chunky_array
				   stream.write(hearing_memory)

				   numpy_audio = numpy.fromstring(hearing_memory, numpy.uint8)
				   #print numpy_audio
				   print "Audio: ",numpy_audio.shape
				   #print numpy.transpose(numpy_audio.reshape((numpy_audio.shape[0],1))).shape


				   vision_memory = VisionMemoryUtil.get_memory(pair['timestamp2'])
				   vision_memory = vision_memory.next()

				   frame_amodal = numpy.fromstring(vision_memory['amodal'], numpy.uint8)
				   print "Frame Threshold: ",frame_amodal.shape
				   cv2.imshow("Frame Threshhold", frame_amodal.reshape(360,640))
				   cv2.moveWindow("Frame Threshhold",50,100)

				   frame_color = numpy.fromstring(vision_memory['color'], numpy.uint8)
				   print "Frame Delta Colored: ",frame_color.shape
				   cv2.imshow("Frame Delta Colored", frame_color.reshape(360,640,3))
				   cv2.moveWindow("Frame Delta Colored",1200,100)
				   key = cv2.waitKey(500) & 0xFF
				   #time.sleep(2.0)

				   modulo = numpy_audio.shape[0] % RATE
				   numpy_audio = numpy_audio[:-modulo]
				   for one_second in numpy.array_split(numpy_audio, int(numpy_audio.shape[0] / RATE)):
					   X = numpy.transpose(one_second.reshape((one_second.shape[0],1)))
					   T = numpy.transpose(frame_amodal.reshape((frame_amodal.shape[0],1)))
					   X = X.astype(numpy.float32, copy=False)
					   T = T.astype(numpy.float32, copy=False)
					   X[0] = X[0] / X[0].max()
					   T[0] = T[0] / T[0].max()
					   print X.shape
					   print T.shape
					   if elmH2V is None:
						   elmH2V = HPELM(X.shape[1],T.shape[1])
						   if os.path.exists(os.path.expanduser("~/CerebralCortexH2V.pkl")):
							   #elmH2V.nnet.neurons = H2V_cursor.next()['neurons']
							   elmH2V.load(os.path.expanduser("~/CerebralCortexH2V.pkl"))
						   else:
							   elmH2V.add_neurons(100, "sigm")
					   elmH2V.train(X, T, "LOO")
					   print elmH2V.predict(X)
					   cv2.imshow(">>>PREDICTION<<<", numpy.transpose(elmH2V.predict(X)).reshape(360,640))
					   cv2.moveWindow(">>>PREDICTION<<<",50,550)

		print elmH2V.nnet.neurons
		elmH2V.save(os.path.expanduser("~/CerebralCortexH2V.pkl"))

Example 32

Project: fuel
Source File: cifar100.py
View license
@check_exists(required_files=[DISTRIBUTION_FILE])
def convert_cifar100(directory, output_directory,
                     output_filename='cifar100.hdf5'):
    """Converts the CIFAR-100 dataset to HDF5.

    Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
    'cifar100.hdf5'.

    This method assumes the existence of the following file:
    `cifar-100-python.tar.gz`

    Parameters
    ----------
    directory : str
        Directory in which the required input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'cifar100.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    def unpickle_member(tar_file, member):
        # Extract `member` from the archive and unpickle it, always
        # closing the extracted file object. (Also avoids shadowing the
        # `file` builtin, which the previous code did.)
        f = tar_file.extractfile(member)
        try:
            if six.PY3:
                return cPickle.load(f, encoding='latin1')
            return cPickle.load(f)
        finally:
            f.close()

    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode="w")
    input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
    tar_file = tarfile.open(input_file, 'r:gz')
    # BUG FIX: the tar archive handle was never closed (leaked file
    # descriptor); release it as soon as both members have been read.
    try:
        train = unpickle_member(tar_file, 'cifar-100-python/train')
        test = unpickle_member(tar_file, 'cifar-100-python/test')
    finally:
        tar_file.close()

    train_features = train['data'].reshape(train['data'].shape[0],
                                           3, 32, 32)
    train_coarse_labels = numpy.array(train['coarse_labels'],
                                      dtype=numpy.uint8)
    train_fine_labels = numpy.array(train['fine_labels'],
                                    dtype=numpy.uint8)

    test_features = test['data'].reshape(test['data'].shape[0],
                                         3, 32, 32)
    test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
    test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)

    data = (('train', 'features', train_features),
            ('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
            ('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
            ('test', 'features', test_features),
            ('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
            ('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
    fill_hdf5_file(h5file, data)
    # Label each axis so Fuel consumers know the layout.
    h5file['features'].dims[0].label = 'batch'
    h5file['features'].dims[1].label = 'channel'
    h5file['features'].dims[2].label = 'height'
    h5file['features'].dims[3].label = 'width'
    h5file['coarse_labels'].dims[0].label = 'batch'
    h5file['coarse_labels'].dims[1].label = 'index'
    h5file['fine_labels'].dims[0].label = 'batch'
    h5file['fine_labels'].dims[1].label = 'index'

    h5file.flush()
    h5file.close()

    return (output_path,)

Example 33

Project: fuel
Source File: cifar100.py
View license
@check_exists(required_files=[DISTRIBUTION_FILE])
def convert_cifar100(directory, output_directory,
                     output_filename='cifar100.hdf5'):
    """Converts the CIFAR-100 dataset to HDF5.

    Converts the CIFAR-100 dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.CIFAR100`. The converted dataset is saved as
    'cifar100.hdf5'.

    This method assumes the existence of the following file:
    `cifar-100-python.tar.gz`

    Parameters
    ----------
    directory : str
        Directory in which the required input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'cifar100.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.

    """
    def unpickle_member(tar_file, member):
        # Extract `member` from the archive and unpickle it, always
        # closing the extracted file object. (Also avoids shadowing the
        # `file` builtin, which the previous code did.)
        f = tar_file.extractfile(member)
        try:
            if six.PY3:
                return cPickle.load(f, encoding='latin1')
            return cPickle.load(f)
        finally:
            f.close()

    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode="w")
    input_file = os.path.join(directory, 'cifar-100-python.tar.gz')
    tar_file = tarfile.open(input_file, 'r:gz')
    # BUG FIX: the tar archive handle was never closed (leaked file
    # descriptor); release it as soon as both members have been read.
    try:
        train = unpickle_member(tar_file, 'cifar-100-python/train')
        test = unpickle_member(tar_file, 'cifar-100-python/test')
    finally:
        tar_file.close()

    train_features = train['data'].reshape(train['data'].shape[0],
                                           3, 32, 32)
    train_coarse_labels = numpy.array(train['coarse_labels'],
                                      dtype=numpy.uint8)
    train_fine_labels = numpy.array(train['fine_labels'],
                                    dtype=numpy.uint8)

    test_features = test['data'].reshape(test['data'].shape[0],
                                         3, 32, 32)
    test_coarse_labels = numpy.array(test['coarse_labels'], dtype=numpy.uint8)
    test_fine_labels = numpy.array(test['fine_labels'], dtype=numpy.uint8)

    data = (('train', 'features', train_features),
            ('train', 'coarse_labels', train_coarse_labels.reshape((-1, 1))),
            ('train', 'fine_labels', train_fine_labels.reshape((-1, 1))),
            ('test', 'features', test_features),
            ('test', 'coarse_labels', test_coarse_labels.reshape((-1, 1))),
            ('test', 'fine_labels', test_fine_labels.reshape((-1, 1))))
    fill_hdf5_file(h5file, data)
    # Label each axis so Fuel consumers know the layout.
    h5file['features'].dims[0].label = 'batch'
    h5file['features'].dims[1].label = 'channel'
    h5file['features'].dims[2].label = 'height'
    h5file['features'].dims[3].label = 'width'
    h5file['coarse_labels'].dims[0].label = 'batch'
    h5file['coarse_labels'].dims[1].label = 'index'
    h5file['fine_labels'].dims[0].label = 'batch'
    h5file['fine_labels'].dims[1].label = 'index'

    h5file.flush()
    h5file.close()

    return (output_path,)

Example 34

View license
    def __init__(self, domain):
        """Set up the active-contour window for *domain*.

        Computes per-band statistics from the domain's training data
        (feature-based when available, otherwise from ground truth),
        initializes the active contour (snake), and builds an 8-bit RGB
        preview image from the local Skybox Red/Green/Blue bands.
        Two earlier radar/uavsar code paths that existed here only as
        dead commented-out blocks have been removed.
        """
        super(ActiveContourWindow, self).__init__()
        self.setGeometry(300, 300, 650, 650)
        self.setWindowTitle('Active Contour')
        self.domain = domain

        # NOTE(review): under Python 2 this is integer division (== 4);
        # under Python 3 it would be 4.6875 -- confirm which is intended
        # before porting.
        SKYBOX_SCALE = 1200 / 256
        train_domain = domain.training_domain  # For skybox data there is probably no earlier image to train off of
        # The Skybox sensor can be registered under one of two names.
        # BUG FIX: catch only AttributeError instead of a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit and hid
        # unrelated errors raised while reading the attributes.
        try:
            sensor = domain.skybox
            trainSensor = train_domain.skybox
        except AttributeError:
            sensor = domain.skybox_nir
            trainSensor = train_domain.skybox_nir
        ee_image = sensor.image.toUint16()  # For Skybox, these are almost certainly the same image.
        ee_image_train = trainSensor.image.toUint16()

        if train_domain.training_features:  # Train using features
            (band_names, band_statistics) = compute_band_statistics_features(ee_image_train, train_domain.training_features)
        else:  # Train using training truth
            (band_names, band_statistics) = compute_band_statistics(ee_image_train, train_domain.ground_truth,
                                                                    train_domain.bounds)
        (self.local_image, self.snake) = initialize_active_contour(domain, ee_image, band_statistics, False)

        # Retrieve the local image bands and merge them into a fake RGB image
        channels = [self.local_image.get_image('Red'), self.local_image.get_image('Green'), self.local_image.get_image('Blue')]
        channel_images = [PIL.Image.fromarray(numpy.uint8(c / SKYBOX_SCALE)) for c in channels]  # Convert from Skybox range to 8 bit
        self.display_image = PIL.Image.merge('RGB', channel_images)
        self.step = 1
        self.show()

Example 35

View license
    def __init__(self, domain):
        """Set up the active-contour window for *domain*.

        Computes per-band statistics from the domain's training data
        (feature-based when available, otherwise from ground truth),
        initializes the active contour (snake), and builds an 8-bit RGB
        preview image from the local Skybox Red/Green/Blue bands.
        Two earlier radar/uavsar code paths that existed here only as
        dead commented-out blocks have been removed.
        """
        super(ActiveContourWindow, self).__init__()
        self.setGeometry(300, 300, 650, 650)
        self.setWindowTitle('Active Contour')
        self.domain = domain

        # NOTE(review): under Python 2 this is integer division (== 4);
        # under Python 3 it would be 4.6875 -- confirm which is intended
        # before porting.
        SKYBOX_SCALE = 1200 / 256
        train_domain = domain.training_domain  # For skybox data there is probably no earlier image to train off of
        # The Skybox sensor can be registered under one of two names.
        # BUG FIX: catch only AttributeError instead of a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit and hid
        # unrelated errors raised while reading the attributes.
        try:
            sensor = domain.skybox
            trainSensor = train_domain.skybox
        except AttributeError:
            sensor = domain.skybox_nir
            trainSensor = train_domain.skybox_nir
        ee_image = sensor.image.toUint16()  # For Skybox, these are almost certainly the same image.
        ee_image_train = trainSensor.image.toUint16()

        if train_domain.training_features:  # Train using features
            (band_names, band_statistics) = compute_band_statistics_features(ee_image_train, train_domain.training_features)
        else:  # Train using training truth
            (band_names, band_statistics) = compute_band_statistics(ee_image_train, train_domain.ground_truth,
                                                                    train_domain.bounds)
        (self.local_image, self.snake) = initialize_active_contour(domain, ee_image, band_statistics, False)

        # Retrieve the local image bands and merge them into a fake RGB image
        channels = [self.local_image.get_image('Red'), self.local_image.get_image('Green'), self.local_image.get_image('Blue')]
        channel_images = [PIL.Image.fromarray(numpy.uint8(c / SKYBOX_SCALE)) for c in channels]  # Convert from Skybox range to 8 bit
        self.display_image = PIL.Image.merge('RGB', channel_images)
        self.step = 1
        self.show()

Example 36

Project: muscle-plotter
Source File: winddispatcher.py
View license
    def runSimulation(self):
        """Rasterize the recorded sketches into a binary image, post-process
        it with OpenCV (dilate / optional contours / erode), and start a
        WindSim on the scaled result.

        The processed image is also saved to
        muscleplotter/modules/windtunnel/data/load.png for inspection.
        """
        blur_size = 1
        gray_threshold = 240
        simulation_width = 200
        simulation_height = 200
        # color inverted because of openCV
        WHITE = 0  # 255,
        BLACK = 255  # 0
        # openCV parms
        DILATE_ITERATIONS = 5
        DILATE_MATRIX_SIZE = 20
        ERODE_ITERATIONS = 5
        SKIP_DILATE = False
        SKIP_CONTOURS = True
        SKIP_ERODE = False

        # ROBUSTNESS: round() yields floats; modern NumPy rejects float
        # shapes, so convert explicitly.
        sketch_area_height = int(round(self.active_area[3] - self.active_area[1]))
        sketch_area_width = int(round(self.active_area[2] - self.active_area[0]))

        wind_area = np.empty(shape=(sketch_area_height, sketch_area_width),
                             dtype=np.uint8)
        wind_area[:] = WHITE
        print("size of image:" + str(wind_area.shape))
        print("sketches saved: " + str(len(self.sketches)))
        for point in self.sketches:
            # Remap each sketch point from active-area coordinates into
            # image coordinates.
            y = self.remap(point[1], self.active_area[1],
                           self.active_area[3], 0, sketch_area_height - 1)
            x = self.remap(point[0], self.active_area[0],
                           self.active_area[2], 0, sketch_area_width - 1)
            # NOTE(review): the array is (height, width) but is indexed
            # [x + i, y + j]; this appears to rely on the 90-degree
            # rotation below -- confirm the intended orientation.
            for i in range(-blur_size, blur_size):
                for j in range(-blur_size, blur_size):
                    wind_area[x + i, y + j] = BLACK

        kernel = np.ones((DILATE_MATRIX_SIZE, DILATE_MATRIX_SIZE), np.uint8)
        if not SKIP_DILATE:
            wind_area = cv2.dilate(wind_area, kernel,
                                   iterations=DILATE_ITERATIONS)

        # countours
        full_canvas_img = wind_area
        if not SKIP_CONTOURS:
            _, thresh = cv2.threshold(wind_area, 127, 255, 0)
            _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
            full_canvas_img = cv2.drawContours(thresh, contours, -1,
                                               (128, 255, 0), 3)

        # erode those sketches
        if not SKIP_ERODE:
            # BUG FIX: ERODE_ITERATIONS was previously passed as the third
            # positional argument, which is `dst` in cv2.erode's signature
            # (src, kernel[, dst[, anchor[, iterations...]]]); pass it as
            # the `iterations` keyword so it is actually applied.
            full_canvas_img = cv2.erode(full_canvas_img, kernel,
                                        iterations=ERODE_ITERATIONS)

        # array to image & image manipulation
        full_canvas_img = Image.fromarray(full_canvas_img)
        full_canvas_img = full_canvas_img.rotate(90)
        scaled_img = imresize(full_canvas_img, (simulation_width,
                                                simulation_height))

        # Binarize again (blur and scaling erode the mask); a vectorized
        # boolean-mask assignment replaces the original per-pixel loop.
        scaled_img[scaled_img < gray_threshold] = 0
        scaled_img = Image.fromarray(scaled_img)
        scaled_img = ImageOps.invert(scaled_img)

        self.simulation = WindSim(scaled_img)
        scaled_img.save(str(os.getcwd() +
                            "/muscleplotter/modules/windtunnel/data/load.png"))

Example 37

Project: atpy
Source File: ipactable.py
View license
@auto_download_to_file
@auto_decompress_to_fileobj
@auto_fileobj_to_file
def read(self, filename, definition=3, verbose=False, smart_typing=False):
    '''
    Read a table from a IPAC file

    Required Arguments:

        *filename*: [ string ]
            The IPAC file to read the table from

    Optional Keyword Arguments:

        *definition*: [ 1 | 2 | 3 ]

            The definition to use to read IPAC tables:

            1: any character below a pipe symbol belongs to the
               column on the left, and any characters below the
               first pipe symbol belong to the first column.
            2: any character below a pipe symbol belongs to the
               column on the right.
            3: no characters should be present below the pipe
               symbols (default).

        *smart_typing*: [ True | False ]

            Whether to try and save memory by using the smallest
            integer type that can contain a column. For example,
            a column containing only values between 0 and 255 can
            be stored as an unsigned 8-bit integer column. The
            default is false, so that all integer columns are
            stored as 64-bit integers.
    '''

    if not definition in [1, 2, 3]:
        raise Exception("definition should be one of 1/2/3")

    self.reset()

    # Open file for reading
    f = open(filename, 'r')

    line = f.readline()

    # Read in comments (lines starting with "\ ") and keywords
    # (lines of the form "\key = value").
    while True:

        char1 = line[0:1]
        char2 = line[1:2]

        if char1 != '\\':
            break

        if char2 == ' ' or not '=' in line:  # comment
            self.add_comment(line[1:])
        else:  # keyword
            pos = line.index('=')
            key, value = line[1:pos], line[pos + 1:]
            value = value.replace("'", "").replace('"', '')
            key, value = key.strip(), value.strip()
            self.add_keyword(key, value)

        line = f.readline()

    # Column headers: up to four consecutive "|"-delimited lines give
    # column names, data types, units, and null values, in that order.

    l = 0
    units = {}
    nulls = {}

    while True:

        char1 = line[0:1]

        if char1 != "|":
            break

        if l == 0:  # Column names

            line = line.replace('-', ' ').strip()

            # Find all pipe symbols (column boundaries)
            pipes = []
            for i, c in enumerate(line):
                if c == '|':
                    pipes.append(i)

            # Find all names
            names = line.replace(" ", "").split("|")[1:-1]

        elif l == 1:  # Data types

            line = line.replace('-', ' ').strip()

            types = dict(zip(names, \
                line.replace(" ", "").split("|")[1:-1]))

        elif l == 2:  # Units

            units = dict(zip(names, \
                line.replace(" ", "").split("|")[1:-1]))

        else:  # Null values

            nulls = dict(zip(names, \
                line.replace(" ", "").split("|")[1:-1]))

        line = f.readline()
        l = l + 1

    if len(pipes) != len(names) + 1:
        # BUG FIX: the original raised a bare string here, which is a
        # TypeError in modern Python (and deprecated in Python 2);
        # raise a real exception (and fix the "occured" typo).
        raise Exception("An error occurred while reading the IPAC table")

    if len(units) == 0:
        for name in names:
            units[name] = ''

    if len(nulls) == 0:
        nulls_given = False
        for name in names:
            nulls[name] = ''
    else:
        nulls_given = True

    # Pre-compute numpy column types
    numpy_types = {}
    for name in names:
        numpy_types[name] = type_dict[types[name]]

    # Data: read each row, splitting fields by the pipe positions
    # recorded from the header line (exact behavior depends on the
    # chosen *definition*).

    array = {}
    for name in names:
        array[name] = []

    while True:

        if line.strip() == '':
            break

        for i in range(len(pipes) - 1):

            first, last = pipes[i] + 1, pipes[i + 1]

            if definition == 1:
                last = last + 1
                if first == 1:
                    first = 0
            elif definition == 2:
                first = first - 1

            if i + 1 == len(pipes) - 1:
                item = line[first:].strip()
            else:
                item = line[first:last].strip()

            if item.lower() == 'null' and nulls[names[i]] != 'null':
                if nulls[names[i]] == '':
                    if verbose:
                        warnings.warn("WARNING: found unexpected 'null' value. Setting null value for column "+names[i]+" to 'null'")
                    nulls[names[i]] = 'null'
                    nulls_given = True
                else:
                    # BUG FIX: `nulls` is keyed by column NAME; the
                    # original indexed it with the integer `i`, which
                    # raised KeyError instead of the intended message.
                    raise Exception("null value for column "+names[i]+" is set to "+nulls[names[i]]+" but found value 'null'")
            array[names[i]].append(item)

        line = f.readline()

    # All data has been read; close the input file.
    # BUG FIX: the file handle was previously never closed.
    f.close()

    # Check that null values are of the correct type
    if nulls_given:
        for name in names:
            try:
                n = numpy_types[name](nulls[name])
                nulls[name] = n
            except:
                n = invalid[numpy_types[name]]
                for i, item in enumerate(array[name]):
                    if item == nulls[name]:
                        array[name][i] = n
                if verbose:
                    if len(str(nulls[name]).strip()) == 0:
                        warnings.warn("WARNING: empty null value for column "+name+" set to "+str(n))
                    else:
                        warnings.warn("WARNING: null value for column "+name+" changed from "+str(nulls[name])+" to "+str(n))
                nulls[name] = n

    # Convert to numpy arrays
    for name in names:

        if smart_typing:

            dtype = None

            low = min(array[name])
            high = max(array[name])

            if types[name] in ['i', 'int', 'integer']:
                low, high = long(low), long(high)
                for nt in [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64]:
                    if low >= np.iinfo(nt).min and high <= np.iinfo(nt).max:
                        dtype = nt
                        break
            elif types[name] in ['long']:
                low, high = long(low), long(high)
                for nt in [np.uint64, np.int64]:
                    if low >= np.iinfo(nt).min and high <= np.iinfo(nt).max:
                        dtype = nt
                        break
            elif types[name] in ['float', 'real']:
                low, high = float(low), float(high)
                for nt in [np.float32, np.float64]:
                    if low >= np.finfo(nt).min and high <= np.finfo(nt).max:
                        dtype = nt
                        break
            else:
                dtype = type_dict[types[name]]

        else:
            dtype = type_dict[types[name]]

            # If the max integer does not fit in int64 then use uint64.
            # BUG FIX: int64 max is 2**63 - 1, so use >= 2**63 (the
            # original `> 2**63` missed the value 2**63 exactly).
            if dtype == np.int64:
                if max([long(x) for x in array[name]]) >= 2**63:
                    dtype = np.uint64
                    warnings.warn("using type uint64 for column %s" % name)

        array[name] = np.array(array[name], dtype=dtype)

        if smart_typing:
            # BUG FIX: the original called np.min/np.max on the whole
            # `array` dict (and rebound `array` itself) instead of
            # operating on the current column.
            if np.min(array[name]) >= 0 and np.max(array[name]) <= 1:
                array[name] = array[name] == 1

        if self._masked:
            self.add_column(name, array[name], \
                mask=smart_mask(array[name], nulls[name]), unit=units[name], \
                fill=nulls[name])
        else:
            self.add_column(name, array[name], \
                null=nulls[name], unit=units[name])

Example 38

Project: brother_ql
Source File: brother_ql_create.py
View license
def main():
    """Command-line entry point: print an image on a Brother QL label printer.

    Parses the CLI arguments, loads and resizes the image to the device's
    pixel width, thresholds it to a 1-bit raster (dark pixels are printed)
    and writes the resulting raster command stream to the output file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('image')
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('wb'), default=stdout)
    parser.add_argument('--model', default='QL-500')
    parser.add_argument('--list-models', action='store_true', \
      help='List available models and quit (the image argument is still required but ignored)')
    parser.add_argument('--threshold', type=int, default=170)
    parser.add_argument('--loglevel', type=lambda x: getattr(logging, x), default=logging.WARNING)
    args = parser.parse_args()

    logging.basicConfig(level=args.loglevel)

    # Model names are stored upper-case in the models table.
    args.model = args.model.upper()

    if args.list_models:
        print('Supported models:')
        print('\n'.join(models))
        sys.exit(0)

    try:
        qlr = BrotherQLRaster(args.model)
    except BrotherQLUnknownModel:
        sys.exit("Unknown model. Use option --list-models to show available models.")
    qlr.exception_on_warning = True
    device_pixel_width = qlr.get_pixel_width()

    # Resize to the printer's fixed pixel width, keeping the aspect ratio,
    # then convert to 8-bit grayscale.
    im = Image.open(args.image)
    hsize = int(im.size[1] / im.size[0] * device_pixel_width)
    im = im.resize((device_pixel_width, hsize), Image.ANTIALIAS)
    im = im.convert("L")
    arr = np.asarray(im, dtype=np.uint8)
    arr.flags.writeable = True
    # Threshold to a 1-bit raster: dark pixels (gray value below the
    # threshold) are printed (1), light pixels stay blank (0).
    # FIX (naming only): the original labelled these masks the wrong way
    # round (white_idx selected the dark pixels); behavior is unchanged.
    dark_idx = arr[:,:] <  args.threshold
    light_idx = arr[:,:] >= args.threshold
    arr[dark_idx] = 1
    arr[light_idx] = 0

    # Build the raster command stream; commands the selected model does
    # not support are simply skipped.
    try:
        qlr.add_switch_mode()
    except BrotherQLUnsupportedCmd:
        pass
    qlr.add_invalidate()
    qlr.add_initialize()
    try:
        qlr.add_switch_mode()
    except BrotherQLUnsupportedCmd:
        pass
    qlr.add_status_information()
    # Media settings: presumably 62 mm endless tape -- TODO confirm
    # against the brother_ql media tables.
    qlr.mtype = 0x0A
    qlr.mwidth = 62
    qlr.mlength = 0
    qlr.add_media_and_quality(im.size[1])
    try:
        qlr.add_autocut(True)
        qlr.add_cut_every(1)
    except BrotherQLUnsupportedCmd:
        pass
    try:
        qlr.dpi_600 = False
        qlr.cut_at_end = True
        qlr.add_expanded_mode()
    except BrotherQLUnsupportedCmd:
        pass
    qlr.add_margins()
    try:
        qlr.add_compression(True)
    except BrotherQLUnsupportedCmd:
        pass
    qlr.add_raster_data(arr)
    qlr.add_print()

    args.outfile.write(qlr.data)

Example 39

Project: brother_ql
Source File: __init__.py
View license
@route('/api/print/text')
@route('/api/print/text/')
@route('/api/print/text/<content>')
def print_text(content=None):
    """Render *content* as a text label and print it on the label printer.

    Query parameters: ``font_size`` (default 100), ``font_family`` and
    ``font_style`` (defaults taken from DEFAULT_FONT).

    Returns a dict with a ``success`` flag and, on failure, an ``error``
    or ``message`` entry.
    """
    return_dict = {'success': False}

    if content is None:
        return_dict['error'] = 'Please provide the text for the label'
        return return_dict

    # Label geometry and the 1-bit threshold used below.
    threshold = 170
    fontsize = int(request.query.get('font_size', 100))
    width = 720
    margin = 0
    height = 100 + 2*margin

    try:
        font_family = request.query.get('font_family')
        font_style  = request.query.get('font_style')
        if font_family is None:
            font_family = DEFAULT_FONT['family']
            font_style =  DEFAULT_FONT['style']
        if font_style is None:
            font_style =  'Regular'
        font_path = FONTS[font_family][font_style]
    except KeyError:
        # FIX: typo in the user-facing error message ("Couln't").
        return_dict['error'] = "Couldn't find the font & style"
        return return_dict

    # Draw the text centered on a white grayscale canvas.
    im = Image.new('L', (width, height), 'white')
    draw = ImageDraw.Draw(im)
    im_font = ImageFont.truetype(font_path, fontsize)
    textsize = draw.textsize(content, font=im_font)
    vertical_offset = (height - textsize[1])//2
    horizontal_offset = max((width - textsize[0])//2, 0)
    # Empirical nudge for TrueType fonts -- TODO confirm still needed.
    if 'ttf' in font_path: vertical_offset -= 10
    offset = horizontal_offset, vertical_offset
    if DEBUG: print("Offset: {}".format(offset))
    draw.text(offset, content, (0), font=im_font)
    if DEBUG: im.save('sample-out.png')
    # Threshold to a 1-bit raster: dark pixels print (1), light pixels
    # stay blank (0).  FIX (naming only): the original mask names were
    # swapped (white_idx selected the dark pixels); behavior unchanged.
    arr = np.asarray(im, dtype=np.uint8)
    arr.flags.writeable = True
    dark_idx = arr[:,:] <  threshold
    light_idx = arr[:,:] >= threshold
    arr[dark_idx] = 1
    arr[light_idx] = 0

    # Assemble the raster command stream for the configured model.
    qlr = BrotherQLRaster(MODEL)
    qlr.add_switch_mode()
    qlr.add_invalidate()
    qlr.add_initialize()
    qlr.add_status_information()
    # Media settings: presumably 62 mm endless tape -- TODO confirm.
    qlr.mtype = 0x0A
    qlr.mwidth = 62
    qlr.mlength = 0
    qlr.add_media_and_quality(im.size[1])
    qlr.add_autocut(True)
    qlr.add_cut_every(1)
    qlr.dpi_600 = False
    qlr.cut_at_end = True
    qlr.add_expanded_mode()
    qlr.add_margins()
    qlr.add_compression(True)
    qlr.add_raster_data(arr)
    qlr.add_print()

    # Send to the printer backend (skipped entirely in DEBUG mode).
    if not DEBUG:
        try:
            be = BACKEND_CLASS(BACKEND_STRING_DESCR)
            be.write(qlr.data)
            be.dispose()
            del be
        except Exception as e:
            return_dict['message'] = str(e)
            logger.warning('Exception happened: %s', e)
            response.status = 500
            return return_dict

    return_dict['success'] = True
    if DEBUG: return_dict['data'] = str(qlr.data)
    return return_dict

Example 40

Project: RoBO
Source File: example_fmin_fabolas.py
View license
def load_dataset():
    """Download (if missing) and load MNIST, returning six arrays:
    X_train, y_train, X_val, y_val, X_test, y_test.

    Images are float32 vectors of 784 pixels scaled to [0, 255/256];
    labels are uint8 vectors.  The last 10000 training examples are
    split off as the validation set.  (Copied from the Lasagne tutorial.)
    """
    # Pick the right urlretrieve for Python 2 vs Python 3.
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve

    import gzip

    def _fetch(filename, source='http://yann.lecun.com/exdb/mnist/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)

    def _read_idx(filename, offset):
        # Download on demand, then read Yann LeCun's binary IDX format,
        # skipping the header of the given length.
        if not os.path.exists(filename):
            _fetch(filename)
        with gzip.open(filename, 'rb') as fh:
            return np.frombuffer(fh.read(), np.uint8, offset=offset)

    def _images(filename):
        raw = _read_idx(filename, 16)
        # Reshape flat bytes to (examples, channels, rows, columns) and
        # convert to float32 in [0, 255/256] (for compatibility with the
        # version at http://deeplearning.net/data/mnist/mnist.pkl.gz).
        return raw.reshape(-1, 1, 28, 28) / np.float32(256)

    def _labels(filename):
        # Labels are already a plain vector of integers.
        return _read_idx(filename, 8)

    X_train = _images('train-images-idx3-ubyte.gz')
    y_train = _labels('train-labels-idx1-ubyte.gz')
    X_test = _images('t10k-images-idx3-ubyte.gz')
    y_test = _labels('t10k-labels-idx1-ubyte.gz')

    # Reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]

    # Flatten each image to a 784-dimensional vector.
    X_train = X_train.reshape(X_train.shape[0], 28 * 28)
    X_val = X_val.reshape(X_val.shape[0], 28 * 28)
    X_test = X_test.reshape(X_test.shape[0], 28 * 28)

    return X_train, y_train, X_val, y_val, X_test, y_test

Example 41

Project: pupil
Source File: offline_reference_surface.py
View license
    def generate_heatmap(self,section):
        """Build an RGBA gaze heatmap texture for this surface.

        ``section`` is a slice into the surface cache; gaze points from
        all cached frames in that section (with confidence at or above
        ``g_pool.min_data_confidence``) are binned into a 2D histogram
        over the surface, blurred, color-mapped and stored in
        ``self.heatmap`` / ``self.heatmap_texture``.
        """

        if self.cache is None:
            logger.warning('Surface cache is not build yet.')
            return


        # Heatmap resolution follows the surface's real-world size,
        # clamped to at least 1x1.
        x,y = self.real_world_size['x'],self.real_world_size['y']
        x = max(1,int(x))
        y = max(1,int(y))

        # Gaussian kernel size must be odd; std dev scales with it.
        # NOTE(review): the /2 here yields a float under Python 3 --
        # this looks like Python 2 code (cv2 expects an int kernel size).
        filter_size = (int(self.heatmap_detail * x)/2)*2 +1
        std_dev = filter_size /6.
        self.heatmap = np.ones((y,x,4),dtype=np.uint8)
        all_gaze = []

        # Collect normalized gaze positions from every cached frame in
        # the requested section.
        for frame_idx,c_e in enumerate(self.cache[section]):
            if c_e:
                frame_idx+=section.start
                for gp in self.gaze_on_srf_by_frame_idx(frame_idx,c_e['m_from_screen']):
                    if gp['confidence']>=self.g_pool.min_data_confidence:
                        all_gaze.append(gp['norm_pos'])

        if not all_gaze:
            logger.warning("No gaze data on surface for heatmap found.")
            # Dummy off-surface point keeps the histogram code working.
            all_gaze.append((-1.,-1.))
        # Scale normalized coordinates to real-world surface units.
        all_gaze = np.array(all_gaze)
        all_gaze *= [self.real_world_size['x'],self.real_world_size['y']]
        hist,xedge,yedge = np.histogram2d(all_gaze[:,0], all_gaze[:,1],
                                            bins=[x,y],
                                            range=[[0, self.real_world_size['x']], [0,self.real_world_size['y']]],
                                            normed=False,
                                            weights=None)


        # histogram2d is x-major; rotate into image orientation.
        hist = np.rot90(hist)

        #smoothing..
        hist = cv2.GaussianBlur(hist, (filter_size,filter_size),std_dev)
        # Normalize to the 0..255 range expected by the color map.
        maxval = np.amax(hist)
        if maxval:
            scale = 255./maxval
        else:
            scale = 0

        hist = np.uint8( hist*(scale) )

        #colormapping
        c_map = cv2.applyColorMap(hist, cv2.COLORMAP_JET)

        # RGB comes from the color map; constant alpha 125 for overlay.
        self.heatmap[:,:,:3] = c_map
        self.heatmap[:,:,3] = 125
        self.heatmap_texture = Named_Texture()
        self.heatmap_texture.update_from_ndarray(self.heatmap)

Example 42

Project: pymo
Source File: _numpysurfarray.py
View license
def pixels3d (surface):
    """pygame.numpyarray.pixels3d (Surface): return array

    reference pixels into a 3d array

    Build a 3D numpy array that directly references (does not copy) the
    pixel memory of a 24- or 32-bit Surface; writing to the array writes
    to the Surface.  The Surface stays locked for the array's lifetime
    (see Surface.lock).  Lower pixel formats cannot be referenced and
    raise ValueError, as do unsupported color masks.
    """
    bpp = surface.get_bytesize ()
    if not 3 <= bpp <= 4:
        raise ValueError("unsupported bit depth for 3D reference array")
    little = pygame.get_sdl_byteorder () == pygame.LIL_ENDIAN

    # Work out where the red channel sits and in which direction the
    # channels run, from the surface's channel shifts and byte order.
    shifts = surface.get_shifts ()
    if (shifts[0], shifts[1], shifts[2]) == (16, 8, 0):
        # RGB layout
        start, step = (2, -1) if little else (0, 1)
    elif (shifts[0], shifts[1], shifts[2]) == (0, 8, 16):
        # BGR layout
        start, step = (0, 1) if little else (2, -1)
    else:
        raise ValueError("unsupported colormasks for 3D reference array")

    # 32-bit big-endian surfaces carry the alpha/pad byte first.
    if bpp == 4 and not little:
        start += 1

    # Zero-copy view: strides walk one pixel across, one row down, and
    # one byte per channel (possibly backwards).
    return numpy.ndarray (
        shape=(surface.get_width (), surface.get_height (), 3),
        dtype=numpy.uint8, buffer=surface.get_buffer (),
        offset=start, strides=(bpp, surface.get_pitch (), step))

Example 43

Project: pymo
Source File: _numpysurfarray.py
View license
def blit_array (surface, array):
    """pygame.numpyarray.blit_array (Surface, array): return None

    blit directly from a array values

    Directly copy values from an array into a Surface. This is faster
    than converting the array into a Surface and blitting. The array
    must be the same dimensions as the Surface and will completely
    replace all pixel values.

    This function will temporarily lock the Surface as the new values
    are copied.
    """
    bpp = surface.get_bytesize ()
    if bpp <= 0 or bpp > 4:
        raise ValueError("unsupported bit depth for surface")
    
    shape = array.shape
    width = surface.get_width ()

    # Unsigned integer type matching the surface's byte depth; 3-byte
    # surfaces have no matching numpy type, hence the None placeholder.
    typecode = (numpy.uint8, numpy.uint16, None, numpy.uint32)[bpp - 1]
    array = array.astype (typecode)

    # Taken from from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
    if len(shape) == 3 and shape[2] == 3:
        # RGB array: pack the three channels into single pixel values
        # using the surface's per-channel shifts and losses.
        array = numpy.transpose (array, (1, 0, 2))
        shifts = surface.get_shifts ()
        losses = surface.get_losses ()
        array = (array[:,:,::3] >> losses[0] << shifts[0]) | \
                (array[:,:,1::3] >> losses[1] << shifts[1]) | \
                (array[:,:,2::3] >> losses[2] << shifts[2])
    elif len (shape) == 2:
        # 2D array of already-mapped pixel values.
        array = numpy.transpose (array)
    else:
        raise ValueError("must be a valid 2d or 3d array")

    if width != shape[0] or surface.get_height () != shape[1]:
        raise ValueError("array must match the surface dimensions")

    itemsize = array.itemsize
    # NOTE(review): tostring() plus str-based regex surgery below is
    # Python 2 era code; under Python 3 the bytes/str mixing would fail.
    data = array.tostring ()

    if itemsize > bpp:
        # Trim bytes from each element, keep least significant byte(s)
        pattern = '%s(%s)' % ('.' * (itemsize - bpp), '.' * bpp)
        if pygame.get_sdl_byteorder () == pygame.LIL_ENDIAN:
            pattern = '(%s)%s' % ('.' * bpp, '.' * (itemsize - bpp))
        data = ''.join (re.compile (pattern, flags=re.DOTALL).findall (data))
    elif itemsize < bpp:
        # Add pad bytes to each element, at most significant end
        pad = '\0' * (bpp - itemsize)
        pixels = re.compile ('.' * itemsize, flags=re.DOTALL).findall (data)
        data = pad.join (pixels)
        if pygame.get_sdl_byteorder () == pygame.LIL_ENDIAN:
            data = data + pad
        else:
            data = pad + data

    # Add zeros pad for pitch correction
    pitchdiff = surface.get_pitch () - width * bpp
    if pitchdiff > 0:
        pad = '\0' * pitchdiff
        rows = re.compile ('.' * width * bpp, flags=re.DOTALL).findall (data)
        data = pad.join (rows) + pad

    surface.get_buffer ().write (data, 0)

Example 44

Project: pyqtgraph
Source File: VideoSpeedTest.py
View license
def mkData():
    """(Re)generate the demo video data for the current UI settings.

    Reads frame count, size, dtype and RGB flag from the UI widgets,
    synthesizes smoothed random-noise frames, draws a few fixed marker
    lines at the maximum value, and stores the result in the globals
    ``data`` and ``cache`` (one cached dataset at a time).
    """
    with pg.BusyCursor():
        global data, cache, ui
        frames = ui.framesSpin.value()
        width = ui.widthSpin.value()
        height = ui.heightSpin.value()
        dtype = (ui.dtypeCombo.currentText(), ui.rgbCheck.isChecked(), frames, width, height)
        if dtype not in cache:
            if dtype[0] == 'uint8':
                dt = np.uint8
                loc = 128
                scale = 64
                mx = 255
            elif dtype[0] == 'uint16':
                dt = np.uint16
                loc = 4096
                scale = 1024
                mx = 2**16
            elif dtype[0] == 'float':
                # FIX: np.float is a deprecated (and since NumPy 1.24
                # removed) alias of the builtin float; use float64.
                dt = np.float64
                loc = 1.0
                scale = 0.1
                # FIX: mx was never set in this branch, so the marker
                # lines below raised NameError for float data.
                mx = 1.0
            
            if ui.rgbCheck.isChecked():
                data = np.random.normal(size=(frames,width,height,3), loc=loc, scale=scale)
                data = pg.gaussianFilter(data, (0, 6, 6, 0))
            else:
                data = np.random.normal(size=(frames,width,height), loc=loc, scale=scale)
                data = pg.gaussianFilter(data, (0, 6, 6))
            # Clipping only applies to the bounded integer dtypes.
            if dtype[0] != 'float':
                data = np.clip(data, 0, mx)
            data = data.astype(dt)
            # Fixed bright markers make scaling/orientation easy to judge.
            data[:, 10, 10:50] = mx
            data[:, 9:12, 48] = mx
            data[:, 8:13, 47] = mx
            cache = {dtype: data} # clear to save memory (but keep one to prevent unnecessary regeneration)
            
        data = cache[dtype]
        updateLUT()
        updateSize()

Example 45

Project: DoNotSnap
Source File: RegionOfInterest.py
View license
def findRANSACCircles(edges, circleSearches=5):
    """Detect up to ``circleSearches`` circles in a binary edge image via
    RANSAC and return a uint8 mask with the accepted circles filled in.

    Each accepted circle is also erased from the working edge image so
    later searches find different circles.

    NOTE(review): uses ``xrange`` and ``cv2.cv.CV_DIST_L1`` -- this is
    Python 2 / OpenCV 2.x code.
    """
    edges = edges.copy()
    mask = np.zeros(edges.shape, dtype=np.uint8)

    # Only circles whose radius falls in this range (pixels) are accepted.
    minRadius = 10
    maxRadius = 100

    def verifyCircle(dt, center, radius):
        """Return (inlier fraction, inlier points) for a candidate circle.

        ``dt`` is the distance transform of the inverted edge image, so
        dt[y, x] is the distance from (x, y) to the nearest edge pixel.
        """
        minInlierDist = 2.0
        maxInlierDistMax = 100.0
        # Allowed distance from a sample to the nearest edge scales with
        # the radius, clamped to [2, 100].
        maxInlierDist = max(minInlierDist, min(maxInlierDistMax, radius / 25.0))

        # choose samples along the circle and count inlier percentage
        samples = np.arange(0, 2 * np.pi, 0.05)
        cX = radius * np.cos(samples) + center[0]
        cY = radius * np.sin(samples) + center[1]

        coords = np.array((cX, cY)).T
        counter = len(samples)

        # Keep only the sample points that fall inside the image.
        cXMask = (cX < dt.shape[1]) & (cX >= 0)
        cYMask = (cY < dt.shape[0]) & (cY >= 0)
        cMask = cXMask & cYMask

        gdt = dt[cY[cMask].astype(int), cX[cMask].astype(int)]
        dtMask = gdt < maxInlierDist

        inlierSet = coords[cMask][dtMask]
        inlier = len(inlierSet)

        return float(inlier) / counter, inlierSet

    def getCircle(p1, p2, p3):
        """Return (center, radius) of the circle through three points, or
        (None, None) when the points are collinear (degenerate)."""
        x1 = float(p1[0])
        x2 = float(p2[0])
        x3 = float(p3[0])

        y1 = float(p1[1])
        y2 = float(p2[1])
        y3 = float(p3[1])

        # Circumcenter from the standard determinant formulas.
        center_x = (x1 * x1 + y1 * y1) * (y2 - y3) + (x2 * x2 + y2 * y2) * (y3 - y1) + (x3 * x3 + y3 * y3) * (y1 - y2)
        x = 2 * (x1 * (y2 - y3) - y1 * (x2 - x3) + x2 * y3 - x3 * y2)
        if not x:
            return None, None
        center_x /= x

        center_y = (x1 * x1 + y1 * y1) * (x3 - x2) + (x2 * x2 + y2 * y2) * (x1 - x3) + (x3 * x3 + y3 * y3) * (x2 - x1)
        y = 2 * (x1 * (y2 - y3) - y1 * (x2 - x3) + x2 * y3 - x3 * y2)
        if not y:
            return None, None
        center_y /= y

        radius = math.sqrt((center_x - x1) * (center_x - x1) + (center_y - y1) * (center_y - y1))

        return (center_x, center_y), radius

    def getPointPositions(binaryImage):
        # (x, y) coordinates of all non-zero (edge) pixels.
        return [(x, y) for y, x in zip(*np.where(binaryImage > 0))]

    for _ in xrange(circleSearches):
        edgePositions = getPointPositions(edges)

        # create distance transform to efficiently evaluate distance to nearest edge
        dt = cv2.distanceTransform(255 - edges, cv2.cv.CV_DIST_L1, 3)

        bestCircleCenter = None
        bestCircleRadius = 0
        bestCirclePercentage = 0

        minCirclePercentage = 0.6  # at least 60% of a circle must be present

        maxNrOfIterations = len(edgePositions)  # TODO: adjust this parameter or include some real ransac criteria with inlier/outlier percentages to decide when to stop

        for its in xrange(maxNrOfIterations):
            # RANSAC: randomly choose 3 point and create a circle:

            # TODO: choose randomly but more intelligent,
            # so that it is more likely to choose three points of a circle.
            # For example if there are many small circles, it is unlikely to randomly choose 3 points of the same circle.
            idx1 = np.random.randint(len(edgePositions))
            idx2 = np.random.randint(len(edgePositions))
            idx3 = np.random.randint(len(edgePositions))

            # we need 3 different samples:
            if idx1 == idx2 or idx1 == idx3 or idx3 == idx2:
                continue

            # create circle from 3 points:
            center, radius = getCircle(edgePositions[idx1], edgePositions[idx2], edgePositions[idx3])
            if not center or radius > maxRadius:
                continue

            # inlier set unused at the moment but could be used to approximate a (more robust) circle from alle inlier
            # verify or falsify the circle by inlier counting:
            cPerc, inlierSet = verifyCircle(dt, center, radius)

            # keep the best-supported circle of sufficient radius
            if cPerc >= bestCirclePercentage and radius >= minRadius:
                bestCirclePercentage = cPerc
                bestCircleRadius = radius
                bestCircleCenter = center

        # draw if good circle was found
        if bestCirclePercentage >= minCirclePercentage and bestCircleRadius >= minRadius:
            cv2.circle(mask, (int(round(bestCircleCenter[0])), int(round(bestCircleCenter[1]))), int(round(bestCircleRadius)), 255, -1)
            # mask found circle
            cv2.circle(edges, (int(round(bestCircleCenter[0])), int(round(bestCircleCenter[1]))), int(round(bestCircleRadius)), (0, 0, 0), 3)
    return mask

Example 46

Project: python-ivi
Source File: hprtl.py
View license
def parse_hprtl(rtl_file):
    """Convert HP Raster Transfer Language (RTL) to numpy array.

    ``rtl_file`` may be a filename or an open binary file object.  Only
    the first raster section is decoded.  Returns an (height, width, 3)
    uint8 RGB array.
    """
    # Parser state set by the ESC* commands seen so far.
    color = 1
    width = 0
    byte_width = 0
    height = 0
    compression = 0

    current_row = 0
    plane_cnt = 1
    current_plane = 0

    resolution = 1

    # Per-plane bit data; allocated lazily once the first row arrives.
    plane_data = None

    # True while inside a "start raster graphics" .. "end" section.
    in_raster = False

    # Components for the "assign index" palette command (*v#a/#b/#c/#i).
    red = 0
    green = 0
    blue = 0

    color_list = [
        (255, 255, 255), # white
        (  0,   0,   0) # black
    ]

    if type(rtl_file) == str:
        rtlf = open(rtl_file, 'rb')
    else:
        rtlf = rtl_file

    while True:
        s = rtlf.read(1)

        # End of file.
        if len(s) == 0:
            break

        # Scan for the ESC (0x1b) that introduces every RTL command.
        if s[0] != 0x1b:
            continue

        s = rtlf.read(1)

        if len(s) == 0:
            break

        if s[0] == ord('*'):
            # valid ESC* command
            # read [letter][numbers][letter]
            cmd = rtlf.read(2)

            while True:
                # Stop once the terminating command letter is read.
                if (cmd[-1] < ord('0') or cmd[-1] > ord('9')) and cmd[-1] != ord('-'):
                    break

                s = rtlf.read(1)

                # ignore null bytes
                if s[0] != 0:
                    cmd += s

            # ca: command group letter, cb: terminating letter.
            ca = cmd[0]
            cb = cmd[-1]

            #print(cmd)

            if ca == ord('r') and (cb == ord('u') or cb == ord('U')):
                # color command *r#u or *r#U
                color = int(cmd[1:-1])

                if color == -4:
                    # KCMY
                    plane_cnt = 4
                    color_list = [
                        (255, 255, 255), # white
                        (127, 127, 127), # gray (dim white)
                        (  0, 255, 255), # cyan
                        (  0, 127, 127), # dim cyan
                        (255,   0, 255), # magenta
                        (127,   0, 127), # dim magenta
                        (  0,   0, 255), # blue
                        (  0,   0, 127), # dim blue
                        (255, 255,   0), # yellow
                        (127, 127,   0), # dim yellow
                        (  0, 255,   0), # green
                        (  0, 127,   0), # dim green
                        (255,   0,   0), # red
                        (127,   0,   0), # dim red
                        ( 63,  63,  63), # dark gray
                        (  0,   0,   0) # black
                    ]
                elif color == -3:
                    # CMY
                    plane_cnt = 3
                    color_list = [
                        (255, 255, 255), # white
                        (  0, 255, 255), # cyan
                        (255,   0, 255), # magenta
                        (  0,   0, 255), # blue
                        (255, 255,   0), # yellow
                        (  0, 255,   0), # green
                        (255,   0,   0), # red
                        (  0,   0,   0)  # black
                    ]
                elif color == 1:
                    # K
                    plane_cnt = 1
                    color_list = [
                        (255, 255, 255), # white
                        (  0,   0,   0) # black
                    ]
                elif color == 3:
                    # RGB
                    plane_cnt = 3
                    color_list = [
                        (  0,   0,   0), # black
                        (255,   0,   0), # red
                        (  0, 255,   0), # green
                        (255, 255,   0), # yellow
                        (  0,   0, 255), # blue
                        (255,   0, 255), # magenta
                        (  0, 255, 255), # cyan
                        (255, 255, 255)  # white
                    ]
                elif color == 4:
                    # indexed RGB
                    plane_cnt = 4
                    color_list = [
                        (  0,   0,   0), # black
                        (  0,   0,   0), # black
                        (127,   0,   0), # dim red
                        (255,   0,   0), # red
                        (  0, 127,   0), # dim green
                        (  0, 255,   0), # green
                        (127, 127,   0), # dim yellow
                        (255, 255,   0), # yellow
                        (  0,   0, 127), # dim blue
                        (  0,   0, 255), # blue
                        (127,   0, 127), # dim magenta
                        (255,   0, 255), # magenta
                        (  0, 127, 127), # dim cyan
                        (  0, 255, 255), # cyan
                        (127, 127, 127),  # gray
                        (255, 255, 255)  # white
                    ]
                else:
                    raise Exception("Invalid color")
            elif ca == ord('r') and (cb == ord('a') or cb == ord('A')):
                # start raster graphics
                # if we missed the stop of one section, stop on the start of the next
                if in_raster:
                    in_raster = False
                # only grab the first section
                if height == 0:
                    in_raster = True
            elif ca == ord('r') and (cb == ord('c') or cb == ord('C')):
                # end raster graphics
                in_raster = False
            elif ca == ord('r') and (cb == ord('b') or cb == ord('B')):
                # unknown
                pass
            elif ca == ord('r') and (cb == ord('s') or cb == ord('S')):
                # raster width (pixels); rows are stored packed, 8 pixels/byte
                width = int(cmd[1:-1])
                byte_width = int((width+7)/8)
            elif ca == ord('r') and (cb == ord('t') or cb == ord('T')):
                # raster height
                #height = int(cmd[1:-1])
                pass
            elif ca == ord('b') and (cb == ord('m') or cb == ord('M')):
                # set compression
                compression = int(cmd[1:-1])
            elif ca == ord('t') and (cb == ord('r') or cb == ord('R')):
                # set resolution
                resolution = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('a') or cb == ord('A')):
                # set red component
                red = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('b') or cb == ord('B')):
                # set green component
                green = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('c') or cb == ord('C')):
                # set blue component
                blue = int(cmd[1:-1])
            elif ca == ord('v') and (cb == ord('i') or cb == ord('I')):
                # assign index
                ind = int(cmd[1:-1])
                color_list[ind] = (red, green, blue)
            elif ca == ord('b') and (cb == ord('v') or cb == ord('V') or cb == ord('w') or cb == ord('W')):
                # image row
                l = int(cmd[1:-1])

                # read row
                d = rtlf.read(l)

                # skip if we are not in a raster section
                if not in_raster:
                    continue

                # set width if not yet set
                # width must be set if compression enabled, otherwise
                # all lines will be the same length
                if width == 0:
                    width = l * 8

                if byte_width == 0:
                    byte_width = l

                # add row if on first plane
                if current_plane == 0:
                    if height == 0:
                        # Lazy allocation; grown in chunks of 10 rows below.
                        plane_data = np.zeros((10, byte_width, plane_cnt), dtype=np.uint8)

                    height += 1

                    if height >= plane_data.shape[0]:
                        # need to add more rows
                        plane_data = np.append(plane_data, np.zeros((10, byte_width, plane_cnt), dtype=np.uint8), 0)

                if compression == 0 or compression == 1:
                    # Uncompressed row: copy bytes straight in.
                    x = 0
                    for b in d:
                        plane_data[height-1][x][current_plane] = b
                        x += 1
                elif compression == 2:
                    # PackBits-style run-length decoding: a header byte h
                    # of <128 means copy h+1 literal bytes, >128 means
                    # repeat the next byte 257-h times, 128 is a no-op.
                    k = 0
                    x = 0
                    while True:
                        if len(d) <= k:
                            break
                        h = d[k]
                        k += 1
                        if h == 128:
                            continue
                        if h < 128:
                            for j in range(h+1):
                                b = d[k]
                                k += 1
                                plane_data[height-1][x][current_plane] = b
                                x += 1
                        if h > 128:
                            b = d[k]
                            k += 1
                            for j in range(257-h):
                                plane_data[height-1][x][current_plane] = b
                                x += 1
                else:
                    raise Exception("Invalid compression")

                # go to next plane, if more than one plane
                # NOTE(review): plane_cnt > 0 is always true here --
                # presumably plane_cnt > 1 was intended; verify intent.
                if plane_cnt > 0:
                    current_plane += 1
                    # *b#W transfers the row's last plane; wrap around.
                    if current_plane == plane_cnt or cb == ord('w') or cb == ord('W'):
                        current_plane = 0

    # convert to bits
    plane_data = np.unpackbits(plane_data, axis=1)

    # strip off extra rows
    plane_data = plane_data[0:height, 0:width, :]

    # convert plane data to RGB
    # Pack the per-pixel plane bits into a single palette index.
    plane_data = np.right_shift(np.packbits(plane_data, axis=2), 8-plane_cnt)
    
    rgb_data = np.zeros((height, width, 3), dtype=np.uint8)

    for y in range(height):
        for x in range(width):
            rgb_data[y][x] = color_list[plane_data[y][x][0]]

    plane_data = rgb_data

    return plane_data

Example 47

Project: mipp
Source File: loader.py
View license
    def _read(self, rows, columns, mda):
        shape = (rows.stop - rows.start, columns.stop - columns.start)
        if (columns.start < 0 or
            columns.stop > mda.image_size[0] or
            rows.start < 0 or
            rows.stop > mda.image_size[1]):
            raise IndexError, "index out of range"

        image_files = self.image_files
        
        #
        # Order segments
        #
        segments = {}
        for f in image_files:
            s = _xrit.read_imagedata(f)
            segments[s.segment.seg_no] = f
        start_seg_no = s.segment.planned_start_seg_no
        end_seg_no = s.segment.planned_end_seg_no
        ncols =  s.structure.nc
        segment_nlines = s.structure.nl

        #
        # Data type
        #
        converter = _null_converter
        if mda.data_type == 8:        
            data_type = numpy.uint8
            data_type_len = 8
        elif mda.data_type == 10:
            converter = convert.dec10216
            data_type = numpy.uint16
            data_type_len = 16
        elif mda.data_type == 16:
            data_type = numpy.uint16
            data_type_len = 16
        elif mda.data_type == -16:
            data_type = '>u2'
            data_type_len = 16
        else:
            raise mipp.ReaderError, "unknown data type: %d bit per pixel"\
                %mda.data_type

        #
        # Calculate initial and final line and column.
        # The interface 'load(..., center, size)' will produce
        # correct values relative to the image orientation. 
        # line_init, line_end : 1-based
        #
        line_init = rows.start + 1
        line_end = line_init + rows.stop - rows.start - 1
        col_count = shape[1]
        col_offset = (columns.start)*data_type_len//8

        #
        # Calculate initial and final segments
        # depending on the image orientation.
        # seg_init, seg_end : 1-based.
        #
        seg_init = ((line_init-1)//segment_nlines) + 1
        seg_end = ((line_end-1)//segment_nlines) + 1

        #
        # Calculate initial line in image, line increment
        # offset for columns and factor for columns,
        # and factor for columns, depending on the image
        # orientation
        #
        if mda.first_pixel == 'north west':
            first_line = 0
            increment_line = 1
            factor_col = 1
        elif mda.first_pixel == 'north east':
            first_line = 0
            increment_line = 1
            factor_col = -1
        elif mda.first_pixel == 'south west':
            first_line = shape[0] - 1
            increment_line = -1
            factor_col = 1
        elif mda.first_pixel == 'south east':
            first_line = shape[0] - 1
            increment_line = -1
            factor_col = -1
        else:
            raise mipp.ReaderError, "unknown geographical orientation of " + \
                "first pixel: '%s'"%mda.first_pixel

        #
        # Generate final image with no data
        #
        image = numpy.zeros(shape, dtype=data_type) + mda.no_data_value
    
        #
        # Begin the segment processing.
        #
        seg_no = seg_init
        line_in_image = first_line
        while seg_no <= seg_end:
            line_in_segment = 1
      
            #
            # Calculate initial line in actual segment.
            #
            if seg_no == seg_init:
                init_line_in_segment = (line_init
                                        - (segment_nlines*(seg_init - 1)))
            else:
                init_line_in_segment = 1

            #
            # Calculate final line in actual segment.
            #
            if seg_no == seg_end:
                end_line_in_segment = line_end - (segment_nlines*(seg_end - 1))
            else:
                end_line_in_segment = segment_nlines

            #
            # Open segment file.
            #
            seg_file = segments.get(seg_no, None)
            if not seg_file:
                #
                # No data for this segment.
                #
                logger.warning("Segment number %d not found"%seg_no)

                # all image lines are already set to no-data count.
                line_in_segment = init_line_in_segment
                while line_in_segment <= end_line_in_segment:
                    line_in_segment += 1
                    line_in_image += increment_line
            else:
                #
                # Data for this segment.
                #
                logger.info("Read %s"%seg_file)
                seg = _xrit.read_imagedata(seg_file)
            
                #
                # Skip lines not processed.
                #
                while line_in_segment < init_line_in_segment:
                    line = seg.readline()
                    line_in_segment += 1

                #
                # Reading and processing segment lines.
                #
                while line_in_segment <= end_line_in_segment:
                    line = seg.readline()[mda.line_offset:]
                    line = converter(line)

                    line = (numpy.frombuffer(line,
                                             dtype=data_type,
                                             count=col_count,
                                             offset=col_offset)[::factor_col])
                
                    #
                    # Insert image data.
                    #
                    image[line_in_image] = line
                
                    line_in_segment += 1
                    line_in_image += increment_line
            
                seg.close()

            seg_no += 1

        #
        # Compute mask before calibration
        #

        mask = (image == mda.no_data_value)

        #
        # Calibrate ?
        #
        mda.is_calibrated = False
        if self.do_calibrate:
            # do this before masking.
            calibrate = self.do_calibrate
            if isinstance(calibrate, bool):
                # allow boolean True/False for 1/0
                calibrate = int(calibrate)
            image, mda.calibration_unit = mda.calibrate(image, calibrate=calibrate)
            mda.is_calibrated = True
        else:
            mda.calibration_unit = ""

        #
        # With or without mask ?
        #
        if self.do_mask and not isinstance(image, numpy.ma.core.MaskedArray):
            image = numpy.ma.array(image, mask=mask, copy=False)
        elif ((not self.do_mask) and 
                isinstance(image, numpy.ma.core.MaskedArray)):
            image = image.filled(mda.no_data_value)
            
        return image

Example 48

Project: mpop
Source File: ninjotiff.py
View license
def save(geo_image, filename, ninjo_product_name=None, writer_options=None,
         **kwargs):
    """Write *geo_image* to *filename* through the Ninjo TIFF writer.

    :Parameters:
        geo_image : mpop.imageo.geo_image.GeoImage
            See MPOP's documentation.
        filename : str
            The name of the TIFF file to be created
    :Keywords:
        ninjo_product_name : str
            Optional index to Ninjo configuration file.
        writer_options : dict
            options dictionary as defined in MPOP interface
            See _write
        kwargs : dict
            See _write

    **Note**:
        * Some arguments are type casted, since they could come from a config file, read as strings.
        * 8 bits grayscale with a colormap (if specified, inverted for IR channels).
        * 16 bits grayscale with no colormap (if specified, MinIsWhite is set for IR).
        * min value will be reserved for transparent color.
        * If possible mpop.imageo.image's standard finalize will be used.
    """
    if writer_options:
        # Writer options extend/override the plain keyword arguments.
        kwargs.update(writer_options)
        if 'ninjo_product_name' in writer_options:
            ninjo_product_name = writer_options['ninjo_product_name']

    # Output sample type: 8 bit unless 16 bits were explicitly requested
    # (the value may arrive as a string from a config file, hence int()).
    if 'nbits' in kwargs:
        requested_bits = int(kwargs['nbits'])
    else:
        requested_bits = 0
    dtype = np.uint16 if requested_bits == 16 else np.uint8

    try:
        value_range = (float(kwargs["ch_min_measurement_unit"]),
                       float(kwargs["ch_max_measurement_unit"]))
    except KeyError:
        # Either bound missing: let _finalize pick the range.
        value_range = None

    scaled_01 = bool(kwargs.get("data_is_scaled_01", True))

    data, gradient, intercept, transparent = _finalize(
        geo_image,
        dtype=dtype,
        data_is_scaled_01=scaled_01,
        value_range_measurement_unit=value_range,)

    area_def = geo_image.area
    time_slot = geo_image.time_slot

    # Ninjo-specific tiff tags.
    kwargs['gradient'] = gradient
    kwargs['axis_intercept'] = intercept
    kwargs['transparent_pix'] = transparent
    kwargs['image_dt'] = time_slot
    kwargs['is_calibrated'] = True

    if geo_image.mode == 'P' and 'cmap' not in kwargs:
        # Build one 256-entry 8-bit lookup table per palette band,
        # zero-padding short palettes.
        lookup_tables = []
        for band in zip(*geo_image.palette):
            lut = list((np.array(band) * 255).astype(np.uint8))
            lut += [0] * (256 - len(lut))
            lookup_tables.append(lut)
        kwargs['cmap'] = tuple(lookup_tables)

    write(data, filename, area_def, ninjo_product_name, **kwargs)

Example 49

Project: mpop
Source File: image.py
View license
    def pil_image(self):
        """Return a PIL image from the current image.

        If ``_finalize`` supplies a fill value, masked pixels are filled
        with it and the resulting PIL mode matches ``self.mode``.
        Otherwise an alpha band is synthesized from the mask (opaque
        where data is valid, transparent where it is missing), so "L"
        is promoted to "LA" and "RGB" to "RGBA".

        Raises TypeError for modes other than "L", "LA", "RGB", "RGBA".
        """
        # channels: masked arrays, one per band; fill_value: value(s) to
        # substitute for masked pixels, or None.
        channels, fill_value = self._finalize()

        if self.is_empty():
            # No data at all: return a zero-sized image in the current mode.
            return Pil.new(self.mode, (0, 0))

        if(self.mode == "L"):
            if fill_value is not None:
                img = Pil.fromarray(channels[0].filled(fill_value))
            else:
                # No fill value: fill with 0 and add an alpha band built
                # from the mask.
                img = Pil.fromarray(channels[0].filled(0))
                alpha = np.zeros(channels[0].shape, np.uint8)
                mask = np.ma.getmaskarray(channels[0])
                # 0 (transparent) where masked, 255 (opaque) elsewhere.
                alpha = np.where(mask, alpha, 255)
                pil_alpha = Pil.fromarray(alpha)

                img = Pil.merge("LA", (img, pil_alpha))
        elif(self.mode == "LA"):
            if fill_value is not None:
                img = Pil.fromarray(channels[0].filled(fill_value))
                pil_alpha = Pil.fromarray(channels[1])
            else:
                img = Pil.fromarray(channels[0].filled(0))
                alpha = np.zeros(channels[0].shape, np.uint8)
                mask = np.ma.getmaskarray(channels[0])
                # Force transparency where the luminance band is masked,
                # keep the image's own alpha elsewhere.
                alpha = np.where(mask, alpha, channels[1])
                pil_alpha = Pil.fromarray(alpha)
            img = Pil.merge("LA", (img, pil_alpha))

        elif(self.mode == "RGB"):
            # Mask where all channels have missing data (incomplete data will
            # be shown).
            mask = (np.ma.getmaskarray(channels[0]) &
                    np.ma.getmaskarray(channels[1]) &
                    np.ma.getmaskarray(channels[2]))

            if fill_value is not None:
                # fill_value is indexed per channel here — presumably a
                # 3-sequence for RGB; confirm against _finalize.
                pil_r = Pil.fromarray(channels[0].filled(fill_value[0]))
                pil_g = Pil.fromarray(channels[1].filled(fill_value[1]))
                pil_b = Pil.fromarray(channels[2].filled(fill_value[2]))
                img = Pil.merge("RGB", (pil_r, pil_g, pil_b))
            else:
                pil_r = Pil.fromarray(channels[0].filled(0))
                pil_g = Pil.fromarray(channels[1].filled(0))
                pil_b = Pil.fromarray(channels[2].filled(0))

                # Transparent only where *all* bands are missing.
                alpha = np.zeros(channels[0].shape, np.uint8)
                alpha = np.where(mask, alpha, 255)
                pil_a = Pil.fromarray(alpha)

                img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))

        elif(self.mode == "RGBA"):
            # Mask where all channels have missing data (incomplete data will
            # be shown).
            mask = (np.ma.getmaskarray(channels[0]) &
                    np.ma.getmaskarray(channels[1]) &
                    np.ma.getmaskarray(channels[2]) &
                    np.ma.getmaskarray(channels[3]))

            if fill_value is not None:
                pil_r = Pil.fromarray(channels[0].filled(fill_value[0]))
                pil_g = Pil.fromarray(channels[1].filled(fill_value[1]))
                pil_b = Pil.fromarray(channels[2].filled(fill_value[2]))
                pil_a = Pil.fromarray(channels[3].filled(fill_value[3]))
                img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
            else:
                pil_r = Pil.fromarray(channels[0].filled(0))
                pil_g = Pil.fromarray(channels[1].filled(0))
                pil_b = Pil.fromarray(channels[2].filled(0))

                # Zero alpha where everything is masked, otherwise keep the
                # image's own alpha band.
                alpha = np.where(mask, 0, channels[3])
                pil_a = Pil.fromarray(alpha)

                img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))

        else:
            raise TypeError("Does not know how to use mode %s." % (self.mode))

        return img

Example 50

Project: glymur
Source File: test_openjp2.py
View license
def tile_decoder(**kwargs):
    """Fixture called with various configurations by many tests.

    Reads a tile.  That's all it does.

    Expected keyword arguments:
        filename: path of the JPEG 2000 file to open.
        codec_format: openjp2 codec-format constant, used both for the
            decode parameters and to create the decompressor.
        x0, y0, x1, y1: bounds of the decode area.
    """
    # Open the input as a read-only (True) openjp2 file stream.
    stream = openjp2.stream_create_default_file_stream(kwargs['filename'],
                                                       True)
    dparam = openjp2.set_default_decoder_parameters()

    dparam.decod_format = kwargs['codec_format']

    # Do not use layer decoding limitation.
    dparam.cp_layer = 0

    # do not use resolution reductions.
    dparam.cp_reduce = 0

    codec = openjp2.create_decompress(kwargs['codec_format'])

    # Silence all library callbacks for the test run.
    openjp2.set_info_handler(codec, None)
    openjp2.set_warning_handler(codec, None)
    openjp2.set_error_handler(codec, None)

    openjp2.setup_decoder(codec, dparam)
    image = openjp2.read_header(stream, codec)
    openjp2.set_decode_area(codec, image,
                            kwargs['x0'], kwargs['y0'],
                            kwargs['x1'], kwargs['y1'])

    # NOTE(review): buffer shape is hard-coded for the test fixture image —
    # confirm it matches the file being decoded.
    data = np.zeros((1150, 2048, 3), dtype=np.uint8)
    while True:
        # rargs: tile index at [0], data size at [1], go-on flag last.
        rargs = openjp2.read_tile_header(codec, stream)
        tidx = rargs[0]
        size = rargs[1]
        go_on = rargs[-1]
        if not go_on:
            # No more tiles to decode.
            break
        openjp2.decode_tile_data(codec, tidx, data, size, stream)

    # Finish decompression and release all native resources.
    openjp2.end_decompress(codec, stream)
    openjp2.destroy_codec(codec)
    openjp2.stream_destroy(stream)
    openjp2.image_destroy(image)