numpy.expand_dims

Here are examples of the Python API numpy.expand_dims taken from open source projects.

76 Examples

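As a quick orientation before the examples: np.expand_dims(a, axis) returns a view of a with a new length-1 axis inserted at the given position, equivalent to indexing with np.newaxis. A minimal sketch:

    import numpy as np

    a = np.array([1, 2, 3])          # shape (3,)
    np.expand_dims(a, axis=0).shape  # (1, 3) -- same as a[np.newaxis, :]
    np.expand_dims(a, axis=1).shape  # (3, 1) -- same as a[:, np.newaxis]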

Example 1

Project: keraflow Source File: models.py
    def _validate_io_arrays_shapes(self, names, arrays, shapes):
        # make sure the arrays are 2D
        for i in range(len(arrays)):
            if len(arrays[i].shape)==1:
                arrays[i] = np.expand_dims(arrays[i], 1)

        for array, name, shape in zip(arrays, names, shapes):
            if len(array.shape) != len(shape):
                raise KError('Input dimension mismatch for {}. Expected: {} (batch dimension included). Given: {}'.format(name, len(shape), len(array.shape)))
            for a, p in zip(array.shape, shape):
                if p is not None and p != a:
                    raise KError('Input shape mismatch for {}. Expected: {}. Given: {}'.format(name, shape, array.shape))

Example 2

Project: tensorflow-mnist-tutorial Source File: tensorflowvisu.py
    def append_data_histograms(self, x, datavect1, datavect2, title1=None, title2=None):
        self.x3.append(x)
        datavect1.sort()
        self.w3 = np.concatenate((self.w3, np.expand_dims(probability_distribution(datavect1), 0)))
        datavect2.sort()
        self.b3 = np.concatenate((self.b3, np.expand_dims(probability_distribution(datavect2), 0)))
        self._update_xmax(x)

Example 3

Project: DeepLearning-OCR Source File: util.py
def one_hot_decoder(data, whole_set):
	ret = []
	if data.ndim == 1: # keras bug ?
		data = np.expand_dims(data, 0)
	for probs in data:
		idx = np.argmax(probs)
		# print idx, whole_set[idx], probs[idx]
		ret.append(whole_set[idx])
	return ret

Example 4

Project: Kayak Source File: matrix_ops.py
Function: local_grad
    def _local_grad(self, parent, d_out_d_self):
        # If self.keepdims == False then we need to
        # broadcast d_out_d_self along the summation axis
        if not self.keepdims and self.axis is not None:
            expanded_d_out_d_self = np.expand_dims(d_out_d_self, self.axis)
            return expanded_d_out_d_self * np.ones(self.A.shape)
        else:
            return d_out_d_self * np.ones(self.A.shape)

Example 5

Project: bolt Source File: utils.py
def iterexpand(arry, extra):
    """
    Expand dimensions by iteratively appending empty axes.

    Parameters
    ----------
    arry : ndarray
        The original array

    extra : int
        The number of empty axes to append
    """
    for d in range(arry.ndim, arry.ndim+extra):
        arry = expand_dims(arry, axis=d)
    return arry
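
A hypothetical call, assuming expand_dims here refers to numpy.expand_dims imported into bolt's utils module:

    import numpy as np

    a = np.ones((4, 5))     # ndim == 2
    b = iterexpand(a, 2)    # append two trailing length-1 axes
    # b.shape == (4, 5, 1, 1)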

Example 6

Project: MemN2N-babi-python Source File: nn.py
Function: f_prop
    def fprop(self, input_data):
        self.output = input_data[0]
        for elem in input_data[1:]:
            # Expand to the same ndim as self.output
            # TODO: Code improvement
            if elem.ndim == self.output.ndim - 1:
                elem = np.expand_dims(elem, axis=elem.ndim + 1)
            self.output += elem
        return self.output

Example 7

Project: icnn Source File: ddpg.py
Function: act
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        action = self._act_test(obs) if test else self._act_expl(obs)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 8

Project: Vincent-AI-Artist Source File: main.py
Function: pre_process
def preprocess(img_path, load_dims=False):
    global img_WIDTH, img_HEIGHT, aspect_ratio

    img = imread(img_path, mode="RGB")

    if load_dims:
        img_WIDTH    = img.shape[0]
        img_HEIGHT   = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH

    img = imresize(img, (img_width, img_height))
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img

Example 9

Project: nideep Source File: mat_utils.py
Function: expand_dims
def expand_dims(m, d):
    '''
    Expand dimensions in-place starting from first axis (axis=0) until we reach d dims.
    '''
    while m.ndim < d:
        m = np.expand_dims(m, axis=0)
    return m
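
A small usage sketch with hypothetical shapes; note that despite the "in-place" wording in the docstring, np.expand_dims returns a new view, so the result must be captured:

    import numpy as np

    m = np.arange(6).reshape(2, 3)   # shape (2, 3)
    expand_dims(m, 4).shape          # (1, 1, 2, 3) -- leading axes added until ndim == 4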

Example 10

Project: Neural-Style-Transfer Source File: improved_neural_doodle.py
Function: preprocess_image
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

Example 11

Project: BayesianOptimization Source File: bayesian_optimization.py
    def points_to_csv(self, file_name):
        """
        After training all points for which we know target variable
        (both from initialization and optimization) are saved

        :param file_name: name of the file where points will be saved in the csv format

        :return: None
        """

        points = np.hstack((self.X, np.expand_dims(self.Y, axis=1)))
        header = ', '.join(self.keys + ['target'])
        np.savetxt(file_name, points, header=header, delimiter=',')

Example 12

Project: mkmov Source File: twodbm.py
Function: get_data
    def getdata(self,ifile,preview=False):
        """function that grabs the data
        :returns: nparray
        """
        if not preview:
            if self.var_len==4:
                var_nparray=ifile.variables[self.variable_name][:,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[self.variable_name][:]
        else:
            if self.var_len==4:
                var_nparray=ifile.variables[self.variable_name][0,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[self.variable_name][0,:]
            # print np.shape(var_nparray)
            var_nparray=np.expand_dims(var_nparray,axis=0)
            # print np.shape(var_nparray)
    
        return var_nparray

Example 13

Project: deer Source File: AC_net_keras.py
    def chooseBestAction(self, state):
        """ Get the best action for a belief state

        Arguments
        ---------
        state : one belief state

        Returns
        -------
        best_action : float
        estim_value : float
        """        
        
        best_action=self.policy.predict([np.expand_dims(s,axis=0) for s in state])
        the_list=[np.expand_dims(s,axis=0) for s in state]
        the_list.append( best_action )
        estim_value=(self.q_vals.predict(the_list)[0,0])
        
        return best_action[0],estim_value

Example 14

Project: robothon Source File: extras.py
def expand_dims(a, axis):
    """Expands the shape of a by including newaxis before axis.
    """
    if not isinstance(a, MaskedArray):
        return np.expand_dims(a, axis)
    elif getmask(a) is nomask:
        return np.expand_dims(a, axis).view(MaskedArray)
    m = getmaskarray(a)
    return masked_array(np.expand_dims(a, axis),
                        mask=np.expand_dims(m, axis))
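
The same idea can be sketched with plain numpy.ma calls (a minimal illustration, not the robothon helper itself): expanding the data and the mask with the same axis keeps them aligned.

    import numpy as np

    a = np.ma.array([1., 2., 3.], mask=[False, True, False])
    expanded = np.ma.masked_array(np.expand_dims(a.data, 0),
                                  mask=np.expand_dims(np.ma.getmaskarray(a), 0))
    # expanded.shape == (1, 3); expanded.mask == [[False, True, False]]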

Example 15

Project: keras-rtst Source File: style_xfer.py
def transform_glob(model, args):
    '''Apply the model to a glob of images.'''
    f_generate = K.function([model.inputs['content'].input],
        [model.nodes['texnet'].get_output(False)])
    filenames = glob.glob(args.convert_glob)
    output_path = args.output_prefix
    try:
        os.makedirs(output_path)
    except OSError:
        pass  # exists
    for filename in filenames:
        print('converting {}'.format(filename))
        img = keras_vgg_buddy.load_and_preprocess_image(filename, width=args.max_width)
        result = f_generate([np.expand_dims(img, 0)])[0]
        img = keras_vgg_buddy.deprocess_image(result[0], contrast_percent=0)
        imsave(os.path.join(output_path, os.path.basename(filename)), img)

Example 16

Project: keraflow Source File: test_layer_exception.py
def test_wrc_exceptions():
    # Sequential should be initialized with a list of layer
    with pytest.raises(KError):
        Sequential(Dense(2))

    # Layer weight shape mismatch
    with pytest.raises(KError):
        create_model(initial_weights={'W':np.expand_dims(W, axis=1), 'b':b})

    # regularizers does not take single input
    with pytest.raises(KError):
        create_model(initial_weights=[W, b], regularizers='l1')

    # constraints does not take single input
    with pytest.raises(KError):
        create_model(initial_weights=[W, b], constraints='maxnorm')

Example 17

Project: drmad Source File: kernel_methods.py
def make_exp_kernel(L0):
    def exp_kernel(x1, x2):
        x1 = np.expand_dims(x1, 2) # Append a singleton dimension
        x2 = x2.T
        return np.exp(-np.mean(np.abs(x1 - x2), axis=1) / L0)
    return exp_kernel
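
The expand_dims call sets up broadcasting: for x1 of shape (n, d) and x2 of shape (m, d), the expanded x1 has shape (n, d, 1), x2.T has shape (d, m), their difference broadcasts to (n, d, m), and the mean over axis=1 yields the (n, m) kernel matrix. A shape check with hypothetical inputs:

    import numpy as np

    x1, x2 = np.random.rand(5, 3), np.random.rand(7, 3)
    k = make_exp_kernel(1.0)(x1, x2)
    # k.shape == (5, 7)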

Example 18

Project: icnn Source File: naf.py
Function: act
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        action = self._act_test(obs, False) if test else self._act_expl(obs, False)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 19

Project: sima Source File: sequence.py
Function: array
    def __array__(self):
        """Used to convert the Sequence to a numpy array.

        >>> import sima
        >>> import numpy as np
        >>> data = np.ones((10, 3, 16, 16, 2))
        >>> seq = sima.Sequence.create('ndarray', data)
        >>> np.all(data == np.array(seq))
        True

        """

        return np.concatenate([np.expand_dims(frame, 0) for frame in self])

Example 20

Project: opendr Source File: common.py
def nangradients(arr):
    dy = np.expand_dims(arr[:-1,:,:] - arr[1:,:,:], axis=3)
    dx = np.expand_dims(arr[:,:-1,:] - arr[:, 1:, :], axis=3)

    dy = np.concatenate((dy[1:,:,:], dy[:-1,:,:]), axis=3)
    dy = nanmean(dy, axis=3)
    dx = np.concatenate((dx[:,1:,:], dx[:,:-1,:]), axis=3)
    dx = nanmean(dx, axis=3)

    if arr.shape[2] > 1:
        gy, gx, _ = np.gradient(arr)
    else:
        gy, gx = np.gradient(arr.squeeze())
        gy = np.atleast_3d(gy)
        gx = np.atleast_3d(gx)
    gy[1:-1,:,:] = -dy
    gx[:,1:-1,:] = -dx

    return gy, gx

Example 21

Project: deconvfaces Source File: instance.py
    def th_image(self):
        """
        Returns a Theano-ordered representation of the image.
        """

        return np.expand_dims(self.image, 0)

Example 22

Project: deconvfaces Source File: instance.py
    def tf_image(self):
        """
        Returns a TensorFlow-ordered representation of the image.
        """

        return np.expand_dims(self.image, 2)
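
Taken together with the previous example, the two properties differ only in where the singleton axis lands: for an H x W single-channel image, axis 0 yields the Theano-style (1, H, W) layout, while axis 2 yields the TensorFlow-style (H, W, 1) layout. A minimal sketch with a dummy image:

    import numpy as np

    image = np.zeros((64, 64))        # H x W grayscale
    np.expand_dims(image, 0).shape    # (1, 64, 64) -- channel-first
    np.expand_dims(image, 2).shape    # (64, 64, 1) -- channel-last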

Example 23

Project: chainer Source File: test_expand_dims.py
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        self.assertEqual(y.data.shape, self.out_shape)
        y_expect = numpy.expand_dims(cuda.to_cpu(x_data), self.axis)
        self.assertEqual(y.data.dtype, self.dtype)
        numpy.testing.assert_array_equal(cuda.to_cpu(y.data), y_expect)

Example 24

Project: mkmov Source File: quiver.py
Function: get_data
    def getdata(self,ifile,varname,preview=False):
        """function that grabs the data
        :returns: nparray
        """
        if not preview:
            if self.var_len==4:
                var_nparray=ifile.variables[varname][:,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[varname][:]
        else:
            if self.var_len==4:
                var_nparray=ifile.variables[varname][0,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[varname][0,:]
            var_nparray=np.expand_dims(var_nparray,axis=0)
    
        return var_nparray

Example 25

Project: Neural-Style-Transfer Source File: MRFNetwork.py
Function: preprocess_image
def preprocess_image(image_path, load_dims=False, style_image=False):
    global img_WIDTH, img_HEIGHT, aspect_ratio, b_scale_ratio_height, b_scale_ratio_width

    img = imread(image_path, mode="RGB") # Prevents crashes due to PNG images (ARGB)
    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH

    if style_image:
        b_scale_ratio_width = float(img.shape[0]) / img_WIDTH
        b_scale_ratio_height = float(img.shape[1]) / img_HEIGHT

    img = imresize(img, (img_width, img_height))
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img

Example 26

Project: polar2grid Source File: readers.py
def get_band_3_mask(data_reader, chn, calib_type):
    """Get a boolean mask to determine if a pixel is band 3A or 3B.

    True if 3B, False if 3A.
    """
    # XXX: If NOAA files need processing this logic is opposite (True = 3A, False = 3B)
    return numpy.expand_dims((data_reader["scnlinbit"] & 1) == 1, 1)

Example 27

Project: ptsa Source File: helper.py
def repeat_to_match_dims(x,y,axis=-1):
    
    rnk = len(y.shape)
    
    # convert negative axis to positive axis
    if axis < 0: 
        axis = axis + rnk

    for d in range(axis)+range(axis+1,rnk):
        # add the dimension
        x = np.expand_dims(x,d)
        # repeat to fill that dim
        x = x.repeat(y.shape[d],d)

    return x
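
Note that range(axis) + range(axis+1, rnk) only concatenates in Python 2. A rough Python 3 sketch of the same idea (not the ptsa implementation) chains the two ranges instead:

    import itertools
    import numpy as np

    def repeat_to_match_dims_py3(x, y, axis=-1):
        rnk = y.ndim
        axis = axis % rnk                    # normalize a negative axis
        for d in itertools.chain(range(axis), range(axis + 1, rnk)):
            x = np.expand_dims(x, d)         # add the missing dimension
            x = x.repeat(y.shape[d], d)      # repeat to fill that dim
        return x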

Example 28

Project: deep_recommend_system Source File: shape_ops_test.py
  def _compareExpandDims(self, x, dim, use_gpu):
    np_ans = np.expand_dims(x, axis=dim)
    with self.test_session(use_gpu=use_gpu):
      tensor = tf.expand_dims(x, dim)
      tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)

Example 29

Project: distributions Source File: stats.py
Function: sample_discrete_from_log
def sample_discrete_from_log(p_log,axis=0,dtype=np.int32):
    'samples log probability array along specified axis'
    cumvals = np.exp(p_log - np.expand_dims(p_log.max(axis),axis)).cumsum(axis) # cumlogaddexp
    thesize = np.array(p_log.shape)
    thesize[axis] = 1
    randvals = random(size=thesize) * \
            np.reshape(cumvals[[slice(None) if i is not axis else -1
                for i in range(p_log.ndim)]],thesize)
    return np.sum(randvals > cumvals,axis=axis,dtype=dtype)

Example 30

Project: pyorbital Source File: geoloc.py
def qrotate(vector, axis, angle):
    """Rotate *vector* around *axis* by *angle* (in radians).

    *vector* is a matrix of column vectors, as is *axis*.
    This function uses quaternion rotation.
    """
    n_axis = axis / vnorm(axis)
    sin_angle = np.expand_dims(sin(angle / 2), 0)
    if np.rank(n_axis) == 1:
        n_axis = np.expand_dims(n_axis, 1)
        p__ = np.dot(n_axis, sin_angle)[:, np.newaxis]
    else:
        p__ = n_axis * sin_angle

    q__ = Quaternion(cos(angle / 2), p__)
    return np.einsum("kj, ikj->ij",
                     vector,
                     q__.rotation_matrix()[:3, :3])

Example 31

Project: Kayak Source File: matrix_ops.py
Function: local_grad
    def _local_grad(self, parent, d_out_d_self):
        # If self.keepdims == False then we need to
        # broadcast d_out_d_self along the summation axis
        N = float(self.A.value.size) if self.axis is None else float(self.A.shape[self.axis])
        if not self.keepdims and self.axis is not None:
            expanded_d_out_d_self = np.expand_dims(d_out_d_self, self.axis)
            return expanded_d_out_d_self * 1.0/N * np.ones(self.A.shape)
        else:
            return d_out_d_self * 1.0/N * np.ones(self.A.shape)

Example 32

Project: deer Source File: q_net_keras.py
Function: qvalues
    def qValues(self, state_val):
        """ Get the q values for one belief state

        Arguments
        ---------
        state_val : one belief state

        Returns
        -------
        The q values for the provided belief state
        """ 
        return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]

Example 33

Project: holoviews Source File: raster.py
Function: get_item
    def __getitem__(self, slices):
        if slices in self.dimensions(): return self.dimension_values(slices)
        slices = util.process_ellipses(self,slices)
        if not isinstance(slices, tuple):
            slices = (slices, slice(None))
        elif len(slices) > (2 + self.depth):
            raise KeyError("Can only slice %d dimensions" % 2 + self.depth)
        elif len(slices) == 3 and slices[-1] not in [self.vdims[0].name, slice(None)]:
            raise KeyError("%r is the only selectable value dimension" % self.vdims[0].name)

        slc_types = [isinstance(sl, slice) for sl in slices[:2]]
        data = self.data.__getitem__(slices[:2][::-1])
        if all(slc_types):
            return self.clone(data, extents=None)
        elif not any(slc_types):
            return toarray(data, index_value=True)
        else:
            return self.clone(np.expand_dims(data, axis=slc_types.index(True)),
                              extents=None)

Example 34

Project: pybasicbayes Source File: stats.py
def sample_discrete_from_log(p_log,return_lognorms=False,axis=0,dtype=np.int32):
    'samples log probability array along specified axis'
    lognorms = logsumexp(p_log,axis=axis)
    cumvals = np.exp(p_log - np.expand_dims(lognorms,axis)).cumsum(axis)
    thesize = np.array(p_log.shape)
    thesize[axis] = 1
    randvals = random(size=thesize) * \
            np.reshape(cumvals[[slice(None) if i is not axis else -1
                for i in range(p_log.ndim)]],thesize)
    samples = np.sum(randvals > cumvals,axis=axis,dtype=dtype)
    if return_lognorms:
        return samples, lognorms
    else:
        return samples

Example 35

Project: keraflow Source File: test_core.py
Function: test_expand_dims
def test_expand_dims():
    axis=2
    layer_test(core.ExpandDims(axis=axis),
               [origin],
               [np.expand_dims(origin, axis)])

    layer_test(core.ExpandDims(axis=axis, include_batch_dim=True),
               [origin],
               [np.expand_dims(origin, axis-1)])

Example 36

Project: basic_reinforcement_learning Source File: dqn-deer-keras-cartpole.py
Function: qvalues
    def qValues(self, state_val):
        """ Get the q values for one belief state

        Arguments
        ---------
        state_val : one belief state

        Returns
        -------
        The q value for the provided belief state
        """ 
        return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]

Example 37

Project: keras-grad-cam Source File: grad-cam.py
def load_image(path):
    img_path = sys.argv[1]
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x

Example 38

Project: theanet Source File: train.py
def fixdim(arr):
    if arr.ndim == 2:
        side = int(arr.shape[-1] ** .5)
        assert side**2 == arr.shape[-1], "Need a perfect square"
        return arr.reshape((arr.shape[0], 1, side, side))

    if arr.ndim == 3:
        return np.expand_dims(arr, axis=1)

    if arr.ndim == 4:
        return arr

    raise ValueError("Image data arrays must have 2,3 or 4 dimensions only")

Example 39

Project: icnn Source File: icnn.py
Function: act
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        if FLAGS.use_gd:
            act = self.get_cvx_opt_gd(self._opt_test_gd, obs)
        else:
            act = self.get_cvx_opt(self._opt_test, obs)
        action = act if test else self._act_expl(act)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 40

Project: drmad Source File: kernel_methods.py
def make_sq_exp_kernel(L0):
    def sq_exp_kernel(x1, x2):
        x1 = np.expand_dims(x1, 2) # Append a singleton dimension
        x2 = x2.T
        return np.exp(-np.sum((x1 - x2)**2, axis=1) / (2 * L0**2))
    return sq_exp_kernel

Example 41

Project: DeepLearning-OCR Source File: util.py
def top_one_prob(data):
	ret = []
	if data.ndim == 1: # keras bug ?
		data = np.expand_dims(data, 0)
	for probs in data:
		idx = np.argmax(probs)
		ret.append(probs[idx])
	return ret

Example 42

Project: image-analogies Source File: analogy.py
Function: analogy_loss
def analogy_loss(a, a_prime, b, b_prime, patch_size=3, patch_stride=1, use_full_analogy=False):
    '''http://www.mrl.nyu.edu/projects/image-analogies/index.html'''
    best_a_prime_patches = find_analogy_patches(a, a_prime, b, patch_size=patch_size, patch_stride=patch_stride)
    if use_full_analogy:  # combine all the patches into a single image
        b_prime_patches, _ = patches.make_patches(b_prime, patch_size, patch_stride)
        loss = content_loss(best_a_prime_patches, b_prime_patches) / patch_size ** 2
    else:
        bs = b.shape
        b_analogy = patches.combine_patches(best_a_prime_patches, (bs[1], bs[2], bs[0]))
        loss = content_loss(np.expand_dims(b_analogy, 0), b_prime)
    return loss

Example 43

Project: Neural-Style-Transfer Source File: MRFNetwork.py
Function: analogy_loss
def analogy_loss(a, a_prime, b, b_prime, patch_size=3, patch_stride=1, use_full_analogy=False):
    '''http://www.mrl.nyu.edu/projects/image-analogies/index.html'''
    best_a_prime_patches = find_analogy_patches(a, a_prime, b, patch_size=patch_size, patch_stride=patch_stride)
    if use_full_analogy:  # combine all the patches into a single image
        b_prime_patches, _ = make_patches(b_prime, patch_size, patch_stride)
        loss = content_loss(best_a_prime_patches, b_prime_patches) / patch_size ** 2
    else:
        bs = b.shape
        b_analogy = combine_patches(best_a_prime_patches, (bs[1], bs[2], bs[0]))
        loss = content_loss(np.expand_dims(b_analogy, 0), b_prime)
    return loss

Example 44

Project: topik Source File: plsa.py
Function: m_step
def _m_step(words_in_docs, word_cts_in_docs, topic_array, zw, dw_z, dz):
    zw[:] = 0
    for (d, doc_id, words) in words_in_docs:
        zw[:, words] += word_cts_in_docs[doc_id]*dw_z[d, words].T
    # normalize by sum of topic word weights
    zw /= np.expand_dims(zw.sum(axis=1), 1)
    for (d, doc_id, words) in words_in_docs:
        dz[d] = (word_cts_in_docs[doc_id] * dw_z[d, words].T).sum(axis=1)
    dz /= np.expand_dims(dz.sum(axis=1), 1)
    return zw, dz
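
Here expand_dims simply restores the axis dropped by sum so the division broadcasts row-wise; a sketch of the equivalent normalization (hypothetical zw) using keepdims:

    import numpy as np

    zw = np.random.rand(4, 10)
    a = zw / np.expand_dims(zw.sum(axis=1), 1)   # as in the example above
    b = zw / zw.sum(axis=1, keepdims=True)       # equivalent broadcasted division
    # np.allclose(a, b) -> True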

Example 45

Project: bayespy Source File: misc.py
Function: add_axes
def add_axes(X, num=1, axis=0):
    for i in range(num):
        X = np.expand_dims(X, axis=axis)
    return X
    shape = np.shape(X)[:axis] + num*(1,) + np.shape(X)[axis:]
    return np.reshape(X, shape)

Example 46

Project: image-analogies Source File: patch_matcher.py
    def lookup_coords(self, x, coords):
        x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
        i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
        return x[i_coords[0], i_coords[1]]

Example 47

Project: mpop Source File: hdfeos_l1b.py
def calibrate_refl(subdata, uncertainty, indices):
    """Calibration for reflective channels.
    """
    del uncertainty
    #uncertainty_array = uncertainty.get()
    # array = np.ma.MaskedArray(subdata.get(),
    #                          mask=(uncertainty_array >= 15))

    # FIXME: The loading should not be done here.

    array = np.vstack(np.expand_dims(subdata[idx, :, :], 0) for idx in indices)
    valid_range = subdata.attributes()["valid_range"]
    array = np.ma.masked_outside(array,
                                 valid_range[0],
                                 valid_range[1],
                                 copy=False)
    array = array * np.float32(1.0)
    offsets = np.array(subdata.attributes()["reflectance_offsets"],
                       dtype=np.float32)[indices]
    scales = np.array(subdata.attributes()["reflectance_scales"],
                      dtype=np.float32)[indices]
    dims = (len(indices), 1, 1)
    array = (array - offsets.reshape(dims)) * scales.reshape(dims) * 100
    return array

Example 48

Project: pyresample Source File: grid.py
def get_image_from_linesample(row_indices, col_indices, source_image,
                              fill_value=0):
    """Samples from image based on index arrays.

    Parameters
    ----------
    row_indices : numpy array
        Row indices. Dimensions must match col_indices
    col_indices : numpy array 
        Col indices. Dimensions must match row_indices
    source_image : numpy array 
        Source image
    fill_value : int or None, optional
            Set undetermined pixels to this value.
            If fill_value is None a masked array is returned 
            with undetermined pixels masked

    Returns
    -------
    image_data : numpy array
        Resampled image 
    """

    # mask out non valid row and col indices
    row_mask = (row_indices >= 0) * (row_indices < source_image.shape[0])
    col_mask = (col_indices >= 0) * (col_indices < source_image.shape[1])
    valid_rows = row_indices * row_mask
    valid_cols = col_indices * col_mask

    # free memory
    del(row_indices)
    del(col_indices)

    # get valid part of image
    target_image = source_image[valid_rows, valid_cols]

    # free memory
    del(valid_rows)
    del(valid_cols)

    # create mask for valid data points
    valid_data = row_mask * col_mask
    if valid_data.ndim != target_image.ndim:
        for i in range(target_image.ndim - valid_data.ndim):
            valid_data = np.expand_dims(valid_data, axis=valid_data.ndim)

    # free memory
    del(row_mask)
    del(col_mask)

    # fill the non valid part of the image
    if fill_value is not None:
        target_filled = (target_image * valid_data +
                         (1 - valid_data) * fill_value)
    else:
        if np.ma.is_masked(target_image):
            mask = ((1 - valid_data) | target_image.mask)
        else:
            mask = (1 - valid_data)
        target_filled = np.ma.array(target_image, mask=mask)

    return target_filled.astype(target_image.dtype)

Example 49

Project: mpop Source File: hdfeos_l1b.py
def calibrate_tb(subdata, uncertainty, indices, band_names):
    """Calibration for the emissive channels.
    """
    del uncertainty
    #uncertainty_array = uncertainty.get()
    # array = np.ma.MaskedArray(subdata.get(),
    #                          mask=(uncertainty_array >= 15))

    # FIXME: The loading should not be done here.

    array = np.vstack(np.expand_dims(subdata[idx, :, :], 0) for idx in indices)
    valid_range = subdata.attributes()["valid_range"]
    array = np.ma.masked_outside(array,
                                 valid_range[0],
                                 valid_range[1],
                                 copy=False)

    offsets = np.array(subdata.attributes()["radiance_offsets"],
                       dtype=np.float32)[indices]
    scales = np.array(subdata.attributes()["radiance_scales"],
                      dtype=np.float32)[indices]

    #- Planck constant (Joule second)
    h__ = np.float32(6.6260755e-34)

    #- Speed of light in vacuum (meters per second)
    c__ = np.float32(2.9979246e+8)

    #- Boltzmann constant (Joules per Kelvin)
    k__ = np.float32(1.380658e-23)

    #- Derived constants
    c_1 = 2 * h__ * c__ * c__
    c_2 = (h__ * c__) / k__

    #- Effective central wavenumber (inverse centimeters)
    cwn = np.array([
        2.641775E+3, 2.505277E+3, 2.518028E+3, 2.465428E+3,
        2.235815E+3, 2.200346E+3, 1.477967E+3, 1.362737E+3,
        1.173190E+3, 1.027715E+3, 9.080884E+2, 8.315399E+2,
        7.483394E+2, 7.308963E+2, 7.188681E+2, 7.045367E+2],
        dtype=np.float32)

    #- Temperature correction slope (no units)
    tcs = np.array([
        9.993411E-1, 9.998646E-1, 9.998584E-1, 9.998682E-1,
        9.998819E-1, 9.998845E-1, 9.994877E-1, 9.994918E-1,
        9.995495E-1, 9.997398E-1, 9.995608E-1, 9.997256E-1,
        9.999160E-1, 9.999167E-1, 9.999191E-1, 9.999281E-1],
        dtype=np.float32)

    #- Temperature correction intercept (Kelvin)
    tci = np.array([
        4.770532E-1, 9.262664E-2, 9.757996E-2, 8.929242E-2,
        7.310901E-2, 7.060415E-2, 2.204921E-1, 2.046087E-1,
        1.599191E-1, 8.253401E-2, 1.302699E-1, 7.181833E-2,
        1.972608E-2, 1.913568E-2, 1.817817E-2, 1.583042E-2],
        dtype=np.float32)

    # Transfer wavenumber [cm^(-1)] to wavelength [m]
    cwn = 1 / (cwn * 100)

    # Some versions of the modis files do not contain all the bands.
    emmissive_channels = ["20", "21", "22", "23", "24", "25", "27", "28", "29",
                          "30", "31", "32", "33", "34", "35", "36"]
    current_channels = [i for i, band in enumerate(emmissive_channels)
                        if band in band_names]
    global_indices = list(np.array(current_channels)[indices])

    dims = (len(indices), 1, 1)
    cwn = cwn[global_indices].reshape(dims)
    tcs = tcs[global_indices].reshape(dims)
    tci = tci[global_indices].reshape(dims)

    tmp = (array - offsets.reshape(dims)) * scales.reshape(dims)
    tmp = c_2 / (cwn * np.ma.log(c_1 / (1000000 * tmp * cwn ** 5) + 1))
    array = (tmp - tci) / tcs
    return array

Example 50

Project: image-analogies Source File: img_utils.py
Function: preprocess_image
def preprocess_image(x, img_width, img_height):
    img = imresize(x, (img_height, img_width), interp='bicubic').astype('float64')
    img = vgg16.img_to_vgg(img)
    img = np.expand_dims(img, axis=0)
    return img