numpy.expand_dims

Here are examples of the Python API numpy.expand_dims, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
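
Before the project examples, a minimal, self-contained sketch of the call itself: numpy.expand_dims(a, axis) returns the array a with a new length-1 axis inserted at position axis, equivalent to indexing with np.newaxis.

import numpy as np

x = np.array([1, 2, 3])          # shape (3,)
row = np.expand_dims(x, axis=0)  # shape (1, 3), same as x[np.newaxis, :]
col = np.expand_dims(x, axis=1)  # shape (3, 1), same as x[:, np.newaxis]
print(row.shape, col.shape)      # (1, 3) (3, 1)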

100 Examples

Example 1

Project: chainer
Source File: test_expand_dims.py
View license
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        self.assertEqual(y.data.shape, self.out_shape)
        y_expect = numpy.expand_dims(cuda.to_cpu(x_data), self.axis)
        self.assertEqual(y.data.dtype, self.dtype)
        numpy.testing.assert_array_equal(cuda.to_cpu(y.data), y_expect)

Example 2

Project: pyorbital
Source File: geoloc.py
View license
def qrotate(vector, axis, angle):
    """Rotate *vector* around *axis* by *angle* (in radians).

    *vector* is a matrix of column vectors, as is *axis*.
    This function uses quaternion rotation.
    """
    n_axis = axis / vnorm(axis)
    sin_angle = np.expand_dims(sin(angle / 2), 0)
    if np.ndim(n_axis) == 1:  # np.rank was removed from NumPy; np.ndim is the replacement
        n_axis = np.expand_dims(n_axis, 1)
        p__ = np.dot(n_axis, sin_angle)[:, np.newaxis]
    else:
        p__ = n_axis * sin_angle

    q__ = Quaternion(cos(angle / 2), p__)
    return np.einsum("kj, ikj->ij",
                     vector,
                     q__.rotation_matrix()[:3, :3])

Example 3

Project: image-analogies
Source File: analogy.py
View license
def analogy_loss(a, a_prime, b, b_prime, patch_size=3, patch_stride=1, use_full_analogy=False):
    '''http://www.mrl.nyu.edu/projects/image-analogies/index.html'''
    best_a_prime_patches = find_analogy_patches(a, a_prime, b, patch_size=patch_size, patch_stride=patch_stride)
    if use_full_analogy:  # combine all the patches into a single image
        b_prime_patches, _ = patches.make_patches(b_prime, patch_size, patch_stride)
        loss = content_loss(best_a_prime_patches, b_prime_patches) / patch_size ** 2
    else:
        bs = b.shape
        b_analogy = patches.combine_patches(best_a_prime_patches, (bs[1], bs[2], bs[0]))
        loss = content_loss(np.expand_dims(b_analogy, 0), b_prime)
    return loss

Example 4

Project: keras-rtst
Source File: style_xfer.py
View license
def transform_glob(model, args):
    '''Apply the model to a glob of images.'''
    f_generate = K.function([model.inputs['content'].input],
        [model.nodes['texnet'].get_output(False)])
    filenames = glob.glob(args.convert_glob)
    output_path = args.output_prefix
    try:
        os.makedirs(output_path)
    except OSError:
        pass  # exists
    for filename in filenames:
        print('converting {}'.format(filename))
        img = keras_vgg_buddy.load_and_preprocess_image(filename, width=args.max_width)
        result = f_generate([np.expand_dims(img, 0)])[0]
        img = keras_vgg_buddy.deprocess_image(result[0], contrast_percent=0)
        imsave(os.path.join(output_path, os.path.basename(filename)), img)

Example 5

Project: theanet
Source File: train.py
View license
def fixdim(arr):
    if arr.ndim == 2:
        side = int(arr.shape[-1] ** .5)
        assert side**2 == arr.shape[-1], "Need a perfect square"
        return arr.reshape((arr.shape[0], 1, side, side))

    if arr.ndim == 3:
        return np.expand_dims(arr, axis=1)

    if arr.ndim == 4:
        return arr

    raise ValueError("Image data arrays must have 2,3 or 4 dimensions only")
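
fixdim coerces image data to a 4D (batch, channel, height, width) array: 2D input is reshaped into square single-channel images, and 3D input gets its missing channel axis from np.expand_dims(arr, axis=1). The 3D branch in isolation (made-up shapes, not from the theanet project):

import numpy as np

stack = np.zeros((10, 8, 8))                 # 10 single-channel 8x8 images, channel axis missing
print(np.expand_dims(stack, axis=1).shape)   # (10, 1, 8, 8)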

Example 6

Project: Vincent-AI-Artist
Source File: main.py
View license
def preprocess(img_path, load_dims=False):
    global img_WIDTH, img_HEIGHT, aspect_ratio

    img = imread(img_path, mode="RGB")

    if load_dims:
        img_WIDTH    = img.shape[0]
        img_HEIGHT   = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH

    img = imresize(img, (img_width, img_height))
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img

Example 7

View license
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
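
This is the standard single-image preprocessing pattern for Keras VGG models: img_to_array yields one image array, and np.expand_dims(..., axis=0) prepends the batch axis the network expects, so a (224, 224, 3) image becomes a (1, 224, 224, 3) batch under channels-last ordering. The expand_dims step in isolation (shapes are illustrative):

import numpy as np

img = np.zeros((224, 224, 3), dtype='float32')   # a single RGB image, e.g. from img_to_array
batch = np.expand_dims(img, axis=0)              # (1, 224, 224, 3): a batch of one image
print(batch.shape)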

Example 8

Project: Neural-Style-Transfer
Source File: MRFNetwork.py
View license
def preprocess_image(image_path, load_dims=False, style_image=False):
    global img_WIDTH, img_HEIGHT, aspect_ratio, b_scale_ratio_height, b_scale_ratio_width

    img = imread(image_path, mode="RGB") # Prevents crashes due to PNG images (ARGB)
    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH

    if style_image:
        b_scale_ratio_width = float(img.shape[0]) / img_WIDTH
        b_scale_ratio_height = float(img.shape[1]) / img_HEIGHT

    img = imresize(img, (img_width, img_height))
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img

Example 9

Project: Neural-Style-Transfer
Source File: MRFNetwork.py
View license
def analogy_loss(a, a_prime, b, b_prime, patch_size=3, patch_stride=1, use_full_analogy=False):
    '''http://www.mrl.nyu.edu/projects/image-analogies/index.html'''
    best_a_prime_patches = find_analogy_patches(a, a_prime, b, patch_size=patch_size, patch_stride=patch_stride)
    if use_full_analogy:  # combine all the patches into a single image
        b_prime_patches, _ = make_patches(b_prime, patch_size, patch_stride)
        loss = content_loss(best_a_prime_patches, b_prime_patches) / patch_size ** 2
    else:
        bs = b.shape
        b_analogy = combine_patches(best_a_prime_patches, (bs[1], bs[2], bs[0]))
        loss = content_loss(np.expand_dims(b_analogy, 0), b_prime)
    return loss

Example 10

View license
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

Example 11

View license
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

Example 12

View license
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

Example 13

View license
  def _compareExpandDims(self, x, dim, use_gpu):
    np_ans = np.expand_dims(x, axis=dim)
    with self.test_session(use_gpu=use_gpu):
      tensor = tf.expand_dims(x, dim)
      tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)

Example 14

View license
  def _compareExpandDims(self, x, dim, use_gpu):
    np_ans = np.expand_dims(x, axis=dim)
    with self.test_session(use_gpu=use_gpu):
      tensor = tf.expand_dims(x, dim)
      tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)

Example 15

Project: deer
Source File: AC_net_keras.py
View license
    def chooseBestAction(self, state):
        """ Get the best action for a belief state

        Arguments
        ---------
        state : one belief state

        Returns
        -------
        best_action : float
        estim_value : float
        """        
        
        best_action=self.policy.predict([np.expand_dims(s,axis=0) for s in state])
        the_list=[np.expand_dims(s,axis=0) for s in state]
        the_list.append( best_action )
        estim_value=(self.q_vals.predict(the_list)[0,0])
        
        return best_action[0],estim_value

Example 16

Project: deer
Source File: q_net_keras.py
View license
    def qValues(self, state_val):
        """ Get the q values for one belief state

        Arguments
        ---------
        state_val : one belief state

        Returns
        -------
        The q values for the provided belief state
        """ 
        return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]
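
The list comprehension wraps each observation in state_val with a length-1 batch axis, which is the shape Keras predict expects for a single sample. The same step in isolation (the observation shapes below are illustrative, not taken from deer):

import numpy as np

state_val = [np.zeros((4, 84, 84)), np.zeros((3,))]       # two observation streams
batched = [np.expand_dims(s, axis=0) for s in state_val]
print([b.shape for b in batched])                         # [(1, 4, 84, 84), (1, 3)]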

Example 17

Project: MemN2N-babi-python
Source File: nn.py
View license
    def fprop(self, input_data):
        self.output = input_data[0]
        for elem in input_data[1:]:
            # Expand to the same ndim as self.output
            # TODO: Code improvement
            if elem.ndim == self.output.ndim - 1:
                elem = np.expand_dims(elem, axis=elem.ndim)  # append a trailing axis; axis=ndim+1 is out of range in modern NumPy
            self.output += elem
        return self.output

Example 18

View license
    def qValues(self, state_val):
        """ Get the q values for one belief state

        Arguments
        ---------
        state_val : one belief state

        Returns
        -------
        The q value for the provided belief state
        """ 
        return self.q_vals.predict([np.expand_dims(state,axis=0) for state in state_val])[0]

Example 19

Project: drmad
Source File: kernel_methods.py
View license
def make_exp_kernel(L0):
    def exp_kernel(x1, x2):
        x1 = np.expand_dims(x1, 2) # Append a singleton dimension
        x2 = x2.T
        return np.exp(-np.mean(np.abs(x1 - x2), axis=1) / L0)
    return exp_kernel

Example 20

Project: drmad
Source File: kernel_methods.py
View license
def make_sq_exp_kernel(L0):
    def sq_exp_kernel(x1, x2):
        x1 = np.expand_dims(x1, 2) # Append a singleton dimension
        x2 = x2.T
        return np.exp(-np.sum((x1 - x2)**2, axis=1) / (2 * L0**2))
    return sq_exp_kernel
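
In both kernels, np.expand_dims(x1, 2) turns the (N, D) matrix into (N, D, 1) so that subtracting the transposed (D, M) matrix x2.T broadcasts to an (N, D, M) array of pairwise differences; reducing over axis 1 then yields the (N, M) kernel matrix. A shape-only sketch with L0 = 1:

import numpy as np

x1 = np.random.rand(5, 3)                    # N=5 points, D=3 features
x2 = np.random.rand(7, 3)                    # M=7 points
diff = np.expand_dims(x1, 2) - x2.T          # (5, 3, 1) - (3, 7) broadcasts to (5, 3, 7)
K = np.exp(-np.mean(np.abs(diff), axis=1))   # (5, 7) kernel matrix
print(K.shape)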

Example 21

Project: DeepLearning-OCR
Source File: util.py
View license
def one_hot_decoder(data, whole_set):
	ret = []
	if data.ndim == 1: # keras bug ?
		data = np.expand_dims(data, 0)
	for probs in data:
		idx = np.argmax(probs)
		# print idx, whole_set[idx], probs[idx]
		ret.append(whole_set[idx])
	return ret

Example 22

Project: DeepLearning-OCR
Source File: util.py
View license
def top_one_prob(data):
	ret = []
	if data.ndim == 1: # keras bug ?
		data = np.expand_dims(data, 0)
	for probs in data:
		idx = np.argmax(probs)
		ret.append(probs[idx])
	return ret

Example 23

Project: deconvfaces
Source File: instance.py
View license
    def th_image(self):
        """
        Returns a Theano-ordered representation of the image.
        """

        return np.expand_dims(self.image, 0)

Example 24

Project: deconvfaces
Source File: instance.py
View license
    def tf_image(self):
        """
        Returns a TensorFlow-ordered representation of the image.
        """

        return np.expand_dims(self.image, 2)

Example 25

Project: bolt
Source File: utils.py
View license
def iterexpand(arry, extra):
    """
    Expand dimensions by iteratively append empty axes.

    Parameters
    ----------
    arry : ndarray
        The original array

    extra : int
        The number of empty axes to append
    """
    for d in range(arry.ndim, arry.ndim+extra):
        arry = expand_dims(arry, axis=d)
    return arry
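
iterexpand appends extra trailing singleton axes by calling expand_dims once per new axis; because the loop starts at the current ndim, each new axis lands at the end. The same loop with plain NumPy:

import numpy as np

arry = np.zeros((2, 3))
for d in range(arry.ndim, arry.ndim + 2):   # append two empty axes
    arry = np.expand_dims(arry, axis=d)
print(arry.shape)                           # (2, 3, 1, 1)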

Example 26

Project: mkmov
Source File: quiver.py
View license
    def getdata(self,ifile,varname,preview=False):
        """function that grabs the data
        :returns: nparray
        """
        if not preview:
            if self.var_len==4:
                var_nparray=ifile.variables[varname][:,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[varname][:]
        else:
            if self.var_len==4:
                var_nparray=ifile.variables[varname][0,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[varname][0,:]
            var_nparray=np.expand_dims(var_nparray,axis=0)
    
        return var_nparray

Example 27

Project: mkmov
Source File: twodbm.py
View license
    def getdata(self,ifile,preview=False):
        """function that grabs the data
        :returns: nparray
        """
        if not preview:
            if self.var_len==4:
                var_nparray=ifile.variables[self.variable_name][:,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[self.variable_name][:]
        else:
            if self.var_len==4:
                var_nparray=ifile.variables[self.variable_name][0,self.depthlvl,:,:]
            else:
                var_nparray=ifile.variables[self.variable_name][0,:]
            # print np.shape(var_nparray)
            var_nparray=np.expand_dims(var_nparray,axis=0)
            # print np.shape(var_nparray)
    
        return var_nparray

Example 28

Project: ptsa
Source File: helper.py
View license
def repeat_to_match_dims(x,y,axis=-1):
    
    rnk = len(y.shape)
    
    # convert negative axis to positive axis
    if axis < 0: 
        axis = axis + rnk

    for d in list(range(axis)) + list(range(axis + 1, rnk)):  # skip `axis`; list() needed because Python 3 ranges cannot be added
        # add the dimension
        x = np.expand_dims(x,d)
        # repeat to fill that dim
        x = x.repeat(y.shape[d],d)

    return x
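
The function above inserts a singleton axis at every position except axis and then repeats x along each of those axes until its shape matches y. A standalone sketch of the same idea for a 1D x aligned with the last axis of a 3D y:

import numpy as np

x = np.arange(4)                                  # shape (4,), aligned with y's last axis
y = np.zeros((2, 3, 4))
x_exp = np.expand_dims(np.expand_dims(x, 0), 0)   # (1, 1, 4)
x_rep = x_exp.repeat(2, 0).repeat(3, 1)           # (2, 3, 4), matches y.shape
print(x_rep.shape)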

Example 29

Project: topik
Source File: plsa.py
View license
def _m_step(words_in_docs, word_cts_in_docs, topic_array, zw, dw_z, dz):
    zw[:] = 0
    for (d, doc_id, words) in words_in_docs:
        zw[:, words] += word_cts_in_docs[doc_id]*dw_z[d, words].T
    # normalize by sum of topic word weights
    zw /= np.expand_dims(zw.sum(axis=1), 1)
    for (d, doc_id, words) in words_in_docs:
        dz[d] = (word_cts_in_docs[doc_id] * dw_z[d, words].T).sum(axis=1)
    dz /= np.expand_dims(dz.sum(axis=1), 1)
    return zw, dz
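
Both normalizations above divide a matrix by its row sums; np.expand_dims(..., 1) re-inserts the reduced axis so the (n,) vector of sums broadcasts as an (n, 1) column. This is equivalent to summing with keepdims=True:

import numpy as np

zw = np.random.rand(3, 5)
rows = zw / np.expand_dims(zw.sum(axis=1), 1)
print(np.allclose(rows, zw / zw.sum(axis=1, keepdims=True)))   # True
print(rows.sum(axis=1))                                        # each row now sums to 1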

Example 30

Project: polar2grid
Source File: readers.py
View license
def get_band_3_mask(data_reader, chn, calib_type):
    """Get a boolean mask to determine if a pixel is band 3A or 3B.

    True if 3B, False if 3A.
    """
    # XXX: If NOAA files need processing this logic is opposite (True = 3A, False = 3B)
    return numpy.expand_dims((data_reader["scnlinbit"] & 1) == 1, 1)

Example 31

View license
    def points_to_csv(self, file_name):
        """
        After training all points for which we know target variable
        (both from initialization and optimization) are saved

        :param file_name: name of the file where points will be saved in the csv format

        :return: None
        """

        points = np.hstack((self.X, np.expand_dims(self.Y, axis=1)))
        header = ', '.join(self.keys + ['target'])
        np.savetxt(file_name, points, header=header, delimiter=',')
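
np.expand_dims(self.Y, axis=1) turns the 1D target vector into a column so it can sit beside the 2D X matrix in hstack. The same pattern in isolation (array sizes are illustrative):

import numpy as np

X = np.random.rand(5, 2)                              # 5 evaluated points, 2 parameters
Y = np.random.rand(5)                                 # 5 target values, shape (5,)
points = np.hstack((X, np.expand_dims(Y, axis=1)))    # shape (5, 3)
print(points.shape)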

Example 32

Project: distributions
Source File: stats.py
View license
def sample_discrete_from_log(p_log,axis=0,dtype=np.int32):
    'samples log probability array along specified axis'
    cumvals = np.exp(p_log - np.expand_dims(p_log.max(axis),axis)).cumsum(axis) # cumlogaddexp
    thesize = np.array(p_log.shape)
    thesize[axis] = 1
    # index with a tuple: NumPy no longer accepts a list containing slices as an index
    randvals = random(size=thesize) * \
            np.reshape(cumvals[tuple(slice(None) if i != axis else -1
                for i in range(p_log.ndim))], thesize)
    return np.sum(randvals > cumvals,axis=axis,dtype=dtype)

Example 33

Project: Kayak
Source File: matrix_ops.py
View license
    def _local_grad(self, parent, d_out_d_self):
        # If self.keepdims == False then we need to
        # broadcast d_out_d_self along the summation axis
        if not self.keepdims and self.axis is not None:
            expanded_d_out_d_self = np.expand_dims(d_out_d_self, self.axis)
            return expanded_d_out_d_self * np.ones(self.A.shape)
        else:
            return d_out_d_self * np.ones(self.A.shape)
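
When a sum over axis collapses that axis (keepdims=False), the upstream gradient is one dimension short of A; expanding it at the summed axis lets it broadcast back over A's shape, so every element of A receives the gradient of the sum it contributed to. A minimal sketch for a sum over axis 1:

import numpy as np

A = np.random.rand(4, 3)
d_out = np.ones(4)                                    # upstream gradient of A.sum(axis=1)
grad_A = np.expand_dims(d_out, 1) * np.ones(A.shape)  # (4, 3): gradient broadcast back over A
print(grad_A.shape)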

Example 34

Project: Kayak
Source File: matrix_ops.py
View license
    def _local_grad(self, parent, d_out_d_self):
        # If self.keepdims == False then we need to
        # broadcast d_out_d_self along the summation axis
        N = float(self.A.value.size) if self.axis is None else float(self.A.shape[self.axis])
        if not self.keepdims and self.axis is not None:
            expanded_d_out_d_self = np.expand_dims(d_out_d_self, self.axis)
            return expanded_d_out_d_self * 1.0/N * np.ones(self.A.shape)
        else:
            return d_out_d_self * 1.0/N * np.ones(self.A.shape)

Example 35

Project: robothon
Source File: extras.py
View license
def expand_dims(a, axis):
    """Expands the shape of a by including newaxis before axis.
    """
    if not isinstance(a, MaskedArray):
        return np.expand_dims(a, axis)
    elif getmask(a) is nomask:
        return np.expand_dims(a, axis).view(MaskedArray)
    m = getmaskarray(a)
    return masked_array(np.expand_dims(a, axis),
                        mask=np.expand_dims(m, axis))
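
The wrapper expands the data and the mask with the same axis and rebuilds the MaskedArray, so masked entries stay masked after the new axis is added. A sketch of the same steps with plain NumPy (not using the robothon wrapper):

import numpy as np
from numpy.ma import masked_array, getmaskarray

a = np.ma.array([1, 2, 3], mask=[False, True, False])
b = masked_array(np.expand_dims(a, axis=0),
                 mask=np.expand_dims(getmaskarray(a), axis=0))
print(b.shape)          # (1, 3)
print(b.mask.tolist())  # [[False, True, False]]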

Example 36

Project: holoviews
Source File: raster.py
View license
    def __getitem__(self, slices):
        if slices in self.dimensions(): return self.dimension_values(slices)
        slices = util.process_ellipses(self,slices)
        if not isinstance(slices, tuple):
            slices = (slices, slice(None))
        elif len(slices) > (2 + self.depth):
            raise KeyError("Can only slice %d dimensions" % (2 + self.depth))  # parenthesized: % binds tighter than +
        elif len(slices) == 3 and slices[-1] not in [self.vdims[0].name, slice(None)]:
            raise KeyError("%r is the only selectable value dimension" % self.vdims[0].name)

        slc_types = [isinstance(sl, slice) for sl in slices[:2]]
        data = self.data.__getitem__(slices[:2][::-1])
        if all(slc_types):
            return self.clone(data, extents=None)
        elif not any(slc_types):
            return toarray(data, index_value=True)
        else:
            return self.clone(np.expand_dims(data, axis=slc_types.index(True)),
                              extents=None)

Example 37

Project: keraflow
Source File: models.py
View license
    def _validate_io_arrays_shapes(self, names, arrays, shapes):
        # make sure the arrays are 2D
        for i in range(len(arrays)):
            if len(arrays[i].shape)==1:
                arrays[i] = np.expand_dims(arrays[i], 1)

        for array, name, shape in zip(arrays, names, shapes):
            if len(array.shape) != len(shape):
                raise KError('Input dimension mismatch for {}. Expected: {} (batch dimension included). Given: {}'.format(name, len(shape), len(array.shape)))
            for a, p in zip(array.shape, shape):
                if p is not None and p != a:
                    raise KError('Input shape mismatch for {}. Expected: {}. Given: {}'.format(name, shape, array.shape))

Example 38

Project: keraflow
Source File: test_core.py
View license
def test_expand_dims():
    axis=2
    layer_test(core.ExpandDims(axis=axis),
               [origin],
               [np.expand_dims(origin, axis)])

    layer_test(core.ExpandDims(axis=axis, include_batch_dim=True),
               [origin],
               [np.expand_dims(origin, axis-1)])

Example 39

Project: keraflow
Source File: test_layer_exception.py
View license
def test_wrc_exceptions():
    # Sequential should be initialized with a list of layer
    with pytest.raises(KError):
        Sequential(Dense(2))

    # Layer weight shape mismatch
    with pytest.raises(KError):
        create_model(initial_weights={'W':np.expand_dims(W, axis=1), 'b':b})

    # regularizers does not take single input
    with pytest.raises(KError):
        create_model(initial_weights=[W, b], regularizers='l1')

    # constraints does not take single input
    with pytest.raises(KError):
        create_model(initial_weights=[W, b], constraints='maxnorm')

Example 40

Project: keras-grad-cam
Source File: grad-cam.py
View license
def load_image(path):
    img_path = sys.argv[1]
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x

Example 41

Project: icnn
Source File: ddpg.py
View license
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        action = self._act_test(obs) if test else self._act_expl(obs)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 42

Project: icnn
Source File: icnn.py
View license
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        if FLAGS.use_gd:
            act = self.get_cvx_opt_gd(self._opt_test_gd, obs)
        else:
            act = self.get_cvx_opt(self._opt_test, obs)
        action = act if test else self._act_expl(act)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 43

Project: icnn
Source File: naf.py
View license
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        action = self._act_test(obs, False) if test else self._act_expl(obs, False)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 44

Project: icnn
Source File: ddpg.py
View license
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        action = self._act_test(obs) if test else self._act_expl(obs)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 45

Project: icnn
Source File: icnn.py
View license
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        if FLAGS.use_gd:
            act = self.get_cvx_opt_gd(self._opt_test_gd, obs)
        else:
            act = self.get_cvx_opt(self._opt_test, obs)
        action = act if test else self._act_expl(act)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 46

Project: icnn
Source File: naf.py
View license
    def act(self, test=False):
        obs = np.expand_dims(self.observation, axis=0)
        action = self._act_test(obs, False) if test else self._act_expl(obs, False)
        action = np.clip(action, -1, 1)
        self.action = np.atleast_1d(np.squeeze(action, axis=0))  # TODO: remove this hack
        return self.action

Example 47

Project: sima
Source File: sequence.py
View license
    def __array__(self):
        """Used to convert the Sequence to a numpy array.

        >>> import sima
        >>> import numpy as np
        >>> data = np.ones((10, 3, 16, 16, 2))
        >>> seq = sima.Sequence.create('ndarray', data)
        >>> np.all(data == np.array(seq))
        True

        """

        return np.concatenate([np.expand_dims(frame, 0) for frame in self])
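
Expanding each frame with a leading length-1 axis and concatenating is the long-standing idiom for stacking an iterable of equal-shaped arrays along a new first axis; np.stack (NumPy 1.10+) does the same in one call. A sketch:

import numpy as np

frames = [np.ones((3, 16, 16, 2)) for _ in range(10)]
stacked = np.concatenate([np.expand_dims(f, 0) for f in frames])
print(stacked.shape)                                # (10, 3, 16, 16, 2)
print(np.array_equal(stacked, np.stack(frames)))    # True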

Example 48

Project: sima
Source File: sequence.py
View license
    def __array__(self):
        """Used to convert the Sequence to a numpy array.

        >>> import sima
        >>> import numpy as np
        >>> data = np.ones((10, 3, 16, 16, 2))
        >>> seq = sima.Sequence.create('ndarray', data)
        >>> np.all(data == np.array(seq))
        True

        """

        return np.concatenate([np.expand_dims(frame, 0) for frame in self])

Example 49

View license
    def append_data_histograms(self, x, datavect1, datavect2, title1=None, title2=None):
        self.x3.append(x)
        datavect1.sort()
        self.w3 = np.concatenate((self.w3, np.expand_dims(probability_distribution(datavect1), 0)))
        datavect2.sort()
        self.b3 = np.concatenate((self.b3, np.expand_dims(probability_distribution(datavect2), 0)))
        self._update_xmax(x)

Example 50

View license
    def append_data_histograms(self, x, datavect1, datavect2, title1=None, title2=None):
        self.x3.append(x)
        datavect1.sort()
        self.w3 = np.concatenate((self.w3, np.expand_dims(probability_distribution(datavect1), 0)))
        datavect2.sort()
        self.b3 = np.concatenate((self.b3, np.expand_dims(probability_distribution(datavect2), 0)))
        self._update_xmax(x)