numpy.intp

Here are examples of the Python API numpy.intp, taken from open source projects.
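
For context, np.intp is the signed integer type that is the same width as a pointer (NumPy's counterpart to C's ssize_t), which is why it is the canonical dtype for array indices. A quick check, assuming a standard NumPy build:

import numpy as np

# np.intp is pointer-sized: 8 bytes on 64-bit builds, 4 on 32-bit ones.
print(np.dtype(np.intp).itemsize)

# Index-producing functions such as nonzero() return intp arrays.
idx = np.nonzero(np.array([0, 3, 0, 5]))[0]
print(idx.dtype == np.intp)  # True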

76 Examples


Example 1

Project: numba Source File: test_scan.py
Function: test_shuffle
    def test_shuffle(self):
        @hsa.jit
        def foo(inp, mask, out):
            tid = hsa.get_local_id(0)
            out[tid] = hsa.activelanepermute_wavewidth(inp[tid], mask[tid], 0,
                                                       False)

        inp = np.arange(64, dtype=np.intp)
        for i in range(10):
            mask = np.random.randint(0, inp.size, inp.size).astype(np.uint32)
            out = np.zeros_like(inp)
            foo[1, 64](inp, mask, out)
            np.testing.assert_equal(inp[mask], out)

Example 2

Project: numba Source File: test_simple.py
    def test_local_id(self):
        @hsa.jit
        def udt(output):
            global_id = hsa.get_global_id(0)
            local_id = hsa.get_local_id(0)
            output[global_id] = local_id

        # Allocate extra space to track bad indexing
        out = np.zeros(100 + 2, dtype=np.intp)
        udt[10, 10](out[1:-1])

        subarr = out[1:-1]

        for parted in np.split(subarr, 10):
            np.testing.assert_equal(parted, np.arange(10))

        self.assertEqual(out[0], 0)
        self.assertEqual(out[-1], 0)

Example 3

Project: hedge Source File: gmsh.py
Function: init
    def __init__(self, nodes, ldis):
        self.nodes = nodes
        self.ldis  = ldis

        node_src_indices = np.array(
                ldis.get_lexicographic_gmsh_node_indices(),
                dtype=np.intp)

        nodes = np.array(nodes, dtype=np.float64)
        reordered_nodes = nodes[node_src_indices, :]

        self.modal_coeff = la.solve(
                ldis.equidistant_vandermonde(), reordered_nodes)
        # axis 0: node number, axis 1: xyz axis

        if False:
            for i, c in zip(ldis.generate_mode_identifiers(), self.modal_coeff):
                print(i, c)

Example 4

Project: msmbuilder Source File: test_libdistance.py
def test_sumdist_double_float():
    pairs = random.randint(0, 10, size=(5, 2)).astype(np.intp)
    for metric in VECTOR_METRICS:
        for X in (X_double, X_float):
            alldist = scipy.spatial.distance.squareform(pdist(X, metric))
            np.testing.assert_almost_equal(
                    sum(alldist[p[0], p[1]] for p in pairs),
                    sumdist(X, metric, pairs))

Example 5

Project: hedge Source File: indexing.py
def partial_to_all_subset_indices(subsets, base=0):
    """Takes a sequence of bools and generates it into an array of indices
    to be used to insert the subset into the full set.

    Example:

    >>> list(partial_to_all_subset_indices([[False, True, True], [True,False,True]]))
    [array([0, 1]), array([2, 3])]
    """

    idx = base
    for subset in subsets:
        result = []
        for is_in in subset:
            if is_in:
                result.append(idx)
                idx += 1

        yield numpy.array(result, dtype=numpy.intp)

Example 6

Project: PyFR Source File: blasext.py
Function: copy
    def copy(self, dst, src):
        if dst.traits != src.traits:
            raise ValueError('Incompatible matrix types')

        if dst.nbytes >= 2**31:
            raise ValueError('Matrix too large for copy')

        # Render the kernel template
        ksrc = self.backend.lookup.get_template('par-memcpy').render()

        # Build the kernel
        kern = self._build_kernel('par_memcpy', ksrc,
                                  [np.intp, np.intp, np.int32])

        class CopyKernel(ComputeKernel):
            def run(self, queue):
                kern(dst, src, dst.nbytes)

        return CopyKernel()

Example 7

Project: numba Source File: test_simple.py
Function: test_array_access
    def test_array_access(self):
        magic_token = 123

        @hsa.jit
        def udt(output):
            output[0] = magic_token

        out = np.zeros(1, dtype=np.intp)
        udt[1, 1](out)

        self.assertEqual(out[0], magic_token)

Example 8

Project: numba Source File: test_simple.py
Function: test_global_id
    def test_global_id(self):
        @hsa.jit
        def udt(output):
            global_id = hsa.get_global_id(0)
            output[global_id] = global_id

        # Allocate extra space to track bad indexing
        out = np.zeros(100 + 2, dtype=np.intp)
        udt[10, 10](out[1:-1])

        np.testing.assert_equal(out[1:-1], np.arange(100))

        self.assertEqual(out[0], 0)
        self.assertEqual(out[-1], 0)

Example 9

Project: numba Source File: test_gil.py
    def run_in_threads(self, func, n_threads):
        # Run the function in parallel over an array and collect results.
        threads = []
        # Warm up compilation, since we don't want that to interfere with
        # the test proper.
        func(self.make_test_array(1), np.arange(1, dtype=np.intp))
        arr = self.make_test_array(50)
        for i in range(n_threads):
            # Ensure different threads write into the array in different
            # orders.
            indices = np.arange(arr.size, dtype=np.intp)
            np.random.shuffle(indices)
            t = threading.Thread(target=func, args=(arr, indices))
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return arr

Example 10

Project: scikit-learn Source File: base.py
Function: predict
    def predict(self, X):
        """Perform classification on samples in X.

        For a one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            [n_samples_test, n_samples_train]

        Returns
        -------
        y_pred : array, shape (n_samples,)
            Class labels for samples in X.
        """
        y = super(BaseSVC, self).predict(X)
        return self.classes_.take(np.asarray(y, dtype=np.intp))
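
The take call above maps integer-coded predictions back onto the original class labels. A minimal sketch of the same pattern, with made-up labels:

import numpy as np

classes_ = np.array(['ham', 'spam'])
y = [0.0, 1.0, 0.0]  # hypothetical integer-valued predictions from the solver
print(classes_.take(np.asarray(y, dtype=np.intp)))  # ['ham' 'spam' 'ham']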

Example 11

Project: msmbuilder Source File: test_kmedoids.py
def test_contigify_ids_1():
    inp = np.array([0, 10, 10, 20, 20, 21], dtype=np.intp)
    ref = np.array([0, 1, 1, 2, 2, 3], dtype=np.intp)
    out, mapping = contigify_ids(inp)
    assert np.all(out == ref)
    # it's inplace, so they should be equal now
    assert np.all(inp == out)
    assert mapping == {0: 0, 10: 1, 20: 2, 21: 3}

Example 12

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_old_ma.py
    def test_xtestCount(self):
        # Test count
        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        self.assertTrue(count(ott).dtype.type is np.intp)
        self.assertEqual(3, count(ott))
        self.assertEqual(1, count(1))
        self.assertTrue(eq(0, array(1, mask=[1])))
        ott = ott.reshape((2, 2))
        self.assertTrue(count(ott).dtype.type is np.intp)
        assert_(isinstance(count(ott, 0), np.ndarray))
        self.assertTrue(count(ott).dtype.type is np.intp)
        self.assertTrue(eq(3, count(ott)))
        assert_(getmask(count(ott, 0)) is nomask)
        self.assertTrue(eq([1, 2], count(ott, 0)))

Example 13

Project: scipy Source File: sputils.py
def downcast_intp_index(arr):
    """
    Down-cast index array to np.intp dtype if it is of a larger dtype.

    Raise an error if the array contains a value that is too large for
    intp.
    """
    if arr.dtype.itemsize > np.dtype(np.intp).itemsize:
        if arr.size == 0:
            return arr.astype(np.intp)
        maxval = arr.max()
        minval = arr.min()
        if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:
            raise ValueError("Cannot deal with arrays with indices larger "
                             "than the machine maximum address size "
                             "(e.g. 64-bit indices on 32-bit machine).")
        return arr.astype(np.intp)
    return arr
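
A hedged usage sketch: on a 32-bit build an int64 index array is narrowed (or rejected if its values do not fit in intp), while on 64-bit builds it passes through unchanged:

import numpy as np

idx = np.array([0, 5, 7], dtype=np.int64)
idx = downcast_intp_index(idx)
assert idx.dtype.itemsize <= np.dtype(np.intp).itemsize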

Example 14

Project: dipy Source File: utils.py
def _rmi(index, dims):
    """An alternate implementation of numpy.ravel_multi_index for older
    versions of numpy.

    Assumes array layout is C contiguous
    """
    # Upcast to integer type capable of holding largest array index
    index = np.asarray(index, dtype=np.intp)
    dims = np.asarray(dims)
    if index.ndim > 2:
        raise ValueError("Index should be 1 or 2-D")
    elif index.ndim == 2:
        index = index.T
    if (index >= dims).any():
        raise ValueError("Index exceeds dimensions")
    strides = np.r_[dims[:0:-1].cumprod()[::-1], 1]
    return (strides * index).sum(-1)
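
Since _rmi is a stand-in for numpy.ravel_multi_index, the two should agree on any in-bounds index set; a small sanity check, calling the private helper directly:

import numpy as np

multi = np.array([[0, 1, 2],   # row indices
                  [3, 2, 1]])  # column indices
print(_rmi(multi, (3, 4)))                  # [3 6 9]
print(np.ravel_multi_index(multi, (3, 4)))  # [3 6 9]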

Example 15

Project: xarray Source File: nputils.py
def inverse_permutation(indices):
    """Return indices for an inverse permutation.

    Parameters
    ----------
    indices : 1D np.ndarray with dtype=int
        Integer positions to assign elements to.

    Returns
    -------
    inverse_permutation : 1D np.ndarray with dtype=int
        Integer indices to take from the original array to create the
        permutation.
    """
    # use intp instead of int64 because of windows :(
    inverse_permutation = np.empty(len(indices), dtype=np.intp)
    inverse_permutation[indices] = np.arange(len(indices), dtype=np.intp)
    return inverse_permutation
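
A quick illustration of the round trip: applying a permutation and then its inverse restores the original order:

import numpy as np

perm = np.array([2, 0, 1], dtype=np.intp)
inv = inverse_permutation(perm)  # array([1, 2, 0])
arr = np.array(['a', 'b', 'c'])
assert (arr[perm][inv] == arr).all()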

Example 16

Project: numba Source File: test_scan.py
    def test_shuf_wave_inclusive_scan(self):
        @hsa.jit
        def foo(inp, out):
            gid = hsa.get_global_id(0)
            out[gid] = shuf_wave_inclusive_scan(inp[gid])

        inp = np.arange(64, dtype=np.intp)
        out = np.zeros_like(inp)
        foo[1, 64](inp, out)
        np.testing.assert_equal(out, inp.cumsum())

Example 17

Project: hedge Source File: data.py
Function: init
    def __init__(self, discr, nodes, vol_indices, face_groups, fg_ranges,
            el_face_to_face_group_and_face_pair={}):
        self.discr = discr
        self.nodes = nodes
        self.vol_indices = np.asarray(vol_indices, dtype=np.intp)
        self.face_groups = face_groups
        self.fg_ranges = fg_ranges
        self.el_face_to_face_group_and_face_pair = \
                el_face_to_face_group_and_face_pair

Example 18

Project: numba Source File: test_simple.py
    def test_array_access_3d(self):
        magic_token = 123

        @hsa.jit
        def udt(output):
            for i in range(output.shape[0]):
                for j in range(output.shape[1]):
                    for k in range(output.shape[2]):
                        output[i, j, k] = magic_token

        out = np.zeros((10, 10, 10), dtype=np.intp)
        udt[1, 1](out)
        np.testing.assert_equal(out, magic_token)

Example 19

Project: hedge Source File: element.py
Function: init
    def __init__(self, id, vertex_indices, all_vertices):
        vertex_indices = numpy.asarray(vertex_indices, dtype=numpy.intp)
        vertices = [all_vertices[v] for v in vertex_indices]

        # calculate maps, initialize
        map = self.get_map_unit_to_global(vertices)
        Element.__init__(self, id, vertex_indices, map)
        self.inverse_map = map.inverted()

        # calculate face normals and jacobians
        face_normals, face_jacobians = \
                self.face_normals_and_jacobians(vertices, map)

        self.face_normals = face_normals
        self.face_jacobians = face_jacobians

Example 20

Project: numba Source File: test_gufunc.py
    def test_ufunc_like(self):
        # Test a problem where the stride of a "scalar" gufunc argument was
        # not properly handled when the actual argument is an array, causing
        # the same value (the first value) to be repeated.
        gufunc = GUVectorize(axpy, '(), (), () -> ()', target=self.target)
        gufunc.add('(intp, intp, intp, intp[:])')
        gufunc = gufunc.build_ufunc()

        x = np.arange(10, dtype=np.intp)
        out = gufunc(x, x, x)

        np.testing.assert_equal(out, x * x + x)

Example 21

Project: hedge Source File: indexing.py
def full_to_subset_indices(subset, base=0):
    """Takes a sequence of bools and turns it into an array of indices
    to be used to extract the subset from the full set.

    Example:

    >>> full_to_subset_indices([False, True, True])
    array([1, 2])
    """

    result = []
    for i, is_in in enumerate(subset):
        if is_in:
            result.append(i + base)

    return numpy.array(result, dtype=numpy.intp)

Example 22

Project: hedge Source File: indexing.py
def full_to_all_subset_indices(subsets, base=0):
    """Takes a sequence of bools and generates it into an array of indices
    to be used to extract the subset from the full set.

    Example:

    >>> list(full_to_all_subset_indices([[False, True, True], [True,False,True]]))
    [array([1, 2]), array([3, 5])]
    """

    for subset in subsets:
        result = []
        for i, is_in in enumerate(subset):
            if is_in:
                result.append(i + base)
        base += len(subset)

        yield numpy.array(result, dtype=numpy.intp)

Example 23

Project: chainer Source File: cudnn.py
def get_rnn_lin_layer_matrix_params(
        handle, rnn_desc, layer, x_desc, w_desc, w, lin_layer_id):
    mat_desc = Descriptor(cudnn.createFilterDescriptor(),
                          cudnn.destroyFilterDescriptor)
    ptr = numpy.array(0, dtype=numpy.intp)
    cudnn.getRNNLinLayerMatrixParams(
        handle, rnn_desc.value, layer, x_desc.value, w_desc.value, w.data.ptr,
        lin_layer_id, mat_desc.value, ptr.ctypes.data)
    offset = (ptr - w.data.ptr) // 4
    _, _, _, dim = cudnn.getFilterNdDescriptor(mat_desc.value, 3)
    size = numpy.prod(dim)
    mat = w[offset: offset + size]
    return mat
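
The 0-d numpy.intp array here serves as pointer-sized storage for cuDNN to write an address into. A minimal sketch of the same out-parameter idiom using only ctypes (the address value is made up):

import ctypes
import numpy as np

out = np.array(0, dtype=np.intp)     # pointer-sized scratch space
value = ctypes.c_void_p(0xdeadbeef)  # pretend a C API produced this pointer
ctypes.memmove(out.ctypes.data, ctypes.byref(value),
               np.dtype(np.intp).itemsize)
print(hex(int(out)))                 # 0xdeadbeef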

Example 24

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_regression.py
Function: test_large_fancy_indexing
    def test_large_fancy_indexing(self, level=rlevel):
        # Large enough to fail on 64-bit.
        nbits = np.dtype(np.intp).itemsize * 8
        thesize = int((2**nbits)**(1.0/5.0)+1)

        def dp():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)] = 0

        def dp2():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0, n, size=thesize)
            a[np.ix_(i, i, i, i, i)]

        self.assertRaises(ValueError, dp)
        self.assertRaises(ValueError, dp2)

Example 25

Project: scikit-learn Source File: test_fast_dict.py
Function: test_int_float_dict_argmin
def test_int_float_dict_argmin():
    # Test the argmin implementation on the IntFloatDict
    keys = np.arange(100, dtype=np.intp)
    values = np.arange(100, dtype=np.float64)
    d = IntFloatDict(keys, values)
    assert_equal(argmin(d), (0, 0))

Example 26

Project: msmbuilder Source File: regularspatial.py
Function: fit
    def fit(self, X, y=None):
        cluster_ids = [0]
        for i in range(1, len(X)):
            # distance from X[i] to each X with indices in cluster_ids
            d = libdistance.dist(
                X, X[i], metric=self.metric, X_indices=np.array(cluster_ids, dtype=np.intp))
            if np.all(d > self.d_min):
                cluster_ids.append(i)

        self.cluster_center_indices_ = cluster_ids
        self.cluster_centers_ = X[np.array(cluster_ids)]
        self.n_clusters_ = len(cluster_ids)
        return self

Example 27

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_logistic.py
Function: test_multinomial_binary
def test_multinomial_binary():
    # Test multinomial LR on a binary problem.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)

        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)

Example 28

Project: scipy Source File: lil.py
Function: getnnz
    def getnnz(self, axis=None):
        if axis is None:
            return sum([len(rowvals) for rowvals in self.data])
        if axis < 0:
            axis += 2
        if axis == 0:
            out = np.zeros(self.shape[1], dtype=np.intp)
            for row in self.rows:
                out[row] += 1
            return out
        elif axis == 1:
            return np.array([len(rowvals) for rowvals in self.data], dtype=np.intp)
        else:
            raise ValueError('axis out of bounds')
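
A quick demonstration of the per-axis behaviour on a small lil_matrix:

import numpy as np
from scipy.sparse import lil_matrix

m = lil_matrix((2, 3))
m[0, 1] = 1.0
m[1, 1] = 2.0
print(m.getnnz())          # 2
print(m.getnnz(axis=0))    # [0 2 0], dtype=intp
print(m.getnnz(axis=1))    # [1 1]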

Example 29

Project: msmbuilder Source File: test_kmedoids.py
def test_contigify_ids_2():
    inp = np.array([2, 0, 10, 2, 2, 10], dtype=np.intp)
    ref = np.array([0, 1, 2, 0, 0, 2], dtype=np.intp)
    out, mapping = contigify_ids(inp)
    assert np.all(out == ref)
    # it's inplace, so they should be equal now
    assert np.all(inp == out)
    assert mapping == {2: 0, 0: 1, 10: 2}

Example 30

Project: chainer Source File: cudnn.py
def get_rnn_lin_layer_bias_params(
        handle, rnn_desc, layer, x_desc, w_desc, w, lin_layer_id):
    bias_desc = Descriptor(cudnn.createFilterDescriptor(),
                           cudnn.destroyFilterDescriptor)
    ptr = numpy.array(0, dtype=numpy.intp)
    cudnn.getRNNLinLayerBiasParams(
        handle, rnn_desc.value, layer, x_desc.value, w_desc.value, w.data.ptr,
        lin_layer_id, bias_desc.value, ptr.ctypes.data)
    offset = (ptr - w.data.ptr) // 4
    _, _, _, dim = cudnn.getFilterNdDescriptor(bias_desc.value, 3)
    size = numpy.prod(dim)
    bias = w[offset: offset + size]
    return bias

Example 31

Project: msmbuilder Source File: test_libdistance.py
def test_sumdist_rmsd():
    pairs = random.randint(0, 10, size=(5, 2)).astype(np.intp)
    alldist = scipy.spatial.distance.squareform(pdist(X_rmsd, "rmsd"))
    np.testing.assert_almost_equal(
            sum(alldist[p[0], p[1]] for p in pairs),
            sumdist(X_rmsd, "rmsd", pairs),
            decimal=6)

Example 32

Project: PyFR Source File: provider.py
Function: build_kernel
    @memoize
    def _build_kernel(self, name, src, argtypes):
        # Compile the source code
        prg = cl.Program(self.backend.ctx, src)
        prg.build(['-cl-fast-relaxed-math'])

        # Retrieve the kernel
        kern = getattr(prg, name)

        # Set the argument types
        dtypes = [t if t != np.intp else None for t in argtypes]
        kern.set_scalar_arg_dtypes(dtypes)

        return kern

Example 33

Project: nipy Source File: __init__.py
Function: proc_array
def _proc_array(array):
    """ Change array dtype from intp to int32 / int64

    Parameters
    ----------
    array : ndarray

    Returns
    -------
    output_array : ndarray
        `array` unchanged or view of array where array dtype has been changed
        from ``np.intp`` to ``np.int32`` or ``np.int64`` depending on whether
        this is a 32 or 64 bit numpy.  All other dtypes unchanged.
    """
    if array.dtype == np.dtype(np.intp):
        return array.view(_INT_DTYPE)
    return array
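
_INT_DTYPE is defined elsewhere in the module; presumably it picks the fixed-width type with the same layout as intp on the current platform, along the lines of:

import numpy as np

# Assumed definition: int64 on 64-bit numpy builds, int32 on 32-bit ones.
_INT_DTYPE = np.int64 if np.dtype(np.intp).itemsize == 8 else np.int32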

Example 34

Project: scikit-image Source File: orb.py
    def _extract_octave(self, octave_image, keypoints, orientations):
        mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                      distance=20)
        keypoints = np.array(keypoints[mask], dtype=np.intp, order='C',
                             copy=False)
        orientations = np.array(orientations[mask], dtype=np.double, order='C',
                                copy=False)

        descriptors = _orb_loop(octave_image, keypoints, orientations)

        return descriptors, mask

Example 35

Project: numba Source File: test_scan.py
    def test_shuffle_up(self):
        @hsa.jit
        def foo(inp, out):
            gid = hsa.get_global_id(0)
            out[gid] = shuffle_up(inp[gid], 1)

        inp = np.arange(128, dtype=np.intp)
        out = np.zeros_like(inp)
        foo[1, 128](inp, out)

        inp = inp.reshape(2, 64)
        out = out.reshape(inp.shape)

        for i in range(out.shape[0]):
            np.testing.assert_equal(inp[i, :-1], out[i, 1:])
            np.testing.assert_equal(inp[i, -1], out[i, 0])

Example 36

Project: robothon Source File: test_regression.py
Function: test_large_fancy_indexing
    def test_large_fancy_indexing(self, level=rlevel):
        # Large enough to fail on 64-bit.
        nbits = np.dtype(np.intp).itemsize * 8
        thesize = int((2**nbits)**(1.0/5.0)+1)
        def dp():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0,n,size=thesize)
            a[np.ix_(i,i,i,i,i)] = 0
        def dp2():
            n = 3
            a = np.ones((n,)*5)
            i = np.random.randint(0,n,size=thesize)
            g = a[np.ix_(i,i,i,i,i)]
        self.assertRaises(ValueError, dp)
        self.assertRaises(ValueError, dp2)

Example 37

Project: numba Source File: test_scan.py
    def test_shuf_device_inclusive_scan(self):
        @hsa.jit
        def foo(inp, out):
            gid = hsa.get_global_id(0)
            temp = hsa.shared.array(2, dtype=intp)
            out[gid] = shuf_device_inclusive_scan(inp[gid], temp)

        inp = np.arange(128, dtype=np.intp)
        out = np.zeros_like(inp)

        foo[1, inp.size](inp, out)
        np.testing.assert_equal(out, np.cumsum(inp))

Example 38

Project: numba Source File: test_simple.py
    def test_group_id(self):
        @hsa.jit
        def udt(output):
            global_id = hsa.get_global_id(0)
            group_id = hsa.get_group_id(0)
            output[global_id] = group_id + 1

        # Allocate extra space to track bad indexing
        out = np.zeros(100 + 2, dtype=np.intp)
        udt[10, 10](out[1:-1])

        subarr = out[1:-1]

        for i, parted in enumerate(np.split(subarr, 10), start=1):
            np.testing.assert_equal(parted, i)

        self.assertEqual(out[0], 0)
        self.assertEqual(out[-1], 0)

Example 39

Project: numba Source File: test_simple.py
    def test_array_access_2d(self):
        magic_token = 123

        @hsa.jit
        def udt(output):
            for i in range(output.shape[0]):
                for j in range(output.shape[1]):
                    output[i, j] = magic_token

        out = np.zeros((10, 10), dtype=np.intp)
        udt[1, 1](out)
        np.testing.assert_equal(out, magic_token)

Example 40

Project: hedge Source File: partition.py
Function: embeddings
    @memoize_method
    def _embeddings(self):
        result = []
        for part_data, part_discr in zip(self.parts_data, self.parts_discr):
            part_emb = numpy.zeros((len(part_discr),), dtype=numpy.intp)
            result.append(part_emb)

            for g_el, l_el in part_data.global2local_elements.items():
                g_slice = self.whole_discr.find_el_range(g_el)
                part_emb[part_discr.find_el_range(l_el)] = \
                        numpy.arange(g_slice.start, g_slice.stop)
        return result

Example 41

Project: chainer Source File: helper.py
def _make_positive_indices(self, impl, args, kw):
    ks = [k for k, v in kw.items() if v in _unsigned_dtypes]
    for k in ks:
        kw[k] = numpy.intp
    mask = cupy.asnumpy(impl(self, *args, **kw)) >= 0
    return numpy.nonzero(mask)

Example 42

Project: scipy Source File: test_regression.py
def test_ticket_742():
    def SE(img, thresh=.7, size=4):
        mask = img > thresh
        rank = len(mask.shape)
        la, co = ndimage.label(mask,
                               ndimage.generate_binary_structure(rank, rank))
        slices = ndimage.find_objects(la)

    if np.dtype(np.intp) != np.dtype('i'):
        shape = (3,1240,1240)
        a = np.random.rand(np.prod(shape)).reshape(shape)
        # shouldn't crash
        SE(a)

Example 43

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_fast_dict.py
Function: test_int_float_dict_argmin
def test_int_float_dict_argmin():
    # Test the argmin implementation on the IntFloatDict
    keys = np.arange(100, dtype=np.intp)
    values = np.arange(100, dtype=np.float64)
    d = IntFloatDict(keys, values)
    assert_equal(argmin(d), (0, 0))

Example 44

Project: cupy Source File: generate.py
def ix_(*args):
    """Construct an open mesh from multiple sequences.

    This function takes N 1-D sequences and returns N outputs with N
    dimensions each, such that the shape is 1 in all but one dimension
    and the dimension with the non-unit shape value cycles through all
    N dimensions.

    Using `ix_` one can quickly construct index arrays that will index
    the cross product. ``a[cupy.ix_([1,3],[2,5])]`` returns the array
    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.

    Args:
        *args: 1-D sequences

    Returns:
        tuple of ndarrays:
        N arrays with N dimensions each, with N the number of input sequences.
        Together these arrays form an open mesh.

    Examples
    --------
    >>> a = cupy.arange(10).reshape(2, 5)
    >>> a
    array([[0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9]])
    >>> ixgrid = cupy.ix_([0,1], [2,4])
    >>> ixgrid
    (array([[0],
           [1]]), array([[2, 4]]))
     .. seealso:: :func:`numpy.ix_`

    """
    out = []
    nd = len(args)
    for k, new in enumerate(args):
        new = cupy.asarray(new)
        if new.ndim != 1:
            raise ValueError("Cross index must be 1 dimensional")
        if new.size == 0:
            # Explicitly type empty arrays to avoid float default
            new = new.astype(numpy.intp)
        if cupy.issubdtype(new.dtype, cupy.bool_):
            new, = new.nonzero()
        new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1))
        out.append(new)
    return tuple(out)

Example 45

Project: chainer Source File: det.py
def _det_gpu(b):
    # We do a batched LU decomposition on the GPU and compute the
    # determinant by multiplying the diagonal entries.
    # Change the shape of the array to be size=1 minibatch if necessary.
    # Also copy the matrix as the elements will be modified in-place.
    a = matmul._as_batch_mat(b).copy()
    n = a.shape[1]
    n_matrices = len(a)
    # Pivot array
    p = cuda.cupy.zeros((n_matrices, n), dtype='int32')
    # Output array
    # These arrays hold information on the execution success
    # or if the matrix was singular.
    info = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)
    ap = matmul._mat_ptrs(a)
    _, lda = matmul._get_ld(a)
    cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,
                              p.data.ptr, info.data.ptr, n_matrices)
    det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)
    # The determinant is equal to the product of the diagonal entries
    # of `a` where the sign of `a` is flipped depending on whether
    # the pivot array is equal to its index.
    rng = cuda.cupy.arange(1, n + 1, dtype='int32')
    parity = cuda.cupy.sum(p != rng, axis=1) % 2
    sign = 1. - 2. * parity.astype('float32')
    return det * sign, info

Example 46

Project: chainer Source File: generate.py
def ix_(*args):
    """Construct an open mesh from multiple sequences.

    This function takes N 1-D sequences and returns N outputs with N
    dimensions each, such that the shape is 1 in all but one dimension
    and the dimension with the non-unit shape value cycles through all
    N dimensions.

    Using `ix_` one can quickly construct index arrays that will index
    the cross product. ``a[cupy.ix_([1,3],[2,5])]`` returns the array
    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.

    Args:
        *args: 1-D sequences

    Returns:
        tuple of ndarrays:
        N arrays with N dimensions each, with N the number of input sequences.
        Together these arrays form an open mesh.

    Examples
    --------
    >>> a = cupy.arange(10).reshape(2, 5)
    >>> a
    array([[0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9]])
    >>> ixgrid = cupy.ix_([0,1], [2,4])
    >>> ixgrid
    (array([[0],
           [1]]), array([[2, 4]]))

     .. seealso:: :func:`numpy.ix_`

    """
    out = []
    nd = len(args)
    for k, new in enumerate(args):
        new = cupy.asarray(new)
        if new.ndim != 1:
            raise ValueError("Cross index must be 1 dimensional")
        if new.size == 0:
            # Explicitly type empty arrays to avoid float default
            new = new.astype(numpy.intp)
        if cupy.issubdtype(new.dtype, cupy.bool_):
            new, = new.nonzero()
        new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1))
        out.append(new)
    return tuple(out)

Example 47

Project: scikit-image Source File: plot_matching.py
def match_corner(coord, window_ext=5):
    r, c = np.round(coord).astype(np.intp)
    window_orig = img_orig[r-window_ext:r+window_ext+1,
                           c-window_ext:c+window_ext+1, :]

    # weight pixels depending on distance to center pixel
    weights = gaussian_weights(window_ext, 3)
    weights = np.dstack((weights, weights, weights))

    # compute sum of squared differences to all corners in warped image
    SSDs = []
    for cr, cc in coords_warped:
        window_warped = img_warped[cr-window_ext:cr+window_ext+1,
                                   cc-window_ext:cc+window_ext+1, :]
        SSD = np.sum(weights * (window_orig - window_warped)**2)
        SSDs.append(SSD)

    # use corner with minimum SSD as correspondence
    min_idx = np.argmin(SSDs)
    return coords_warped_subpix[min_idx]

Example 48

Project: chainer Source File: n_step_lstm.py
Function: init
    def __init__(self, lst, back_pointer):
        self._value = numpy.array(lst, dtype=numpy.intp)
        # Store back_pointer to prevent the GC removes the original variable
        self._back_pointer = back_pointer

Example 49

Project: chainer Source File: histogram.py
def bincount(x, weights=None, minlength=None):
    """Count number of occurrences of each value in array of non-negative ints.

    Args:
        x (cupy.ndarray): Input array.
        weights (cupy.ndarray): Weights array which has the same shape as
            ``x``.
        minlength (int): A minimum number of bins for the output array.

    Returns:
        cupy.ndarray: The result of binning the input array. The length of
            output is equal to ``max(cupy.max(x) + 1, minlength)``.

    .. seealso:: :func:`numpy.bincount`

    """
    if x.ndim > 1:
        raise ValueError('object too deep for desired array')
    if x.ndim < 1:
        raise ValueError('object of too small depth for desired array')
    if x.dtype.kind == 'f':
        raise TypeError('x must be int array')
    if (x < 0).any():
        raise ValueError('The first argument of bincount must be non-negative')
    if weights is not None and x.shape != weights.shape:
        raise ValueError('The weights and list don\'t have the same length.')
    if minlength is not None:
        minlength = int(minlength)
        if minlength <= 0:
            raise ValueError('minlength must be positive')

    size = int(cupy.max(x)) + 1
    if minlength is not None:
        size = max(size, minlength)

    if weights is None:
        # atomicAdd for int64 is not provided
        b = cupy.zeros((size,), dtype=cupy.int32)
        cupy.ElementwiseKernel(
            'S x', 'raw U bin',
            'atomicAdd(&bin[x], 1)',
            'bincount_kernel'
        )(x, b)
        b = b.astype(numpy.intp)
    else:
        # atomicAdd for float64 is not provided
        b = cupy.zeros((size,), dtype=cupy.float32)
        cupy.ElementwiseKernel(
            'S x, T w', 'raw U bin',
            'atomicAdd(&bin[x], w)',
            'bincount_with_weight_kernel'
        )(x, weights, b)
        b = b.astype(cupy.float64)

    return b
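
A short usage sketch, assuming a CUDA device is available:

import cupy

x = cupy.array([0, 1, 1, 3])
print(bincount(x))               # [1 2 0 1], with an intp-compatible dtype
print(bincount(x, minlength=6))  # [1 2 0 1 0 0]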

Example 50

Project: scikit-learn Source File: hierarchical.py
Function: hc_cut
def _hc_cut(n_clusters, children, n_leaves):
    """Function cutting the ward tree for a given number of clusters.

    Parameters
    ----------
    n_clusters : int or ndarray
        The number of clusters to form.

    children : 2D array, shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_samples + i`

    n_leaves : int
        Number of leaves of the tree.

    Returns
    -------
    labels : array [n_samples]
        cluster labels for each point

    """
    if n_clusters > n_leaves:
        raise ValueError('Cannot extract more clusters than samples: '
                         '%s clusters were given for a tree with %s leaves.'
                         % (n_clusters, n_leaves))
    # In this function, we store nodes as a heap to avoid recomputing
    # the max of the nodes: the first element is always the smallest
    # We use negated indices as heaps work on smallest elements, and we
    # are interested in largest elements
    # children[-1] is the root of the tree
    nodes = [-(max(children[-1]) + 1)]
    for i in range(n_clusters - 1):
        # As we have a heap, nodes[0] is the smallest element
        these_children = children[-nodes[0] - n_leaves]
        # Insert the 2 children and remove the largest node
        heappush(nodes, -these_children[0])
        heappushpop(nodes, -these_children[1])
    label = np.zeros(n_leaves, dtype=np.intp)
    for i, node in enumerate(nodes):
        label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
    return label
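
The negated-index trick in _hc_cut is the standard way of getting max-heap behaviour out of Python's min-heap heapq module; a minimal illustration:

import heapq

nodes = []
for v in [5, 1, 9]:
    heapq.heappush(nodes, -v)  # store negated values
print(-nodes[0])               # peek at the maximum: 9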