numpy.random.randint

Here are examples of the Python API numpy.random.randint taken from open source projects.

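Most of the examples below use one of three call patterns: numpy.random.randint(high) for a single integer in [0, high), numpy.random.randint(low, high) for a single integer in [low, high), and numpy.random.randint(low, high, size=...) for an array of integers (the upper bound is exclusive). A minimal standalone sketch, with the seed set only to make the draws reproducible:

import numpy as np

np.random.seed(0)                            # optional, for reproducible draws
a = np.random.randint(10)                    # one int in [0, 10)
b = np.random.randint(2, 100)                # one int in [2, 100)
c = np.random.randint(0, 256, size=(3, 4))   # 3x4 array of ints in [0, 256)
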
161 Examples

Example 1

Project: ilastik-0.5 Source File: testModule.py
    def mainFunction(self):
        # ...randomly exchange some pixels
        import numpy
        for i in range(10):
            data = self.testProject.threshold_ov._data._data
            idx = tuple(numpy.random.randint(s) for s in data.shape[:5])
            data[idx] = numpy.random.randint(255)
        self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].Connected_Components.setInputData(self.testProject.threshold_ov._data)
        
        self.testThread = TestThread(self.testProject.connectedComponentsMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames)
        QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest)
        self.testThread.start(None) # ...compute connected components without background

Example 2

Project: ilastik-0.5 Source File: labelWidget.py
    def createLabel(self):
        name = "Label " + len(self.items).__str__()
        for index, item in enumerate(self.items):
            if str(item.text()) == name:
                name = name + "-2"
        number = len(self.items)
        if number >= len(self.labelColorTable):
            color = QtGui.QColor.fromRgb(numpy.random.randint(255),numpy.random.randint(255),numpy.random.randint(255))
        else:
            color = self.labelColorTable[number]
        number +=1
        self.addLabel(name, number, color)
        self.buildColorTab()

Example 3

Project: ilastik-0.5 Source File: seedWidget.py
    def createLabel(self):
        name = "Seed " + len(self.items).__str__()
        number = len(self.items)
        if number >= len(self.labelColorTable):
            color = QtGui.QColor.fromRgb(numpy.random.randint(255),numpy.random.randint(255),numpy.random.randint(255))
        else:
            color = self.labelColorTable[number]
        number +=1
        self.addLabel(name, number, color)
        self.buildColorTab()

Example 4

Project: theano-bpr Source File: bpr.py
    def _uniform_user_sampling(self, n_samples):
        """
          Creates `n_samples` random samples from training data for performing Stochastic
          Gradient Descent. We start by uniformly sampling users, 
          and then sample a positive and a negative item for each 
          user sample.
        """
        sys.stderr.write("Generating %s random training samples\n" % str(n_samples))
        sgd_users = numpy.array(list(self._train_users))[numpy.random.randint(len(list(self._train_users)), size=n_samples)]
        sgd_pos_items, sgd_neg_items = [], []
        for sgd_user in sgd_users:
            pos_item = self._train_dict[sgd_user][numpy.random.randint(len(self._train_dict[sgd_user]))]
            sgd_pos_items.append(pos_item)
            neg_item = numpy.random.randint(self._n_items)
            while neg_item in self._train_dict[sgd_user]:
                neg_item = numpy.random.randint(self._n_items)
            sgd_neg_items.append(neg_item)
        return sgd_users, sgd_pos_items, sgd_neg_items

Example 5

Project: statsmodels Source File: mediation.py
Function: fit_model
    def _fit_model(self, model, fit_kwargs, boot=False):
        klass = model.__class__
        init_kwargs = model._get_init_kwds()
        endog = model.endog
        exog = model.exog
        if boot:
            ii = np.random.randint(0, len(endog), len(endog))
            endog = endog[ii]
            exog = exog[ii, :]
        outcome_model = klass(endog, exog, **init_kwargs)
        return outcome_model.fit(**fit_kwargs)

Example 6

Project: robothon Source File: test_numeric.py
Function: test_boolean
    def test_boolean(self):
        a = rand(3,5,8)
        V = rand(5,8)
        g1 = randint(0,5,size=15)
        g2 = randint(0,8,size=15)
        V[g1,g2] = -V[g1,g2]
        assert (array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all()

Example 7

Project: reading-text-in-the-wild Source File: mjsynth_dictnet.py
Function: findclasses
    def findClasses(self):
        assert self.numOfClasses <= 88172 # Length of lexicon.txt
        choice_set = list( set(range(0,88171)).difference(set(self.excluded_examples)) )
        N = len(choice_set)
        while len(self.classes) < self.numOfClasses:
            randomClass = choice_set[np.random.randint(0, N)]
            if randomClass not in self.classes:
                self.classes.append(randomClass.__str__())
                self.class_mapping.append(randomClass)
                 
        assert len(self.classes) == self.numOfClasses

Example 8

Project: diagnose-heart Source File: utils.py
def get_image_pair(X, Xpr,index=-1,shift=0.5):
    mode = 'RGB' if X.shape[1] == 3 else 'L'
    index = np.random.randint(X.shape[0]) if index == -1 else index
    original_image = Image.fromarray(get_picture_array(X, index,shift=shift),mode=mode)
    new_size = (original_image.size[0], original_image.size[1]*2)
    new_im = Image.new(mode, new_size)
    new_im.paste(original_image, (0,0))
    rec_image = Image.fromarray(get_picture_array(Xpr, index,shift=shift),mode=mode)
    new_im.paste(rec_image, (0,original_image.size[1]))
    return new_im

Example 9

Project: elephas Source File: hyperparam.py
    def minimize(self, dummy_iterator):
        trials = Trials()
        algo = rand.suggest

        elem = dummy_iterator.next()
        import random
        random.seed(elem)
        rand_seed = np.random.randint(elem)

        best_run = base_minimizer(model=None, data=None, algo=algo, max_evals=self.max_evals,
                                  trials=trials, full_model_string=self.model_string, rseed=rand_seed)
        yield trials

Example 10

Project: vmfactory Source File: Vmaze.py
    def mutate_colors(self, proba_change):
        """ Randomly changes some colors of the maze, each color
        light having a probability proba_change to be changed. """

        changes = np.random.randint(0, 2, size=len(self.colors))
        randoms = np.random.uniform(size= len(self.colors))
        indices = randoms < proba_change
        new_colors = (self.colors + indices*changes) % 3
        self.colorize( new_colors )

Example 11

Project: blaze Source File: test_postgresql_compute.py
@pytest.yield_fixture
def main(url):
    try:
        main = odo([(i, int(np.random.randint(10))) for i in range(13)],
                   url % 'main',
                   dshape=dshape('var * {id: int64, data: int64}'),
                   primary_key=['id'])
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield main
        finally:
            drop(main)

Example 12

Project: cesiumpy Source File: test_provider.py
    def test_SingleTimeImageryProvider_tempfile(self):
        tm._skip_if_no_matplotlib()

        import numpy as np
        import matplotlib.pyplot as plt

        img = np.random.randint(0, 255, (100, 100, 3))
        ax = plt.imshow(img)
        img = cesiumpy.entities.material.TemporaryImage(ax.figure)
        m = cesiumpy.SingleTileImageryProvider(img, rectangle=(-120.0, 40.0, -100, 60))
        self.assertTrue(re.match("""new Cesium\\.SingleTileImageryProvider\\(\\{url : "\w+\\.png", rectangle : Cesium\\.Rectangle\\.fromDegrees\\(-120\\.0, 40\\.0, -100\\.0, 60\\.0\\)\\}\\)""", m.script))
        plt.close()

Example 13

Project: SciDB-Py Source File: __init__.py
def chunk_fuzz(func):

    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        persistent = result.persistent
        result.persistent = False
        chunks = randint(100, 300, (result.ndim,))
        chunk_overlap = randint(0, 3, (result.ndim,))
        result = rechunk(result, chunk_size=chunks, chunk_overlap=chunk_overlap)
        result.eval()
        result.persistent = persistent
        return result

    return wrapper

Example 14

Project: noisyopt Source File: main.py
Function: n
    @N.setter
    def N(self, value):
        N = int(value)
        if self.paired and (N > self._N):
            Nadd = N - self._N
            self.seeds.extend(list(np.random.randint(0, self.uint32max, size=Nadd)))
        self._N = N

Example 15

Project: Haystack Source File: haystack_motifs_CORE.py
def get_random_coordinates(coords,genome):
    random_coords=[]
    for c in coords:
        random_bpstart=np.random.randint(1,genome.chr_len[c.chr_id]-len(c)+1)
        random_coords.append(Coordinate(c.chr_id,random_bpstart,random_bpstart+len(c)-1))
    return random_coords

Example 16

Project: mrec Source File: testing.py
def get_random_coo_matrix(rows=3,cols=10,nnz=20):
    row_col = random.sample(xrange(rows*cols),nnz)  # ensure <row,col> are unique
    row = [i // cols for i in row_col]
    col = [i % cols for i in row_col]
    data = np.random.randint(0,nnz*5,nnz)
    return coo_matrix((data,(row,col)),shape=(rows,cols))

Example 17

Project: kaggle-ndsb Source File: load.py
    def create_random_gen(self):
        def random_gen():
            for i in range(self.num_chunks_train):
                indices = np.random.randint(self.y_train.shape[0], size=self.chunk_size)
                yield [self.y_train[indices], self.info_train[indices]], self.labels_train[indices]

        return buffering.buffered_gen_threaded(random_gen())

Example 18

Project: nideep Source File: test_accuracy.py
    def test_compare_to_sklearn(self):
        """
        Comparison using calculations by hand
        """
        for n in range(2, 100):
            # Multi-label classification notation with binary label indicators
            y_true = np.random.randint(2, size=(n, n))
            y_pred = np.random.randint(2, size=(n, n))
            # Hamming accuracy by hand
            hamming_loss = 0.
            for i in range(n):
                for j in range(n):
                    hamming_loss += (y_true[i, j] != y_pred[i, j])
            # Compare both
            acc_sklearn = 1. - hamming_loss / n ** 2
            acc_hamming = accuracy.hamming_accuracy(y_true, y_pred)
            assert_equals(acc_hamming, acc_sklearn)

Example 19

Project: odl Source File: fn_base_slow_test.py
Function: test_get_item
def test_getitem(fn):
    indices = np.random.randint(0, fn.size - 1, 5)

    x0 = np.arange(fn.size)
    x = fn.element(x0)

    for index in indices:
        assert x[index] == index

Example 20

Project: pajbot Source File: bot.py
Function: quit
    def quit(self, message, event, **options):
        quit_chub = self.config['main'].get('control_hub', None)
        quit_delay = 0

        if quit_chub is not None and event.target == ('#{}'.format(quit_chub)):
            quit_delay_random = 300
            try:
                if message is not None and int(message.split()[0]) >= 1:
                    quit_delay_random = int(message.split()[0])
            except (IndexError, ValueError, TypeError):
                pass
            quit_delay = random.randint(0, quit_delay_random)
            log.info('{} is restarting in {} seconds.'.format(self.nickname, quit_delay))

        self.execute_delayed(quit_delay, self.quit_bot)

Example 21

Project: cvxpy Source File: inpainting.py
def drawRandLine(draw,width):
    x = [np.random.randint(0,im.size[0]) for i in range(2)]
    y = [np.random.randint(0,im.size[1]) for i in range(2)]
    xy = zip(x,y)
    #fill gives the color
    draw.line(xy,fill=255,width=width)

Example 22

Project: pgmpy Source File: test_NaiveBayes.py
    def test_fit_model_creation_exception(self):
        values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
                              columns=['A', 'B', 'C', 'D', 'E'])
        values2 = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 3)),
                               columns=['C', 'D', 'E'])

        self.assertRaises(ValueError, self.model1.fit, values)
        self.assertRaises(ValueError, self.model1.fit, values2)
        self.assertRaises(ValueError, self.model2.fit, values2, 'A')

Example 23

Project: spark-sklearn Source File: test_keyed_models.py
    def test_diff_type_input(self):
        # Integer array
        minExamples = 1
        featureGen = lambda: np.random.randint(low=0, high=10, size=KeyedModelTests.NDIM)
        labelGen = lambda: np.random.random()
        self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
                                       sklearnEstimator=LinearRegression(), yCol="y")

        # float input
        featureGen = lambda: np.random.random()
        self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
                                       sklearnEstimator=LinearRegression(), yCol="y")

        # integer input
        featureGen = lambda: np.random.randint(100)
        self.checkKeyedModelEquivalent(minExamples, featureGen, labelGen,
                                       sklearnEstimator=LinearRegression(), yCol="y")

Example 24

Project: scikit-image Source File: test_corner.py
Function: test_num_peaks
def test_num_peaks():
    """For a bunch of different values of num_peaks, check that
    peak_local_max returns exactly the right amount of peaks. Test
    is run on the astronaut image in order to produce a sufficient number of corners"""

    img_corners = corner_harris(rgb2gray(data.astronaut()))

    for i in range(20):
        n = np.random.randint(1, 21)
        results = peak_local_max(img_corners,
                                 min_distance=10, threshold_rel=0, num_peaks=n)
        assert (results.shape[0] == n)

Example 25

Project: deepgraph Source File: test_DeepGraph.py
    def test_random(self):

        from deepgraph.deepgraph import _triu_indices

        N = np.random.randint(900, 1100)
        n = N*(N-1)/2
        start = np.random.randint(0, n)
        end = np.random.randint(start, n)

        indices_true = np.triu_indices(N, k=1)
        sources_true = indices_true[0][start:end]
        targets_true = indices_true[1][start:end]

        indices_test = _triu_indices(N, start, end)
        sources_test = indices_test[0]
        targets_test = indices_test[1]

        assert ((sources_true == sources_test).all() and
                (targets_true == targets_test).all())

Example 26

Project: treeano Source File: utils_test.py
def test_is_int_ndarray():
    assert not treeano.utils.is_int_ndarray(3)
    assert not treeano.utils.is_int_ndarray([True])
    assert not treeano.utils.is_int_ndarray([42])
    assert not treeano.utils.is_int_ndarray(42.0)
    for x in [T.iscalar(), T.lscalar()]:
        assert treeano.utils.is_int_ndarray(x.eval({x: 42}))
    assert treeano.utils.is_int_ndarray(np.random.randint(42, size=(4, 5)))
    assert treeano.utils.is_int_ndarray(
        np.random.randint(42, size=(4, 5)).astype(np.int32))

Example 27

Project: dipy Source File: shm.py
def bootstrap_data_voxel(data, H, R, permute=None):
    """Like bootstrap_data_array but faster when for a single voxel

    data must be 1d and normalized
    """
    if permute is None:
        permute = randint(data.shape[-1], size=data.shape[-1])
    r = dot(data, R.T)
    boot_data = dot(data, H.T)
    boot_data += r[permute]
    return boot_data

Example 28

Project: scikit-learn Source File: test_random_projection.py
def test_input_size_jl_min_dim():
    assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                  3 * [100], 2 * [0.9])

    assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
                  2 * [0.9])

    johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
                                  0.5 * np.ones((10, 10)))

Example 29

Project: hessianfree Source File: test_gpu.py
def test_sum_cols(dtype):
    for _ in range(100):
        N = 200
        a = np.random.randn(np.random.randint(1, N),
                            np.random.randint(1, N)).astype(dtype)
        a_gpu = gpuarray.to_gpu(a)

        out = hf.gpu.sum_cols(a_gpu).get()

        assert np.allclose(out, np.sum(a, axis=0), atol=1e-5)

Example 30

Project: pyexperiment Source File: test_state.py
    def test_loading_numpy_performance(self):
        """Test loading a numpy array and make sure it's reasonably fast
        """
        random = np.array(
            np.random.randint(0, 255, 1024*1024))
        state['random'] = random
        with tempfile.NamedTemporaryFile() as temp:
            state.save(temp.name)
            state.reset_instance()
            self.assertNotIn('random', state)
            tic = datetime.now()
            state.load(temp.name)
            toc = datetime.now()
            self.assertTrue((toc - tic).total_seconds() < 0.5)

Example 31

Project: pims Source File: test_multidimensional.py
    def test_frame_no(self):
        self.v.iter_axes = 't'
        for i in np.random.randint(0, 100, 10):
            assert_equal(self.v[i].frame_no, i)
        self.v.iter_axes = 'zc'
        for i in np.random.randint(0, 3*20, 10):
            assert_equal(self.v[i].frame_no, i)

Example 32

Project: PyClassLessons Source File: train_and_export.py
def draw_random_misclassification(truth_array, prediction, test_label, test_data):
    """
    Prints the prediction, label and digit for a random misclassified sample
    """
    incorrect_idx = [idx for idx, is_true in enumerate(truth_array) if not is_true]
    n = incorrect_idx[np.random.randint(0, len(incorrect_idx))]
    print "predicted [%s]\nlabeled [%s]\nraw data:\n%s" % (prediction[n].argmax(), test_label[n], MNIST.display(test_data[n]))

Example 33

Project: data-analysis Source File: pi_estimation.py
def probability_calculation(item):
    """Read a file and return a sequence of (word, occurances) values.
    """

    print multiprocessing.current_process().name, 'calculating', item
    output = []
    IN_CIRCLE = 0
    for i in range(int(NBR_PER_WORKER)):
        x = numpy.random.randint(0, RADIUQ)
        y = numpy.random.randint(0, RADIUQ)
        if (numpy.sqrt(x ** 2 + y ** 2) < RADIUQ):
            IN_CIRCLE += 1
    output.append(('pi', IN_CIRCLE))
    return output

Example 34

Project: scikit-beam Source File: test_correlation.py
Function: set_up
def setup():
    global num_levels, num_bufs, xdim, ydim, stack_size, img_stack, rois
    num_levels = 6
    num_bufs = 4  # must be even
    xdim = 256
    ydim = 512
    stack_size = 100
    img_stack = np.random.randint(1, 3, (stack_size, xdim, ydim))
    rois = np.zeros_like(img_stack[0])
    # make sure that the ROIs can be any integers greater than 1.
    # They do not have to start at 1 and be continuous
    rois[0:xdim//10, 0:ydim//10] = 5
    rois[xdim//10:xdim//5, ydim//10:ydim//5] = 3

Example 35

Project: pele Source File: oxdna.py
def choose_bond(N, P_mid=0.):
    mid = 0.5 * float(N) - 0.5

    while True:
        i = np.random.randint(N)
        dist = float(min(i, N - i - 1))
        if (1. - P_mid) * dist / mid < np.random.random():
            return i

Example 36

Project: fatiando Source File: test_random.py
def test_gridder_scatter_seed():
    "gridder.scatter returns same sequence using same random seed"
    area = [0, 1000, 0, 1000]
    size = 1000
    for seed in numpy.random.randint(low=0, high=10000, size=20):
        x1, y1 = gridder.scatter(area, size, seed=seed)
        x2, y2 = gridder.scatter(area, size, seed=seed)
        assert numpy.all(x1 == x2) and numpy.all(y1 == y2)

Example 37

Project: volumina Source File: labeled3d.py
def sliceImg(width, height, axisLabels, perpAxisLabel, perpAxisValue):
    print perpAxisLabel, perpAxisValue
    img = QImage(width, height, QImage.Format_ARGB32)
    img.fill(0)

    p = QPainter(img)
    p.setPen(QColor(255,255,255))
    p.setBrush(QBrush(QColor(255,255,255)))
    def arrow(p, From, To, label):
        p.drawLine(From, To)
        p.drawText(To, label)

    offset = 10
    arrow(p, QPoint(offset, offset), QPoint(offset, height-offset), axisLabels[1])
    arrow(p, QPoint(offset, offset), QPoint(width-offset,  offset), axisLabels[0])
    p.drawText(2*offset, 2*offset, "%s=%d" % (perpAxisLabel, perpAxisValue))
    fm = p.fontMetrics()
    size = fm.size(Qt.TextSingleLine, "updown")

    p.drawText(numpy.random.randint(offset, width-offset-size.width()), numpy.random.randint(offset, height-offset-size.height()), "updown")

    dots = []
    numPixels = 0
    while numPixels < 30:
        r = numpy.random.randint(1, 255)
        rx, ry = numpy.random.randint(offset, width-offset), numpy.random.randint(offset, height-offset)
        if img.pixel(rx,ry) != 0:
            continue
        p.setPen(QPen(QColor(r,r,r)))
        p.drawPoint(rx, ry)
        dots.append(((rx,ry), r))
        numPixels += 1

    p.end()
    
    
    img.save('test.png')

    a = qimage2ndarray.rgb_view(img)
    a = a[:,:,0].squeeze().swapaxes(0,1)

    for (rx,ry), r in dots:
        assert QColor.fromRgba(img.pixel(rx,ry)).red() == r, "QColor.fromRgba(img.pixel(rx,ry)).red() == %d != %d" % (QColor.fromRgba(img.pixel(rx,ry)).red(), r)
        assert(a[rx,ry] == r), "a[%d,%d] == %d != %d)" % (rx, ry, a[rx,ry], r)
    return (a, dots)

Example 38

Project: NEXT Source File: CrowdKernel.py
Function: get_query
  def getQuery(self,butler):
    R = 10
    n = butler.algorithms.get(key='n')
    num_reported_answers = butler.algorithms.get(key='num_reported_answers')

    if num_reported_answers == None:
      num_reported_answers = 0

    if num_reported_answers < R*n:
      a = num_reported_answers/R
      b = numpy.random.randint(n)
      while b==a:
        b = numpy.random.randint(n)
      c = numpy.random.randint(n)
      while c==a or c==b:
        c = numpy.random.randint(n)
      return [a, b, c]

    X = numpy.array(butler.algorithms.get(key='X'))
    tau = numpy.array(butler.algorithms.get(key='tau'))

    # set maximum time allowed to search for a query
    t_max = .05
    best_q, best_score = utilsCrowdKernel.getRandomQuery(X)
    t_start = time.time()
    best_entropy = -1*float('inf')

    while time.time()-t_start<t_max:
      q,score = utilsCrowdKernel.getRandomQuery(X)
      b,c,a = q
      p = 0
      for i in range(n):
        p += utilsCrowdKernel.getCrowdKernelTripletProbability(X[b],X[c],X[i]) * tau[a,i]

      taub = list(tau[a])
      for i in range(n):
        taub[i] = taub[i] * utilsCrowdKernel.getCrowdKernelTripletProbability(X[b],X[c],X[i])
      taub = taub/sum(taub)

      tauc = list(tau[a])
      for i in range(n):
        tauc[i] = tauc[i] * utilsCrowdKernel.getCrowdKernelTripletProbability(X[c],X[b],X[i])
      tauc = tauc/sum(tauc)

      entropy  = -p*utilsCrowdKernel.getEntropy(taub)-(1-p)*utilsCrowdKernel.getEntropy(tauc)

      if entropy > best_entropy:
        best_q = q
        best_entropy = entropy
    index_center = best_q[2]
    index_left = best_q[0]
    index_right = best_q[1]
    return [index_center, index_left, index_right]

Example 39

Project: NEXT Source File: STE.py
  def getQuery(self,butler):
    R = 10
    n = butler.algorithms.get(key='n')
    num_reported_answers = butler.algorithms.get(key='num_reported_answers')

    if num_reported_answers == None:
      num_reported_answers = 0
      butler.algorithms.set(key='num_reported_answers', value=0)

    if num_reported_answers < R*n:
      r = random.Random()
      r.seed(42)
      idxs = np.arange(n).repeat(R).tolist()
      r.shuffle(idxs)
      a = idxs[num_reported_answers]

      b = numpy.random.randint(n)
      while b==a:
        b = numpy.random.randint(n)
      c = numpy.random.randint(n)
      while c==a or c==b:
        c = numpy.random.randint(n)
      return [a, b, c]

    X = numpy.array(butler.algorithms.get(key='X'))
    tau = numpy.array(butler.algorithms.get(key='tau'))


    # set maximum time allowed to search for a query
    t_max = .05
    best_q, best_score = utilsSTE.getRandomQuery(X)
    t_start = time.time()
    best_entropy = -1*float('inf')

    while time.time()-t_start<t_max:
      q,score = utilsSTE.getRandomQuery(X)
      b,c,a = q
      p = 0
      for i in range(n):
        p += utilsSTE.getSTETripletProbability(X[b],X[c],X[i]) * tau[a,i]

      taub = list(tau[a])
      for i in range(n):
        taub[i] = taub[i] * utilsSTE.getSTETripletProbability(X[b],X[c],X[i])
      taub = taub/sum(taub)

      tauc = list(tau[a])
      for i in range(n):
        tauc[i] = tauc[i] * utilsSTE.getSTETripletProbability(X[c],X[b],X[i])
      tauc = tauc/sum(tauc)

      entropy  = -p*utilsSTE.getEntropy(taub)-(1-p)*utilsSTE.getEntropy(tauc)

      if entropy > best_entropy:
        best_q = q
        best_entropy = entropy
    index_center = best_q[2]
    index_left = best_q[0]
    index_right = best_q[1]

    return [index_center,index_left,index_right]

Example 40

Project: nupic Source File: categories_test.py
def simulateCategories(numSamples=100, numDimensions=500):
  """Simulate running KNN classifier on many disjoint categories"""

  failures = ""
  LOGGER.info("Testing the sparse KNN Classifier on many disjoint categories")
  knn = KNNClassifier(k=1, distanceNorm=1.0, useSparseMemory=True)

  for i in range(0, numSamples):

    # select category randomly and generate vector
    c = 2*numpy.random.randint(0, 50) + 50
    v = createPattern(c, numDimensions)
    knn.learn(v, c)

  # Go through each category and ensure we have at least one from each!
  for i in range(0, 50):
    c = 2*i+50
    v = createPattern(c, numDimensions)
    knn.learn(v, c)

  errors = 0
  for i in range(0, numSamples):

    # select category randomly and generate vector
    c = 2*numpy.random.randint(0, 50) + 50
    v = createPattern(c, numDimensions)

    inferCat, _kir, _kd, _kcd = knn.infer(v)
    if inferCat != c:
      LOGGER.info("Mistake with %s %s %s %s %s", v[v.nonzero()], \
        "mapped to category", inferCat, "instead of category", c)
      LOGGER.info("   %s", v.nonzero())
      errors += 1
  if errors != 0:
    failures += "Failure in handling non-consecutive category indices\n"

  # Test closest methods
  errors = 0
  for i in range(0, 10):

    # select category randomly and generate vector
    c = 2*numpy.random.randint(0, 50) + 50
    v = createPattern(c, numDimensions)

    p = knn.closestTrainingPattern(v, c)
    if not (c in p.nonzero()[0]):
      LOGGER.info("Mistake %s %s", p.nonzero(), v.nonzero())
      LOGGER.info("%s %s", p[p.nonzero()], v[v.nonzero()])
      errors += 1

  if errors != 0:
    failures += "Failure in closestTrainingPattern method\n"

  return failures, knn

Example 41

Project: WASP Source File: sim_pe_reads.py
def gen_read_coords(options, haps, het_only=True):
    """generate coordinates for a read pair that overlaps a SNP"""
    # to make this more efficient, could observe random vars for many
    # reads at once, rather than a single read at a time

    if het_only:
        # select a heterozygous site to overlap
        is_het = haps.hap1 != haps.hap2
        i = numpy.random.randint(np.sum(is_het))
        snp_pos = haps.pos[is_het][i]

        
        sys.stderr.write("selected SNP %d %s/%s\n" % 
                         (snp_pos, chr(haps.ref_allele[i]), chr(haps.alt_allele[i])))

        
    else:
        # select any SNP to overlap
        i = numpy.random.randint(haps.pos.shape[0])
        snp_pos = haps.pos[i]


    # at what read position should het site be?
    snp_read_pos = numpy.random.randint(options.read_len)

    # what should insert size be?
    insert_size = numpy.random.normal(options.insert_size_mean,
                                      options.insert_size_sd)
    insert_size = int(np.rint(insert_size))

    # insert size cannot be smaller than read size...
    insert_size = max(options.read_len, insert_size)
    
    # does left or right read overlap SNP?
    if numpy.random.randint(2) == 0:
        # left read overlaps SNP
        left_start = snp_pos - snp_read_pos + 1
        left_end = left_start + options.read_len - 1
        right_end = left_start + insert_size - 1
        right_start = right_end - options.read_len + 1
    else:
        # right read overlaps SNP
        right_start = snp_pos - snp_read_pos + 1
        right_end = right_start + options.read_len - 1
        left_start = right_end - insert_size + 1
        left_end = left_start + options.read_len - 1

    read_coord = ReadCoord(options.chrom, left_start, left_end, right_start, right_end)

    return read_coord

Example 42

Project: theanolm Source File: network.py
    def __init__(self, vocabulary, architecture, mode=None, profile=False):
        """Initializes the neural network parameters for all layers, and
        creates Theano shared variables from them.

        :type vocabulary: Vocabulary
        :param vocabulary: mapping between word IDs and word classes

        :type architecture: Architecture
        :param architecture: an object that describes the network architecture

        :type mode: Network.Mode
        :param mode: selects mini-batch or single time step processing

        :type profile: bool
        :param profile: if set to True, creates a Theano profile object
        """

        self.vocabulary = vocabulary
        self.architecture = architecture
        self.mode = self.Mode() if mode is None else mode

        M1 = 2147483647
        M2 = 2147462579
        random_seed = [
            numpy.random.randint(0, M1),
            numpy.random.randint(0, M1),
            numpy.random.randint(1, M1),
            numpy.random.randint(0, M2),
            numpy.random.randint(0, M2),
            numpy.random.randint(1, M2)]
        self.random = RandomStreams(random_seed)

        # Word and class inputs will be available to NetworkInput layers.
        self.input_word_ids = tensor.matrix('network/input_word_ids', dtype='int64')
        self.input_class_ids = tensor.matrix('network/input_class_ids', dtype='int64')
        if self.mode.minibatch:
            self.input_word_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_words())
            self.input_class_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_classes())
        else:
            self.input_word_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_words())
            self.input_class_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_classes())

        # Recurrent layers will create these lists, used to initialize state
        # variables of appropriate sizes, for doing forward passes one step at a
        # time.
        self.recurrent_state_input = []
        self.recurrent_state_size = []

        # Create the layers.
        logging.debug("Creating layers.")
        self.layers = OrderedDict()
        for input_options in architecture.inputs:
            input = NetworkInput(input_options, self)
            self.layers[input.name] = input
        for layer_description in architecture.layers:
            layer_options = self._layer_options_from_description(
                layer_description)
            if layer_options['name'] == architecture.output_layer:
                layer_options['size'] = vocabulary.num_classes()
            layer = create_layer(layer_options, self, profile=profile)
            self.layers[layer.name] = layer
        self.output_layer = self.layers[architecture.output_layer]

        # This list will be filled by the recurrent layers to contain the
        # recurrent state outputs, for doing forward passes one step at a time.
        self.recurrent_state_output = [None] * len(self.recurrent_state_size)

        # This input variable can be used to specify the classes whose
        # probabilities will be computed, instead of the whole distribution.
        self.target_class_ids = tensor.matrix('network/target_class_ids',
                                              dtype='int64')
        if self.mode.minibatch:
            self.target_class_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_classes())
        else:
            self.target_class_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_classes())

        # This input variable is used only for detecting <unk> target words.
        self.target_word_ids = tensor.matrix('network/target_word_ids',
                                             dtype='int64')
        if self.mode.minibatch:
            self.target_word_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_words())
        else:
            self.target_word_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_words())

        # Create initial parameter values.
        logging.debug("Initializing parameters.")
        self.param_init_values = OrderedDict()
        num_params = 0
        for layer in self.layers.values():
            for name, value in layer.param_init_values.items():
                logging.debug("- %s size=%d", name, value.size)
                num_params += value.size
            self.param_init_values.update(layer.param_init_values)
        logging.debug("Total number of parameters: %d", num_params)

        # Create Theano shared variables.
        self.params = {name: theano.shared(value, name)
                       for name, value in self.param_init_values.items()}
        for layer in self.layers.values():
            layer.set_params(self.params)

        # mask is used to mask out the rest of the input matrix, when a sequence
        # is shorter than the maximum sequence length. The mask is kept as int8
        # data type, which is how Tensor stores booleans.
        if self.mode.minibatch:
            self.mask = tensor.matrix('network/mask', dtype='int8')
            self.mask.tag.test_value = test_value(
                size=(100, 16),
                max_value=True)
        else:
            self.mask = tensor.ones(self.input_word_ids.shape, dtype='int8')

        # Dropout layer needs to know whether we are training or evaluating.
        self.is_training = tensor.scalar('network/is_training', dtype='int8')
        self.is_training.tag.test_value = 1

        # Softmax layer needs to know how many noise words to sample for noise-
        # contrastive estimation.
        self.num_noise_samples = tensor.scalar('network/num_noise_samples',
                                               dtype='int64')
        self.num_noise_samples.tag.test_value = 100

        for layer in self.layers.values():
            layer.create_structure()

Example 43

Project: category2vec Source File: word2vec.py
Function: train_sentence_sg
        def train_sentence_sg(model, sentence, alpha, work=None):
            """
            Update skip-gram model by training on a single sentence.

            The sentence is a list of Vocab objects (or None, where the corresponding
            word is not in the vocabulary). Called internally from `Word2Vec.train()`.

            This is the non-optimized, Python version. If you have cython installed, gensim
            will use the optimized version from word2vec_inner instead.

            """
            if model.negative:
                # precompute negative labels
                labels = zeros(model.negative + 1)
                labels[0] = 1.0

            for pos, word in enumerate(sentence):
                if word is None:
                    continue  # OOV word in the input sentence => skip
                reduced_window = random.randint(model.window)  # `b` in the original word2vec code

                # now go over all words from the (reduced) window, predicting each one in turn
                start = max(0, pos - model.window + reduced_window)
                for pos2, word2 in enumerate(sentence[start : pos + model.window + 1 - reduced_window], start):
                    # don't train on OOV words and on the `word` itself
                    if word2 and not (pos2 == pos):
                        l1 = model.syn0[word2.index]
                        neu1e = zeros(l1.shape)

                        if model.hs:
                            # work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
                            l2a = deepcopy(model.syn1[word.point])  # 2d matrix, codelen x layer1_size
                            fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T)))  #  propagate hidden -> output
                            ga = (1 - word.code - fa) * alpha  # vector of error gradients multiplied by the learning rate
                            model.syn1[word.point] += outer(ga, l1)  # learn hidden -> output
                            neu1e += dot(ga, l2a) # save error (for this reason l2a is deepcopied)

                        if model.negative:
                            # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
                            word_indices = [word.index]
                            while len(word_indices) < model.negative + 1:
                                w = model.table[random.randint(model.table.shape[0])]
                                if w != word.index:
                                    word_indices.append(w)
                            l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
                            fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
                            gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
                            model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
                            neu1e += dot(gb, l2b) # save error

                        model.syn0[word2.index] += neu1e  # learn input -> hidden

            return len([word for word in sentence if word is not None])

Example 44

Project: RoBO Source File: random_search.py
Function: init
    def __init__(self, task=None, save_dir=None, num_save=1, rng=None):
        """
        Random Search [1] that simply evaluates random points. We do not have
        any priors thus we sample points uniformly at random.

        [1] J. Bergstra and Y. Bengio.
            Random search for hyper-parameter optimization.
            JMLR, 2012.

        Parameters
        ----------
        task: TaskObject
            Task object that contains the objective function and additional
            meta information such as the lower and upper bound of the search
            space.
        num_save: int
            Defines after how many iteration the output is saved.
        save_dir: String
            Output path
        rng: numpy.random.RandomState

        """

        if rng is None:
            self.rng = np.random.RandomState(np.random.randint(0, 10000))
        else:
            self.rng = rng

        self.task = task
        self.save_dir = save_dir

        self.X = None
        self.Y = None

        self.estimator = BestObservation(self,
                                         self.task.X_lower,
                                         self.task.X_upper)
        self.time_func_eval = []
        self.time_overhead = []

        self.num_save = num_save

        self.model_untrained = True

        self.incuembent = None
        self.incuembents = []
        self.incuembent_values = []
        self.runtime = []
        if self.save_dir is not None:
            self.create_save_dir()

Example 45

Project: category2vec Source File: word2vec.py
        def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
            """
            Update CBOW model by training on a single sentence.

            The sentence is a list of Vocab objects (or None, where the corresponding
            word is not in the vocabulary). Called internally from `Word2Vec.train()`.

            This is the non-optimized, Python version. If you have cython installed, gensim
            will use the optimized version from word2vec_inner instead.

            """
            if model.negative:
                # precompute negative labels
                labels = zeros(model.negative + 1)
                labels[0] = 1.

            for pos, word in enumerate(sentence):
                if word is None:
                    continue  # OOV word in the input sentence => skip
                reduced_window = random.randint(model.window) # `b` in the original word2vec code
                start = max(0, pos - model.window + reduced_window)
                window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)
                word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
                l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
                if word2_indices and model.cbow_mean:
                    l1 /= len(word2_indices)
                neu1e = zeros(l1.shape)

                if model.hs:
                    l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
                    fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
                    ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
                    model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
                    neu1e += dot(ga, l2a) # save error

                if model.negative:
                    # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
                    word_indices = [word.index]
                    while len(word_indices) < model.negative + 1:
                        w = model.table[random.randint(model.table.shape[0])]
                        if w != word.index:
                            word_indices.append(w)
                    l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
                    fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
                    gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
                    model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
                    neu1e += dot(gb, l2b) # save error

                model.syn0[word2_indices] += neu1e # learn input -> hidden, here for all words in the window separately

            return len([word for word in sentence if word is not None])

Example 46

Project: fast-rcnn Source File: minibatch.py
def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it."""
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                                    size=num_images)
    assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
        'num_images ({}) must divide BATCH_SIZE ({})'. \
        format(num_images, cfg.TRAIN.BATCH_SIZE)
    rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
    fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)

    # Get the input image blob, formatted for caffe
    im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

    # Now, build the region of interest and label blobs
    rois_blob = np.zeros((0, 5), dtype=np.float32)
    labels_blob = np.zeros((0), dtype=np.float32)
    bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
    bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
    # all_overlaps = []
    for im_i in xrange(num_images):
        labels, overlaps, im_rois, bbox_targets, bbox_loss \
            = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
                           num_classes)

        # Add to RoIs blob
        rois = _project_im_rois(im_rois, im_scales[im_i])
        batch_ind = im_i * np.ones((rois.shape[0], 1))
        rois_blob_this_image = np.hstack((batch_ind, rois))
        rois_blob = np.vstack((rois_blob, rois_blob_this_image))

        # Add to labels, bbox targets, and bbox loss blobs
        labels_blob = np.hstack((labels_blob, labels))
        bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
        bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
        # all_overlaps = np.hstack((all_overlaps, overlaps))

    # For debug visualizations
    # _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)

    blobs = {'data': im_blob,
             'rois': rois_blob,
             'labels': labels_blob}

    if cfg.TRAIN.BBOX_REG:
        blobs['bbox_targets'] = bbox_targets_blob
        blobs['bbox_loss_weights'] = bbox_loss_blob

    return blobs

Example 47

Project: tpot Source File: base.py
Function: generate
    def _generate(self, pset, min_, max_, condition, type_=None):
        """Generate a Tree as a list of list. The tree is build from the root to
        the leaves, and it stop growing when the condition is fulfilled.

        Parameters
        ----------
        pset: PrimitiveSetTyped
            Primitive set from which primitives are selected.
        min_: int
            Minimum height of the produced trees.
        max_: int
            Maximum Height of the produced trees.
        condition: function
            The condition is a function that takes two arguments,
            the height of the tree to build and the current
            depth in the tree.
        type_: class
            The type that should return the tree when called, when
            :obj:`None` (default) no return type is enforced.

        Returns
        -------
        individual: list
            A grown tree with leaves at possibly different depths
            dependending on the condition function.
        """
        if type_ is None:
            type_ = pset.ret
        expr = []
        height = np.random.randint(min_, max_)
        stack = [(0, type_)]
        while len(stack) != 0:
            depth, type_ = stack.pop()

            # We've added a type_ parameter to the condition function
            if condition(height, depth, type_):
                try:
                    term = np.random.choice(pset.terminals[type_])
                except IndexError:
                    _, _, traceback = sys.exc_info()
                    raise IndexError("The gp.generate function tried to add "
                                      "a terminal of type '%s', but there is "
                                      "none available." % (type_,)).\
                                      with_traceback(traceback)
                if inspect.isclass(term):
                    term = term()
                expr.append(term)
            else:
                try:
                    prim = np.random.choice(pset.primitives[type_])
                except IndexError:
                    _, _, traceback = sys.exc_info()
                    raise IndexError("The gp.generate function tried to add "
                                      "a primitive of type '%s', but there is "
                                      "none available." % (type_,)).\
                                      with_traceback(traceback)
                expr.append(prim)
                for arg in reversed(prim.args):
                    stack.append((depth+1, arg))

        return expr

Example 48

Project: pymc3 Source File: sampling.py
def sample_ppc(trace, samples=None, model=None, vars=None, size=None, random_seed=None, progressbar=True):
    """Generate posterior predictive samples from a model given a trace.

    Parameters
    ----------
    trace : backend, list, or MultiTrace
        Trace generated from MCMC sampling
    samples : int
        Number of posterior predictive samples to generate. Defaults to the
        length of `trace`
    model : Model (optional if in `with` context)
        Model used to generate `trace`
    vars : iterable
        Variables for which to compute the posterior predictive samples.
        Defaults to `model.observed_RVs`.
    size : int
        The number of random draws from the distribution specified by the
        parameters in each sample of the trace.

    Returns
    -------
    Dictionary keyed by `vars`, where the values are the corresponding
    posterior predictive samples.
    """
    if samples is None:
        samples = len(trace)

    if model is None:
        model = modelcontext(model)

    if vars is None:
        vars = model.observed_RVs

    seed(random_seed)

    if progressbar:
        indices = tqdm(randint(0, len(trace), samples), total=samples)
    else:
        indices = randint(0, len(trace), samples)

    ppc = defaultdict(list)
    for idx in indices:
        param = trace[idx]
        for var in vars:
            ppc[var.name].append(var.distribution.random(point=param,
                                                         size=size))

    return {k: np.asarray(v) for k, v in ppc.items()}

Example 49

Project: auto-sklearn Source File: xgradient_boosting.py
Function: init
    def __init__(self, learning_rate, n_estimators, subsample,
                 max_depth, colsample_bylevel, colsample_bytree, gamma,
                 min_child_weight, max_delta_step, reg_alpha, reg_lambda,
                 base_score, scale_pos_weight, nthread=1, init=None,
                 random_state=None, verbose=0):
        ## Do not exist
        # self.loss = loss
        # self.min_samples_split = min_samples_split
        # self.min_samples_leaf = min_samples_leaf
        # self.min_weight_fraction_leaf = min_weight_fraction_leaf
        # self.max_leaf_nodes = max_leaf_nodes

        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.subsample = subsample
        self.max_depth = max_depth

        ## called differently
        # max_features: Subsample ratio of columns for each split, in each level.
        self.colsample_bylevel = colsample_bylevel

        # min_weight_fraction_leaf: Minimum sum of instance weight(hessian)
        # needed in a child.
        self.min_child_weight = min_child_weight

        # Whether to print messages while running boosting.
        if verbose:
            self.silent = False
        else:
            self.silent = True

        # Random number seed.
        if random_state is None:
            self.seed = numpy.random.randint(1, 10000, size=1)[0]
        else:
            self.seed = random_state.randint(1, 10000, size=1)[0]

        ## new paramaters
        # Subsample ratio of columns when constructing each tree.
        self.colsample_bytree = colsample_bytree

        # Minimum loss reduction required to make a further partition on a leaf
        # node of the tree.
        self.gamma = gamma

        # Maximum delta step we allow each tree's weight estimation to be.
        self.max_delta_step = max_delta_step

        # L1 regularization term on weights
        self.reg_alpha = reg_alpha

        # L2 regularization term on weights
        self.reg_lambda = reg_lambda

        # Balancing of positive and negative weights.
        self.scale_pos_weight = scale_pos_weight

        # The initial prediction score of all instances, global bias.
        self.base_score = base_score

        # Number of parallel threads used to run xgboost.
        self.nthread = nthread

        ## Were there before, didn't touch
        self.init = init
        self.estimator = None

Example 50

Project: qutip Source File: test_sparse.py
def test_csr_kron():
    "Sparse: Test CSR Kron"
    for kk in range(10):
        ra = np.random.randint(2,100)
        rb = np.random.randint(2,100)
        A = rand_herm(ra,0.5).data
        B = rand_herm(rb,0.5).data
        C = sp.kron(A,B, format='csr')
        D = _csr_kron(A.data,A.indices,A.indptr, A.shape[0], A.shape[1],
                    B.data,B.indices,B.indptr, B.shape[0], B.shape[1])
        assert_almost_equal(C.data, D.data)
        assert_equal(C.indices, D.indices)
        assert_equal(C.indptr, D.indptr)
        
    for kk in range(10):
        ra = np.random.randint(2,100)
        rb = np.random.randint(2,100)
        A = rand_ket(ra,0.5).data
        B = rand_herm(rb,0.5).data
        C = sp.kron(A,B, format='csr')
        D = _csr_kron(A.data,A.indices,A.indptr, A.shape[0], A.shape[1],
                    B.data,B.indices,B.indptr, B.shape[0], B.shape[1])
        assert_almost_equal(C.data, D.data)
        assert_equal(C.indices, D.indices)
        assert_equal(C.indptr, D.indptr)
    
    for kk in range(10):
        ra = np.random.randint(2,100)
        rb = np.random.randint(2,100)
        A = rand_dm(ra,0.5).data
        B = rand_herm(rb,0.5).data
        C = sp.kron(A,B, format='csr')
        D = _csr_kron(A.data,A.indices,A.indptr, A.shape[0], A.shape[1],
                    B.data,B.indices,B.indptr, B.shape[0], B.shape[1])
        assert_almost_equal(C.data, D.data)
        assert_equal(C.indices, D.indices)
        assert_equal(C.indptr, D.indptr)
        
    for kk in range(10):
        ra = np.random.randint(2,100)
        rb = np.random.randint(2,100)
        A = rand_ket(ra,0.5).data
        B = rand_ket(rb,0.5).data
        C = sp.kron(A,B, format='csr')
        D = _csr_kron(A.data,A.indices,A.indptr, A.shape[0], A.shape[1],
                    B.data,B.indices,B.indptr, B.shape[0], B.shape[1])
        assert_almost_equal(C.data, D.data)
        assert_equal(C.indices, D.indices)
        assert_equal(C.indptr, D.indptr)