numpy.random.random_integers

Here are examples of the Python API numpy.random.random_integers taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

127 Examples
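Note that numpy.random.random_integers has been deprecated since NumPy 1.11 in favor of numpy.random.randint; several of the test examples below assert exactly this DeprecationWarning. A minimal migration sketch (the bounds 1 and 8 are illustrative, not taken from any example below):

import numpy as np

# legacy: random_integers(low, high) samples the CLOSED interval [low, high]
legacy = np.random.random_integers(1, 8, size=(3, 5))  # emits DeprecationWarning

# replacement: randint(low, high) samples the HALF-OPEN interval [low, high),
# so shift the upper bound by one when migrating
modern = np.random.randint(1, 8 + 1, size=(3, 5))

# newer Generator API (NumPy >= 1.17): endpoint=True keeps the bound inclusive
rng = np.random.default_rng()
sampled = rng.integers(1, 8, size=(3, 5), endpoint=True)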

3 Source : test_random.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_random_integers_deprecated(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)

            # DeprecationWarning raised with high == None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max)

            # DeprecationWarning raised with high != None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max, np.iinfo('l').max)

    def test_random_sample(self):

3 Source : tracker.py
with GNU General Public License v3.0
from AIHunters

    def merge_move(self, final_merge=None, pair=None):
        if final_merge is None:
            if not self.data_processing.pairs_to_consider:
                self.returned_state = True
                return

            pair_selection = np.random.random_integers(
                0, len(self.data_processing.pairs_to_consider) - 1)
            self.create_new_partition_merge(self.data_processing.pairs_to_consider[pair_selection])
            del self.data_processing.pairs_to_consider[pair_selection]
        else:
            self.create_new_partition_merge(pair)

    def create_new_partition_merge(self, pair_chosen):

3 Source : common_layers_test.py
with MIT License
from akzaidi

  def testFlatten4D3D(self):
    x = np.random.random_integers(1, high=8, size=(3, 5, 2))
    with self.test_session() as session:
      y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))
      session.run(tf.global_variables_initializer())
      res = session.run(y)
    self.assertEqual(res.shape, (3, 5 * 2, 7))

  def testEmbedding(self):

3 Source : common_layers_test.py
with MIT License
from akzaidi

  def testEmbedding(self):
    x = np.random.random_integers(1, high=8, size=(3, 5))
    with self.test_session() as session:
      y = common_layers.embedding(x, 10, 16)
      session.run(tf.global_variables_initializer())
      res = session.run(y)
    self.assertEqual(res.shape, (3, 5, 16))

  def testShakeShake(self):

3 Source : basic_test.py
with MIT License
from akzaidi

  def testBasicFcRelu(self):
    x = np.random.random_integers(0, high=255, size=(1, 28, 28, 1))
    y = np.random.random_integers(0, high=9, size=(1, 1))
    hparams = trainer_lib.create_hparams(
        "basic_fc_small", problem_name="image_mnist", data_dir=".")
    with self.test_session() as session:
      features = {
          "inputs": tf.constant(x, dtype=tf.int32),
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = basic.BasicFcRelu(hparams, tf.estimator.ModeKeys.TRAIN)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (1, 1, 1, 1, 10))


if __name__ == "__main__":

3 Source : lstm_test.py
with MIT License
from akzaidi

  def testLSTMSeq2Seq(self):
    vocab_size = 9
    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
    hparams = lstm.lstm_seq2seq()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    with self.test_session() as session:
      features = {
          "inputs": tf.constant(x, dtype=tf.int32),
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = lstm.LSTMSeq2seq(hparams, tf.estimator.ModeKeys.TRAIN,
                               p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))

  def testLSTMSeq2SeqAttention(self):

3 Source : lstm_test.py
with MIT License
from akzaidi

  def testLSTMSeq2seqBidirectionalEncoder(self):
    vocab_size = 9
    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
    hparams = lstm.lstm_seq2seq()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    with self.test_session() as session:
      features = {
          "inputs": tf.constant(x, dtype=tf.int32),
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = lstm.LSTMSeq2seqBidirectionalEncoder(
          hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size))

  def testLSTMSeq2seqAttentionBidirectionalEncoder(self):

3 Source : env.py
with MIT License
from chucnorrisful

    def reset(self):
        observation = self.env.reset()

        # note: random_integers(0, 1) returns 0 or 1, never 4, so this
        # comparison is always False and the toggle branch below is disabled
        if self._TRAINING and np.random.random_integers(0, 1) == 4:
            ys, xs = np.where(observation[0].observation.feature_screen.player_relative == 1)
            observation = self.env.step(actions=(FUNCTIONS.select_point("toggle", (xs[0], ys[0])),))

        observation = self.env.step(actions=(FUNCTIONS.select_army(0),))

        self.last_obs = observation[0]

        # small_observation = observation[0].observation.feature_screen.unit_density
        small_observation = [observation[0].observation.feature_screen.player_relative,
                             observation[0].observation.feature_screen.selected]

        return small_observation

    def render(self, mode: str = 'human', close: bool = False):

3 Source : env.py
with MIT License
from chucnorrisful

    def reset(self):
        observation = self.env.reset()

        # note: random_integers(1, 1) always returns 1, so this branch always executes
        if self._TRAINING and np.random.random_integers(1, 1) == 1:
            ys, xs = np.where(observation[0].observation.feature_screen.player_relative == 1)
            observation = self.env.step(actions=(FUNCTIONS.select_point("toggle", (xs[0], ys[0])),))

        # observation = self.env.step(actions=(FUNCTIONS.select_army()))

        self.last_obs = observation[0]
        small_observation = np.array([observation[0].observation.feature_screen.player_relative, observation[0].observation.feature_screen.selected])

        return small_observation

    def render(self, mode: str = 'human', close: bool = False):

3 Source : test_random.py
with Apache License 2.0
from dashanji

    def test_random_integers_deprecated(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)

            # DeprecationWarning raised with high == None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max)

            # DeprecationWarning raised with high != None
            assert_raises(DeprecationWarning,
                          np.random.random_integers,
                          np.iinfo('l').max, np.iinfo('l').max)

    def test_random(self):

3 Source : test_randomstate.py
with Apache License 2.0
from dashanji

    def test_random_integers_deprecated(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)

            # DeprecationWarning raised with high == None
            assert_raises(DeprecationWarning,
                          random.random_integers,
                          np.iinfo('l').max)

            # DeprecationWarning raised with high != None
            assert_raises(DeprecationWarning,
                          random.random_integers,
                          np.iinfo('l').max, np.iinfo('l').max)

    def test_random_sample(self):

3 Source : eval.py
with BSD 2-Clause "Simplified" License
from dfouhey

    def bootSamp(self):

        # first pick the groups
        groups = np.random.random_integers(0, self.numGroups - 1, self.numGroups)
        # then return the sample
        return np.concatenate([self.npids[self.G[self.groups[g]]] for g in groups], axis=0)

####################
### Evaluation stuff 
###

def evaluateNBinary(Y,Yh,doBCI=False,bciGroup=None,bSampleCount=1000):

3 Source : test_basic.py
with MIT License
from dmitriy-serdyuk

def random_lil(shape, dtype, nnz):
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y)
        idx = numpy.random.random_integers(huge, size=2) % shape
        value = numpy.random.rand()
        # if dtype is *int*, value would otherwise always be zero!
        if "int" in dtype:
            value = int(value * 100)
        # the call to tuple is needed because scipy 0.13.1 does not support
        # an ndarray of length 2 as an index tuple
        rval[tuple(idx)] = value
    return rval


def sparse_random_inputs(format, shape, n=1, out_dtype=None, p=0.5, gap=None,

3 Source : test_basic.py
with MIT License
from dmitriy-serdyuk

    def setUp(self):
        super(AddSSDataTester, self).setUp()
        self.op_class = AddSSData

        for format in sparse.sparse_formats:
            variable = getattr(theano.sparse, format + '_matrix')

            rand = numpy.array(
                numpy.random.random_integers(3, size=(3, 4)) - 1,
                dtype=theano.config.floatX)
            constant = as_sparse_format(rand, format)

            self.x[format] = [variable() for t in range(2)]
            self.a[format] = [constant for t in range(2)]

    def test_op(self):

3 Source : test_extra_ops.py
with MIT License
from dmitriy-serdyuk

    def test_perform(self):
        x = tensor.lscalar()
        f = function([x], self.op(x))
        M = numpy.random.random_integers(3, 50, size=())
        assert numpy.allclose(f(M), numpy.bartlett(M))
        assert numpy.allclose(f(0), numpy.bartlett(0))
        assert numpy.allclose(f(-1), numpy.bartlett(-1))
        b = numpy.array([17], dtype='uint8')
        assert numpy.allclose(f(b[0]), numpy.bartlett(b[0]))

    def test_infer_shape(self):

3 Source : test_extra_ops.py
with MIT License
from dmitriy-serdyuk

    def test_infer_shape(self):
        x = tensor.lscalar()
        self._compile_and_check([x], [self.op(x)],
                                [numpy.random.random_integers(3, 50, size=())],
                                self.op_class)
        self._compile_and_check([x], [self.op(x)], [0], self.op_class)
        self._compile_and_check([x], [self.op(x)], [1], self.op_class)


class TestFillDiagonal(utt.InferShapeTester):

3 Source : test_basic.py
with MIT License
from dmitriy-serdyuk

def random_lil(shape, dtype, nnz):
    rval = sp.lil_matrix(shape, dtype=dtype)
    huge = 2 ** 30
    for k in range(nnz):
        # set non-zeros in random locations (row x, col y)
        idx = numpy.random.random_integers(huge, size=2) % shape
        value = numpy.random.rand()
        # if dtype is *int*, value would otherwise always be zero!
        if "int" in dtype:
            value = int(value * 100)
        # the call to tuple is needed because scipy 0.13.1 does not support
        # an ndarray of length 2 as an index tuple
        rval[tuple(idx)] = value
    return rval


class test_get_item(unittest.TestCase):

3 Source : eval_train_test.py
with GNU General Public License v3.0
from fanci-dga-detection

def rf_grid_search(data_set: DataSet, n_est_values=16, n_jobs=8):
    num_of_estimators = numpy.random.random_integers(10, 1000, n_est_values)
    max_feat = range(2, 44)
    param_grid = dict(max_features=max_feat, n_estimators=num_of_estimators, criterion=['gini', 'entropy'])
    return grid_search(RandomForestClassifier(), param_grid, data_set, n_jobs=n_jobs)


def svm_grid_search(data_set: DataSet, n_values=32, search_start_exp=-16, search_end_exp=3, n_jobs=8):

3 Source : NavGame.py
with GNU General Public License v2.0
from FracturedPlane

    def initEpoch(self):
        """
            Reset agent location
        """
        # new_loc = np.random.random_integers(self._state_bounds[0][0], self._state_bounds[1][0], self._state_length)
        new_loc = np.random.random_integers(-8, 8, self._state_length)
        self._agent = new_loc
        
    def generateValidationEnvironmentSample(self, seed):

3 Source : CausalSimulator3.py
with Apache License 2.0
from IBM

    def _noise_col_shuffle(x_outcome, cf, snr):
        # randomly sample the same samples to be shuffle:
        rnd_state_sample = np.random.random_integers(low=0, high=999999)
        shuffled_samples = x_outcome.sample(frac=1 - snr, random_state=rnd_state_sample)
        sampled_cf = {i: cf[i].sample(frac=1 - snr, random_state=rnd_state_sample) for i in list(cf.keys())}
        # shuffle and keep the exact order among the counterfactuals:
        rnd_state_shuffle = np.random.random_integers(low=0, high=999999)
        shuffled_samples[:] = shuffled_samples.sample(frac=1, random_state=rnd_state_shuffle).values
        for i in list(sampled_cf.keys()):
            sampled_cf[i][:] = sampled_cf[i].sample(frac=1, random_state=rnd_state_shuffle).values
        # assign back to the signal:
        x_noised_continuous = x_outcome
        x_noised_continuous.loc[shuffled_samples.index, :] = shuffled_samples
        for i in list(cf.keys()):
            cf[i].loc[sampled_cf[i].index] = sampled_cf[i]

    @staticmethod

3 Source : prioritized_memory.py
with MIT License
from JanMatas

    def sample(self, batch_size):
        with self.lock:
            idxes = np.random.random_integers(
                low=0, high=self.nb_entries - 1, size=batch_size)
            demos = [i < self.num_demonstrations for i in idxes]
            encoded_sample = self._get_batches_for_idxes(idxes)
            encoded_sample['weights'] = array_min2d(np.ones((batch_size, )))
            encoded_sample['idxes'] = idxes
            encoded_sample['demos'] = array_min2d(demos)
            return encoded_sample

    def demonstrations_done(self):

3 Source : prioritized_memory.py
with MIT License
from JanMatas

    def _sample_proportional(self, batch_size, pretrain):
        with self.lock:
            res = []
            if pretrain:
                res = np.random.random_integers(
                    low=0, high=self.nb_entries - 1, size=batch_size)
                return res
            for _ in range(batch_size):
                while True:
                    mass = np.random.uniform(
                        0, self._it_sum.sum(0,
                                            len(self.storage) - 1))
                    idx = self._it_sum.find_prefixsum_idx(mass)
                    if idx not in res:
                        res.append(idx)
                        break
            return res

    def sample_prioritized(self, batch_size, beta, pretrain=False):

3 Source : dataset.py
with Apache License 2.0
from kkoutini

def get_roll_func(axis=1, shift=None, shift_range=50):
    print("rolling...")

    def roll_func(b):
        x, i, y = b
        x = torch.as_tensor(x)
        sf = shift
        if shift is None:
            sf = int(np.random.random_integers(-shift_range, shift_range))
        global FirstTime

        return x.roll(sf, axis), i, y

    return roll_func


@dataset.command

3 Source : dfn_utils.py
with BSD 3-Clause Clear License
from komeisugiura

def generate_random_onehot_vectors(nb_classes, size):
    """
    generate one-hot vectors
    """
    tmp = np.random.random_integers(0, nb_classes - 1, size)
    one_hot_targets = np.eye(nb_classes)[tmp]
    return one_hot_targets


def leaky_relu(X, leak=0.2):

3 Source : rcv_utils.py
with MIT License
from medgift

def visualize_overlap(patch, annotations):
    plt.figure()
    plt.imshow(patch)
    plt.imshow(annotations, alpha=0.5)
    plt.savefig('./training/'+str(np.random.random_integers(0,100000))+'.png')
    plt.close()

def get_patch_statistics(patch, annotations):

3 Source : q_image.py
with MIT License
from MIDA-group

    def random_from_level(self, n, level):
        arr = self.pnts[level]
        m = arr.shape[0]
        if m == 0:
            return arr
        else:
            return arr[np.random.random_integers(0, m-1, n), :]
    def random_integers(self, m, n):

3 Source : q_image.py
with MIT License
from MIDA-group

    def random_integers(self, m, n):
        if m > 0 and n > 0:
            return np.random.random_integers(0, m, n)
        else:
            return np.zeros((0, self.pnts[0].shape[1]), dtype='int')


    def random_sample(self, n):

3 Source : grammars.py
with GNU Lesser General Public License v3.0
from neuronalX

    def NP(self):
        f = 4 * [[]]
        f[0] = lambda: self.N()
        f[1] = lambda: self.N() + self.SRC()
        f[2] = lambda: self.N() + self.ORC()
        f[3] = lambda: self.N() + self.PP()
        return f[random.random_integers(0, 3)]()
    
    def PP(self):

3 Source : pyrobo.py
with MIT License
from nicoguaro

def cuentame(pos=None):
    u"""Imprime una frase de la vendedora de rosas

    Parámetros
    ----------
    val : int, opcional
        Posición de la frase en la lista de frases.

    """
    num_frases = len(frases_cultas)
    if pos is None or pos >= num_frases:
        pos = random_integers(0, num_frases - 1)
    frase = frases_cultas[pos]
    personaje = personajes[pos]
    espacios_autor = (len(frase) - len(personaje))*" "
    msj = u"❝{}❞\n\n{}{}".format(frase, espacios_autor, personaje)
    print(msj)
    return None

3 Source : memory.py
with BSD 3-Clause "New" or "Revised" License
from pemami4911

    def sample(self, batch_size):
        # Draw from [1, nb_entries - 2] (the one-argument form starts at 1),
        # so a preceding and a succeeding element always exist.
        batch_idxs = np.random.random_integers(self.nb_entries - 2, size=batch_size)

        obs_batch = self.observations.get_batch(batch_idxs)
        discrete_actions_batch = self.discrete_actions.get_batch(batch_idxs)
        dense_actions_batch = self.dense_actions.get_batch(batch_idxs)
        reward_batch = self.rewards.get_batch(batch_idxs)

        return obs_batch, discrete_actions_batch, dense_actions_batch, reward_batch

    def append(self, obs, discrete_action, dense_action, reward):

3 Source : utils.py
with MIT License
from psychopa4

def augmentation(lr,hr):
    a=np.random.random_integers(0,1)
    b=np.random.random_integers(0,1)
    rot=np.random.random_integers(0,1)
    if a+b>0:
        a=-2*a+1
        b=-2*b+1
        lr=lr[:,:,::a,::b,:]
        hr=hr[:,:,::a,::b,:]
    if rot==1:
        lr=lr.transpose((0,1,3,2,4))
        hr=hr.transpose((0,1,3,2,4))
    return lr,hr

3 Source : random_region.py
with BSD 3-Clause "New" or "Revised" License
from pysal

    def get_region_breaks(self, num_regions):
        region_breaks = set([])
        while len(region_breaks) < num_regions - 1:
            region_breaks.add(np.random.random_integers(1, self.n - 1))
        region_breaks = list(region_breaks)
        region_breaks.sort()
        return region_breaks

    def get_cards(self, num_regions):

3 Source : random_region.py
with BSD 3-Clause "New" or "Revised" License
from pysal

    def grow_free(self, w, test_card, region, candidates, potential):
        # increment potential areas after each new area is
        # added to the region (faster than the grow_compact)
        pot_index = np.random.random_integers(0, len(potential) - 1)
        add_area = potential[pot_index]
        region.append(add_area)
        candidates.remove(add_area)
        potential.remove(add_area)
        potential.extend(
            [
                i
                for i in w.neighbors[add_area]
                if i not in region and i not in potential and i in candidates
            ]
        )
        return region, candidates, potential

    def build_contig_regions(

3 Source : spec_augment.py
with GNU Affero General Public License v3.0
from rolczynski

    def mask_frequencies(features: np.ndarray, means: np.ndarray, channels: int, F: int, mf: int):
        for i in range(mf):
            f = np.random.random_integers(low=0, high=F)
            f0 = np.random.random_integers(low=0, high=channels-F)
            features[:, f0:f0+f] = means[f0:f0+f]
        return features

    @staticmethod

3 Source : spec_augment.py
with GNU Affero General Public License v3.0
from rolczynski

    def mask_time(features: np.ndarray, means: np.ndarray, time: int, T_range: Tuple[int, int], mt: int):
        Tmin, Tmax = T_range
        for i in range(mt):
            t = np.random.random_integers(low=Tmin, high=Tmax)
            t0 = np.random.random_integers(low=0, high=time-Tmax)
            features[t0:t0+t, :] = means
        return features

3 Source : collect_dice_data.py
with MIT License
from s-tian

def random_actions(state):
    act = [np.random.random_integers(-150, 150), np.random.random_integers(-150, 150),
           np.random.random_integers(-10, 10)]
    return act


def get_randomoffset():

3 Source : collect_joystick_data.py
with MIT License
from s-tian

def random_actions(state):
    return [np.random.random_integers(-70, 70), np.random.random_integers(-70, 70),
            np.random.random_integers(-150, 150)]


def get_randomoffset():

3 Source : embed_regularize.py
with BSD 3-Clause "New" or "Revised" License
from sjmielke

def tests():
  vocabsize = 50
  hiddensize = 4
  bptt = 10
  batch_size = 2

  embed = torch.nn.Embedding(vocabsize, hiddensize)

  words = np.random.random_integers(low = 0, high = vocabsize - 1, size = (batch_size, bptt))
  words = torch.LongTensor(words)

  orig_embedmat = embed(words)
  embedmat = embedded_dropout(embed, words)

  print(orig_embedmat)
  print(embedmat)


if __name__ == '__main__':

3 Source : test_attacks.py
with BSD 3-Clause "New" or "Revised" License
from StephanZheng

    def test_targeted_generate_np_gives_adversarial_example(self):
        random_labs = np.random.random_integers(0, 1, 100)
        random_labs_one_hot = np.zeros((100, 2))
        random_labs_one_hot[np.arange(100), random_labs] = 1

        _, x_adv, delta = self.generate_adversarial_examples_np(
            eps=.5, ord=np.inf, y_target=random_labs_one_hot)

        self.assertClose(delta, 0.5)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(random_labs == new_labs) > 0.7)

    def test_generate_np_can_be_called_with_different_eps(self):

3 Source : utils.py
with BSD 2-Clause "Simplified" License
from StevenBanama

def random_erasing(img, drop_out=0.3, aspect=(0.5, 2), area=(0.06, 0.10)):
    # https://arxiv.org/pdf/1708.04896.pdf
    if 1 - random.random() > drop_out:
        return img
    img = img.copy()
    height, width = img.shape[:-1]
    aspect_ratio = np.random.uniform(*aspect)
    area_ratio = np.random.uniform(*area)
    img_area = height * width * area_ratio
    dwidth, dheight = np.sqrt(img_area * aspect_ratio), np.sqrt(img_area * 1 / aspect_ratio) 
    xmin = random.randint(0, height)
    ymin = random.randint(0, width)
    xmax, ymax = min(height, int(xmin + dheight)), min(width, int(ymin + dwidth))
    # note: random_integers is inclusive of its upper bound, so 0..256 here
    # can produce 256, one past the maximum uint8 pixel value
    img[xmin:xmax, ymin:ymax, :] = np.random.random_integers(0, 256, (xmax - xmin, ymax - ymin, 3))
    return img

def get_normal_image(row, seed=100, shape=(96, 96), is_training=True):

3 Source : utils.py
with MIT License
from taki0112

def shuffle(x, y) :
    seed = np.random.random_integers(low=0, high=1000)
    np.random.seed(seed)
    np.random.shuffle(x)

    np.random.seed(seed)
    np.random.shuffle(y)

    return x, y

def load_test_data(image_path, size=256, gray_to_RGB=False):

3 Source : tensordot_npc.py
with GNU General Public License v3.0
from tenpy

def rand_distinct_int(a, b, n):
    """returns n distinct integers from a to b inclusive."""
    if n < 0:
        raise ValueError
    if n > b - a + 1:
        raise ValueError
    return np.sort((np.random.random_integers(a, b - n + 1, size=n))) + np.arange(n)


def rand_partitions(a, b, n):

3 Source : attentions_test.py
with Apache License 2.0
from tensorflow

  def test_mask(self):
    a = np.random.random_integers(0, 5, size=[2, 50])
    jax_mask = attentions.causal_segment_mask(a, jnp.float32)
    tf_mask = batch_major_attention.CausalSegmentMask(a, tf.float32)
    self.assertAllClose(test_utils.to_np(jax_mask), test_utils.to_np(tf_mask))

  @parameterized.parameters([(False, True, 3, True), (True, True, 3, True),

3 Source : data_utils.py
with MIT License
from tu-rbo

def make_batch_iterator(data, batch_size=32, seq_len=10):
    # go through data and select a subsequence from each sequence
    while True:
        episodes = np.random.random_integers(0, len(data['s']) - 1, size=batch_size)
        start_steps = np.random.random_integers(0, len(data['s'][0]) - seq_len - 1, size=batch_size)
        batches = {k: np.concatenate([data[k][i:i + 1, j:j + seq_len] for i, j in zip(episodes, start_steps)]) for k in data.keys()}
        yield batches

def make_repeating_batch_iterator(data, epoch_len, batch_size=32, seq_len=10):

3 Source : data_utils.py
with MIT License
from tu-rbo

def make_repeating_batch_iterator(data, epoch_len, batch_size=32, seq_len=10):
    # go through data and select a subsequence from each sequence
    repeating_episodes = np.random.random_integers(0, len(data['s']) - 1, size=[epoch_len, batch_size])
    repeating_start_steps = np.random.random_integers(0, len(data['s'][0]) - seq_len - 1, size=[epoch_len, batch_size])
    while True:
        for episodes, start_steps in zip(repeating_episodes, repeating_start_steps):
            batches = {k: np.concatenate([data[k][i:i + 1, j:j + seq_len] for i, j in zip(episodes, start_steps)]) for k in data.keys()}
            yield batches

def make_complete_batch_iterator(data, batch_size=1000, seq_len=10):

3 Source : vectorgenerator.py
with BSD 3-Clause "New" or "Revised" License
from upscale-project

def generate_additional_random_vector(n_var, depth, n_sample):
  vector = np.array( [np.random.random_integers(0, depth-1, n_sample) for i in range(n_var)] )
  vector = np.transpose(vector.reshape(n_var,n_sample))
  return vector.reshape(n_sample, n_var)

def scale_vector(vector, depth): # scale vector by depth/max(vector) 

3 Source : transforms.py
with BSD 2-Clause "Simplified" License
from YifanSun-ReID

    def __call__(self, img):
        img = img.resize((self.width, self.height),Image.BILINEAR)
        rp = np.random.random_integers(3,6)
        img = img.crop((0,0,self.width,self.height/6.*rp))
        img = img.resize((self.width, self.height),Image.BILINEAR)
        return [img, rp]

class RandomVerticalCropCont(object):

0 Source : modalities_test.py
with MIT License
from akzaidi

  def testSymbolModalityInputs(self):
    batch_size = 10
    num_datashards = 5
    length = 5
    vocab_size = 5000
    hidden_size = 9
    model_hparams = common_hparams.basic_params1()
    model_hparams.hidden_size = hidden_size
    model_hparams.mode = tf.estimator.ModeKeys.TRAIN
    x = -1 + np.random.random_integers(
        vocab_size, size=(batch_size, length, 1, 1))
    m = modalities.SymbolModality(model_hparams, vocab_size)
    data_parallelism = expert_utils.Parallelism(
        ["/device:CPU:0"] * num_datashards)
    with self.test_session() as session:
      xs = tf.split(x, num_datashards)
      sharded_output = m.bottom_sharded(xs, data_parallelism)
      output = tf.concat(sharded_output, 0)
      session.run(tf.global_variables_initializer())
      res = session.run(output)
    self.assertEqual(res.shape, (batch_size, length, 1, hidden_size))

  def testSymbolModalityTargets(self):

0 Source : modalities_test.py
with MIT License
from akzaidi

  def testSymbolModalityTargets(self):
    batch_size = 10
    num_datashards = 5
    length = 6
    height = 7
    hidden_size = 9
    vocab_size = 11
    model_hparams = common_hparams.basic_params1()
    model_hparams.hidden_size = hidden_size
    model_hparams.mode = tf.estimator.ModeKeys.TRAIN
    body_output = -1 + np.random.random_integers(
        100, size=(batch_size, length, height, hidden_size))
    targets = -1 + np.random.random_integers(
        vocab_size, size=(batch_size, length, height, 1))
    m = modalities.SymbolModality(model_hparams, vocab_size)
    data_parallelism = expert_utils.Parallelism(
        ["/device:CPU:0"] * num_datashards)
    with self.test_session() as session:
      sharded_body_output = tf.split(tf.to_float(body_output), num_datashards)
      sharded_targets = tf.split(targets, num_datashards)
      sharded_logits = m.top_sharded(sharded_body_output, sharded_targets,
                                     data_parallelism)
      train_loss = m.loss_sharded(sharded_logits, sharded_targets,
                                  data_parallelism)
      logits = tf.concat(sharded_logits, 0)
      session.run(tf.global_variables_initializer())
      res1, res2 = session.run((logits, train_loss))
    self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))
    self.assertEqual(res2.shape, ())

  def testSymbolModalityTargetsFactored(self):

0 Source : modalities_test.py
with MIT License
from akzaidi

  def testSymbolModalityTargetsFactored(self):
    batch_size = 10
    num_datashards = 5
    length = 6
    height = 7
    hidden_size = 9
    vocab_size = 11
    model_hparams = common_hparams.basic_params1()
    model_hparams.factored_logits = True
    model_hparams.hidden_size = hidden_size
    model_hparams.mode = tf.estimator.ModeKeys.TRAIN
    body_output = -1 + np.random.random_integers(
        100, size=(batch_size, length, height, hidden_size))
    targets = -1 + np.random.random_integers(
        vocab_size, size=(batch_size, length, height, 1))
    m = modalities.SymbolModality(model_hparams, vocab_size)
    data_parallelism = expert_utils.Parallelism(
        ["/device:CPU:0"] * num_datashards)
    with self.test_session() as session:
      sharded_body_output = tf.split(tf.to_float(body_output), num_datashards)
      sharded_targets = tf.split(targets, num_datashards)
      sharded_logits = m.top_sharded(sharded_body_output, sharded_targets,
                                     data_parallelism)
      train_loss = m.loss_sharded(sharded_logits, sharded_targets,
                                  data_parallelism)
      logits = tf.concat(sharded_logits, 0)
      session.run(tf.global_variables_initializer())
      res1, res2 = session.run((logits, train_loss))
    self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size))
    self.assertEqual(res2.shape, ())


if __name__ == "__main__":
