numpy.minimum

Here are the examples of the python api numpy.minimum taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

181 Examples

Example 1

Project: chainer
Source File: smorms3.py
View license
    def update_one_cpu(self, param, state):
        """SMORMS3 CPU update for a single parameter.

        Reads the running statistics from ``state``, updates ``param.data``
        in place, and writes the new statistics back into ``state``.
        """
        mem, g, g2 = state['mem'], state['g'], state['g2']
        grad = param.grad

        # r blends the running averages; larger mem -> slower adaptation.
        r = 1 / (mem + 1)
        g = (1 - r) * g + r * grad
        g2 = (1 - r) * g2 + r * grad * grad
        # Signal-to-noise style ratio of first/second gradient moments.
        x = g * g / (g2 + self.eps)
        # Effective step size is the smaller of x and the learning rate.
        param.data -= grad * numpy.minimum(x, self.lr) \
            / (numpy.sqrt(g2) + self.eps)
        mem = 1 + mem * (1 - x)

        state['mem'], state['g'], state['g2'] = mem, g, g2

Example 2

Project: cupy
Source File: smorms3.py
View license
    def update_one_cpu(self, param, state):
        """SMORMS3 CPU update for a single parameter.

        Reads the running statistics from ``state``, updates ``param.data``
        in place, and writes the new statistics back into ``state``.
        """
        mem, g, g2 = state['mem'], state['g'], state['g2']
        grad = param.grad

        # r blends the running averages; larger mem -> slower adaptation.
        r = 1 / (mem + 1)
        g = (1 - r) * g + r * grad
        g2 = (1 - r) * g2 + r * grad * grad
        # Signal-to-noise style ratio of first/second gradient moments.
        x = g * g / (g2 + self.eps)
        # Effective step size is the smaller of x and the learning rate.
        param.data -= grad * numpy.minimum(x, self.lr) \
            / (numpy.sqrt(g2) + self.eps)
        mem = 1 + mem * (1 - x)

        state['mem'], state['g'], state['g2'] = mem, g, g2

Example 3

Project: pysb
Source File: anneal_mod.py
View license
    def update_guess(self, x0):
        """Propose a new annealing state by Gaussian perturbation of ``x0``.

        Uses star-imported numpy names (``minimum``, ``sqrt``, ...).
        """
        # Per-dimension std: sqrt(temperature), capped at a third of the
        # box width divided by learn_rate (so std*learn_rate <= width/3).
        std = minimum(sqrt(self.T)*ones(self.dims), (self.upper-self.lower)/3.0/self.learn_rate)
        x0 = asarray(x0)
        xc = squeeze(random.normal(0, 1.0, size=self.dims))

        xnew = x0 + xc*std*self.learn_rate
        return xnew

Example 4

Project: fast-rcnn
Source File: test.py
View license
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections.

    Shows one matplotlib figure per above-threshold detection (among at
    most the first 10), drawing the box and score on the image.

    Args:
        im: image array; channels are reordered (2, 1, 0), i.e. BGR -> RGB.
        class_name: label used in the plot title.
        dets: detections array; columns [:4] are (x1, y1, x2, y2), the
            last column is the score.
        thresh: minimum score for a detection to be shown.
    """
    import matplotlib.pyplot as plt
    im = im[:, :, (2, 1, 0)]
    # Python 2 ``xrange``; look at no more than 10 detections.
    for i in xrange(np.minimum(10, dets.shape[0])):
        bbox = dets[i, :4]
        score = dets[i, -1]
        if score > thresh:
            plt.cla()
            plt.imshow(im)
            plt.gca().add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1], fill=False,
                              edgecolor='g', linewidth=3)
                )
            plt.title('{}  {:.3f}'.format(class_name, score))
            plt.show()

Example 5

Project: tfdeploy
Source File: tfdeploy.py
View license
@Operation.factory
def Minimum(a, b):
    """
    Minimum op.

    Element-wise minimum of ``a`` and ``b``, returned as a 1-tuple since
    tfdeploy operations yield tuples of outputs.
    """
    return np.minimum(a, b),

Example 6

Project: rlpy
Source File: PuddleWorld.py
View license
    def step(self, a):
        """Advance the environment one step with action index ``a``.

        Adds the action vector plus Gaussian noise to the state, clamps
        into [0, 1]^2, and returns (reward, next_state, terminal, actions).
        """
        a = self.actions[a]
        ns = self.state + a + self.random_state.randn() * self.noise_level
        # make sure we stay inside the [0,1]^2 region
        ns = np.minimum(ns, 1.)
        ns = np.maximum(ns, 0.)
        self.state = ns.copy()
        return self._reward(ns), ns, self.isTerminal(), self.possibleActions()

Example 7

Project: rlpy
Source File: PuddleWorld.py
View license
    def _reward(self, s):
        """Reward at state ``s``: 0 at the goal, else -1 minus puddle penalty.

        The penalty is 400 times the deepest incursion (within 0.1) into
        any puddle, measured as distance to the closest point on each
        puddle's line segment.
        """
        if self.isTerminal(s):
            return 0  # goal state reached
        reward = -1
        # compute puddle influence
        # Each puddle is a segment from puddles[:, 0, :] to puddles[:, 1, :].
        d = self.puddles[:, 1, :] - self.puddles[:, 0, :]
        denom = (d ** 2).sum(axis=1)
        # g: projection parameter of s onto each segment, clamped to [0, 1].
        g = ((s - self.puddles[:, 0, :]) * d).sum(axis=1) / denom
        g = np.minimum(g, 1)
        g = np.maximum(g, 0)
        dists = np.sqrt(((self.puddles[:, 0, :] + g * d - s) ** 2).sum(axis=1))
        dists = dists[dists < 0.1]
        if len(dists):
            # NOTE(review): dists is already filtered to < 0.1 above, so
            # the second mask below is a no-op -- code kept as-is.
            reward -= 400 * (0.1 - dists[dists < 0.1]).max()
        return reward

Example 8

Project: mondrianforest
Source File: utils.py
View license
def logsumexp_array(v1, v2):
    """
    computes logsumexp of each element in v1 and v2

    Element-wise log(exp(v1) + exp(v2)), computed stably: the larger value
    is factored out so the exponential cannot overflow, and ``log1p`` is
    used for accuracy when exp(v_min - v_max) is tiny.
    """
    v_min = np.minimum(v1, v2)
    v_max = np.maximum(v1, v2)
    # log(e^a + e^b) = max + log(1 + e^(min - max)); np.log1p is accurate
    # where 1 + x would lose precision for small x.
    return v_max + np.log1p(np.exp(v_min - v_max))

Example 9

Project: scikit-learn
Source File: test_shortest_path.py
View license
def test_shortest_path():
    """Compare floyd_warshall_slow path lengths against per-source BFS."""
    dist_matrix = generate_graph(20)
    # We compare path length and not costs (-> set distances to 0 or 1)
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            # Symmetrize: keep an edge if it exists in either direction.
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)

        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        for i in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in graph_py
            dist_dict = defaultdict(int)
            dist_dict.update(single_source_shortest_path_length(dist_matrix,
                                                                i))

            for j in range(graph_py[i].shape[0]):
                assert_array_almost_equal(dist_dict[j], graph_py[i, j])

Example 10

Project: iris
Source File: test_OceanSigmaZFactory.py
View license
    @staticmethod
    def derive(sigma, eta, depth, depth_c, nsigma, zlev, coord=True):
        """Reference ocean-sigma-z coordinate computation for the test.

        The first ``nsigma`` levels follow the sigma formula; the deeper
        levels take ``zlev`` directly.  Returns an iris AuxCoord when
        ``coord`` is True, otherwise the raw array.
        """
        nsigma_slice = slice(0, int(nsigma))
        # Sigma formula: eta + sigma * (min(depth_c, depth) + eta).
        temp = eta + sigma * (np.minimum(depth_c, depth) + eta)
        shape = temp.shape
        result = np.ones(shape, dtype=temp.dtype) * zlev
        result[nsigma_slice] = temp[nsigma_slice]
        if coord:
            name = 'sea_surface_height_above_reference_ellipsoid'
            result = AuxCoord(result,
                              standard_name=name,
                              units='m',
                              attributes=dict(positive='up'))
        return result

Example 11

Project: LV_groundhog
Source File: utils.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 12

Project: LV_groundhog
Source File: utils.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState supplying permutation/normal.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 13

Project: sfepy
Source File: eigen.py
View license
def sym_tri_eigen(diags, select_indices=None):
    """
    Compute eigenvalues of a symmetric tridiagonal matrix using
    `scipy.linalg.eigvals_banded()`.

    `diags` is the lower banded storage (row 0 the diagonal, row 1 the
    sub-diagonal).  When `select_indices` is given, only eigenvalues with
    those indices are returned (clamped to the matrix size); otherwise
    all eigenvalues are computed.
    """
    if select_indices is None:
        return eigvals_banded(diags, lower=True, select='a')

    # Clamp the requested index range so it cannot exceed the matrix size.
    # NOTE(review): eigvals_banded indices live in [0, n-1]; clamping to n
    # (not n-1) mirrors the original -- confirm intended.
    matrix_size = diags.shape[1]
    bounded = nm.minimum(select_indices, matrix_size)
    return eigvals_banded(diags, lower=True, select='i',
                          select_range=bounded)

Example 14

Project: hred-qs
Source File: utils.py
View license
def NormalInit(rng, sizeX, sizeY, scale=0.01, sparsity=-1):
    """
    Normal Initialization

    Fill a (sizeX, sizeY) matrix with N(0, scale) entries, ``sparsity``
    non-zeros per row (every column when ``sparsity < 0``).
    """
    n_rows, n_cols = int(sizeX), int(sizeY)

    if sparsity < 0:
        sparsity = n_cols
    sparsity = numpy.minimum(n_cols, sparsity)

    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Random column positions, then Gaussian values for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        weights[row, cols] = rng.normal(loc=0, scale=scale, size=(sparsity,))

    return weights.astype(theano.config.floatX)

Example 15

Project: statsmodels
Source File: _kernel_base.py
View license
    def _set_bw_bounds(self, bw):
        """
        Sets bandwidth lower bound to effectively zero (1e-10), and for
        discrete values upper bound to 1.
        """
        # Replace negative bandwidths with a tiny positive floor (in place).
        bw[bw < 0] = 1e-10
        _, ix_ord, ix_unord = _get_type_pos(self.data_type)
        # Ordered and unordered discrete variables: bandwidth capped at 1.
        bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
        bw[ix_unord] = np.minimum(bw[ix_unord], 1.)

        return bw

Example 16

Project: tweet2vec
Source File: batch_word.py
View license
    def next(self):
        """Return the next (data, targets) minibatch (Python 2 iterator).

        Resets internal state and raises StopIteration once all indices
        have been consumed.
        """
        if self.curr_pos >= len(self.indices):
            self.reset()
            raise StopIteration()

        # current batch size (the final batch may be smaller)
        curr_batch_size = np.minimum(self.batch_size, self.curr_remaining)

        # indices for current batch
        curr_indices = self.curr_indices[self.curr_pos:self.curr_pos+curr_batch_size]
        self.curr_pos += curr_batch_size
        self.curr_remaining -= curr_batch_size

        # data and targets for current batch
        x = [self.data[ii] for ii in curr_indices]
        y = [self.targets[ii] for ii in curr_indices]

        return x, y

Example 17

Project: tweet2vec
Source File: batch_char.py
View license
    def next(self):
        """Return the next (data, targets) minibatch (Python 2 iterator).

        Resets internal state and raises StopIteration once all indices
        have been consumed.
        """
        if self.curr_pos >= len(self.indices):
            self.reset()
            raise StopIteration()

        # current batch size (the final batch may be smaller)
        curr_batch_size = np.minimum(self.batch_size, self.curr_remaining)

        # indices for current batch
        curr_indices = self.curr_indices[self.curr_pos:self.curr_pos+curr_batch_size]
        self.curr_pos += curr_batch_size
        self.curr_remaining -= curr_batch_size

        # data and targets for current batch
        x = [self.data[ii] for ii in curr_indices]
        y = [self.targets[ii] for ii in curr_indices]

        return x, y

Example 18

Project: pysparkling
Source File: stat_counter.py
View license
    def merge(self, value):
        """Fold one value into the running statistics (Welford update).

        Updates count, mean, M2 (sum of squared deviations) and the
        element-wise max/min; returns self for chaining.
        """
        delta = value - self.mu
        self.n += 1
        self.mu += delta / self.n
        # Welford: delta against the old mean times delta against the new.
        self.m2 += delta * (value - self.mu)
        self.maxValue = maximum(self.maxValue, value)
        self.minValue = minimum(self.minValue, value)

        return self

Example 19

View license
  def testSaturate(self):
    """tf.saturate_cast must clamp values into the target dtype's range."""
    in_types = tf.float32,
    out_types = tf.int8, tf.uint8, tf.int16, tf.float32
    with self.test_session() as sess:
      for in_type in in_types:
        for out_type in out_types:
          lo, hi = in_type.min, in_type.max
          # Values at and around the input type's extremes.
          x = tf.constant([lo, lo + 1, lo // 2, hi // 2, hi - 1, hi],
                          dtype=in_type)
          y = tf.saturate_cast(x, dtype=out_type)
          self.assertEqual(y.dtype, out_type)
          x, y = sess.run([x, y])
          # Reference: clamp into [out_type.min, out_type.max].
          correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
          self.assertAllEqual(correct, y)

Example 20

View license
  def testSaturate(self):
    """tf.saturate_cast must clamp values into the target dtype's range."""
    in_types = tf.float32,
    out_types = tf.int8, tf.uint8, tf.int16, tf.float32
    with self.test_session() as sess:
      for in_type in in_types:
        for out_type in out_types:
          lo, hi = in_type.min, in_type.max
          # Values at and around the input type's extremes.
          x = tf.constant([lo, lo + 1, lo // 2, hi // 2, hi - 1, hi],
                          dtype=in_type)
          y = tf.saturate_cast(x, dtype=out_type)
          self.assertEqual(y.dtype, out_type)
          x, y = sess.run([x, y])
          # Reference: clamp into [out_type.min, out_type.max].
          correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
          self.assertAllEqual(correct, y)

Example 21

Project: NMT
Source File: utils.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 22

Project: NMT
Source File: utils.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState supplying permutation/normal.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 23

Project: NMT
Source File: utils.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 24

Project: NMT
Source File: utils.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState supplying permutation/normal.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 25

Project: NMT-Coverage
Source File: utils.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 26

Project: NMT-Coverage
Source File: utils.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState supplying permutation/normal.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 27

Project: NMT-Coverage
Source File: utils.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 28

Project: NMT-Coverage
Source File: utils.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState supplying permutation/normal.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 29

Project: vispy
Source File: colorbar.py
View license
    def _calc_size(self):
        """Calculate a size

        Computes the colorbar's (major, minor) axis lengths from the
        containing rect, applying the configured paddings and the
        minor/major aspect ratio.
        """
        (total_halfx, total_halfy) = (self.rect.right, self.rect.top)
        # Which screen axis is "major" depends on the orientation.
        if self._colorbar.orientation in ["bottom", "top"]:
            (total_major_axis, total_minor_axis) = (total_halfx, total_halfy)
        else:
            (total_major_axis, total_minor_axis) = (total_halfy, total_halfx)

        major_axis = total_major_axis * (1.0 -
                                         self._major_axis_padding)
        minor_axis = major_axis * self._minor_axis_ratio

        # if the minor axis is "leaking" from the padding, then clamp
        minor_axis = np.minimum(minor_axis,
                                total_minor_axis *
                                (1.0 - self._minor_axis_padding))

        return (major_axis, minor_axis)

Example 30

Project: qspectrumanalyzer
Source File: data.py
View license
    def update_peak_hold_min(self, data):
        """Update min. peak hold data"""
        if self.peak_hold_min is None:
            # First spectrum: start the hold from a copy of the data.
            self.peak_hold_min = data["y"].copy()
        else:
            # Element-wise running minimum; notify listeners via the signal.
            self.peak_hold_min = np.minimum(self.peak_hold_min, data["y"])
            self.peak_hold_min_updated.emit(self)

Example 31

Project: mmdgm
Source File: preprocessing.py
View license
def preprocess_normalize01(x, global_sd=True):
    """Center and scale data, returning encode/decode closures.

    Rows are centered by their per-row mean; scaling uses either a single
    global std (default) or per-row stds, computed on the centered data.
    The decoder un-scales, un-centers and clips the result into [0, 1].

    Returns (f_enc, f_dec, (x_center, x_sd)).
    """
    x_center = x.mean(axis=1, keepdims=True)
    x = x - x_center
    # Std is computed on the *centered* data.
    x_sd = x.std(axis=1, keepdims=True) if not global_sd else x.std()
    x /= x_sd

    def f_enc(data):
        return (data - x_center) / x_sd

    def f_dec(data):
        # Undo scaling/centering, then clamp into the unit interval.
        return np.maximum(np.minimum(data * x_sd + x_center, 1), 0)

    return f_enc, f_dec, (x_center, x_sd)

Example 32

Project: ldsc
Source File: munge_sumstats.py
View license
def filter_frq(frq, log, args):
    '''
    Filter on MAF. Remove MAF < args.maf_min and out-of-bounds MAF.
    '''
    # Frequencies outside [0, 1] are invalid and always excluded.
    out_of_bounds = (frq < 0) | (frq > 1)
    n_bad = out_of_bounds.sum()
    if n_bad > 0:
        msg = 'WARNING: {N} SNPs had FRQ outside of [0,1]. The FRQ column may be mislabeled.'
        log.log(msg.format(N=n_bad))

    # Fold frequencies to minor-allele frequency (MAF <= 0.5).
    maf = np.minimum(frq, 1 - frq)
    keep = maf > args.maf_min
    return keep & ~out_of_bounds

Example 33

Project: Attentive_reader
Source File: inits.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 34

Project: Attentive_reader
Source File: inits.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng=None):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState; falls back to the module-level global_rng.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    if rng is None:
        rng = global_rng

    sizeX = int(sizeX)
    sizeY = int(sizeY)

    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)

    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 35

Project: Attentive_reader
Source File: inits.py
View license
def sample_weights_orth(sizeX, sparsity, scale=1.0, rng=None):
    """Orthogonal weight initialization.

    Draws a (sizeX, sizeX) Gaussian matrix and orthogonalizes it via SVD
    (values = U . V^T).  ``sparsity`` is normalized but not otherwise used
    by the orthogonalization.

    NOTE(review): unlike the classic initializer, a ``None`` rng is not
    replaced by a global fallback here and would raise -- confirm intended.
    """
    sizeX = int(sizeX)
    sizeY = sizeX

    assert sizeX == sizeY, 'for orthogonal init, sizeX == sizeY'

    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)

    # The zero matrix previously allocated here was dead code: it was
    # immediately overwritten by the Gaussian draw below.
    values = rng.normal(loc=0, scale=scale, size=(sizeX, sizeY))
    u, s, v = numpy.linalg.svd(values)
    values = u.dot(v.T)
    return values.astype(theano.config.floatX)

Example 36

Project: Attentive_reader
Source File: __init__.py
View license
def sample_weights(sizeX, sizeY, sparsity, scale, rng):
    """
    Initialization that fixes the largest singular value.

    Each row receives ``sparsity`` uniform draws at random column
    positions, rescaled to norm ``scale``; the matrix is then divided by
    its largest singular value (and multiplied by ``scale``) to pin it.
    """
    n_rows, n_cols = int(sizeX), int(sizeY)
    sparsity = numpy.minimum(n_cols, sparsity)
    weights = numpy.zeros((n_rows, n_cols), dtype=theano.config.floatX)
    for row in xrange(n_rows):
        # Pick `sparsity` random column positions for this row.
        cols = rng.permutation(n_cols)[:sparsity]
        row_vals = rng.uniform(low=-scale, high=scale, size=(sparsity,))
        # Normalize the row's values to have norm `scale`.
        row_vals = scale * row_vals / numpy.sqrt((row_vals ** 2).sum())
        weights[row, cols] = row_vals
    _, sv, _ = numpy.linalg.svd(weights)
    weights = scale * weights / sv[0]
    return weights.astype(theano.config.floatX)

Example 37

Project: Attentive_reader
Source File: __init__.py
View license
def sample_weights_classic(sizeX, sizeY, sparsity, scale, rng=None):
    """Sparse Gaussian weight initialization.

    Each of the ``sizeX`` rows gets ``sparsity`` entries (all columns when
    ``sparsity < 0``) drawn from N(0, scale) at random column positions.

    Args:
        sizeX, sizeY: matrix dimensions (coerced to int).
        sparsity: non-zeros per row; negative means fully dense.
        scale: std of the normal draws.
        rng: numpy RandomState; falls back to the module-level global_rng.

    Returns:
        (sizeX, sizeY) array of dtype ``theano.config.floatX``.
    """
    if rng is None:
        rng = global_rng

    sizeX = int(sizeX)
    sizeY = int(sizeY)
    if sparsity < 0:
        sparsity = sizeY
    else:
        # Cap at the row width.  (The original clamped a second time
        # after the if/else; that duplicate was a no-op and is removed.)
        sparsity = numpy.minimum(sizeY, sparsity)
    values = numpy.zeros((sizeX, sizeY), dtype=theano.config.floatX)
    for dx in xrange(sizeX):
        perm = rng.permutation(sizeY)
        new_vals = rng.normal(loc=0, scale=scale, size=(sparsity,))
        values[dx, perm[:sparsity]] = new_vals
    return values.astype(theano.config.floatX)

Example 38

Project: Attentive_reader
Source File: __init__.py
View license
def sample_weights_orth(sizeX, sparsity, scale=1.0, rng=None):
    """Orthogonal weight initialization.

    Draws a (sizeX, sizeX) Gaussian matrix and orthogonalizes it via SVD
    (values = U . V^T).  ``sparsity`` is normalized but not otherwise used
    by the orthogonalization.

    NOTE(review): unlike the classic initializer, a ``None`` rng is not
    replaced by a global fallback here and would raise -- confirm intended.
    """
    sizeX = int(sizeX)
    sizeY = sizeX

    assert sizeX == sizeY, 'for orthogonal init, sizeX == sizeY'

    if sparsity < 0:
        sparsity = sizeY
    else:
        sparsity = numpy.minimum(sizeY, sparsity)

    # The zero matrix previously allocated here was dead code: it was
    # immediately overwritten by the Gaussian draw below.
    values = rng.normal(loc=0, scale=scale, size=(sizeX, sizeY))
    u, s, v = numpy.linalg.svd(values)
    values = u.dot(v.T)
    return values.astype(theano.config.floatX)

Example 39

Project: lifelines
Source File: generate_datasets.py
View license
def right_censor_lifetimes(lifetimes, max_, min_=0):
    """
    Right censor the deaths, uniformly
      lifetimes: (n,) array of positive random variables
      max_: the max time a censorship can occur
      min_: the min time a censorship can occur

    Returns
      The actual observations including uniform right censoring, and
      D_i (observed death or did not)

    I think this is deprecated
    """
    n = lifetimes.shape[0]
    # One uniform censoring time per subject on [min_, max_).
    censor_times = min_ + (max_ - min_) * random.rand(n)
    observations = np.minimum(censor_times, lifetimes)
    # True exactly where the death happened before the censoring time.
    return observations, lifetimes == observations

Example 40

Project: pygaarst
Source File: geomutils.py
View license
def _getpolybounds(arrayshape, polygon):
    """Returns bounds of shapely polygon or array, as int in pixel"""
    jmin, imin, jmax, imax = polygon.bounds
    imin = int(polygon.bounds[1])
    jmin = int(polygon.bounds[0])
    imax = int(np.minimum(polygon.bounds[3], arrayshape[0]))
    jmax = int(np.minimum(polygon.bounds[2], arrayshape[1]))
    return imin, jmin, imax, jmax

Example 41

Project: refinery
Source File: MergePairSelector.py
View license
  def select_merge_components(self, hmodel, SS, MTracker, kA=None, 
                                mergename='marglik', randstate=np.random):
    """Pick a pair of component indices (kA, kB) to merge.

    Strategy is chosen by ``mergename`` ('random' or 'marglik'); the pair
    is ordered so kA < kB and validated against MTracker before return.
    """
    if mergename == 'random':
      kA, kB = self._drawPair_random(MTracker, kA=kA, randstate=randstate)
    elif mergename == 'marglik':
      kA, kB = self._drawPair_marglik(hmodel, SS, 
                                      MTracker, kA=kA, randstate=randstate)
    else:
      raise NotImplementedError("Unknown mergename %s" % (mergename))
    # Ensure always that kA < kB always
    kMin = np.minimum(kA,kB)
    kB  = np.maximum(kA,kB)
    kA = kMin
    MTracker.verifyPair(kA, kB)
    return kA, kB

Example 42

View license
  def test_truth_is_minimum_of_objfunc(self, nTrial=100):
    """The true beta (as v) should (near-)minimize the objective.

    Perturbs the optimum nTrial times and requires the objective to be
    larger in at least 98% of trials.  (Python 2 print statements.)
    """
    vopt = GSO.beta2v(self.truebeta)
    objfunc = lambda v: GSO.neglogp(v, self.G, self.logPiMat, self.alpha0, self.gamma)
    success = 0
    fopt = objfunc(vopt)
    print vopt, fopt, '**'

    for trial in range(nTrial):
      v = vopt + 0.01 * self.PRNG.rand(self.K)
      # Keep stick-breaking fractions strictly below 1.
      v = np.minimum(v, 1.0 - 1e-8)
      fv = objfunc(v)
      if fopt < fv:
        success += 1
      else:
        print v, fv
    assert success > 0.98 * nTrial

Example 43

Project: refinery
Source File: TestHDPBetaOptimizer.py
View license
  def test_truth_is_minimum_of_objfunc(self, nTrial=2):
    """The true beta (as v) should (near-)minimize the objective.

    Perturbs the optimum nTrial times and requires the objective to be
    larger in at least 98% of trials.  (Python 2 print statements.)
    """
    vopt = HBO.beta2v(self.truebeta)
    objfunc = lambda v: HBO.objectiveFunc(v, self.alpha, self.gamma, self.G, self.sumLogPi)
    success = 0
    fopt = objfunc(vopt)
    print vopt, fopt, '**'

    for trial in range(nTrial):
      v = vopt + 0.01 * self.PRNG.rand(self.K)
      # Keep stick-breaking fractions strictly below 1.
      v = np.minimum(v, 1.0 - 1e-8)
      fv = objfunc(v)
      if fopt < fv:
        success += 1
      else:
        print v, fv
    assert success > 0.98 * nTrial

Example 44

Project: dask
Source File: chunk.py
View license
def topk(k, x):
    """ Top k elements of an array

    >>> topk(2, np.array([5, 1, 3, 6]))
    array([6, 5])

    Returns the k largest values in descending order.  k is capped at
    len(x); k <= 0 yields an empty array (previously ``[-0:]`` sliced the
    WHOLE array, so k == 0 returned everything).
    """
    # http://stackoverflow.com/a/23734295/616616 by larsmans
    k = int(np.minimum(k, len(x)))
    if k <= 0:
        return x[:0]
    # argpartition leaves the k largest in the last k slots (unordered).
    ind = np.argpartition(x, -k)[-k:]
    return np.sort(x[ind])[::-1]

Example 45

Project: datajoint-python
Source File: schema.py
View license
    def _make_tuples(self, key):
        """
        populate with random data

        Inserts one row with a random exponential duration (capped at 2)
        plus two channels of Gaussian voltage samples.  Seeding the stdlib
        RNG from the key makes the duration deterministic per key.
        """
        random.seed(str(key))
        row = dict(key,
                   sampling_frequency=6000,
                   duration=np.minimum(2, random.expovariate(1)))
        self.insert1(row)
        # + 0.5 rounds to the nearest whole sample count.
        number_samples = int(row['duration'] * row['sampling_frequency'] + 0.5)
        sub = self.Channel()
        sub.insert(
            dict(key,
                 channel=channel,
                 voltage=np.float32(np.random.randn(number_samples)))
            for channel in range(2))

Example 46

Project: textplot
Source File: text.py
View license
    def score_intersect(self, term1, term2, **kwargs):

        """
        Compute the geometric area of the overlap between the kernel density
        estimates of two terms.

        Args:
            term1 (str)
            term2 (str)

        Returns: float
        """

        t1_kde = self.kde(term1, **kwargs)
        t2_kde = self.kde(term2, **kwargs)

        # Integrate the overlap.
        # Element-wise min of the two densities, then trapezoidal rule.
        overlap = np.minimum(t1_kde, t2_kde)
        return np.trapz(overlap)

Example 47

Project: tract_querier
Source File: aabb.py
View license
    def union(self, bounding_box):
        """Return the smallest BoundingBox containing self and the argument.

        Accepts either a BoundingBox or anything its constructor accepts.
        Layout: elements [:3] are the minima, [3:] the maxima --
        NOTE(review): confirm against the BoundingBox definition.
        """
        if isinstance(bounding_box, BoundingBox):
            _bounding_box = bounding_box
        else:
            _bounding_box = BoundingBox(bounding_box)
        return BoundingBox(np.r_[
            np.minimum(self[:3], _bounding_box[:3]),
            np.maximum(self[3:], _bounding_box[3:]),
        ])

Example 48

Project: py-sdm
Source File: np_divs.py
View license
def bhattacharyya(Ks, dim, rhos, required, clamp=True):
    r'''
    Estimate the Bhattacharyya coefficient between distributions, based on kNN
    distances:  \int \sqrt{p q}

    If clamp (the default), enforces 0 <= BC <= 1.

    Returns an array of shape (num_Ks,).
    '''
    est, = required
    if clamp:
        # Enforce BOTH bounds promised by the docstring; the original only
        # applied the upper clamp, letting negative estimates through.
        est = np.clip(est, 0, 1)  # 0 <= BC <= 1
    return est

Example 49

Project: skl-groups
Source File: knn.py
View license
def bhattacharyya(Ks, dim, required, clamp=True, to_self=False):
    r'''
    Estimate the Bhattacharyya coefficient between distributions, based on kNN
    distances:  \int \sqrt{p q}

    If clamp (the default), enforces 0 <= BC <= 1.

    Returns an array of shape (num_Ks,).
    '''
    est = required
    if clamp:
        # Enforce BOTH bounds promised by the docstring; the original only
        # applied the upper clamp, letting negative estimates through.
        est = np.clip(est, 0, 1)  # 0 <= BC <= 1
    return est

Example 50

Project: audfprint
Source File: hash_table.py
View license
    def save(self, name, params=None):
        """ Save hash table to file <name>,
            including optional addition params

        Pickles the whole table gzip-compressed, clears the dirty flag,
        and prints a summary including the proportion of hashes dropped
        because buckets exceeded their capacity.
        """
        # Merge in any provided params
        if params:
            for key in params:
                self.params[key] = params[key]
        with gzip.open(name, 'wb') as f:
            pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
        self.dirty = False
        nhashes = sum(self.counts)
        print("Saved fprints for", sum(n is not None for n in self.names), 
              "files (", nhashes, "hashes) to", name)
        # Report the proportion of dropped hashes (overfull table)
        # Each bucket retains at most ``self.depth`` entries; anything
        # beyond that per-bucket count was dropped.
        dropped = nhashes - sum(np.minimum(self.depth, self.counts))
        print("Dropped hashes=", dropped, "(%.2f%%)" % (
            100.0*dropped/max(1, nhashes)))