numpy.random.randn

Here are examples of the Python API numpy.random.randn, collected from open source projects. Votes from readers indicate which examples are most useful and appropriate.
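
For context, numpy.random.randn draws samples from the standard normal distribution (mean 0, variance 1). Its positional arguments give the shape of the returned array, and with no arguments it returns a single Python float. A common idiom, visible in several of the examples below, is to rescale the output as sigma * randn(...) + mu to obtain a different mean and standard deviation. A minimal sketch of these conventions (the calls and values shown are illustrative only):

import numpy as np

np.random.randn()                   # single float drawn from N(0, 1)
np.random.randn(5)                  # 1-D array of 5 samples
np.random.randn(10, 3)              # 10x3 array of samples
3.0 + 2.5 * np.random.randn(100)    # samples with mean 3.0, std dev 2.5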

200 Examples

Example 1

Project: hedge
Source File: test_basics.py
View license
def test_affine_map():
    """Check that our cheapo geometry-targeted linear algebra actually works."""
    from hedge.tools import AffineMap
    for d in range(1, 5):
    #for d in [3]:
        for i in range(100):
            a = numpy.random.randn(d, d)+10*numpy.eye(d)
            b = numpy.random.randn(d)

            m = AffineMap(a, b)

            assert abs(m.jacobian() - la.det(a)) < 1e-10
            assert la.norm(m.inverted().matrix - la.inv(a)) < 1e-10*la.norm(a)

            x = numpy.random.randn(d)

            m_inv = m.inverted()

            assert la.norm(x-m_inv(m(x))) < 1e-10

Example 2

Project: NearPy
Source File: hash_storage_tests.py
View license
    def test_hash_memory_storage_pcadp(self):
        train_vectors = numpy.random.randn(10, 100)
        hash1 = PCADiscretizedProjections('testPCADPHash', 4, train_vectors, 0.1)

        self.memory.store_hash_configuration(hash1)

        hash2 = PCADiscretizedProjections(None, None, None, None)
        hash2.apply_config(self.memory.load_hash_configuration('testPCADPHash'))

        self.assertEqual(hash1.dim, hash2.dim)
        self.assertEqual(hash1.hash_name, hash2.hash_name)
        self.assertEqual(hash1.bin_width, hash2.bin_width)
        self.assertEqual(hash1.projection_count, hash2.projection_count)

        for i in range(hash1.components.shape[0]):
            for j in range(hash1.components.shape[1]):
                self.assertEqual(hash1.components[i, j], hash2.components[i, j])

Example 3

Project: pygp
Source File: demo_gpr.py
View license
def create_toy_data():
    #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
    xmin = 1
    xmax = 2.5*SP.pi
    x = SP.arange(xmin,xmax,0.7)
    
    C = 2       #offset
    sigma = 0.01
    
    b = 0
    
    y  = b*x + C + 1*SP.sin(x)
#    dy = b   +     1*SP.cos(x)
    y += sigma*random.randn(y.shape[0])
    
    y-= y.mean()
    
    x = x[:,SP.newaxis]
    return [x,y]

Example 4

Project: SnapSudoku
Source File: train.py
View license
    def __init__(self, sizes=None, cost=CrossEntropyCost, customValues=None):
        if not customValues:
            self.layers = len(sizes)
            self.sizes = sizes
            self.biases = [np.random.randn(x, 1) for x in sizes[1:]]
            self.wts = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]
        else:
            self.sizes, self.biases, self.wts = customValues
            self.layers = len(self.sizes)
        self.cost = cost

Example 5

Project: auto-sklearn
Source File: test_imputation.py
View license
    def test_imputation_shape(self):
        """Verify the shapes of the imputed matrix for different strategies."""
        X = np.random.randn(10, 2)
        X[::2] = np.nan

        for strategy in ['mean', 'median', 'most_frequent']:
            imputer = Imputer(strategy=strategy)
            X_imputed = imputer.fit_transform(X)
            assert_equal(X_imputed.shape, (10, 2))
            X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
            assert_equal(X_imputed.shape, (10, 2))

Example 6

Project: pysb
Source File: util.py
View license
def synthetic_data(model, tspan, obs_list=None, sigma=0.1):
    #from pysb.integrate import odesolve
    from pysb.integrate import Solver
    solver = Solver(model, tspan)
    solver.run()

    # Sample from a normal distribution with standard deviation sigma and mean 1
    # (randn generates a matrix of random numbers sampled from a normal
    # distribution with mean 0 and variance 1)
    # 
    # Note: This modifies yobs_view (the view on yobs) so that the changes 
    # are reflected in yobs (which is returned by the function). Since a new
    # Solver object is constructed for each function invocation this does not
    # cause problems in this case.
    solver.yobs_view *= ((numpy.random.randn(*solver.yobs_view.shape) * sigma) + 1)
    return solver.yobs

Example 7

Project: pystruct
Source File: random_tree_crf.py
View license
def make_random_trees(n_samples=50, n_nodes=100, n_states=7, n_features=10):
    crf = GraphCRF(inference_method='max-product', n_states=n_states,
                   n_features=n_features)
    weights = np.random.randn(crf.size_joint_feature)
    X, y = [], []
    for i in range(n_samples):
        distances = np.random.randn(n_nodes, n_nodes)
        features = np.random.randn(n_nodes, n_features)
        tree = minimum_spanning_tree(sparse.csr_matrix(distances))
        edges = np.c_[tree.nonzero()]
        X.append((features, edges))
        y.append(crf.inference(X[-1], weights))

    return X, y, weights

Example 8

Project: RLScore
Source File: test_kronecker_rls.py
View license
    def generate_data(self, poscount, negcount, dim, mean1, mean2):
        #Generates a standard binary classification data set,
        #with poscount+negcount instances. Data is normally
        #distributed, with mean1 for positive class,
        #mean2 for negative class and unit variance
        X_pos = np.random.randn(poscount, dim) + mean1
        X_neg = np.random.randn(negcount, dim) + mean2
        X = np.vstack((X_pos, X_neg))
        Y = np.vstack((np.ones((poscount, 1)), -1. * np.ones((negcount, 1))))
        perm = np.random.permutation(range(poscount + negcount))
        X = X[perm]
        Y = Y[perm]
        return X, Y

Example 9

Project: lfd
Source File: svds.py
View license
def test_svds():
    x_k33 = np.random.randn(1000,3,3)
    
    u1,s1,v1 = svds(x_k33)
    u2,s2,v2 = svds_slow(x_k33)
    assert np.allclose(u1,u2)
    assert np.allclose(s1,s2)
    assert np.allclose(v1,v2)

Example 10

Project: galry
Source File: vbosize.py
View license
        def __init__(self):
            super(TestWindow, self).__init__()
            data = .05 * np.array(rdn.randn(1, 2), dtype=np.float32)
            data = np.tile(data, (N, 1))
            data[-1,:] += .5
            self.widget = GLPlotWidget()
            self.widget.set_data(data)
            self.setGeometry(100, 100, self.widget.width, self.widget.height)
            self.setCentralWidget(self.widget)
            self.show()

Example 11

Project: scikit-learn
Source File: plot_lda.py
View license
def generate_data(n_samples, n_features):
    """Generate random blob-ish data with noisy features.

    This returns an array of input data with shape `(n_samples, n_features)`
    and an array of `n_samples` target labels.

    Only one feature contains discriminative information, the other features
    contain only noise.
    """
    X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])

    # add non-discriminative features
    if n_features > 1:
        X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
    return X, y

Example 12

Project: scipy
Source File: linalg.py
View license
    def setup(self, shape, contig, module):
        a = np.random.randn(*shape)
        if contig != 'contig':
            a = a[-1::-1,-1::-1]  # turn into a non-contiguous array
            assert_(not a.flags['CONTIGUOUS'])
        self.a = a

Example 13

Project: pstats-view
Source File: profile_pandas.py
View license
def naive_concat_dataframes():

    df1 = pd.DataFrame(
        np.random.randn(1000, 26),
        columns=[chr(ord('A') + i) for i in range(26)],
        index=range(1000),
    )

    df2 = pd.DataFrame(
        np.random.randn(1000, 26),
        columns=[chr(ord('A') + i) for i in range(26)],
        index=range(1000, 2000),
    )

    return pd.concat([df1, df2])

Example 14

Project: statsmodels
Source File: diffusion2.py
View license
    def simulate(self, l,m,nrepl):

        N = np.random.randn(nrepl,1)
        Y = N**2
        X = m + (.5*m*m/l)*Y - (.5*m/l)*np.sqrt(4*m*l*Y+m*m*(Y**2))
        U = np.random.rand(nrepl,1)

        ind = U>m/(X+m)
        X[ind] = m*m/X[ind]
        return X.ravel()

Example 15

Project: peregrine
Source File: sig_gen.py
View license
def add_noise(s, level):
    rem = len(s)
    noise = np.random.randn(16*1024*1024)*level
    noise = np.round(noise)
    noise = noise.astype(np.int8)
    i = 0
    while rem:
        n = min(len(noise), rem)
        s[i:i+n] += noise[:n]
        i += n
        rem -= n

Example 16

Project: jcvi
Source File: tsp.py
View license
def make_data(N, directed=False):
    x = np.random.randn(N)
    y = np.random.randn(N)
    xy = list(zip(x, y))  # materialize so the pairs can be indexed below
    M = np.zeros((N, N), dtype=float)
    for ia, ib in combinations(range(N), 2):
        ax, ay = xy[ia]
        bx, by = xy[ib]
        d = ((ax - bx) ** 2 + (ay - by) ** 2) ** .5
        M[ia, ib] = M[ib, ia] = d

    edges = []
    for ia, ib in combinations(range(N), 2):
        edges.append((ia, ib, M[ia, ib]))
        if directed:
            edges.append((ib, ia, M[ib, ia]))

    return x, y, M, edges

Example 17

Project: properscoring
Source File: test_utils.py
View license
    def test_argsort_indices(self):
        x = np.random.randn(5, 6, 7)
        for axis in [0, 1, 2, -1]:
            expected = np.sort(x, axis=axis)
            idx = argsort_indices(x, axis=axis)
            assert_allclose(expected, x[idx])

Example 18

Project: DSADD
Source File: test_checks.py
View license
def test_none_missing_raises():
    df = pd.DataFrame(np.random.randn(5, 3))
    df.iloc[0, 0] = np.nan
    with pytest.raises(AssertionError):
        ck.none_missing(df)

    with pytest.raises(AssertionError):
        dc.none_missing()(_add_one)(df)

Example 19

Project: engarde
Source File: test_checks.py
View license
def test_none_missing_raises():
    df = pd.DataFrame(np.random.randn(5, 3))
    df.iloc[0, 0] = np.nan
    with pytest.raises(AssertionError):
        ck.none_missing(df)

    with pytest.raises(AssertionError):
        dc.none_missing()(_add_n)(df, n=2)

Example 20

View license
def histogram_demo(ax):
    # example data
    mu = 100  # mean of distribution
    sigma = 15  # standard deviation of distribution
    x = mu + sigma * np.random.randn(10000)

    num_bins = 50

    # The histogram of the data.
    _, bins, _ = ax.hist(x, num_bins, normed=1, label='data')

    # Add a 'best fit' line.
    y = mlab.normpdf(bins, mu, sigma)
    ax.plot(bins, y, '-s', label='best fit')

    ax.legend()
    ax.set_xlabel('Smarts')
    ax.set_ylabel('Probability')
    ax.set_title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')

Example 21

Project: pyBAST
Source File: classes.py
View license
    def sample(self,n=1):
        """ Returns n samples from a Bgmap distribution.
        """

        stds = np.random.randn( len(self.mu), n )
        chol = cholesky(self.sigma)
        samps = self.mu + chol.dot(stds).T 

        if n == 1:
            samps = samps.flatten()

        return samps

Example 22

Project: deepdish
Source File: test_io.py
View license
    def test_softlinks_recursion_sns(self):
        if _sns:
            with tmp_filename() as fn:
                A = np.random.randn(3, 3)
                AA = 4
                s = SimpleNamespace(A=A, B=A, c=A, d=A, f=A,
                                    g=[A, A, A], AA=AA, h=AA)
                s.g.append(s)
                n = reconstruct(fn, s)
                assert n.g[0] is n.A
                assert (n.A is n.B is n.c is n.d is n.f is
                        n.g[0] is n.g[1] is n.g[2])
                assert n.g[3] is n
                assert n.AA == AA == n.h

Example 23

Project: smop
Source File: core.py
View license
def randn(*args,**kwargs):
    if not args:
        return np.random.randn()
    if len(args) == 1:
        args += args
    try:
        return np.random.randn(np.prod(args)).reshape(args,order="F")
    except:
        pass

Example 24

Project: blaze
Source File: test_numpy_compute.py
View license
def test_nelements_array():
    t = symbol('t', '5 * 4 * 3 * float64')
    x = np.random.randn(*t.shape)
    result = compute(t.nelements(axis=(0, 1)), x)
    np.testing.assert_array_equal(result, np.array([20, 20, 20]))

    result = compute(t.nelements(axis=1), x)
    np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))

Example 25

Project: resampy
Source File: test_core.py
View license
@pytest.mark.parametrize('axis', [0, 1, 2])
def test_shape(axis):
    sr_orig = 100
    sr_new = sr_orig // 2
    X = np.random.randn(sr_orig, sr_orig, sr_orig)
    Y = resampy.resample(X, sr_orig, sr_new, axis=axis)

    target_shape = list(X.shape)
    target_shape[axis] = target_shape[axis] * sr_new // sr_orig

    assert target_shape == list(Y.shape)

Example 26

Project: SparseLSH
Source File: lsh.py
View license
    def _generate_uniform_planes(self):
        """ Generate uniformly distributed hyperplanes and return it as a 2D
        numpy array.
        """
        dense_planes = np.random.randn(self.hash_size, self.input_dim)
        return sparse.csr_matrix(dense_planes)

Example 27

Project: eegtools
Source File: featex_test.py
View license
def test_window():
  X = np.random.randn(3, 50)
  W, ii = fe.windows([0, 10, 12, 49], [-2, 3], X)
  print(W, ii)

  np.testing.assert_almost_equal(ii, [10, 12])
  assert(W.shape[0] == 2)
  np.testing.assert_equal(W[0], X[:,8:13])
  np.testing.assert_equal(W[1], X[:,10:15])

Example 28

Project: lifelines
Source File: test_estimation.py
View license
    def test_large_dimensions_for_recursion_error(self):
        n = 500
        d = 50
        X = pd.DataFrame(np.random.randn(n, d))
        T = np.random.exponential(size=n)
        X['T'] = T
        aaf = AalenAdditiveFitter()
        aaf.fit(X, duration_col='T')

Example 29

Project: treelearn
Source File: test_randomized_tree.py
View license
def test_big_tree(n=1000, d = 50, max_thresholds=10):
    t = tree.RandomizedTree(max_thresholds=max_thresholds)
    x = np.random.randn(n,d)
    y = np.random.randint(0,2,n)
    t.fit(x,y)
    return t 

Example 30

Project: NAF-tensorflow
Source File: exploration.py
View license
  def add_noise(self, action, info={}):
    x = self.state
    dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
    self.state = x + dx

    return action + self.state

Example 31

Project: lda2vec
Source File: embed_mixture.py
View license
def _orthogonal_matrix(shape):
    # Stolen from blocks:
    # github.com/mila-udem/blocks/blob/master/blocks/initialization.py
    M1 = np.random.randn(shape[0], shape[0])
    M2 = np.random.randn(shape[1], shape[1])

    # QR decomposition of matrix with entries in N(0, 1) is random
    Q1, R1 = np.linalg.qr(M1)
    Q2, R2 = np.linalg.qr(M2)
    # Correct that NumPy doesn't force diagonal of R to be non-negative
    Q1 = Q1 * np.sign(np.diag(R1))
    Q2 = Q2 * np.sign(np.diag(R2))

    n_min = min(shape[0], shape[1])
    return np.dot(Q1[:, :n_min], Q2[:n_min, :])

Example 32

Project: cudamat
Source File: test_cudamat.py
View license
def test_sigmoid():
    m = 256
    n = 128
    a = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')
    b = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')

    c = 1. / (1. + np.exp(-a))

    m1 = cm.CUDAMatrix(a)
    m2 = cm.CUDAMatrix(b)
    m1.apply_sigmoid(target = m2)
    m1.apply_sigmoid()

    m1.copy_to_host()
    m2.copy_to_host()

    assert np.max(np.abs(c - m1.numpy_array)) < 10**-4, "Error in CUDAMatrix.apply_sigmoid exceeded threshold"
    assert np.max(np.abs(c - m2.numpy_array)) < 10**-4, "Error in CUDAMatrix.apply_sigmoid exceeded threshold"

Example 33

View license
  def setUp(self):
    X = np.random.randn(100, 3)
    self.Data = XData(X=X)
    aPDict = dict(alpha0=1.0)
    oPDict = dict(min_covar=1e-9)
    self.hmodel = HModel.CreateEntireModel('EM', 'MixModel', 'ZMGauss', aPDict, oPDict, self.Data)

Example 34

Project: datajoint-python
Source File: test_relation.py
View license
    def test_blob_insert(self):
        """Tests inserting and retrieving blobs."""
        X = np.random.randn(20, 10)
        self.img.insert1((1, X))
        Y = self.img.fetch()[0]['img']
        assert_true(np.all(X == Y), 'Inserted and retrieved image are not identical')

Example 35

Project: george
Source File: fit_gp.py
View license
def fit_gp(fn):
    # Load the data and set up the model.
    model = GPModel(fn)

    # Initialize the walkers.
    v = model.vector
    ndim, nwalkers = len(v), 32
    p0 = [v + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, model)

    print("Running burn-in...")
    p0, _, _ = sampler.run_mcmc(p0, 1000)
    sampler.reset()

    print("Running production chain...")
    sampler.run_mcmc(p0, 4000)

    with open(os.path.splitext(fn)[0] + "-gp.pkl", "wb") as f:
        pickle.dump((model, sampler), f, -1)

Example 36

Project: Neural-Photo-Editor
Source File: NPE.py
View license
def sample():
    global  Z, output,RECON,IM,ERROR,SAMPLE_FLAG
    Z = np.random.randn(Z.shape[0],Z.shape[1])
    # Z = np.random.uniform(low=-1.0,high=1.0,size=(Z.shape[0],Z.shape[1])) # Optionally get uniform sample
    
    # Update reconstruction and error
    RECON = np.uint8(from_tanh(model.sample_at(np.float32([Z.flatten()]))[0]))
    ERROR = to_tanh(np.float32(IM)) - to_tanh(np.float32(RECON))
    update_canvas(w)
    SAMPLE_FLAG=1
    update_photo(None,output)

Example 37

Project: imdescrip
Source File: ScSPM.py
View license
    def __init__ (self, maxdim=320, psize=16, pstride=8, active=10, dsize=1024,
                    levels=(1,2,4), compress_dim=None):

        self.maxdim = maxdim
        self.psize = psize
        self.pstride = pstride 
        self.active = active
        self.levels = levels
        self.dsize = dsize
        self.compress_dim = compress_dim
        self.dic = None       # Sparse code dictionary (D)
        
        if self.compress_dim is not None:
            D = np.sum(np.array(levels)**2) * self.dsize
            self.rmat = np.random.randn(D, self.compress_dim)
            self.rmat = self.rmat / np.sqrt((self.rmat**2).sum(axis=0))
        else:
            self.rmat = None

Example 38

Project: learning-python
Source File: cprof_example.py
View license
def run_experiment(niter=100):
    K = 100
    results = []
    for _ in range(niter):
        mat = np.random.randn(K, K)
        max_eigenvalue = np.abs(eigvals(mat)).max()
        results.append(max_eigenvalue)
    return results

Example 39

View license
  def __init__(self, num_visible, num_hidden, learning_rate = 0.1):
    self.num_hidden = num_hidden
    self.num_visible = num_visible
    self.learning_rate = learning_rate

    # Initialize a weight matrix, of dimensions (num_visible x num_hidden), using
    # a Gaussian distribution with mean 0 and standard deviation 0.1.
    self.weights = 0.1 * np.random.randn(self.num_visible, self.num_hidden)    
    # Insert weights for the bias units into the first row and first column.
    self.weights = np.insert(self.weights, 0, 0, axis = 0)
    self.weights = np.insert(self.weights, 0, 0, axis = 1)

Example 40

Project: seya
Source File: test_conv_rnn.py
View license
    def test_conv_rnn(self):
        """Just check that the ConvRNN layer can compile and run"""
        nb_samples, timesteps, ndim, filter_dim = 5, 10, 28, 3
        input_flat = ndim ** 2
        layer = ConvRNN(filter_dim=(1, filter_dim, filter_dim),
                        reshape_dim=(1, ndim, ndim),
                        input_shape=(timesteps, input_flat),
                        return_sequences=True)
        model = Sequential()
        model.add(layer)
        model.add(TimeDistributedDense(10))
        model.compile('sgd', 'mse')

        x = np.random.randn(nb_samples, timesteps, input_flat)
        y = model.predict(x)
        assert y.shape == (nb_samples, timesteps, 10)

Example 41

Project: distarray
Source File: create_volume.py
View license
def local_add_random(volume_la):
    ''' Add randomness to the local array data. '''

    def add_random(vol, R):
        ''' Add randomness to the volume. '''
        shape = vol.shape
        rnd = numpy.random.randn(*shape)
        vol[:, :, :] += R * rnd

    vol = volume_la.ndarray
    add_random(vol, R=2.0)
    return volume_la

Example 42

Project: minirank
Source File: test.py
View license
def test_logistic():
    n_samples, n_features = 10, 10
    X = np.random.randn(n_samples, n_features)
    y = np.arange(n_samples)
    w_, theta_ = logistic.ordinal_logistic_fit(X, y)
    pred = logistic.ordinal_logistic_predict(w_, theta_, X)
    assert np.all(pred == y)

Example 43

Project: EDeN
Source File: hasher.py
View license
    def __init__(self, r=0.1, num_functions=50, dimensionality=128):
        self.r = r
        self.num_functions = num_functions
        self.dimensionality = dimensionality
        self.A = np.random.randn(dimensionality, num_functions)
        self.B = r * np.random.random_sample((1, num_functions))

Example 44

Project: pycortex
Source File: braindata.py
View license
    @classmethod
    def random(cls, subject, **kwargs):
        try:
            left, right = db.get_surf(subject, "wm")
        except IOError:
            left, right = db.get_surf(subject, "fiducial")
        nverts = len(left[0]) + len(right[0])
        return cls(np.random.randn(nverts), subject, **kwargs)

Example 45

Project: GPflow
Source File: test_kerns.py
View license
    def setUp(self):
        tf.reset_default_graph()
        self.k1 = GPflow.kernels.Matern32(2)
        self.k2 = GPflow.kernels.Matern52(2, lengthscales=0.3)
        self.k3 = self.k1 * self.k2
        self.x_free = tf.placeholder(tf.float64)
        self.X = tf.placeholder(tf.float64, [30, 2])
        self.X_data = np.random.randn(30, 2)

Example 46

View license
    def test_rand_matrix(self):
        for ell in [2, 10, 100, 199]:
            mat_a = np.random.randn(1000, 100)
            mat_b = sketch(mat_a, ell)
            print('error vs upper-bound:', calculateError(mat_a, mat_b), 'vs', 2 * squaredFrobeniusNorm(mat_a) / ell)
            self.assertGreaterEqual(2 * squaredFrobeniusNorm(mat_a) / ell, calculateError(mat_a, mat_b))

Example 47

Project: pgmult
Source File: particle_lds.py
View license
    def sample_predictions(self, Tpred, Npred, obs_noise=True):
        A, sigma_states, z = self.A, self.sigma_states, self.stateseq
        randseq = np.einsum(
            'tjn,ij->tin',
            np.random.randn(Tpred-1, self.n, Npred),
            np.linalg.cholesky(self.sigma_states))

        states = np.empty((Tpred, self.n, Npred))
        states[0] = np.random.multivariate_normal(A.dot(z[-1]), sigma_states, size=Npred).T
        for t in range(1, Tpred):
            states[t] = self.A.dot(states[t-1]) + randseq[t-1]

        return states

Example 48

Project: neupy
Source File: test_hinton.py
View license
    @skip_image_comparison_if_specified
    def test_simple_hinton(self):
        original_image_name = format_image_name("simple_hinton.png")
        original_image = os.path.join(IMGDIR, original_image_name)

        with image_comparison(original_image, figsize=(10, 6)) as fig:
            weight = np.random.randn(20, 20)
            ax = fig.add_subplot(1, 1, 1)
            plt.sca(ax)  # To test the case when ax=None
            plots.hinton(weight, add_legend=True)

Example 49

Project: LSHash
Source File: lshash.py
View license
    def _generate_uniform_planes(self):
        """ Generate uniformly distributed hyperplanes and return it as a 2D
        numpy array.
        """

        return np.random.randn(self.hash_size, self.input_dim)

Example 50

Project: phy
Source File: test_views.py
View license
def test_scatter_view(qtbot, gui):
    n = 1000
    v = ScatterView(coords=lambda c: [Bunch(x=np.random.randn(n),
                                            y=np.random.randn(n),
                                            spike_ids=np.arange(n),
                                            spike_clusters=np.ones(n).
                                            astype(np.int32) * c[0],
                                            )] if 2 not in c else None,
                    # data_bounds=[-3, -3, 3, 3],
                    )
    v.attach(gui)

    _select_clusters(gui)

    # qtbot.stop()
    gui.close()