numpy.copy

Here are examples of the Python API numpy.copy, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

171 Examples
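
Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what numpy.copy does: it allocates a new array with its own data buffer, so changes to the copy do not affect the original, unlike plain assignment or a view.

import numpy as np

a = np.array([1, 2, 3])
b = a            # plain assignment: b is the same object as a
v = a[:]         # basic slicing: v is a view sharing a's buffer
c = np.copy(a)   # np.copy: c owns an independent buffer

c[0] = 99        # modifies only the copy
v[1] = 77        # modifies a through the view

print(a)         # [ 1 77  3]
print(c)         # [99  2  3]

Note that numpy.copy is a shallow copy: for object arrays the referenced Python objects themselves are not duplicated, which is irrelevant for the numeric arrays used in the examples below.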

Example 1

Project: random_walker Source File: test_random_walker.py
def test_3d_inactive():
    n=30
    lx, ly, lz = n, n, n
    data, labels = make_3d_syntheticdata(lx, ly, lz)
    old_labels = np.copy(labels)
    labels[5:25, 26:29, 26:29] = -1
    after_labels = np.copy(labels)
    labels = random_walker(data, labels)
    assert (labels.reshape(data.shape)[13:17,13:17,13:17] == 2).all()
    return data, labels, old_labels, after_labels

Example 2

Project: stanford-ctc Source File: dataLoader.py
    def loadDataFileDict(self,filenum):
        """
        Loads a data file but stores input frames in a dictionary keyed by utterance
        Each input dictionary entry is a 2-D matrix of length equal to that utterance
        Other variables returned are the same as the original loader
        """
        data_mat, alis, keys, sizes = self.loadDataFile(filenum)
        data_dict = {}
        startInd = 0
        for k,s in izip(keys,sizes):
            endInd = startInd + s
            data_dict[k] = np.copy(data_mat[:,startInd:endInd])
            startInd = endInd

        # startInd = all frames means we loaded all data
        assert startInd == data_mat.shape[1]

        return data_dict, alis, keys, sizes

Example 3

Project: info-flow-experiments Source File: permutation_test.py
Function: get_perm
def get_perm(ylabel):								
    """
    Generate a permutation for block_p_test.
    """
    blocks = ylabel.shape[0]
    yret = np.copy(ylabel)
    for i in range(0,blocks):
        random.shuffle(yret[i])
    return yret

Example 4

Project: char-rbm Source File: Utils.py
Function: soft_max
def softmax(X, copy=True):
    if copy:
        X = np.copy(X)
    X_shape = X.shape
    a, b, c = X_shape
    # Exponentiating large values would overflow, so the largest value
    # in each row is subtracted from the data first.
    max_prob = np.max(X, axis=2).reshape((X.shape[0], X.shape[1], 1))
    X -= max_prob
    np.exp(X, X)
    sum_prob = np.sum(X, axis=2).reshape((X.shape[0], X.shape[1], 1))
    X /= sum_prob
    return X

Example 5

Project: binglide Source File: painters.py
@Renderer.register_painter(None)
def paint_ascii(colored, data, mixers, coefs):
    ndata = np.copy(data)

    ndata[data > 0x7e] = 0
    ndata[data < 0x20] = 0
    ndata[data > 0] = 0x255

    for mixer in mixers:
        mixer(colored, ndata, coefs)

    return colored

Example 6

Project: bayespy Source File: poisson.py
Function: compute_message_to_parent
    def compute_message_to_parent(self, parent, index, u, u_lambda):
        """
        Compute the message to a parent node.
        """
        if index == 0:
            m0 = -1
            m1 = np.copy(u[0])
            return [m0, m1]
        else:
            raise ValueError("Index out of bounds")

Example 7

Project: plyades Source File: core.py
Function: propagate
    def propagate(self, dt=1*units.year, time_unit=units.s, interpolate=100, **kwargs):
        tout = [0.0]
        yout = [np.copy(self)]
        p = Propagator(self, dt.to(time_unit).value, **kwargs)
        p.forces = list(self._forces)
        p.forces.append(self._gravity)
        for t, y in p:
            tout.append(t)
            yout.append(y)
        tout = np.array(tout)*time_unit
        epochs = self.t + TimeDelta(tout.to(units.s), format='sec')
        yout = np.array(yout).T
        return Orbit(deepcopy(self), tout, epochs, yout, interpolate=interpolate, **kwargs)

Example 8

Project: tfdeploy Source File: tfdeploy.py
Function: squeeze
@Operation.factory(attrs=("squeeze_dims",))
def Squeeze(a, squeeze_dims):
    """
    Squeeze op, i.e. removes singular axes.
    """
    if not squeeze_dims:
        squeeze_dims = list(range(len(a.shape)))
    slices = [(0 if (dim == 1 and i in squeeze_dims) else slice(None)) \
              for i, dim in enumerate(a.shape)]
    return np.copy(a)[slices],

Example 9

Project: deeppy Source File: scalers.py
Function: transform
    def transform(self, x, copy=True):
        if copy:
            x = np.copy(x)
        x -= self._x_mean
        x *= self.std / self._x_std
        x += self.mean
        return x

Example 10

Project: omnivore Source File: editor.py
    def use_self_as_baseline(self, doc=None):
        if doc is None:
            doc = self.docuement
        bytes = np.copy(doc.bytes)
        print "SELF AS BASELINE!!!!!", bytes
        doc.init_baseline(doc.metadata, bytes)
        if doc == self.docuement:
            self.baseline_present = doc.has_baseline
            self.diff_highlight = self.baseline_present

Example 11

Project: APGL Source File: VertexList.py
Function: copy
    def copy(self):
        """
        Returns a copy of this object. 
        """
        vList = VertexList(self.V.shape[0], self.V.shape[1])
        vList.setVertices(numpy.copy(self.V))
        return vList

Example 12

Project: seasonal Source File: trend.py
Function: mean_filter
def mean_filter(data, window):
    """Apply a windowed mean filter to the data.

    This implementation leaves partial windows at the ends untouched

    """
    filtered = np.copy(data)
    csum = np.concatenate(([0], np.cumsum(data)))
    half = window // 2
    filtered[half : -half] = (csum[window:] - csum[:-window]) / window
    return filtered

Example 13

Project: PyParticles Source File: measure.py
Function: init
    def __init__( self , pset=None , force=None , subset=None , model="part_by_part" ):
        """
        Constructor:
          
        :param pset:    The particles set
        :param force:   The model of the used force
        :param subset:  a numpy 1D array containing the indices of the measured particles
        :param model:   a string describing the model for the measure: "part_by_part" or "subsystem"
          
        """
        self.__subset = np.copy(subset)
        
        self.__model = None
        
        self.model = model
        
        super( MeasureParticles , self ).__init__( pset=pset , force=force )

Example 14

Project: sklearn-evaluation Source File: metrics.py
def __precision(y_true, y_pred):
    '''
        Precision metric tolerant to unlabeled data in y_true,
        NA values are ignored for the precision calculation
    '''
    # make copies of the arrays to avoid modifying the original ones
    y_true = np.copy(y_true)
    y_pred = np.copy(y_pred)

    # precision = tp/(tp+fp)
    # True negatives do not affect the precision value, so for every missing
    # value in y_true, replace it with 0 and also replace the value
    # in y_pred with 0
    is_nan = np.isnan(y_true)
    y_true[is_nan] = 0
    y_pred[is_nan] = 0
    precision = precision_score(y_true, y_pred)
    return precision

Example 15

Project: gensim Source File: ldaseqmodel.py
Function: update_gamma
    def update_gamma(self):
        """
        Update variational Dirichlet parameters as described in the original Blei LDA paper:
        gamma = alpha + sum(phi), over every topic for every word.
        """
        self.gamma = numpy.copy(self.lda.alpha)
        n = 0 # keep track of number of iterations for phi, log_phi
        for word_id, count in self.doc:
            phi_row = self.phi[n]
            for k in range(0, self.lda.num_topics):
                self.gamma[k] += phi_row[k] * count
            n += 1

        return self.gamma

Example 16

Project: TADbit Source File: mmp_score_NAR.py
def randomize_matrix(data, savefig=None):
    size = len(data)
    rand_data = copy(data)
    for d in xrange(size):
        diag = zip(*[range(d, size), range(size - d)])
        rdiag = diag[:]
        np_shuffle(rdiag)
        for v in xrange(len(diag)):
            val = data[diag[v][0]][diag[v][1]]
            a, b = rdiag[v][0], rdiag[v][1]
            rand_data[b][a] = rand_data[a][b] = val
    if savefig:
        plt.subplot(211)
        plt.imshow(log2(data), interpolation='none')
        plt.subplot(212)
        plt.imshow(log2(rand_data), interpolation='none')
        plt.savefig(savefig, format='pdf')
        plt.close('all')
    return rand_data

Example 17

Project: rpigl Source File: transforms.py
Function: rotation
def rotation(angle, axis="z"):
    """Create a transformation matrix that represents a rotation (in radians) along the given axis."""
    c = math.cos(angle)
    s = math.sin(angle)
    i, j = _axes[axis]
    result = numpy.copy(identity)
    result[i,i] = c
    result[i,j] = -s
    result[j,i] = s
    result[j,j] = c

    return result

Example 18

Project: adagio Source File: eval.py
Function: compute_auc
def compute_auc(roc, b = 1):
    """Compute AUC value from ROC curve (for bounded fp-rate)"""
    r = np.copy(roc)
    idx = np.nonzero(r[1,:] < b)[0]

    a = 1    
    # bound AUC at b
    if b < 1:
        j = np.max(idx)
        m = (r[0,j+1] - r[0,j]) / (r[1,j+1] - r[1,j])
        n = r[0,j+1]
        a = b * m + n

    r = r[:,idx]
    r = np.append(r, [[a],[b]], 1)

    auc = (np.diff(r[1,:]) * r[0,:-1]).sum()
    auc = auc / b
    return auc

Example 19

Project: pymc3 Source File: metropolis.py
    def astep_prop(self, q0, logp):
        dimcats = self.dimcats
        if self.shuffle_dims:
            nr.shuffle(dimcats)

        q = np.copy(q0)
        logp_curr = logp(q)

        for dim, k in dimcats:
            logp_curr = self.metropolis_proportional(q, logp, logp_curr, dim, k)

        return q

Example 20

Project: lhcb_trigger_ml Source File: reports.py
Function: get_stages
    def _get_stages(self, stages):
        result = OrderedDict()
        if stages is None:
            for name, preds in self.predictions.items():
                result[name] = pandas.Series(data=[preds], index=['result'])
        else:
            stages = set(stages)
            for name, stage_preds in self._get_staged_proba().items():
                result[name] = pandas.Series()
                for stage, pred in enumerate(stage_preds):
                    if stage not in stages:
                        continue
                    result[name].loc[stage] = numpy.copy(pred)
        return result

Example 21

Project: poclbm Source File: sha256.py
Function: sha256
def sha256(state, data):
	digest = np.copy(state)
	for i in xrange(64):
		if i > 15:
			data[i] = R(data[i-2], data[i-7], data[i-15], data[i-16])
		(digest[~(i-4)&7], digest[~(i-8)&7]) = sharound(digest[(~(i-1)&7)],digest[~(i-2)&7],digest[~(i-3)&7],digest[~(i-4)&7],digest[~(i-5)&7],digest[~(i-6)&7],digest[~(i-7)&7],digest[~(i-8)&7],data[i],K[i])
	return np.add(digest, state)

Example 22

Project: async-deep-rl Source File: emulator.py
Function: get_initial_state
    def get_initial_state(self):
        """ Get the initial state """
        self.new_game()
        for step in xrange(NR_IMAGES):
            reward, new_screen_image_rgb = self.action_repeat(0)
            self.screen_images_processed[:, :, step] = self.process_frame_pool()
            self.show_screen(new_screen_image_rgb)
        if self.is_terminal():
            MAX_START_WAIT -= 1
            return self.get_initial_state()
        return np.copy(self.screen_images_processed) #get_reshaped_state()      

Example 23

Project: QuantEcon.py Source File: test_discrete_rv.py
    def test_Q_updates(self):
        "discrete_rv: Q attributes updates on q change?"
        Q_init = np.copy(self.drv.Q)

        # change q, see if Q updates
        x = np.random.rand(10)
        x /= x.sum()
        self.drv.q = x
        Q_after = self.drv.Q

        # should be different
        self.assertFalse(np.allclose(Q_init, Q_after))

        # clean up: reset values
        self.drv.q = self.x

        # now we should have our original Q back
        assert_allclose(Q_init, self.drv.Q)

Example 24

Project: seasonal Source File: trend.py
Function: median_filter
def median_filter(data, window):
    """Apply a median filter to the data.

    This implementation leaves partial windows at the ends untouched

    """
    filtered = np.copy(data)
    for i in range(window // 2, len(data) - window // 2):
        filtered[i] = np.median(data[max(0, i - window // 2) : i + window // 2 + 1])
    return filtered

Example 25

Project: Vincent-AI-Artist Source File: main.py
Function: grads
    def grads(self, x):
        assert self.loss_value is not None

        grad_values = np.copy(self.grad_values)
        self.loss_value  = None
        self.grad_values = None

        return grad_values

Example 26

Project: eulerian-audio-magnification Source File: utils.py
def amplify_pyramid(pyr, passband, fs, gain=5.0):
    tap = firwin(100, passband, nyq=(fs / 2.0), pass_zero=False)
    (_, num_freqs, levels) = pyr.shape
    amplified_pyr = np.copy(pyr)
    for i in xrange(num_freqs):
        for j in xrange(levels):
            amplitude = gain * filtfilt(tap, [1.0], np.abs(pyr[:, i, j]))
            theta = np.angle(pyr[:, i, j])
            amplified_pyr[:, i, j] += amplitude * np.exp(1.0j * theta)
    return amplified_pyr

Example 27

Project: hifive Source File: fend.py
Function: load
    def load(self):
        """
        Load fend data from h5dict specified at object creation.

        Any call of this function will overwrite current object data with values from the last :func:`save` call.

        :returns: None
        """
        fendfile = h5py.File(self.file, 'r')
        self.binned = None
        for key in fendfile.keys():
            self[key] = numpy.copy(fendfile[key])
        for key in fendfile['/'].attrs.keys():
            self[key] = fendfile['/'].attrs[key]
        fendfile.close()
        return None

Example 28

Project: char-rnn-tensorflow Source File: utils.py
Function: create_batches
    def create_batches(self):
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))

        # When the data (tensor) is too small, let's give them a better error message
        if self.num_batches==0:
            assert False, "Not enough data. Make seq_length and batch_size small."

        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)

Example 29

Project: gala Source File: test_potentials.py
def leapfrog_integrator(x,tmax,NT,Pot):
    deltat = tmax/NT
    h = deltat/100.
    t = 0.
    counter = 0
    X = np.copy(x)
    results = np.array([x])
    while(t<tmax):
        X[3:] += 0.5*h*Pot.tot_force(X[0],X[1],X[2])
        X[:3] += h*X[3:]
        X[3:] += 0.5*h*Pot.tot_force(X[0],X[1],X[2])
        # if(t==0.1):
        if(counter % 100 == 0):
            results=np.vstack((results,X))
        t+=h
        counter+=1
    return results

Example 30

Project: gensim Source File: ldaseqmodel.py
    def make_lda_seq_slice(self, lda, time):
        """
        set up the LDA model topic-word values with that of ldaseq.
        """
        for k in range(0, self.num_topics):
            lda.topics[:, k] = numpy.copy(self.topic_chains[k].e_log_prob[:, time])

        lda.alpha = numpy.copy(self.alphas)
        return lda

Example 31

Project: poem-bot Source File: utils.py
Function: create_batches
    def create_batches(self):
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))
        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)

Example 32

Project: bayespy Source File: expfamily.py
Function: get_parameters
    def get_parameters(self):
        r"""
        Return parameters of the VB distribution.

        The parameters should be such that they can be used for
        optimization, that is, use log transformation for positive
        parameters.
        """
        return [np.copy(p) for p in self.phi]

Example 33

Project: text2image Source File: alignDraw.py
Function: save_weights
    def save_weights(self, path, c_ts, read_attent_params, write_attent_params):
        weights_f = h5py.File(path, 'w')
        
        for i in xrange(len(self._params)):
            dset = weights_f.create_dataset(params_names[i], self._params[i].shape.eval(), dtype='f')
            dset[:] = np.copy(self._params[i].eval())

        weights_f.close()

Example 34

Project: tensorpack Source File: plot-point.py
def exponential_smooth(data, alpha):
    """ smooth data by alpha. returned a smoothed version"""
    ret = np.copy(data)
    now = data[0]
    for k in range(len(data)):
        ret[k] = now * alpha + data[k] * (1-alpha)
        now = ret[k]
    return ret

Example 35

Project: kaggle-heart Source File: utils_heart.py
def make_monotone_cdf(cdf):
    cdf_out = np.copy(cdf)
    for j in xrange(len(cdf_out) - 1):
        if cdf_out[j] > cdf_out[j + 1]:
            cdf_out[j + 1] = cdf_out[j]
    cdf_out = np.clip(cdf_out, 0., 1.)
    return cdf_out

Example 36

Project: rpigl Source File: transforms.py
def ortho2d(width, height):
    """Create a transformation matrix that maps [0,width]x[0,height] into [-1,1]x[-1,1].
    Useful for 2D games that want to see a conventional framebuffer coordinate system."""
    result = numpy.copy(_basic_ortho2d)
    result[0, 0] = 2.0/width
    result[1, 1] = -2.0/height
    return result

Example 37

Project: TADbit Source File: shrec.py
Function: adj
def adj(g):
    """
    Convert a directed graph to an adjacency matrix.
    >>> g = {1: {2: 3, 3: 8, 5: -4}, 2: {4: 1, 5: 7}, 3: {2: 4},
             4: {1: 2, 3: -5}, 5: {4: 6}}
    >>> adj(g)
    {1: {1: 0, 2: 3, 3: 8, 4: inf, 5: -4},
     2: {1: inf, 2: 0, 3: inf, 4: 1, 5: 7},
     3: {1: inf, 2: 4, 3: 0, 4: inf, 5: inf},
     4: {1: 2, 2: inf, 3: -5, 4: 0, 5: inf},
     5: {1: inf, 2: inf, 3: inf, 4: 6, 5: 0}}
    """
    vertices = range(len(g))

    dist = copy(g)  # copy g
    for i in vertices:
        dist[i][i] = 0.
    return dist

Example 38

Project: aeneas Source File: dtw.py
    def _compute_acm_in_place(self, cost_matrix):
        self.log(u"Computing the acm with the in-place algorithm...")
        n, m = cost_matrix.shape
        self.log([u"n m: %d %d", n, m])
        current_row = numpy.copy(cost_matrix[0, :])
        # COMMENTED cost_matrix[0][0] = current_row[0]
        for j in range(1, m):
            cost_matrix[0][j] = current_row[j] + cost_matrix[0][j - 1]
        for i in range(1, n):
            current_row = numpy.copy(cost_matrix[i, :])
            cost_matrix[i][0] = cost_matrix[i - 1][0] + current_row[0]
            for j in range(1, m):
                cost_matrix[i][j] = current_row[j] + min(
                    cost_matrix[i - 1][j],
                    cost_matrix[i][j - 1],
                    cost_matrix[i - 1][j - 1]
                )
        self.log(u"Computing the acm with the in-place algorithm... done")
        return cost_matrix

Example 39

Project: plyades Source File: propagator.py
Function: init
    def __init__(self, s0, dt, **kwargs):
        self.s0 = s0
        self.dt = dt
        self.forces = []
        self.params = {'body': s0.body, 'frame': s0.frame}
        self.solver = ode(self._rhs).set_integrator('dop853', nsteps=1, **kwargs)
        self.solver.set_initial_value(np.copy(s0), 0.0)
        self.solver.set_f_params(self.params)
        self.solver._integrator.iwork[2] = -1

Example 40

Project: pydmps Source File: dmp_discrete.py
    def gen_goal(self, y_des):
        """Generate the goal for path imitation.
        For rhythmic DMPs the goal is the average of the
        desired trajectory.

        y_des np.array: the desired trajectory to follow
        """

        return np.copy(y_des[:, -1])

Example 41

Project: word-rnn-tensorflow Source File: utils.py
Function: create_batches
    def create_batches(self):
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))
        if self.num_batches==0:
            assert False, "Not enough data. Make seq_length and batch_size small."

        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)

        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)

Example 42

Project: magic_init Source File: magic_init.py
Function: save
def save(net):
	import numpy as np
	r = {}
	for l,n in zip(net.layers, net._layer_names):
		if len(l.blobs) > 0:
			r[n] = [np.copy(b.data) for b in l.blobs]
	return r

Example 43

Project: folk-rnn Source File: data_iter.py
    def __iter_random_lens(self):
        available_idxs = np.copy(self.tune_idxs)
        while len(available_idxs) >= self.batch_size:
            rand_idx = self.rng.choice(range(len(available_idxs)), size=self.batch_size, replace=False)
            yield available_idxs[rand_idx]
            available_idxs = np.delete(available_idxs, rand_idx)

Example 44

Project: rcnn Source File: lstm_bptt.py
Function: create_batches
def create_batches(data_text, map_to_ids, batch_size):
    data_ids = map_to_ids(data_text)
    N = len(data_ids)
    L = ((N-1)/batch_size) * batch_size
    x = np.copy(data_ids[:L].reshape(batch_size,-1).T)
    y = np.copy(data_ids[1:L+1].reshape(batch_size,-1).T)
    return x, y

Example 45

Project: tfdeploy Source File: tfdeploy.py
Function: split
@Operation.factory(attrs=("num_split",))
def Split(dim, a, n):
    """
    Split op.
    """
    return tuple(np.split(np.copy(a), n, axis=dim))

Example 46

Project: chainer Source File: hinge.py
Function: forward_cpu
    def forward_cpu(self, inputs):
        x, t = inputs
        num = len(x)
        self.bottom_diff = numpy.copy(x)
        self.bottom_diff[numpy.arange(num), t] *= -1
        self.bottom_diff = numpy.maximum(0, 1 + self.bottom_diff)
        if self.norm == 'L1':
            loss = self.bottom_diff.sum() / num
        elif self.norm == 'L2':
            loss = (self.bottom_diff ** 2).sum() / num
        else:
            raise NotImplementedError()

        return numpy.array(loss, dtype=x.dtype),

Example 47

Project: grid-lstm-tensorflow Source File: utils.py
Function: create_batches
    def create_batches(self):
        self.num_batches = self.tensor.size / (self.batch_size * self.seq_length)
        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)

        validation_batches = int(self.num_batches * .2)
        self.val_batches = zip(self.x_batches[-validation_batches:], self.y_batches[-validation_batches:])
        self.x_batches = self.x_batches[:-validation_batches]
        self.y_batches = self.y_batches[:-validation_batches]
        self.num_batches -= validation_batches

Example 48

Project: image-analogies Source File: optimizer.py
Function: grads
    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

Example 49

Project: seagoatvision Source File: filterchain.py
Function: execute
    def execute(self, image):
        original_image = np.copy(image)
        # first image observator
        if self.original_image_observer:
            self.send_image(original_image, self.original_image_observer)

        try:
            for f in self.filters:
                if f.get_is_active():
                    f.set_original_image(original_image)
                    image = f.execute(image)

                lst_observer = self.image_observers.get(f.get_name(), [])
                if lst_observer:
                    self.send_image(image, lst_observer)
        except BaseException as e:
            msg = "(Exec exception Filter %s) %s" % (f.get_name(), e)
            log.printerror_stacktrace(logger, msg, check_duplicate=True)
        return image

Example 50

Project: cardoon Source File: integration.py
Function: init
    def init(self, h, q, dq = None):
        """
        Initialize for integration

        Set time step size to h, previous charge to q, and previous
        derivative to dq.
        """
        self.h = h
        self.a0 = 2. / h    
        self.qnm1 = np.copy(q)
        if dq is None:
            self.dqnm1 = np.zeros_like(q)
        else:
            self.dqnm1 = np.copy(dq)