numpy.add

Here are examples of the Python API numpy.add, collected from open source projects. By voting up you can indicate which examples are most useful and appropriate.
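
For context before the examples: numpy.add is the ufunc behind the + operator on arrays. It adds its arguments elementwise, broadcasting scalars and compatible shapes, and like any ufunc it accepts an output array. A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([10, 20, 30])

print(np.add(a, b))    # [11 22 33], same as a + b
print(np.add(a, 5))    # [ 6  7  8], the scalar is broadcast

out = np.empty(3, dtype=a.dtype)
np.add(a, b, out=out)  # writes the sum into out, avoiding a new allocation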

83 Examples


Example 1

Project: tvb-library Source File: common_test.py
    @unittest.skipIf(not hasattr(numpy.add, 'at'),
                     'Cannot test fallback numpy.add.at implementation without '
                     'a version of NumPy which provides this ufunc method (>=1.8).')
    def test_add_at(self):
        ri = numpy.random.randint
        for nd in range(1, 5):
            m, n, rest = ri(3, 50), ri(51, 100), tuple(ri(3, 10, nd - 1))
            source = ri(-100, 100, (n,) + rest)
            map = ri(0, m, n)
            expected, actual = numpy.zeros((2, m) + rest)
            numpy.add.at(expected, map, source)
            common._add_at(actual, map, source)
            self.assertTrue(numpy.allclose(expected, actual))
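
A note on what this test exercises (not part of the project code above): numpy.add.at performs unbuffered in-place addition, so repeated indices accumulate, whereas fancy-indexed += applies each duplicated index only once. A minimal sketch:

import numpy as np

acc = np.zeros(3)
idx = np.array([0, 0, 1])
src = np.array([1.0, 2.0, 3.0])

np.add.at(acc, idx, src)  # acc == [3., 3., 0.]; both hits on index 0 count
buf = np.zeros(3)
buf[idx] += src           # buf == [2., 3., 0.]; the repeated index is applied once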

Example 2

Project: sharedmem Source File: parallel.py
Function: test_shared
def testshared():
    for schedule in ['static', 'dynamic', 'guided']:
        with Parallel(
                Shared(a=[0, 0]),
                Reduction(numpy.add, b=[0, 0])
                ) as p:
            for i in p.forloop(range(100), schedule=schedule):
                with p.critical:
                    p.var.a += numpy.array([i, i * 10])
                p.var.b += numpy.array([i, i * 10]) 
            
        assert (p.var.a == p.var.b).all()

Example 3

Project: NeuroM Source File: _neuritefunc.py
def segment_midpoints(neurites, neurite_type=NeuriteType.all):
    '''Return a list of segment mid-points in a collection of neurites'''
    def _seg_midpoint(sec):
        '''Return the mid-points of segments in a section'''
        pts = sec.points
        return np.divide(np.add(pts[:-1], pts[1:])[:, :3], 2.0)

    neurite_filter = is_type(neurite_type)
    return [s for ss in iter_sections(neurites, neurite_filter=neurite_filter)
            for s in _seg_midpoint(ss)]

Example 4

Project: tfdeploy Source File: tfdeploy.py
@Operation.factory(types=("SegmentSum", "UnsortedSegmentSum"))
def SegmentSum(a, ids, *args):
    """
    Segmented sum op.
    """
    func = lambda idxs: reduce(np.add, a[idxs])
    return seg_map(func, a, ids),

Example 5

Project: biggus Source File: test__ufunc_wrapper.py
    def test_nin2_docstring(self):
        wrapped_fn = _ufunc_wrapper(np.add)
        doc = inspect.getdoc(wrapped_fn)

        expected = ('Return the elementwise evaluation of '
                    'np.add(a, b) as another Array.')
        self.assertEqual(doc, expected)

Example 6

Project: iris Source File: test_Cube__operators.py
    def test_lazy_biggus_add_scalar(self):
        c1 = self.build_lazy_cube([1, 2])
        cube = c1 + 5
        self.assertEqual(c1 + 5, 5 + c1)
        result = cube.lazy_data()
        self.assertTrue(cube.has_lazy_data())
        self.assert_elementwise(c1, None, result, np.add)

Example 7

Project: pyDive Source File: test_algorithm.py
def test_reduce(init_pyDive):
    input_array = pyDive.h5.open(input_file, "fields").load()

    energy_array = pyDive.empty(input_array.shape, dtype=input_array.dtype["fieldE"]["x"])

    def energy(out, fields):
        out[:] = fields["fieldE/x"]**2 + fields["fieldE/y"]**2 + fields["fieldB/z"]**2

    pyDive.map(energy, energy_array, input_array)

    test_total = pyDive.reduce(energy_array, np.add)
    ref_total = np.add.reduce(energy_array.gather(), axis=None)

    diff = abs(ref_total - test_total)
    assert diff / ref_total < 1.0e-5
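
For reference, the np.add.reduce used above is the ufunc-method spelling of a sum; with axis=None it reduces over every element. A quick sketch:

import numpy as np

x = np.arange(6).reshape(2, 3)
print(np.add.reduce(x, axis=None))  # 15, equivalent to np.sum(x)
print(np.add.reduce(x, axis=0))     # [3 5 7], column sums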

Example 8

Project: tfdeploy Source File: tfdeploy.py
Function: add
@Operation.factory(types=("Add", "BiasAdd"))
def Add(a, b):
    """
    Addition op.
    """
    return np.add(a, b),

Example 9

Project: sharedmem Source File: parallel.py
def testraiseordered():
    for schedule in ['static', 'dynamic', 'guided']:
        try:
            with Parallel(
                    Reduction(numpy.add, a=[0, 0])
                    ) as p:
                for i, ordered in p.forloop(range(20), 
                        ordered=True, schedule=schedule):
                    with ordered:
                        p.var.a += numpy.array([i, i * 10])
                        if i == 19:
                            raise ValueError('raised at i == 19')
            assert False
        except ValueError as e:
            pass

Example 10

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_operators.py
    def test_combine_generic(self):
        df1 = self.frame
        df2 = self.frame.ix[:-5, ['A', 'B', 'C']]

        combined = df1.combine(df2, np.add)
        combined2 = df2.combine(df1, np.add)
        self.assertTrue(combined['D'].isnull().all())
        self.assertTrue(combined2['D'].isnull().all())

        chunk = combined.ix[:-5, ['A', 'B', 'C']]
        chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]

        exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2
        assert_frame_equal(chunk, exp)
        assert_frame_equal(chunk2, exp)

Example 11

Project: GPy Source File: independent_outputs.py
Function: k
    def K(self,X ,X2=None):
        K = self.parts[0].K(X, X2) # compute 'base' kern everywhere
        slices = [index_to_slices(X[:,i]) for i in self.extra_dims]
        if X2 is None:
            [[[np.add(K[s,s], k.K(X[s], None), K[s, s]) for s in slices_i] for slices_i in slices_k] for k, slices_k in zip(self.parts[1:], slices)]
        else:
            slices2 = [index_to_slices(X2[:,i]) for i in self.extra_dims]
            [[[np.add(K[s,ss], k.K(X[s], X2[ss]), K[s, ss]) for s,ss in zip(slices_i, slices_j)] for slices_i, slices_j in zip(slices_k1, slices_k2)] for k, slices_k1, slices_k2 in zip(self.parts[1:], slices, slices2)]
        return K

Example 12

Project: bayespy Source File: add.py
Function: compute_moments
    def _compute_moments(self, *u_parents):
        """
        Compute the moments of the sum
        """

        u0 = functools.reduce(np.add,
                              (u_parent[0] for u_parent in u_parents))
        u1 = functools.reduce(np.add,
                              (u_parent[1] for u_parent in u_parents))

        for i in range(self.N):
            for j in range(i+1, self.N):
                xi_xj = linalg.outer(u_parents[i][0], u_parents[j][0], ndim=self.ndim)
                xj_xi = linalg.transpose(xi_xj, ndim=self.ndim)
                u1 = u1 + xi_xj + xj_xi
                                                                     
        return [u0, u1]
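
A pattern worth noting: this example (and several GPy and tfdeploy snippets on this page) uses functools.reduce(np.add, ...) to sum a sequence of equally shaped arrays pairwise. A minimal sketch:

import functools
import numpy as np

parts = [np.array([1, 2]), np.array([3, 4]), np.array([5, 6])]
total = functools.reduce(np.add, parts)  # array([ 9, 12])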

Example 13

Project: NeuroM Source File: _neuritefunc.py
def segment_radial_distances(neurites, neurite_type=NeuriteType.all, origin=None):
    '''Radial distances of the segments in a collection of neurites'''
    def _seg_rd(sec, pos):
        '''list of radial distances of all segments of a section'''
        mid_pts = np.divide(np.add(sec.points[:-1], sec.points[1:])[:, :3], 2.0)
        return np.sqrt([mm.point_dist2(p, pos) for p in mid_pts])

    dist = []
    for n in iter_neurites(neurites, filt=is_type(neurite_type)):
        pos = n.root_node.points[0] if origin is None else origin
        dist.extend([s for ss in n.iter_sections() for s in _seg_rd(ss, pos)])

    return dist

Example 14

Project: iris Source File: test_Cube__operators.py
    def test_lazy_biggus_add_cubes(self):
        c1 = self.build_lazy_cube([1, 2])
        cube = c1 + c1
        result = cube.lazy_data()
        self.assertTrue(cube.has_lazy_data())
        self.assert_elementwise(c1, c1.lazy_data(), result, np.add)

Example 15

Project: sharedmem Source File: parallel.py
Function: test_reduction
def testreduction():
    for schedule in ['static', 'dynamic', 'guided']:
        with Parallel(
                Reduction(numpy.add, a=[0, 0])
                ) as p:
            for i in p.forloop(range(20), schedule=schedule) :
                p.var.a += numpy.array([i, i * 10])
        assert (p.var.a == [190, 1900]).all()

Example 16

Project: NeuroM Source File: morphmath.py
def segment_radial_dist(seg, pos):
    '''Return the radial distance of a tree segment to a given point

    The radial distance is the euclidean distance between the mid-point of
    the segment and the point in question.

    Parameters:
        seg: tree segment

        pos: origin to which distances are measured. It must have at least 3
        components. The first 3 components are (x, y, z).
    '''
    return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0))

Example 17

Project: GPy Source File: add.py
Function: k
    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def K(self, X, X2=None, which_parts=None):
        """
        Add all kernels together.
        If a list of parts (of this kernel!) `which_parts` is given, only
        the parts of the list are taken to compute the covariance.
        """
        if which_parts is None:
            which_parts = self.parts
        elif not isinstance(which_parts, (list, tuple)):
            # if only one part is given
            which_parts = [which_parts]
        return reduce(np.add, (p.K(X, X2) for p in which_parts))

Example 18

Project: landsat-util Source File: image.py
    def _pansize(self, bands):

        self.output('Calculating Pan Ratio', normal=True, arrow=True)

        m = numpy.add(bands[0], bands[1])
        m = numpy.add(m, bands[2])
        pan = numpy.multiply(numpy.nan_to_num(numpy.true_divide(1, m)), bands[self.band8])

        return pan

Example 19

Project: sharedmem Source File: parallel.py
def testraisecritical():
    for schedule in ['static', 'dynamic', 'guided']:
        try:
            with Parallel(
                    Reduction(numpy.add, a=[0, 0])
                    ) as p:
                for i in p.forloop(range(20), schedule=schedule):
                    with p.critical:
                        p.var.a += numpy.array([i, i * 10])
                        if i == 19:
                            raise ValueError('raised at i == 19')
            assert False
        except ValueError as e:
            pass

Example 20

Project: raster-functions Source File: Arithmetic.py
Function: updaterasterinfo
    def updateRasterInfo(self, **kwargs):
        m = kwargs.get('op', 'Add').lower()

        if m == 'add':          self.op = np.add
        elif m == 'subtract':   self.op = np.subtract
        elif m == 'multiply':   self.op = np.multiply
        elif m == 'divide':     self.op = np.divide

        kwargs['output_info']['statistics'] = ()
        kwargs['output_info']['histogram'] = ()
        return kwargs

Example 21

Project: GPy Source File: add.py
Function: kdiag
    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def Kdiag(self, X, which_parts=None):
        if which_parts is None:
            which_parts = self.parts
        elif not isinstance(which_parts, (list, tuple)):
            # if only one part is given
            which_parts = [which_parts]
        return reduce(np.add, (p.Kdiag(X) for p in which_parts))

Example 22

Project: sharedmem Source File: parallel.py
Function: test_barrier
def testbarrier():
    now = time.time()
    with Parallel(
            Shared(a=[0, 0]),
            Reduction(numpy.add, b=[0, 0])
            ) as p:
        #time.sleep(p.rank * 0.01)
        p.barrier()
        p.barrier()

Example 23

Project: astrodendro Source File: dendrogram.py
Function: neighbours
    def neighbours(self, idx):
        """
        Return a list of indices to the neighbours of a given pixel.

        This method can be overridden to handle custom layouts
        (e.g., healpix maps, periodic boundaries, etc.)

        Parameters
        ----------
        idx : tuple
            The N-dimensional location of a pixel in the data

        Returns
        -------
        List of N-dimensional locations of each neighbour
        """
        return [tuple(c) for c in np.add(_offsets[self.n_dim], idx)]
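
The return line above leans on broadcasting: np.add combines a (k, n_dim) table of neighbour offsets with a single n_dim index, producing all neighbour positions at once. A standalone sketch with a hypothetical 4-connected 2-D offset table (not astrodendro's _offsets):

import numpy as np

offsets = np.array([[-1, 0], [1, 0], [0, -1], [0, 1]])
idx = (5, 7)
print([tuple(c) for c in np.add(offsets, idx)])
# [(4, 7), (6, 7), (5, 6), (5, 8)]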

Example 24

Project: tfdeploy Source File: tfdeploy.py
@Operation.factory
def SparseSegmentSqrtN(a, idxs, ids):
    """
    Sparse segmented sum / sqrt(n=len(idxs)) op.
    """
    func = lambda _idxs: np.divide(reduce(np.add, a[idxs][_idxs]), np.math.sqrt(len(_idxs)))
    return seg_map(func, a, ids),

Example 25

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_ufunc.py
    def test_kwarg_exact(self):
        assert_raises(TypeError, np.add, 1, 2, castingx='safe')
        assert_raises(TypeError, np.add, 1, 2, dtypex=np.int)
        assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
        assert_raises(TypeError, np.add, 1, 2, outx=None)
        assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
        assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
        assert_raises(TypeError, np.add, 1, 2, subokx=False)
        assert_raises(TypeError, np.add, 1, 2, wherex=[True])
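
Each misspelled keyword above shadows a real ufunc keyword: casting, dtype, extobj, out, sig/signature, subok, and where. Two of the correctly spelled forms, as a sketch:

import numpy as np

print(np.add(1, 2, dtype=np.float64))  # 3.0
print(np.add([1, 2], [3, 4], where=[True, False], out=np.zeros(2)))  # [4. 0.]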

Example 26

Project: sherpa Source File: pragbayes.py
Function: add_deviations
    def add_deviations(self, specresp):
        # copy the old ARF (use new memory for deviations)
        new_arf = np.add(specresp, self.bias)
        # Include the perturbed effective area in each iteration.
        rr = np.random.randint(self.ncomp)
        return np.add(new_arf, self.simcomp[rr], new_arf)
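
The three-argument call np.add(new_arf, self.simcomp[rr], new_arf) passes the output array positionally, so the sum is written back into new_arf in place (several other examples below use the same idiom). The keyword form is equivalent and more explicit, as a sketch:

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([0.5, 0.5])
np.add(a, b, a)      # a is now [1.5, 2.5]; the result overwrites a
np.add(a, b, out=a)  # same effect, keyword form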

Example 27

Project: GPy Source File: diag.py
Function: add
def add(A, b, offset=0):
    """
    Add b to the view of A in place (!).
    Returns modified A.
    Broadcasting is allowed, thus b can be scalar.

    if offset is not zero, make sure b is of right shape!

    :param ndarray A: 2 dimensional array
    :param ndarray-like b: either one dimensional or scalar
    :param int offset: same as in view.
    :rtype: view of A, which is adjusted inplace
    """
    return _diag_ufunc(A, b, offset, np.add)
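
For the main diagonal, the same in-place effect is available in plain NumPy without GPy's _diag_ufunc helper; a sketch using the writable-view trick:

import numpy as np

A = np.zeros((3, 3))
d = np.einsum('ii->i', A)  # writable view of A's diagonal
np.add(d, 1.0, out=d)      # A now has ones on its diagonal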

Example 28

Project: cloudpickle Source File: cloudpickle_test.py
Function: test_ufunc
    @pytest.mark.skipif(platform.python_implementation() == 'PyPy',
                        reason="Skip numpy and scipy tests on PyPy")
    def test_ufunc(self):
        # test a numpy ufunc (universal function), which is a C-based function
        # that is applied on a numpy array

        if np:
            # simple ufunc: np.add
            self.assertEqual(pickle_depickle(np.add), np.add)
        else:  # skip if numpy is not available
            pass

        if spp:
            # custom ufunc: scipy.special.iv
            self.assertEqual(pickle_depickle(spp.iv), spp.iv)
        else:  # skip if scipy is not available
            pass

Example 29

Project: seagoatvision Source File: psychedelic.py
Function: execute
    def execute(self, image):
        self._images.append(image)
        while len(self._images) >= self.nb_images.get():
            del self._images[0]

        try:
            for img in self._images:
                image = np.add(image, img)
        except:
            pass
        return image

Example 30

Project: tfdeploy Source File: tfdeploy.py
Function: addn
@Operation.factory(unpack=False)
def AddN(inputs):
    """
    Multi add op.
    """
    return reduce(np.add, inputs),

Example 31

Project: hyperspy Source File: test_tools.py
    def test_numpy_unfunc_two_arg_titled(self):
        s1, s2 = self.signal.deepcopy(), self.signal.deepcopy()
        s1.metadata.General.title = "A"
        s2.metadata.General.title = "B"
        result = np.add(s1, s2)
        nt.assert_true(isinstance(result, signals.Signal1D))
        np.testing.assert_array_equal(result.data, np.add(s1.data, s2.data))
        nt.assert_equal(result.metadata.General.title, "add(A, B)")

Example 32

Project: sharedmem Source File: parallel.py
Function: test_kill
def testkill():
    try:
        with Parallel(
                Shared(a=[0, 0]),
                Reduction(numpy.add, b=[0, 0])
                ) as p:
        #    time.sleep(p.rank * 0.01)
            p.barrier()

            if p.rank == p.num_threads - 1:
                os.kill(os.getpid(), signal.SIGKILL)
            p.barrier()
        assert False
    except ParallelException as e:
        return
    assert False

Example 33

Project: sherpa Source File: pragbayes.py
Function: add_deviations
    def add_deviations(self, specresp, rrin=None, rrsig=None):
        # copy the old ARF (use new memory for deviations)
        new_arf = np.add(specresp, self.bias)

        rrout = np.random.standard_normal(self.ncomp)
        if rrin is not None and rrsig is not None:
            rrout = rrin + rrsig * rrout
        self.rrout = rrout

        tmp = self.eigenvec * self.eigenval[:,np.newaxis] * rrout[:,np.newaxis]
        return np.add(new_arf, tmp.sum(axis=0), new_arf)

Example 34

Project: Reactor-3 Source File: maps.py
def render_lights(size=MAP_WINDOW_SIZE, show_weather=True):
	if not SETTINGS['draw lights']:
		return False

	reset_lights(size=size)
	_weather_light = weather.get_lighting()
	
	#Not entirely my code. Made some changes to someone's code from libtcod's Python forum.
	RGB_LIGHT_BUFFER[0] = numpy.add(RGB_LIGHT_BUFFER[0], _weather_light[0])
	RGB_LIGHT_BUFFER[1] = numpy.add(RGB_LIGHT_BUFFER[1], _weather_light[1])
	RGB_LIGHT_BUFFER[2] = numpy.add(RGB_LIGHT_BUFFER[2], _weather_light[2])
	(x, y) = SETTINGS['light mesh grid']
	
	if show_weather:
		weather.generate_effects(size)

	_remove_lights = []
	for light in WORLD_INFO['lights']:
		_x_range = light['pos'][0]-CAMERA_POS[0]
		_y_range = light['pos'][1]-CAMERA_POS[1]
		
		if _x_range <= -20 or _x_range>=size[0]+20:
			continue
		
		if _y_range <= -20 or _y_range>=size[1]+20:
			continue
		
		if not 'old_pos' in light:
			light['old_pos'] = (0, 0, -2)
		else:
			light['old_pos'] = light['pos'][:]
		
		if 'follow_item' in light:
			if not light['follow_item'] in ITEMS:
				_remove_lights.append(light)
				continue
				
			light['pos'] = items.get_pos(light['follow_item'])[:]
		
		_render_x = light['pos'][0]-CAMERA_POS[0]
		_render_y = light['pos'][1]-CAMERA_POS[1]
		_x = numbers.clip(light['pos'][0]-(size[0]/2),0,MAP_SIZE[0])
		_y = numbers.clip(light['pos'][1]-(size[1]/2),0,MAP_SIZE[1])
		_top_left = (_x,_y,light['pos'][2])
		
		#TODO: Render only on move
		if not tuple(light['pos']) == tuple(light['old_pos']):
			light['los'] = cython_render_los.render_los((light['pos'][0],light['pos'][1]), light['brightness']*2, view_size=size, top_left=_top_left)
		
		los = light['los'].copy()
		
		_x_scroll = _x-CAMERA_POS[0]
		_x_scroll_over = 0
		_y_scroll = _y-CAMERA_POS[1]
		_y_scroll_over = 0
		
		if _x_scroll<0:
			_x_scroll_over = _x_scroll
			_x_scroll = los.shape[1]+_x_scroll
		
		if _y_scroll<0:
			_y_scroll_over = _y_scroll
			_y_scroll = los.shape[0]+_y_scroll
		
		los = numpy.roll(los, _y_scroll, axis=0)
		los = numpy.roll(los, _x_scroll, axis=1)
		los[_y_scroll_over:_y_scroll,] = 1
		los[:,_x_scroll_over:_x_scroll] = 1
		
		if SETTINGS['diffuse light']:
			_y, _x = diffuse_light((y, x))
			(x, y) = numpy.meshgrid(_x, _y)
		
		sqr_distance = (x - (_render_x))**2.0 + (y - (_render_y))**2.0
		
		brightness = numbers.clip(random.uniform(light['brightness']*light['shake'], light['brightness']), 0.01, 50) / sqr_distance
		brightness *= los
		#brightness *= LOS_BUFFER[0]
		
		#_mod = (abs((WORLD_INFO['length_of_day']/2)-WORLD_INFO['real_time_of_day'])/float(WORLD_INFO['length_of_day']))*5.0	
		#_mod = numbers.clip(_mod-1, 0, 1)
		#(255*_mod, 165*_mod, 0*_mod)
		#print brightness
		#light['brightness'] = 25
		#light['color'][0] = 255*(light['brightness']/255.0)
		#light['color'][1] = (light['brightness']/255.0)
		#light['color'][2] = 255*(light['brightness']/255.0)
		RGB_LIGHT_BUFFER[0] -= (brightness.clip(0, 2)*(light['color'][0]))#numpy.subtract(RGB_LIGHT_BUFFER[0], light['color'][0]).clip(0, 255)
		RGB_LIGHT_BUFFER[1] -= (brightness.clip(0, 2)*(light['color'][1]))#numpy.subtract(RGB_LIGHT_BUFFER[1], light['color'][1]).clip(0, 255)
		RGB_LIGHT_BUFFER[2] -= (brightness.clip(0, 2)*(light['color'][2]))#numpy.subtract(RGB_LIGHT_BUFFER[2], light['color'][2]).clip(0, 255)

Example 35

Project: biggus Source File: test__Elementwise.py
Function: test_add
    def test_add(self):
        self._test(np.add, ma.add)

Example 36

Project: ray Source File: core.py
@ray.remote
def add(x1, x2):
  return np.add(x1, x2)

Example 37

Project: bayespy Source File: add.py
Function: compute_message_to_parent
    def _compute_message_to_parent(self, index, m, *u_parents):
        """
        Compute the message to a parent node.

        .. math::

           (\sum_i \mathbf{x}_i)^T \mathbf{M}_2 (\sum_j \mathbf{x}_j)
           + (\sum_i \mathbf{x}_i)^T \mathbf{m}_1

        Moments of the parents are

        .. math::

           u_1^{(i)} = \langle \mathbf{x}_i \rangle
           \\
           u_2^{(i)} = \langle \mathbf{x}_i \mathbf{x}_i^T \rangle

        Thus, the message for :math:`i`-th parent is

        .. math::
        
           \phi_{x_i}^{(1)} = \mathbf{m}_1 + 2 \mathbf{M}_2 \sum_{j\neq i} \mathbf{x}_j
           \\
           \phi_{x_i}^{(2)} = \mathbf{M}_2
        """

        # Remove the moments of the parent that receives the message
        u_parents = u_parents[:index] + u_parents[(index+1):]

        m0 = (m[0] +
              linalg.mvdot(
                  2*m[1],
                  functools.reduce(np.add,
                                   (u_parent[0] for u_parent in u_parents)),
                  ndim=self.ndim))

        m1 = m[1]
            
        return [m0, m1]

Example 38

Project: peregrine Source File: satellite_glo.py
  def getBatchSignals(self,
                      userTimeAll_s,
                      samples,
                      outputConfig,
                      noiseParams,
                      band,
                      debug):
    '''
    Generates signal samples.

    Parameters
    ----------
    userTimeAll_s : numpy.ndarray(n_samples, dtype=numpy.float64)
      Vector of observer's timestamps in seconds for the interval start.
    samples : numpy.ndarray((4, n_samples))
      Array to which samples are added.
    outputConfig : object
      Output configuration object.
    noiseParams : NoiseParameters
      Noise parameters object
    band : Band
      Band description object.
    debug : bool
      Debug flag

    Returns
    -------
    list
      Debug information
    '''
    result = []
    if (self.l1Enabled and band == outputConfig.GLONASS.L1):
      intermediateFrequency_hz = band.INTERMEDIATE_FREQUENCIES_HZ[self.prn]
      values = self.doppler.computeBatch(userTimeAll_s,
                                         self.amplitude,
                                         noiseParams,
                                         signals.GLONASS.L1S[self.prn],
                                         intermediateFrequency_hz,
                                         self.l1Message,
                                         self.caCode,
                                         outputConfig,
                                         debug)
      numpy.add(samples[band.INDEX],
                values[0],
                out=samples[band.INDEX])
      debugData = {'type': "GLOL1", 'doppler': values[1]}
      result.append(debugData)
    if (self.l2Enabled and band == outputConfig.GLONASS.L2):
      intermediateFrequency_hz = band.INTERMEDIATE_FREQUENCIES_HZ[self.prn]
      values = self.doppler.computeBatch(userTimeAll_s,
                                         self.amplitude,
                                         noiseParams,
                                         signals.GLONASS.L2S[self.prn],
                                         intermediateFrequency_hz,
                                         self.l2Message,
                                         self.caCode,
                                         outputConfig,
                                         debug)
      numpy.add(samples[band.INDEX],
                values[0],
                out=samples[band.INDEX])
      debugData = {'type': "GLOL2", 'doppler': values[1]}
      result.append(debugData)
    return result

Example 39

Project: peregrine Source File: satellite_gps.py
  def getBatchSignals(self,
                      userTimeAll_s,
                      samples,
                      outputConfig,
                      noiseParams,
                      band,
                      debug):
    '''
    Generates signal samples.

    Parameters
    ----------
    userTimeAll_s : numpy.ndarray(n_samples, dtype=numpy.float64)
      Vector of observer's timestamps in seconds for the interval start.
    samples : numpy.ndarray((4, n_samples))
      Array to which samples are added.
    outputConfig : object
      Output configuration object.
    noiseParams : NoiseParameters
      Noise parameters object
    band : Band
      Band description object.
    debug : bool
      Debug flag

    Returns
    -------
    list
      Debug information
    '''
    result = []
    if (self.l1caEnabled and band == outputConfig.GPS.L1):
      intermediateFrequency_hz = band.INTERMEDIATE_FREQUENCY_HZ
      values = self.doppler.computeBatch(userTimeAll_s,
                                         self.amplitude,
                                         noiseParams,
                                         signals.GPS.L1CA,
                                         intermediateFrequency_hz,
                                         self.l1caMessage,
                                         self.l1caCode,
                                         outputConfig,
                                         debug)
      numpy.add(samples[band.INDEX],
                values[0],
                out=samples[band.INDEX])
      debugData = {'type': "GPSL1", 'doppler': values[1]}
      result.append(debugData)
    if (self.l2cEnabled and band == outputConfig.GPS.L2):
      intermediateFrequency_hz = band.INTERMEDIATE_FREQUENCY_HZ
      values = self.doppler.computeBatch(userTimeAll_s,
                                         self.amplitude,
                                         noiseParams,
                                         signals.GPS.L2C,
                                         intermediateFrequency_hz,
                                         self.l2cMessage,
                                         self.l2cCode,
                                         outputConfig,
                                         debug)
      numpy.add(samples[band.INDEX],
                values[0],
                out=samples[band.INDEX])
      debugData = {'type': "GPSL2", 'doppler': values[1]}
      result.append(debugData)
    return result

Example 40

Project: TensorVision Source File: core.py
Function: do_eval
def do_eval(hypes, eval_list, phase, sess):
    """
    Run one evaluation against the full epoch of data.

    Parameters
    ----------
    hypes : dict
        Hyperparameters
    eval_list : list of tuples
        Each tuple should contain a string (name of the metric) and a
        tensor (storing the result of the metric).
    phase : str
        Describes the data the evaluation is run on.
    sess : tf.Session
        The session in which the model has been trained.

    Returns
    -------
    tuple of lists
        List of names and evaluation results
    """
    # And run one epoch of eval.
    # Checking for list for compatibility
    if eval_list[phase] is None:
        return [''], [0.0]
    if type(eval_list[phase]) is list:
        eval_names, eval_op = zip(*eval_list[phase])

    else:
        logging.warning("Passing eval_op directly is deprecated. "
                        "Pass a list of tuples instead.")
        eval_names = ['Accuracy']
        eval_op = [eval_list[phase]]

    assert(len(eval_names) == len(eval_op))

    if phase == 'train':
        num_examples = hypes['data']['num_examples_per_epoch_for_train']
    if phase == 'val':
        num_examples = hypes['data']['num_examples_per_epoch_for_eval']

    steps_per_epoch = num_examples // hypes['solver']['batch_size']
    num_examples = steps_per_epoch * hypes['solver']['batch_size']

    logging.info('Data: % s  Num examples: % d ' % (phase, num_examples))
    # run evaluation on num_examples many images
    results = sess.run(eval_op)
    logging.debug('Output of eval: %s', results)
    for step in xrange(1, steps_per_epoch):
        results = map(np.add, results, sess.run(eval_op))

    avg_results = [result / steps_per_epoch for result in results]

    for name, value in zip(eval_names, avg_results):
        logging.info('%s : % 0.04f ' % (name, value))

    return eval_names, avg_results
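
One caveat about the accumulation loop above (written for Python 2, note the xrange): map(np.add, results, batch) adds two sequences pairwise, but in Python 3 map returns a lazy iterator, so the result should be wrapped in list() before being reused across iterations. A sketch:

import numpy as np

results = [1.0, 2.0]
batch = [0.5, 0.25]
results = list(map(np.add, results, batch))  # [1.5, 2.25]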

Example 41

Project: GPy Source File: add.py
    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi0(self, Z, variational_posterior):
        if not self._exact_psicomp: return Kern.psi0(self,Z,variational_posterior)
        return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))

Example 42

Project: GPy Source File: add.py
    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi1(self, Z, variational_posterior):
        if not self._exact_psicomp: return Kern.psi1(self,Z,variational_posterior)
        return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))

Example 43

Project: GPy Source File: add.py
Function: psi2
    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi2(self, Z, variational_posterior):
        if not self._exact_psicomp: return Kern.psi2(self,Z,variational_posterior)
        psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
        #return psi2
        # compute the "cross" terms
        from .static import White, Bias
        from .rbf import RBF
        #from rbf_inv import RBFInv
        from .linear import Linear
        #ffrom fixed import Fixed

        for p1, p2 in itertools.combinations(self.parts, 2):
            # i1, i2 = p1._all_dims_active, p2._all_dims_active
            # white doesn't combine with anything
            if isinstance(p1, White) or isinstance(p2, White):
                pass
            # rbf X bias
            #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
            elif isinstance(p1,  Bias) and isinstance(p2, (RBF, Linear)):
                tmp = p2.psi1(Z, variational_posterior).sum(axis=0)
                psi2 += p1.variance * (tmp[:,None]+tmp[None,:]) #(tmp[:, :, None] + tmp[:, None, :])
            #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
            elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
                tmp = p1.psi1(Z, variational_posterior).sum(axis=0)
                psi2 += p2.variance * (tmp[:,None]+tmp[None,:]) #(tmp[:, :, None] + tmp[:, None, :])
            elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
                assert np.intersect1d(p1._all_dims_active, p2._all_dims_active).size == 0, "only non overlapping kernel dimensions allowed so far"
                tmp1 = p1.psi1(Z, variational_posterior)
                tmp2 = p2.psi1(Z, variational_posterior)
                psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
                #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
            else:
                raise NotImplementedError("psi2 cannot be computed for this kernel")
        return psi2

Example 44

Project: attention-lvcsr Source File: extending_theano_solution_1.py
@as_op(itypes=[theano.tensor.fmatrix, theano.tensor.fmatrix],
       otypes=[theano.tensor.fmatrix], infer_shape=infer_shape_numpy_dot)
def numpy_add(a, b):
    return numpy.add(a, b)

Example 45

Project: GPy Source File: add.py
    @Cache_this(limit=3, force_kwargs=['which_parts'])
    def psi2n(self, Z, variational_posterior):
        if not self._exact_psicomp: return Kern.psi2n(self, Z, variational_posterior)
        psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
        #return psi2
        # compute the "cross" terms
        from .static import White, Bias
        from .rbf import RBF
        #from rbf_inv import RBFInv
        from .linear import Linear
        #ffrom fixed import Fixed

        for p1, p2 in itertools.combinations(self.parts, 2):
            # i1, i2 = p1._all_dims_active, p2._all_dims_active
            # white doesn't combine with anything
            if isinstance(p1, White) or isinstance(p2, White):
                pass
            # rbf X bias
            #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
            elif isinstance(p1,  Bias) and isinstance(p2, (RBF, Linear)):
                tmp = p2.psi1(Z, variational_posterior)
                psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
            #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
            elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
                tmp = p1.psi1(Z, variational_posterior)
                psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
            elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
                assert np.intersect1d(p1._all_dims_active, p2._all_dims_active).size == 0, "only non overlapping kernel dimensions allowed so far"
                tmp1 = p1.psi1(Z, variational_posterior)
                tmp2 = p2.psi1(Z, variational_posterior)
                psi2 += np.einsum('nm,no->nmo',tmp1,tmp2)+np.einsum('nm,no->nmo',tmp2,tmp1)
                #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
            else:
                raise NotImplementedError("psi2 cannot be computed for this kernel")
        return psi2

Example 46

Project: pyomo Source File: generate_ph.py
Function: update_weights
def update_weights(x, xbar, rho, w):
    return numpy.add(w,rho*(x-xbar))

Example 47

Project: GPy Source File: finite_dimensional.py
    def dKdiag_dtheta(self,X,target):
        np.add(target[:,0],1.,target[:,0])

Example 48

Project: attention-lvcsr Source File: extending_theano_solution_1.py
@as_op(itypes=[theano.tensor.fmatrix, theano.tensor.fmatrix],
       otypes=[theano.tensor.fmatrix], infer_shape=infer_shape_numpy_add_sub)
def numpy_add(a, b):
    return numpy.add(a, b)

Example 49

Project: GPy Source File: gibbs.py
Function: kdiag
    def Kdiag(self, X, target):
        """Compute the diagonal of the covariance matrix for X."""
        np.add(target, self.variance, target)

Example 50

Project: biggus Source File: test_elementwise.py
Function: test_add
    def test_add(self):
        self._test_elementwise(biggus.add, np.add)