numpy.full

Here are examples of the Python API numpy.full taken from open source projects.

106 Examples
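
For quick reference, numpy.full(shape, fill_value, dtype=None) returns a new array of the given shape with every element set to fill_value; in recent NumPy versions the dtype is inferred from fill_value when not given. A minimal sketch:

import numpy as np

a = np.full((2, 3), 7)         # 2x3 array of sevens, integer dtype
b = np.full(4, np.nan)         # length-4 float64 array of NaNs
c = np.full(3, False)          # all-False boolean array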

Example 1

Project: numpy-groupies Source File: aggregate_numpy.py
Function: prod
def _prod(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != 1:
        ret[group_idx] = 1  # product starts from 1
    np.multiply.at(ret, group_idx, a)
    return ret
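
A quick sanity check of the pattern above (a minimal sketch with made-up data, assuming numpy is imported as np): the output is pre-filled with fill_value, the groups that actually receive data are reset to the multiplicative identity, and np.multiply.at accumulates the per-group products.

import numpy as np

group_idx = np.array([0, 0, 2])
a = np.array([2., 3., 5.])
ret = np.full(4, -1.)              # fill_value marks empty groups
ret[group_idx] = 1                 # product starts from 1
np.multiply.at(ret, group_idx, a)
# ret -> array([ 6., -1.,  5., -1.])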

Example 2

Project: numpy-groupies Source File: aggregate_pandas.py
Function: wrapper
def _wrapper(group_idx, a, size, fill_value, func='sum', dtype=None, ddof=0):
    kwargs = dict()
    if func in ('var', 'std'):
        kwargs['ddof'] = ddof
    if isstr(func):
        grouped = getattr(pd.DataFrame({'group_idx': group_idx, 'a': a})
                          .groupby('group_idx'), func)(**kwargs)
    else:
        grouped = pd.DataFrame({'group_idx': group_idx, 'a': a})\
                    .groupby('group_idx').aggregate(func, **kwargs)

    dtype = check_dtype(dtype, getattr(func, '__name__', func), a, size)
    ret = np.full(size, fill_value, dtype=dtype)
    ret[grouped.index] = grouped
    return ret

Example 3

Project: qcache Source File: __init__.py
def _add_stand_in_columns(df, stand_in_columns):
    if not stand_in_columns:
        return df

    for column_name, stand_in_value in stand_in_columns:
        if column_name not in df:
            if stand_in_value in df:
                df.loc[:, column_name] = df[stand_in_value]
            else:
                dtype = _get_dtype(stand_in_value)
                stand_in_value = unquote(stand_in_value)
                arr = numpy.full(len(df), stand_in_value, dtype=dtype)
                df.loc[:, column_name] = pandas.Series(arr, index=df.index)

    return df  # keep the fall-through path consistent with the early return above

Example 4

Project: landlab Source File: test_flow_routing.py
@with_setup(setup_dans_grid1)
def test_check_field_input():
    """Check we can successfully pass water__discharge_in."""
    mg.add_field('node', 'water__unit_flux_in',
                 np.full(25, 3.), units='m**3/s')
    fr = FlowRouter(mg)
    assert_array_equal(np.full(25, 3.), mg.at_node['water__unit_flux_in'])
    fr = FlowRouter(mg, infile)
    assert_array_equal(np.full(25, 2.), mg.at_node['water__unit_flux_in'])

Example 5

Project: datashader Source File: test_transfer_functions.py
Function: test_density
def test_density():
    b = 0xffff0000
    data = np.full((4, 4), b, dtype='uint32')
    assert tf._density(data) == 1.0
    data = np.zeros((4, 4), dtype='uint32')
    assert tf._density(data) == np.inf
    data[2, 2] = b
    assert tf._density(data) == 0
    data[2, 1] = data[1, 2] = data[1, 1] = b
    assert np.allclose(tf._density(data), 3./8.)

Example 6

Project: zipline Source File: core.py
Function: get_history_window
    def get_history_window(self, assets, end_dt, bar_count, frequency, field,
                           ffill=True):
        if frequency == "1d":
            end_idx = \
                self.trading_calendar.all_sessions.searchsorted(end_dt)
            days = self.trading_calendar.all_sessions[
                (end_idx - bar_count + 1):(end_idx + 1)
            ]

            df = pd.DataFrame(
                np.full((bar_count, len(assets)), 100.0),
                index=days,
                columns=assets
            )

            return df

Example 7

Project: numpy-groupies Source File: aggregate_numpy_ufunc.py
Function: any
def _any(group_idx, a, size, fill_value, dtype=None):
    check_boolean(fill_value)
    ret = np.full(size, fill_value, dtype=bool)
    if fill_value:
        ret[group_idx] = False  # any-test should start from False
    np.logical_or.at(ret, group_idx, a)
    return ret
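
The same trick applies with the boolean identity (a small sketch with hypothetical data): groups that receive values are reset to False so np.logical_or.at can accumulate an any-test, while untouched groups keep fill_value.

import numpy as np

group_idx = np.array([0, 0, 1])
a = np.array([False, True, False])
ret = np.full(3, True)               # fill_value for empty groups
ret[group_idx] = False               # any-test starts from False
np.logical_or.at(ret, group_idx, a)
# ret -> array([ True, False,  True])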

Example 8

Project: numpy-groupies Source File: aggregate_numpy_ufunc.py
Function: all
def _all(group_idx, a, size, fill_value, dtype=None):
    check_boolean(fill_value)
    ret = np.full(size, fill_value, dtype=bool)
    if not fill_value:
        ret[group_idx] = True  # all-test should start from True
    np.logical_and.at(ret, group_idx, a)
    return ret

Example 9

Project: NNBlocks Source File: init.py
Function: call
    def __call__(self, shape):
        """Returns a tensor initialized with the constant `value` and the shape
        specified
        """
        r = np.full(shape, self.value)
        return np.asarray(r, dtype=theano.config.floatX)

Example 10

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_indexing.py
    def test_uncontiguous_subspace_assignment(self):
        # During development there was a bug activating a skip logic
        # based on ndim instead of size.
        a = np.full((3, 4, 2), -1)
        b = np.full((3, 4, 2), -1)

        a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
        b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()

        assert_equal(a, b)

Example 11

Project: deep-go-wrap Source File: analyze_board.py
def npclose(a, empty, closeset, verbose=False):
    inf = a.shape[0] * 2
    ret = np.full(a.shape, inf)
    it = np.nditer(empty, flags=['multi_index'])
    while not it.finished:
        x,y = it.multi_index

        dist,xs,ys = closeset[x][y]
        arg = a[xs,ys].argmax()
        if a[xs[arg],ys[arg]]:
            ret[x][y] = dist[arg]

        it.iternext()

    return ret

Example 12

Project: distributed Source File: test_collections.py
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason='KQueue error - uncertain cause')
def test_futures_to_dask_array(loop):
    with cluster() as (c, [a, b]):
        with Client(('127.0.0.1', c['port']), loop=loop) as c:
            remote_arrays = [[c.submit(np.full, (3, 3), i + j)
                                for i in range(3)]
                                for j in range(3)]

            x = futures_to_dask_array(remote_arrays, client=c)
            assert x.chunks == ((3, 3, 3), (3, 3, 3))
            assert x.dtype == np.full((), 0).dtype

            assert x.sum().compute(get=c.get) == 162
            assert (x + x.T).sum().compute(get=c.get) == 162 * 2

            y = futures_to_collection(remote_arrays, client=c)
            assert x.dask == y.dask

Example 13

Project: zipline Source File: labelarray.py
Function: empty_like
    def empty_like(self, shape):
        """
        Make an empty LabelArray with the same categories as ``self``, filled
        with ``self.missing_value``.
        """
        return type(self)._from_codes_and_metadata(
            codes=np.full(
                shape,
                self.reverse_categories[self.missing_value],
                dtype=int_dtype_with_size_in_bytes(self.itemsize),
            ),
            categories=self.categories,
            reverse_categories=self.reverse_categories,
            missing_value=self.missing_value,
        )

Example 14

Project: landlab Source File: test_status_at_node.py
def test_set_status_with_array_bool():
    """Test setting node status with boolean array."""
    grid = RasterModelGrid((4, 5))
    inds = np.full((20, ), False, dtype=bool)
    inds[6] = True
    inds[7] = True
    inds[13] = True

    grid.status_at_node[inds] = 2
    assert_array_equal(grid.status_at_node,
                       [FV, FV, FV, FV, FV,
                        FV,  2,  2,  0, FV,
                        FV,  0,  0,  2, FV,
                        FV, FV, FV, FV, FV])

Example 15

Project: pyNastran Source File: cbush.py
Function: allocate
    def allocate(self, card_count):
        ncards = card_count[self.type]
        if ncards:
            self.n = ncards
            float_fmt = self.model.float_fmt
            #: Element ID
            self.element_id = zeros(ncards, 'int32')
            #: Property ID
            self.property_id = zeros(ncards, 'int32')
            self.node_ids = zeros((ncards, 2), 'int32')
            self.is_g0 = zeros(ncards, 'bool')
            self.g0 = full(ncards, nan, 'int32')  # note: nan does not survive an int32 cast; this yields an arbitrary sentinel (newer NumPy warns)
            self.x = full((ncards, 3), nan, float_fmt)
            self.cid = full(ncards, nan, 'int32')
            self.s = full(ncards, nan, float_fmt)
            self.ocid = full(ncards, nan, 'int32')
            self.si = full((ncards, 3), nan, float_fmt)

Example 16

Project: numpy-groupies Source File: aggregate_numpy.py
Function: any
def _any(group_idx, a, size, fill_value, dtype=None):
    check_boolean(fill_value)
    ret = np.full(size, fill_value, dtype=bool)
    if fill_value:
        ret[group_idx] = False
    ret[group_idx.compress(a)] = True
    return ret

Example 17

Project: pyNastran Source File: cbar.py
Function: allocate
    def allocate(self, card_count):
        ncards = card_count[self.type]
        self.n = ncards
        if self.n:
            assert isinstance(ncards, int), ncards
            float_fmt = self.model.float_fmt
            #: Element ID
            self.element_id = zeros(ncards, 'int32')
            #: Property ID
            self.property_id = zeros(ncards, 'int32')
            self.node_ids = zeros((ncards, 2), 'int32')
            self.is_g0 = zeros(ncards, 'bool')
            self.g0 = full(ncards, nan, 'int32')
            self.x = full((ncards, 3), nan, float_fmt)
            self.offt = full(ncards, nan, '|S3')
            self.pin_flags = zeros((ncards, 2), 'int32')
            self.wa = zeros((ncards, 3), float_fmt)
            self.wb = zeros((ncards, 3), float_fmt)

Example 18

Project: numpy-groupies Source File: aggregate_numpy_ufunc.py
Function: sum
def _sum(group_idx, a, size, fill_value, dtype=None):
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != 0:
        ret[group_idx] = 0  # sums should start at 0
    np.add.at(ret, group_idx, a)
    return ret

Example 19

Project: numpy-groupies Source File: aggregate_numpy.py
Function: all
def _all(group_idx, a, size, fill_value, dtype=None):
    check_boolean(fill_value)
    ret = np.full(size, fill_value, dtype=bool)
    if not fill_value:
        ret[group_idx] = True
    ret[group_idx.compress(np.logical_not(a))] = False
    return ret

Example 20

Project: AWS-Lambda-ML-Microservice-Skeleton Source File: test_ufunc.py
Function: test_true_divide
    def test_true_divide(self):
        # True_divide has a non uniform signature, see #3484.
        # This also tests type_tuple_type_resolver.
        a = np.full(5, 12.5)
        b = np.full(5, 10.0)
        tgt = np.full(5, 1.25)
        assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
        assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
        assert_raises(TypeError, np.true_divide, a, b, dtype=np.int)

Example 21

Project: drmad Source File: grads.py
def make_grad_np_mean(ans, x, axis=None, keepdims=False):
    if not isarray(x):
        return [I]
    shape = x.shape
    if axis is None:
        return [lambda g : np.full(shape, g) / np.prod(shape)]
    else:
        if keepdims:
            return [lambda g : np.repeat(g, shape[axis], axis) / shape[axis]]
        else:
            return [lambda g : np.repeat(np.expand_dims(g, axis),
                                         shape[axis], axis) / shape[axis]]
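
In the axis=None branch, np.full broadcasts the scalar upstream gradient to the input's shape, and dividing by np.prod(shape) spreads it evenly, since each element contributes 1/size to the mean. A small numeric check (assuming numpy as np):

import numpy as np

x = np.ones((2, 3))
g = 1.0                                   # upstream gradient of x.mean()
grad = np.full(x.shape, g) / np.prod(x.shape)
# grad -> 2x3 array filled with 1/6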

Example 22

Project: drmad Source File: nn_utils.py
def fill_parser(parser, items):
    """Build a vector by assigning each block the corresponding value in
       the items vector."""
    partial_vects = [np.full(parser[name].size, items[i])
                     for i, name in enumerate(parser.names)]
    return np.concatenate(partial_vects, axis=0)

Example 23

Project: chainer Source File: test_constant.py
    def check_shaped_initializer(self, xp):
        initializer = initializers.Constant(
            fill_value=self.fill_value, dtype=self.dtype)
        w = initializers.generate_array(initializer, self.shape, xp)
        self.assertIs(cuda.get_array_module(w), xp)
        self.assertTupleEqual(w.shape, self.shape)
        self.assertEqual(w.dtype, self.dtype)
        testing.assert_allclose(
            w, numpy.full(self.shape, self.fill_value),
            **self.check_options)

Example 24

Project: distributed Source File: test_collections.py
Function: test_futures_to_dask_array
@gen_cluster(timeout=60, client=True)
def test__futures_to_dask_array(c, s, a, b):
    import dask.array as da
    remote_arrays = [[[c.submit(np.full, (2, 3, 4), i + j + k)
                        for i in range(2)]
                        for j in range(2)]
                        for k in range(4)]

    x = yield _futures_to_dask_array(remote_arrays, client=c)
    assert x.chunks == ((2, 2, 2, 2), (3, 3), (4, 4))
    assert x.dtype == np.full((), 0).dtype

    assert isinstance(x, da.Array)
    expr = x.sum()
    result = yield c._get(expr.dask, expr._keys())
    assert isinstance(result[0], np.number)

Example 25

Project: zipline Source File: test_numerical_expression.py
    def setUp(self):
        self.dates = date_range('2014-01-01', periods=5, freq='D')
        self.assets = Int64Index(range(5))
        self.f = F()
        self.g = G()
        self.h = H()
        self.d = DateFactor()
        self.fake_raw_data = {
            self.f: full((5, 5), 3, float),
            self.g: full((5, 5), 2, float),
            self.h: full((5, 5), 1, float),
            self.d: full((5, 5), 0, dtype='datetime64[ns]'),
        }
        self.mask = DataFrame(True, index=self.dates, columns=self.assets)

Example 26

Project: automl-phase-2 Source File: sandpit.py
def global_test():
    raw_input('I begin')
    # global my_global
    global_data.my_global = np.full((2**17, 2 * 2**10), 42)
    raw_input('Globals created')
    processing_pool = Pool(10)
    processing_pool.map(import_and_print_globals, [None] * 10)
    processing_pool.close()
    processing_pool.join()
    raw_input('Multiprocessing complete')

Example 27

Project: minpy Source File: numpy_core.py
Function: sum_grad
def _sum_grad(ans, x, axis=None, keepdims=False):
    """ Generate gradient function of sum """
    if axis is None:
        return lambda g: np.full(x.shape, g)
    if isinstance(axis, int):
        axis = [axis]
    elif isinstance(axis, tuple):
        axis = list(axis)
    ans_shape_expanded = list(x.shape)
    for a in axis:
        ans_shape_expanded[a] = 1
    xshape = x.shape  # Only shape is needed, hope array `x` could be GC'ed.
    return lambda g: np.zeros(xshape) + np.reshape(g, ans_shape_expanded)

Example 28

Project: zipline Source File: dispatch_bar_reader.py
    def _make_raw_array_out(self, field, shape):
        if field != 'volume' and field != 'sid':
            out = full(shape, nan)
        else:
            out = zeros(shape, dtype=int64)
        return out

Example 29

Project: RecurrentHighwayNetworks Source File: theano_rhn.py
Function: make_param
  def make_param(self, shape, init_scheme):
    """Create Theano shared variables, which are used as trainable model parameters."""
    if isinstance(init_scheme, numbers.Number):
      init_value = np.full(shape, init_scheme, floatX)
    elif init_scheme == 'uniform':
      init_value = self._np_rng.uniform(low=-self._init_scale, high=self._init_scale, size=shape).astype(floatX)
    else:
      raise AssertionError('unsupported init_scheme')
    p = theano.shared(init_value)
    self._params.append(p)
    return p

Example 30

Project: numpy-groupies Source File: aggregate_numpy.py
def _generic_callable(group_idx, a, size, fill_value, dtype=None,
                      func=lambda g: g):
    """groups a by inds, and then applies foo to each group in turn, placing
    the results in an array."""
    groups = _array(group_idx, a, size, (), dtype=dtype)
    ret = np.full(size, fill_value, dtype=object)

    for i, grp in enumerate(groups):
        if np.ndim(grp) == 1 and len(grp) > 0:
            ret[i] = func(grp)
    return ret

Example 31

Project: landlab Source File: dual.py
    def _create_link_at_face(self):
        link_at_nodes = {}
        for link, pair in enumerate(self.nodes_at_link):
            pair.sort()
            link_at_nodes[tuple(pair)] = link

        link_at_face = np.full((self.number_of_faces, ), -1, dtype=int)
        for face, pair in enumerate(self._nodes_at_face):
            pair.sort()
            link_at_face[face] = link_at_nodes[tuple(pair)]
        self._link_at_face = link_at_face
        return self._link_at_face

Example 32

Project: zipline Source File: expression.py
    def _compute(self, arrays, dates, assets, mask):
        """
        Compute our stored expression string with numexpr.
        """
        out = full(mask.shape, self.missing_value, dtype=self.dtype)
        # This writes directly into our output buffer.
        numexpr.evaluate(
            self._expr,
            local_dict={
                "x_%d" % idx: array
                for idx, array in enumerate(arrays)
            },
            global_dict={'inf': inf},
            out=out,
        )
        return out

Example 33

Project: numpy-groupies Source File: aggregate_numba.py
Function: initialize
    def _initialize(self, flat_size, fill_value, dtype):
        if self.nans:
            # For avoiding branches
            flat_size += 1
        if self.forced_fill_value is None:
            ret = np.full(flat_size, fill_value, dtype=dtype)
        else:
            ret = np.full(flat_size, self.forced_fill_value, dtype=dtype)
        counter = np.full_like(ret, self.counter_fill_value, dtype=self.counter_dtype)
        if self.mean_fill_value is not None:
            mean = np.full_like(ret, self.mean_fill_value, dtype=ret.dtype)
        else:
            mean = None
        return ret, counter, mean

Example 34

Project: filterpy Source File: sigma_points.py
Function: weights
    def weights(self):
        """ Computes the weights for the scaled unscented Kalman filter.

        Returns
        -------

        Wm : ndarray[n+1]
            weights for mean

        Wc : ndarray[n+1]
            weights for the covariances
        """

        n = self.n
        c = 1. / (n + 1)
        W = np.full(n + 1, c)

        return W, W

Example 35

Project: numpy-groupies Source File: aggregate_numpy_ufunc.py
Function: prod
def _prod(group_idx, a, size, fill_value, dtype=None):
    """Same as aggregate_numpy.py"""
    dtype = minimum_dtype_scalar(fill_value, dtype, a)
    ret = np.full(size, fill_value, dtype=dtype)
    if fill_value != 1:
        ret[group_idx] = 1  # product should start from 1
    np.multiply.at(ret, group_idx, a)
    return ret

Example 36

Project: drmad Source File: grads.py
def make_grad_np_sum(ans, x, axis=None, keepdims=False):
    if not isarray(x):
        return [I]
    shape = x.shape
    if axis is None:
        return [lambda g : np.full(shape, g)]
    else:
        if keepdims:
            return [lambda g : np.repeat(g, shape[axis], axis)]
        else:
            return [lambda g : np.repeat(np.expand_dims(g, axis),
                                         shape[axis], axis)]

Example 37

Project: chainer Source File: test_constant.py
    def check_initializer(self, w):
        initializer = initializers.Constant(fill_value=self.fill_value)
        initializer(w)
        testing.assert_allclose(
            w, numpy.full(self.shape, self.fill_value),
            **self.check_options)

Example 38

Project: hmmlearn Source File: base.py
Function: init
    def _init(self, X, lengths):
        """Initializes model parameters prior to fitting.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Feature matrix of individual samples.

        lengths : array-like of integers, shape (n_sequences, )
            Lengths of the individual sequences in ``X``. The sum of
            these should be ``n_samples``.
        """
        init = 1. / self.n_components
        if 's' in self.init_params or not hasattr(self, "startprob_"):
            self.startprob_ = np.full(self.n_components, init)
        if 't' in self.init_params or not hasattr(self, "transmat_"):
            self.transmat_ = np.full((self.n_components, self.n_components),
                                     init)
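
Here np.full builds uniform probability vectors and matrices: before fitting, every state is equally likely. A minimal illustration with three components (hypothetical numbers, assuming numpy as np):

import numpy as np

n_components = 3
startprob = np.full(n_components, 1. / n_components)
transmat = np.full((n_components, n_components), 1. / n_components)
assert np.isclose(startprob.sum(), 1.0)
assert np.allclose(transmat.sum(axis=1), 1.0)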

Example 39

Project: zipline Source File: test_minute_bars.py
    def test_differing_nans(self):
        """
        Also test nans of differing values/construction.
        """
        sid = 1
        last_date = self.writer.last_date_in_output_for_sid(sid)
        self.assertIs(last_date, NaT)

        self.writer.pad(sid, TEST_CALENDAR_START)

        last_date = self.writer.last_date_in_output_for_sid(sid)
        self.assertEqual(last_date, TEST_CALENDAR_START)

        freq = self.market_opens.index.freq
        minute = self.market_opens[TEST_CALENDAR_START + freq]
        minutes = date_range(minute, periods=9, freq='min')
        data = DataFrame(
            data={
                'open': ((0b11111111111 << 52) + arange(1, 10, dtype=int64)).
                view(float64),
                'high': ((0b11111111111 << 52) + arange(11, 20, dtype=int64)).
                view(float64),
                'low': ((0b11111111111 << 52) + arange(21, 30, dtype=int64)).
                view(float64),
                'close': ((0b11111111111 << 52) + arange(31, 40, dtype=int64)).
                view(float64),
                'volume': full(9, 0.0),
            },
            index=[minutes])
        self.writer.write_sid(sid, data)

        fields = ['open', 'high', 'low', 'close', 'volume']

        ohlcv_window = list(map(transpose, self.reader.load_raw_arrays(
            fields, minutes[0], minutes[-1], [sid],
        )))

        for i, field in enumerate(fields):
            if field != 'volume':
                assert_array_equal(full(9, nan), ohlcv_window[i][0])
            else:
                assert_array_equal(zeros(9), ohlcv_window[i][0])

Example 40

Project: pvlib-python Source File: test_clearsky.py
def test_simplified_solis_nans_arrays():

    # construct input arrays that each have 1 nan offset from each other,
    # the last point is valid for all arrays

    length = 6

    apparent_elevation = np.full(length, 80.)
    apparent_elevation[0] = np.nan

    aod700 = np.full(length, 0.1)
    aod700[1] = np.nan

    precipitable_water = np.full(length, 0.5)
    precipitable_water[2] = np.nan

    pressure = np.full(length, 98000.)
    pressure[3] = np.nan

    dni_extra = np.full(length, 1370.)
    dni_extra[4] = np.nan

    expected = OrderedDict()
    expected['ghi'] = np.full(length, np.nan)
    expected['dni'] = np.full(length, np.nan)
    expected['dhi'] = np.full(length, np.nan)

    expected['ghi'][length-1] = 1096.022736
    expected['dni'][length-1] = 990.306854
    expected['dhi'][length-1] = 128.664594

    out = clearsky.simplified_solis(apparent_elevation, aod700,
                                    precipitable_water, pressure, dni_extra)

    for k, v in expected.items():
        assert_allclose(expected[k], out[k])

Example 41

Project: zipline Source File: test_resample.py
    def test_load_raw_arrays(self):
        reindex_reader = ReindexMinuteBarReader(
            self.trading_calendar,
            self.bcolz_equity_minute_bar_reader,
            self.START_DATE,
            self.END_DATE,
        )
        m_open, m_close = self.trading_calendar.open_and_close_for_session(
            self.START_DATE)
        outer_minutes = self.trading_calendar.minutes_in_range(m_open, m_close)
        result = reindex_reader.load_raw_arrays(
            OHLCV, m_open, m_close, [1, 2])

        opens = DataFrame(data=result[0], index=outer_minutes,
                          columns=[1, 2])
        opens_with_price = opens.dropna()

        self.assertEqual(
            1440,
            len(opens),
            "The result should have 1440 bars, the number of minutes in a "
            "trading session on the target calendar."
        )

        self.assertEqual(
            390,
            len(opens_with_price),
            "The result, after dropping nans, should have 390 bars, the "
            " number of bars in a trading session in the reader's calendar."
        )

        slicer = outer_minutes.slice_indexer(
            end=pd.Timestamp('2015-12-01 14:30', tz='UTC'))

        assert_almost_equal(
            opens[1][slicer],
            full(slicer.stop, nan),
            err_msg="All values before the NYSE market open should be nan.")

        slicer = outer_minutes.slice_indexer(
            start=pd.Timestamp('2015-12-01 21:01', tz='UTC'))

        assert_almost_equal(
            opens[1][slicer],
            full(slicer.stop - slicer.start, nan),
            err_msg="All values after the NYSE market close should be nan.")

        first_minute_loc = outer_minutes.get_loc(pd.Timestamp(
            '2015-12-01 14:31', tz='UTC'))

        # Spot check a value.
        # The value is the autogenerated value from test fixtures.
        assert_almost_equal(
            10.0,
            opens[1][first_minute_loc],
            err_msg="The value for Equity 1, should be 10.0, at NYSE open.")

Example 42

Project: zipline Source File: test_adjusted_array.py
def _gen_multiplicative_adjustment_cases(dtype):
    """
    Generate expected moving windows on a buffer with adjustments.

    We proceed by constructing, at each row, the view of the array we expect in
    in all windows anchored on that row.

    In general, if we have an adjustment to be applied once we process the row
    at index N, should see that adjustment applied to the underlying buffer for
    any window containing the row at index N.

    We then build all legal windows over these buffers.
    """
    adjustment_type = {
        float64_dtype: Float64Multiply,
    }[dtype]

    nrows, ncols = 6, 3
    adjustments = {}
    buffer_as_of = [None] * 6
    baseline = buffer_as_of[0] = full((nrows, ncols), 1, dtype=dtype)

    # Note that row indices are inclusive!
    adjustments[1] = [
        adjustment_type(0, 0, 0, 0, coerce_to_dtype(dtype, 2)),
    ]
    buffer_as_of[1] = array([[2, 1, 1],
                             [1, 1, 1],
                             [1, 1, 1],
                             [1, 1, 1],
                             [1, 1, 1],
                             [1, 1, 1]], dtype=dtype)

    # No adjustment at index 2.
    buffer_as_of[2] = buffer_as_of[1]

    adjustments[3] = [
        adjustment_type(1, 2, 1, 1, coerce_to_dtype(dtype, 3)),
        adjustment_type(0, 1, 0, 0, coerce_to_dtype(dtype, 4)),
    ]
    buffer_as_of[3] = array([[8, 1, 1],
                             [4, 3, 1],
                             [1, 3, 1],
                             [1, 1, 1],
                             [1, 1, 1],
                             [1, 1, 1]], dtype=dtype)

    adjustments[4] = [
        adjustment_type(0, 3, 2, 2, coerce_to_dtype(dtype, 5))
    ]
    buffer_as_of[4] = array([[8, 1, 5],
                             [4, 3, 5],
                             [1, 3, 5],
                             [1, 1, 5],
                             [1, 1, 1],
                             [1, 1, 1]], dtype=dtype)

    adjustments[5] = [
        adjustment_type(0, 4, 1, 1, coerce_to_dtype(dtype, 6)),
        adjustment_type(2, 2, 2, 2, coerce_to_dtype(dtype, 7)),
    ]
    buffer_as_of[5] = array([[8,  6,  5],
                             [4, 18,  5],
                             [1, 18, 35],
                             [1,  6,  5],
                             [1,  6,  1],
                             [1,  1,  1]], dtype=dtype)

    return _gen_expectations(
        baseline,
        default_missing_value_for_dtype(dtype),
        adjustments,
        buffer_as_of,
        nrows,
        perspective_offsets=(0, 1),
    )

Example 43

Project: pvlib-python Source File: test_clearsky.py
def test_simplified_solis_nans_series():

    # construct input arrays that each have 1 nan offset from each other,
    # the last point is valid for all arrays

    length = 6

    apparent_elevation = pd.Series(np.full(length, 80.))
    apparent_elevation[0] = np.nan

    aod700 = np.full(length, 0.1)
    aod700[1] = np.nan

    precipitable_water = np.full(length, 0.5)
    precipitable_water[2] = np.nan

    pressure = np.full(length, 98000.)
    pressure[3] = np.nan

    dni_extra = np.full(length, 1370.)
    dni_extra[4] = np.nan

    expected = OrderedDict()
    expected['ghi'] = np.full(length, np.nan)
    expected['dni'] = np.full(length, np.nan)
    expected['dhi'] = np.full(length, np.nan)

    expected['ghi'][length-1] = 1096.022736
    expected['dni'][length-1] = 990.306854
    expected['dhi'][length-1] = 128.664594

    expected = pd.DataFrame.from_dict(expected)

    out = clearsky.simplified_solis(apparent_elevation, aod700,
                                    precipitable_water, pressure, dni_extra)

    assert_frame_equal(expected, out)

Example 44

Project: zipline Source File: test_filter.py
    @parameter_space(factor_len=[2, 3, 4])
    def test_window_safe(self, factor_len):
        # all true data set of (days, securities)
        data = full(self.default_shape, True, dtype=bool)

        class InputFilter(Filter):
            inputs = ()
            window_length = 0

        class TestFactor(CustomFactor):
            dtype = float64_dtype
            inputs = (InputFilter(), )
            window_length = factor_len

            def compute(self, today, assets, out, filter_):
                # sum for each column
                out[:] = np_sum(filter_, axis=0)

        n = self.default_shape[0]
        output_shape = ((n - factor_len + 1), self.default_shape[1])
        full(output_shape, factor_len, dtype=float64)

        self.check_terms(
            terms={
                'windowsafe': TestFactor(),
            },
            expected={
                'windowsafe': full(output_shape, factor_len, dtype=float64),
            },
            initial_workspace={InputFilter(): data},
            mask=self.build_mask(self.ones_mask()),
        )

Example 45

Project: xarray Source File: variable.py
    def _shift_one_dim(self, dim, count):
        axis = self.get_axis_num(dim)

        if count > 0:
            keep = slice(None, -count)
        elif count < 0:
            keep = slice(-count, None)
        else:
            keep = slice(None)

        trimmed_data = self[(slice(None),) * axis + (keep,)].data
        dtype, fill_value = common._maybe_promote(self.dtype)

        shape = list(self.shape)
        shape[axis] = min(abs(count), shape[axis])

        if isinstance(trimmed_data, dask_array_type):
            chunks = list(trimmed_data.chunks)
            chunks[axis] = (shape[axis],)
            full = functools.partial(da.full, chunks=chunks)
        else:
            full = np.full

        nans = full(shape, fill_value, dtype=dtype)

        if count > 0:
            arrays = [nans, trimmed_data]
        else:
            arrays = [trimmed_data, nans]

        data = ops.concatenate(arrays, axis)

        if isinstance(data, dask_array_type):
            # chunked data should come out with the same chunks; this makes
            # it feasible to combine shifted and unshifted data
            # TODO: remove this once dask.array automatically aligns chunks
            data = data.rechunk(self.data.chunks)

        return type(self)(self.dims, data, self._attrs, fastpath=True)
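
The functools.partial dispatch above lets the same full(shape, fill_value, dtype=dtype) call produce either a dask block or a numpy block. Stripped of the dask branch, a shift by one looks roughly like this (a sketch with hypothetical data):

import numpy as np

data = np.arange(6.0).reshape(2, 3)
count = 1                                  # shift right along axis=1
trimmed = data[:, :-count]                 # keep = slice(None, -count)
nans = np.full((2, count), np.nan)
shifted = np.concatenate([nans, trimmed], axis=1)
# shifted -> [[nan, 0., 1.], [nan, 3., 4.]]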

Example 46

Project: zipline Source File: test_numerical_expression.py
    def check_constant_output(self, expr, expected):
        self.assertFalse(isnan(expected))
        return self.check_output(expr, full((5, 5), expected, float))

Example 47

Project: zipline Source File: test_numerical_expression.py
    def test_many_inputs(self):
        """
        Test adding NumericalExpressions with >10 inputs.
        """
        # Create an initial NumericalExpression by adding two factors together.
        f = self.f
        expr = f + f

        self.fake_raw_data = {f: full((5, 5), 0, float)}
        expected = 0

        # Alternate between adding and subtracting factors. Because subtraction
        # is not commutative, this ensures that we are combining factors in the
        # correct order.
        ops = (add, sub)

        for i, name in enumerate(ascii_uppercase):
            op = ops[i % 2]
            NewFactor = type(
                name,
                (Factor,),
                dict(dtype=float64_dtype, inputs=(), window_length=0),
            )
            new_factor = NewFactor()

            # Again we need a NumericalExpression, so add two factors together.
            new_expr = new_factor + new_factor
            self.fake_raw_data[new_factor] = full((5, 5), i + 1, float)
            expr = op(expr, new_expr)

            # Double the expected output since each factor is counted twice.
            expected = op(expected, (i + 1) * 2)

        self.check_output(expr, full((5, 5), expected, float))

Example 48

Project: pvlib-python Source File: test_clearsky.py
def test_ineichen_nans():
    length = 4

    apparent_zenith = np.full(length, 10.)
    apparent_zenith[0] = np.nan

    linke_turbidity = np.full(length, 3.)
    linke_turbidity[1] = np.nan

    dni_extra = np.full(length, 1370.)
    dni_extra[2] = np.nan

    airmass_absolute = np.full(length, 1.)

    expected = OrderedDict()
    expected['ghi'] = np.full(length, np.nan)
    expected['dni'] = np.full(length, np.nan)
    expected['dhi'] = np.full(length, np.nan)

    expected['ghi'][length-1] = 1053.205472
    expected['dni'][length-1] = 946.352797
    expected['dhi'][length-1] = 121.2299

    out = clearsky.ineichen(apparent_zenith, airmass_absolute,
                            linke_turbidity, dni_extra=dni_extra)

    for k, v in expected.items():
        assert_allclose(expected[k], out[k])

Example 49

Project: zipline Source File: test_minute_bars.py
    def test_nans(self):
        """
        Test writing empty data.
        """
        sid = 1
        last_date = self.writer.last_date_in_output_for_sid(sid)
        self.assertIs(last_date, NaT)

        self.writer.pad(sid, TEST_CALENDAR_START)

        last_date = self.writer.last_date_in_output_for_sid(sid)
        self.assertEqual(last_date, TEST_CALENDAR_START)

        freq = self.market_opens.index.freq
        minute = self.market_opens[TEST_CALENDAR_START + freq]
        minutes = date_range(minute, periods=9, freq='min')
        data = DataFrame(
            data={
                'open': full(9, nan),
                'high': full(9, nan),
                'low': full(9, nan),
                'close': full(9, nan),
                'volume': full(9, 0.0),
            },
            index=[minutes])
        self.writer.write_sid(sid, data)

        fields = ['open', 'high', 'low', 'close', 'volume']

        ohlcv_window = list(map(transpose, self.reader.load_raw_arrays(
            fields, minutes[0], minutes[-1], [sid],
        )))

        for i, field in enumerate(fields):
            if field != 'volume':
                assert_array_equal(full(9, nan), ohlcv_window[i][0])
            else:
                assert_array_equal(zeros(9), ohlcv_window[i][0])

Example 50

Project: zipline Source File: test_numerical_expression.py
    def test_math_functions(self):
        f, g = self.f, self.g

        fake_raw_data = self.fake_raw_data
        alt_fake_raw_data = {
            self.f: full((5, 5), .5),
            self.g: full((5, 5), -.5),
        }

        for funcname in NUMEXPR_MATH_FUNCS:
            method = methodcaller(funcname)
            func = getattr(numpy, funcname)

            # These methods have domains in [0, 1], so we need alternate inputs
            # that are in the domain.
            if funcname in ('arcsin', 'arccos', 'arctanh'):
                self.fake_raw_data = alt_fake_raw_data
            else:
                self.fake_raw_data = fake_raw_data

            f_val = self.fake_raw_data[f][0, 0]
            g_val = self.fake_raw_data[g][0, 0]

            self.check_constant_output(method(f), func(f_val))
            self.check_constant_output(method(g), func(g_val))

            self.check_constant_output(method(f) + 1, func(f_val) + 1)
            self.check_constant_output(1 + method(f), 1 + func(f_val))

            self.check_constant_output(method(f + .25), func(f_val + .25))
            self.check_constant_output(method(.25 + f), func(.25 + f_val))

            self.check_constant_output(
                method(f) + method(g),
                func(f_val) + func(g_val),
            )
            self.check_constant_output(
                method(f + g),
                func(f_val + g_val),
            )