numpy.nan

Here are examples of the Python API numpy.nan collected from open source projects.

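Before the project examples, a minimal sketch of the basic semantics of numpy.nan (plain NumPy, nothing project-specific assumed):

import numpy as np

x = np.nan            # np.nan is an ordinary Python float (an IEEE 754 quiet NaN)
print(x == x)         # False -- NaN never compares equal, not even to itself
print(np.isnan(x))    # True  -- the reliable value test for NaN
print(x + 1.0)        # nan   -- NaN propagates through arithmetic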

Example 1

Project: sherpa
Source File: test_estmethods.py
    def test_covar(self):
        standard = numpy.array([[ 0.4935702,  0.06857833, numpy.nan],
                                [ 0.06857833, 0.26405554, numpy.nan],
                                [ numpy.nan,  numpy.nan,  2.58857314]])
        results = Covariance().compute(stat, None, fittedpars,
                                       minpars, maxpars,
                                       hardminpars, hardmaxpars,
                                       limit_parnums, freeze_par, thaw_par,
                                       report_progress, get_par_name)
        self.assertEqualWithinTol(standard.diagonal(),
                                  #results[2].diagonal(), 1e-4)
                                  results[1], 1e-4)

Example 2

Project: bamboo
Source File: operations.py
    def eval(self, row, dataset):
        for token in self.value:
            case_result = token.eval(row, dataset)
            if case_result:
                return case_result

        return np.nan

Example 3

Project: pypot
Source File: sonar.py
    def __init__(self, name, i2c_pin, address, sync_freq=50.0):
        Sensor.__init__(self, name)

        self._d = numpy.nan

        self._sonar = Sonar(i2c_pin, [address])

        self._controller = StoppableLoopThread(sync_freq, update=self.update)
        self._controller.start()

Example 4

Project: xarray
Source File: test_ops.py
    @mark.parametrize("val1, val2, val3, null", [
        (1, 2, 3, None),
        (1., 2., 3., np.nan),
        (1., 2., 3., None),
        ('foo', 'bar', 'baz', None),
    ])
    def test_types(self, val1, val2, val3, null):
        arr1 = np.array([val1, null, val3, null])
        arr2 = np.array([val1, val2, null, null])
        assert array_notnull_equiv(arr1, arr2)
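
A side note on why the test parametrizes both None and np.nan as the null value: they produce arrays with different dtypes. A small sketch, not part of the xarray suite:

import numpy as np

print(np.array([1., None, 3.]).dtype)    # object  -- None forces an object array
print(np.array([1., np.nan, 3.]).dtype)  # float64 -- np.nan stays a native float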

Example 5

Project: RLScore
Source File: cindex_measure.py
def cindex_multitask(Y, P):
    perfs = []
    for i in range(Y.shape[1]):
        try:
            perfs.append(cindex_singletask(Y[:,i], P[:,i]))
        except UndefinedPerformance:
            perfs.append(np.nan)
    return perfs
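
The try/except pattern above, returning np.nan where a metric is undefined, recurs throughout these examples. In generic form it might look like the following sketch (safe_ratio is a hypothetical helper):

import numpy as np

def safe_ratio(a, b):
    # NaN as an "undefined result" sentinel: downstream aggregation
    # (np.nanmean, pandas dropna, ...) can then skip it cleanly.
    try:
        return a / b
    except ZeroDivisionError:
        return np.nan

print(safe_ratio(1.0, 2.0))  # 0.5
print(safe_ratio(1.0, 0.0))  # nan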

Example 6

Project: gensim
Source File: svd_error.py
def norm2(a):
    """Spectral norm ("norm 2") of a symmetric matrix `a`."""
    if COMPUTE_NORM2:
        logging.info("computing spectral norm of a %s matrix" % str(a.shape))
        return scipy.linalg.eigvalsh(a).max() # much faster than numpy.linalg.norm(2)
    else:
        return numpy.nan

Example 7

Project: attention-lvcsr
Source File: test_bricks.py
def test_linear_nan_allocation():
    x = tensor.matrix()

    linear = Linear(input_dim=16, output_dim=8, weights_init=Constant(2),
                    biases_init=Constant(1))
    linear.apply(x)
    w1 = numpy.nan * numpy.zeros((16, 8))
    w2 = linear.parameters[0].get_value()
    b1 = numpy.nan * numpy.zeros(8)
    b2 = linear.parameters[1].get_value()
    numpy.testing.assert_equal(w1, w2)
    numpy.testing.assert_equal(b1, b2)
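
Two things are worth noting here, shown in isolation as a small sketch (independent of the bricks test): numpy.nan * numpy.zeros(shape) builds a NaN-filled array by multiplication (np.full is the more direct idiom), and numpy.testing.assert_equal deliberately treats NaNs at matching positions as equal, which a plain == comparison would not:

import numpy as np

w1 = np.nan * np.zeros((16, 8))   # NaN-filled array via the multiply trick
w2 = np.full((16, 8), np.nan)     # the more direct allocation idiom
np.testing.assert_equal(w1, w2)   # passes: assert_equal treats NaNs at
                                  # matching positions as equal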

Example 8

Project: GPy
Source File: bernoulli.py
    def predictive_variance(self, mu, variance, pred_mean, Y_metadata=None):

        if isinstance(self.gp_link, link_functions.Heaviside):
            return 0.
        else:
            return np.nan

Example 9

Project: InterMol
Source File: convert.py
def find_match(key, dictionary, unit):
    """Helper function for `summarize_energy_results`. """
    if key in dictionary:
        return dictionary[key].value_in_unit(unit)
    else:
        return np.nan

Example 10

Project: pyopendata
Source File: jstat.py
def _parse_values(values, size):
    if isinstance(values, list):
        return values
    elif isinstance(values, dict):
        result = [np.nan] * size
        for k, v in compat.iteritems(values):
            result[int(k)] = v
        return result
    else:
        raise ValueError("'values' must be list or dict")
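
The [np.nan] * size placeholder list used above, reduced to a standalone sketch:

import numpy as np

size = 4
result = [np.nan] * size          # list of NaN placeholders
for k, v in {'0': 10, '2': 30}.items():
    result[int(k)] = v
print(result)                     # [10, nan, 30, nan]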

Example 11

Project: opticspy
Source File: art3d.py
    def do_3d_projection(self, renderer):
        xs, ys, zs = self._offsets3d
        vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)

        fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
               self._facecolor3d)
        fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
        self.set_facecolors(fcs)

        ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
               self._edgecolor3d)
        ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
        self.set_edgecolors(ecs)
        PathCollection.set_offsets(self, list(zip(vxs, vys)))

        if vzs.size > 0 :
            return min(vzs)
        else :
            return np.nan

Example 12

Project: properscoring
Source File: test_crps.py
    def test_crps_toy_examples_nan(self):
        examples = [
            (np.nan, 0),
            (0, np.nan),
            (0, [np.nan, np.nan]),
            (0, [1], [np.nan]),
            (0, [np.nan], [1]),
            (np.nan, [1], [1]),
        ]
        for args in examples:
            self.assertTrue(
                np.isnan(crps_ensemble(*args)))

Example 13

Project: urbansim
Source File: test_util.py
@pytest.fixture
def rates():
    return pd.DataFrame(
        {'var1_min': [np.nan, np.nan, np.nan],
         'var1_max': [1, np.nan, np.nan],
         'var2_min': [np.nan, 7, np.nan],
         'var2_max': [np.nan, 8, np.nan],
         'var3': [np.nan, np.nan, 't'],
         'probability_of_relocating': [1, 1, 1]})

Example 14

Project: mystic
Source File: monitors.py
    def __init__(self, interval=10, xinterval=numpy.inf, all=True, **kwds):
        super(VerboseMonitor,self).__init__(**kwds)
        if not interval or interval is numpy.nan: interval = numpy.inf
        if not xinterval or xinterval is numpy.nan: xinterval = numpy.inf
        self._yinterval = interval
        self._xinterval = xinterval
        self._all = all
        return
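
A caveat on the "interval is numpy.nan" identity test above: it only catches the numpy.nan singleton itself, not NaNs produced elsewhere, which is why math.isnan / np.isnan are the usual value tests. A small illustrative sketch:

import math
import numpy as np

print(np.nan is np.nan)           # True  -- the very same float object
print(float('nan') is np.nan)     # False -- a different NaN object
print(math.isnan(float('nan')))   # True  -- the value test works for any NaN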

Example 15

Project: xlwings
Source File: test_conversion.py
    def test_dataframe_1(self):
        df_expected = pd.DataFrame([[1, 'test1'],
                                    [2, 'test2'],
                                    [np.nan, None],
                                    [3.3, 'test3']], columns=['a', 'b'])
        self.wb1.sheets[0].range('A1').value = df_expected
        df_result = self.wb1.sheets[0].range('A1:C5').options(pd.DataFrame).value
        df_result.index = pd.Int64Index(df_result.index)
        assert_frame_equal(df_expected, df_result)

Example 16

Project: odo
Source File: test_pandas.py
def test_nan_to_nat():
    assert odo(float('nan'), pd.Timestamp) is pd.NaT
    assert odo(float('nan'), pd.Timedelta) is pd.NaT
    assert odo(np.nan, pd.Timestamp) is pd.NaT
    assert odo(np.nan, pd.Timedelta) is pd.NaT

    with pytest.raises(NetworkXNoPath):
        # Check that only nan can be converted.
        odo(0.5, pd.Timestamp)

    with pytest.raises(NetworkXNoPath):
        # Check that only nan can be converted.
        odo(0.5, pd.Timedelta)

Example 17

Project: datashader
Source File: test_glyphs.py
def test_extend_lines_nan():
    xs = np.array([-3, -2, np.nan, 0, 1])
    ys = np.array([-3, -2, np.nan, 0, 1])
    agg = new_agg()
    extend_line(vt, bounds, xs, ys, True, agg)
    out = np.diag([1, 1, 0, 1, 1])
    np.testing.assert_equal(agg, out)

Example 18

Project: brut
Source File: cluster_confusion.py
def plot_stamps(stamps, **kwargs):
    kwargs.setdefault('facecolor', 'none')
    kwargs.setdefault('edgecolor', 'b')
    kwargs.setdefault('alpha', .1)
    label = kwargs.pop('label', None)

    ax = plt.gca()
    for s in stamps:
        r = Rectangle((s[1] - s[-1], s[2] - s[-1]),
                      width = 2 * s[-1], height = 2 * s[-1], **kwargs)
        ax.add_patch(r)

    if label is not None:
        plt.plot([np.nan], [np.nan], '-', color = kwargs['edgecolor'],
                 label=label)

Example 19

Project: bruges
Source File: util_test.py
    def test_toptail(self):
        a = np.asarray([np.nan, np.nan, 3, 4, 5, np.nan])
        b = np.asarray([6, 5, 4, 3, 2, 1])
        c = np.asarray([1, 2, 3, 4, 5, 6])
        d = np.asarray([6, 5, 4, 3, 2, 1])
        A, B, C, D = top_and_tail(a, b, c, d)
        for arr in (A, B, C, D):
            self.assertEqual(len(arr), 3)

Example 20

Project: dedupe
Source File: price.py
    @staticmethod
    def comparator(price_1, price_2) :
        if price_1 <= 0 :
            return numpy.nan
        elif price_2 <= 0 :
            return numpy.nan
        else :
            return abs(numpy.log10(price_1) - numpy.log10(price_2))

Example 21

Project: ProjectEuler
Source File: stats.py
def build_per_language(df):
    index = df.Problem.map(int).max()
    languages = set(df.Language)

    data = {lang: np.full(index, np.nan) for lang in languages}
    for _, row in df.iterrows():
        data[row['Language']][int(row['Problem']) - 1] = row['Time']

    df_ = pd.DataFrame(data, index=range(1, index + 1)).dropna(how='all')
    df_.index.name = 'Problems'

    return df_
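
The np.full(n, np.nan) allocation plus dropna(how='all') cleanup above, reduced to a minimal sketch:

import numpy as np
import pandas as pd

data = {'py': np.full(3, np.nan), 'c': np.full(3, np.nan)}
data['py'][0] = 1.2
df = pd.DataFrame(data, index=range(1, 4))
print(df.dropna(how='all'))   # keeps only rows with at least one real value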

Example 22

Project: dplython
Source File: test.py
  def test_suffixes_join(self):
    a = DplyFrame(pd.DataFrame({'x': [1, 1, 2, 3]
                                , 'z': [1, 2, 3, 4]}))
    b = DplyFrame(pd.DataFrame({'x': [1, 2, 2, 4]
                                , 'z': [1, 2, 3, 4]}))
    j_suffix_test = a >> left_join(b, by=['x'])
    j_suffix_pd = DplyFrame(pd.DataFrame({'x': [1, 1, 2, 2, 3]
                              , 'z_x': [1, 2, 3, 3, 4]
                              , 'z_y': [1.0, 1.0, 2.0, 3.0, np.nan]}))
    self.assertTrue((j_suffix_test.columns == j_suffix_pd.columns).all())
    j_suffix_test = a >> left_join(b, by=['x'], suffixes=('_1', '_2'))
    j_suffix_pd = DplyFrame(pd.DataFrame({'x': [1, 1, 2, 2, 3]
                              , 'z_1': [1, 2, 3, 3, 4]
                              , 'z_2': [1.0, 1.0, 2.0, 3.0, np.nan]}))
    self.assertTrue((j_suffix_test.columns == j_suffix_pd.columns).all())

Example 23

Project: SANS_THIR16
Source File: dga_classifier.py
def get_subdomain(hostname):
    try:
        return tldextract.extract(hostname).subdomain
    except ValueError:
        print('Error extracting domain from %s' % (hostname,))
    return np.nan

Example 24

Project: deepTools
Source File: test_countReadsPerBin.py
    def test_get_coverage_of_region_zeros_to_nan(self):
        self.c.zerosToNans = True
        resp, _ = self.c.count_reads_in_region(self.chrom, 0, 200)

        nt.assert_equal(resp, np.array([[np.nan, np.nan],
                                        [np.nan, 1],
                                        [1, 1],
                                        [1, 2]]))

Example 25

Project: courtlistener
Source File: cl_import_judges.py
    def import_fjc_judges(self,infile=None):
        if infile is None:
            self.ensure_input_file()
            infile = self.options['input_file']
        textfields = ['firstname', 'midname', 'lastname', 'gender',
                      'Place of Birth (City)', 'Place of Birth (State)',
                      'Place of Death (City)', 'Place of Death (State)']
        df = pd.read_excel(infile, 0)
        for x in textfields:
            df[x] = df[x].replace(np.nan, '', regex=True)
        df['Employment text field'].replace(to_replace=r';\sno', value=r', no', inplace = True, regex = True)
        for i, row in df.iterrows():
            make_federal_judge(dict(row), testing=self.debug)

Example 26

Project: geopandas
Source File: test_geodataframe.py
    def test_to_json_na(self):
        # Set a value as nan and make sure it's written
        self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan

        text = self.df.to_json()
        data = json.loads(text)
        self.assertTrue(len(data['features']) == 5)
        for f in data['features']:
            props = f['properties']
            self.assertEqual(len(props), 4)
            if props['BoroName'] == 'Queens':
                self.assertTrue(props['Shape_Area'] is None)
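
The NaN-to-null conversion the test relies on can be seen directly with plain pandas (a small sketch, not part of the geopandas suite):

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan])
print(s.to_json())   # {"0":1.0,"1":null} -- NaN is serialized as JSON null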

Example 27

Project: meterstick
Source File: core_test.py
  def testRelativeToSplitsWithNoAlternativeGivesNaN(self):
    data = pd.DataFrame({"X": [1, 2, 3, 4],
                         "Y": [0, 0, 0, 1],
                         "Z": [0, 0, 1, 1]})

    metric = metrics.Sum("X")
    comparison = comparisons.AbsoluteDifference("Y", 0)
    output = core.Analyze(data).split_by("Z").relative_to(comparison).calculate(
        metric).run()

    correct = pd.DataFrame({"sum(X) Absolute Difference": [np.nan, 4 - 3],
                            "Z": [0, 1],
                            "Y": [1, 1]})
    correct = correct.set_index(["Z", "Y"])

    self.assertTrue(output.equals(correct))

Example 28

Project: robothon
Source File: test_regression.py
    def test_array_str_64bit(self, level=rlevel):
        """Ticket #501"""
        s = np.array([1, np.nan],dtype=np.float64)
        errstate = np.seterr(all='raise')
        try:
            sstr = np.array_str(s)
        finally:
            np.seterr(**errstate)

Example 29

Project: physt
Source File: histogram1d.py
    def mean(self):
        """Statistical mean of all values entered into histogram.

        This number is precise, because we keep the necessary data
        separate from bin contents.

        Returns
        -------
        float
        """
        if self._stats:    # TODO: should be true always?
            if self.total > 0:
                return self._stats["sum"] / self.total
            else:
                return np.nan
        else:
            return None    # TODO: or error

Example 30

Project: pIDLy
Source File: pidly.py
    def test_nan(self):
        x = numpy.nan
        y = self.sendAndReceive(x)
        # NB NaN != NaN
        self.assertTrue(numpy.isnan(x))
        self.assertTrue(numpy.isnan(y))
        self.assertEqual(numpy.array(x).shape, y.shape)
        self.assertEqual(numpy.array(x).dtype, y.dtype)

Example 31

Project: mdf
Source File: test_filtering.py
    def test_delay(self):
        actual = self._run(datanode.delaynode(periods=1, initial_value=np.nan))
        self.assertEqual(_normalize(actual), _normalize(delay_expected))

        actual = self._run(datanode.delaynode(periods=1, initial_value=np.nan, filter=filternode))
        self.assertEqual(_normalize(actual), _normalize(delay_filtered_expected))

Example 32

Project: qstrader
Source File: simple.py
    def calculate_max_drawdown_pct(self):
        """
        Calculate the percentage drop related to the "worst"
        drawdown seen.
        """
        drawdown_series = pd.Series(self.drawdowns)
        equity_series = pd.Series(self.equity)
        bottom_index = drawdown_series.idxmax()
        try:
            top_index = equity_series[:bottom_index].idxmax()
            pct = (
                (equity_series.ix[top_index] - equity_series.ix[bottom_index]) /
                equity_series.ix[top_index] * 100
            )
            return round(pct, 4)
        except ValueError:
            return np.nan

Example 33

Project: FaST-LMM
Source File: lrt.py
    def _nullModelMixedEffectNonLinear1Kernel(self, approx, link, penalty):
        if self.forcefullrank:
            assert False, "Not implemented yet."
        else:
            glmm0 = inference.getGLMM(approx, link, self.Y, None, None, penalty=penalty)
        glmm0.setX(self.X)
        glmm0.sety(self.Y)
        glmm0.optimize()
        self.model0 = {}        
        self.model0['h2']=0.0
        self.model0['a2']=NP.nan
        self.model0['nLL']=-glmm0.marginal_loglikelihood()
        self.model0['sig02'] = glmm0.sig02
        self.model0['sig12'] = glmm0.sig12
        self.model0['sign2'] = glmm0.sign2
        for i in range(len(glmm0.beta)):
            self.model0['beta' + str(i)] = glmm0.beta[i]

Example 34

Project: PySnpTools
Source File: unit.py
    def standardize(self, snps, block_size=None, return_trained=False, force_python_only=False):
        if block_size is not None:
            warnings.warn("block_size is deprecated (and not needed, since standardization is in-place)", DeprecationWarning)

        if hasattr(snps,"val"):
            val = snps.val
        else:
            warnings.warn("standardizing an nparray instead of a SnpData is deprecated", DeprecationWarning)
            val = snps

        stats = self._standardize_unit_and_beta(val, is_beta=False, a=np.nan, b=np.nan, apply_in_place=True,use_stats=False,stats=None,force_python_only=force_python_only,)

        if return_trained:
            assert hasattr(snps,"val"), "return_trained=True requires that snps be a SnpData"
            return snps, UnitTrained(snps.sid, stats)
        else:
            return snps

Example 35

Project: pyculiarity
Source File: test_na.py
    def test_handling_of_leading_trailing_nas(self):
        for i in list(range(10)) + [len(self.raw_data) - 1]:
            self.raw_data.set_value(i, 'count', np.nan)

        results = detect_ts(self.raw_data, max_anoms=0.02,
                            direction='both', plot=False)
        eq_(len(results['anoms'].columns), 2)
        eq_(len(results['anoms'].iloc[:,1]), 131)

Example 36

Project: numba
Source File: test_auto_constants.py
    def test_numpy_nan(self):
        def pyfunc():
            return np.nan

        cres = compile_isolated(pyfunc, ())
        cfunc = cres.entry_point

        self.assertTrue(math.isnan(pyfunc()))
        self.assertTrue(math.isnan(cfunc()))

Example 37

Project: pandas-ml
Source File: test_preprocessing.py
    def test_Imputer(self):
        arr = np.array([1, np.nan, 3, 2])
        s = pdml.ModelSeries(arr)

        mod1 = s.pp.Imputer(axis=0)
        s.fit(mod1)
        result = s.transform(mod1)

        expected = np.array([1, 2, 3, 2])

        self.assertIsInstance(result, pdml.ModelSeries)
        self.assert_numpy_array_almost_equal(result.values, expected)

        mod1 = s.pp.Imputer(axis=0)
        result = s.fit_transform(mod1)

        self.assertIsInstance(result, pdml.ModelSeries)
        self.assert_numpy_array_almost_equal(result.values, expected)

Example 38

Project: pygmi
Source File: crisp_clust.py
def gcentroids(data, index, no_clust, mindist):
    """Gcentroids"""
#    no_samples=data.shape[0]
    no_datatypes = data.shape[1]
    centroids = np.tile(np.nan, (no_clust, no_datatypes))
    for j in range(no_clust):
        # find all members of the j-th cluster
        members = (index == j).nonzero()[0]
        # if j is an empty cluster, put one sample into this cluster
        if members.size == 0:
            # take the sample that has the greatest distance to its current
            # cluster and make this the center of the j-th cluster
            idx1 = mindist.argmax(0)
            centroids[j] = data[idx1]
            index[idx1] = j
            mindist[idx1] = 0
        else:
            centroids[j] = data[members].mean(0)
    return centroids, index

Example 39

Project: pyspeckit
Source File: collapse_gaussfit.py
def adaptive_collapse_gaussfit(cube,axis=2,nsig=3,nrsig=4,prefix='interesting',
        vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x,doplot=True):
    """
    Attempts to fit one or two Gaussians to each spectrum in a data cube and returns the parameters of the fits.
    Adaptively determines where to fit two Gaussian components based on residuals.  Will fit 3 gaussians if a
    two-gaussian fit is not better than a certain threshold (specified by nsig), and those fits will be output
    to images with filename prefix+(coordinate).png.  The 3-gaussian fit parameters will not be returned because
    the automated fitting is very unlikely to get that part right.

    inputs:
    cube - a data cube with two spatial and one spectral dimensions
    axis - the axis of the spectral dimension
    nsig - number of sigma over the mean residual to trigger double-gaussian fitting
           also, cutoff to do any fitting at all
    prefix - the prefix (including directory name) of the output images from 3-gaussian fitting
    doplot - option to turn off plotting of triple-gaussian fits

    vconv,xtora,ytodec - functions to convert the axes from pixel coordinates to ra/dec/velocity coordinates

    returns:
    width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2
    The Gaussian widths, line centers (in pixel units), amplitudes, and the chi-squared value, not in that order
    These returns are identical to the returns from double_gaussian, but all components will be zero for the second
    gaussian in the case of a single-gaussian fit

    the triple gaussian is guessed to be the double gaussian plus a broad, low-amplitude gaussian.  Ideally this should
    fit outflows reasonably well, but who knows if it really will.
    Another option is to fit a negative-amplitude gaussian to account for self-absorption

    """
    std_coll = cube.std(axis=axis)       # standard deviation of each spectrum
#    mad_coll = MAD(cube,axis=axis)
    mean_std = median(std_coll.ravel())  # median standard deviation (to reject high-signal spectra that have high std)
    if axis > 0:                         # force spectral axis to first axis
        cube = cube.swapaxes(0,axis)
    width_arr = zeros(cube.shape[1:])   # define gaussian param arrays
    width_arr1 = zeros(cube.shape[1:])   # define gaussian param arrays
    width_arr2 = zeros(cube.shape[1:])   # define gaussian param arrays
    amp_arr = zeros(cube.shape[1:])     # define gaussian param arrays
    amp_arr1 = zeros(cube.shape[1:])     # define gaussian param arrays
    amp_arr2 = zeros(cube.shape[1:])     # define gaussian param arrays
    chi2_arr = zeros(cube.shape[1:])     # define gaussian param arrays
    resid_arr = zeros(cube.shape[1:])    # define gaussian param arrays
    offset_arr = zeros(cube.shape[1:])  # define gaussian param arrays 
    offset_arr1 = zeros(cube.shape[1:])  # define gaussian param arrays 
    offset_arr2 = zeros(cube.shape[1:])  # define gaussian param arrays 
    ncarr = (cube.max(axis=0) > mean_std*nsig)        # cutoff: don't fit no-signal spectra
    starttime = time.time()              # timing for output
    print(cube.shape)
    print("Fitting a total of %i spectra with peak signal above %f" % (ncarr.sum(),mean_std*nsig))
    for i in range(cube.shape[1]):      # Loop over all rows
        t0 = time.time()
        nspec = (cube[:,i,:].max(axis=0) > mean_std*nsig).sum()
        print("Working on row %d with %d spectra to fit" % (i,nspec), end=' ')
        for j in range(cube.shape[2]):
            if cube[:,i,j].max() > mean_std*nsig:
#            if cube[:,i,j].max() > MAD(cube[:,i,j]):            
                pars = return_param(cube[:,i,j])
                width_arr[i,j] = pars[1]
                width_arr1[i,j] = pars[1]
                amp_arr[i,j] = pars[2]
                amp_arr1[i,j] = pars[2]
#                chi2_arr[i,j] = sum(( gerr(cube[:,i,j])(pars) )**2) 
                resid_arr[i,j] = (gerr(cube[:,i,j])(pars)).sum()
                offset_arr[i,j] = pars[0]
                offset_arr1[i,j] = pars[0]
            else: 
                width_arr1[i,j] = numpy.nan
                chi2_arr[i,j] = numpy.nan
                resid_arr[i,j] = numpy.nan
                offset_arr1[i,j] = numpy.nan
        dt = time.time()-t0
        if nspec > 0:
            print("in %f seconds (average: %f)" % (dt,dt/float(nspec)))
        else: 
            print() 
    chi2_arr = resid_arr**2
    resids = ma.masked_where(numpy.isnan(chi2_arr),chi2_arr) # hide bad values
#    residcut = (resids.mean() + (resids.std() * nrsig) )  # Old version - used standard deviation and mean
    residcut = (nanmedian(chi2_arr.ravel()) + (MAD(chi2_arr.ravel()) * nrsig) ) # New version: set cutoff by median + nrsig * MAD
    to_refit = (resids > residcut).astype('bool')
#    to_refit[numpy.isnan(to_refit)] = 0
    inds = array(nonzero(to_refit)).transpose()
    dgc,tgc = 0,0
    print("Refitting a total of %i spectra with peak residual above %f" % (to_refit.sum(),residcut))
    f=open("%s_triples.txt" % prefix,'w')
#    vconv = lambda x: (x-p3+1)*dv+v0    # convert to velocity frame
    vind = vconv(arange(cube[:,0,0].shape[0]))
    xind = arange(cube[:,0,0].shape[0])
    for ind in inds:
        i,j = ind
        doublepars = return_double_param(cube[:,i,j])
        old_chi2 = chi2_arr[i,j]
        new_chi2 = sum(square( double_gerr(cube[:,i,j])(doublepars) )) 
        if new_chi2 < old_chi2: # if 2 gaussians is an improvement, use it!
            chi2_arr[i,j] = new_chi2
            width_arr1[i,j] = doublepars[2]
            width_arr2[i,j] = doublepars[3]
            amp_arr1[i,j] = doublepars[4]
            amp_arr2[i,j] = doublepars[5]
            offset_arr1[i,j] = doublepars[0]
            offset_arr2[i,j] = doublepars[1]
            ncarr[i,j] += 1
        if new_chi2 > residcut: # Even if double was better, see if a triple might be better yet [but don't store it in the params arrays!]
            print("Triple-gaussian fitting at %i,%i (%i'th double, %i'th triple)" % (i,j,dgc,tgc), file=f)
            if tgc % 100 == 0:
                print("Triple-gaussian fitting at %i,%i (%i'th double, %i'th triple)" % (i,j,dgc,tgc))
            tgc += 1
            tpguess = [doublepars[0],doublepars[1],(doublepars[0]+doublepars[1])/2.,doublepars[2],doublepars[3],doublepars[2]*5.,doublepars[4],doublepars[5],doublepars[4]/5.]
            triplepars = return_triple_param(cube[:,i,j],params=tpguess)
            pars = [offset_arr[i,j],width_arr[i,j],amp_arr[i,j]]
            if doplot: # if you don't, there's really no point in fitting at all...
                ax = axes([.05,.05,.7,.9])
                plot(vind,cube[:,i,j],color='black',linestyle='steps',linewidth='.5')
                plot(vind,gaussian(*pars)(xind),'r-.',label="Single %f" % ( (gerr(cube[:,i,j])(pars)).sum() ) )
                plot(vind,double_gaussian(*doublepars)(xind),'g--',label="Double %f" % ( (double_gerr(cube[:,i,j])(doublepars)).sum() ))
                plot(vind,triple_gaussian(*triplepars)(xind),'b:',label="Triple %f" % ( (triple_gerr(cube[:,i,j])(triplepars)).sum() ),linewidth=2)
                pars[0] = vconv(pars[0])
                text(1.05,.8,"c1 %3.2f w1 %3.2f a1 %3.2f" % tuple(pars),transform=ax.transAxes,size='smaller')
                dp = [ vconv(doublepars[0]) , doublepars[2], doublepars[4], vconv(doublepars[1]), doublepars[3], doublepars[5] ]
                text(1.05,.6,"c1 %3.2f w1 %3.2f a1 %3.2f\nc2 %3.2f w2 %3.2f a2 %3.2f" % tuple(dp),transform=ax.transAxes,size='smaller')
                tp = [ vconv(triplepars[0]) , triplepars[3], triplepars[6], vconv(triplepars[1]), triplepars[4], triplepars[7], vconv(triplepars[2]), triplepars[5], triplepars[8]  ]
                text(1.05,.4,"c1 %3.2f w1 %3.2f a1 %3.2f\nc2 %3.2f w2 %3.2f a2 %3.2f\nc3 %3.2f w3 %3.2f a3 %3.2f" % tuple(tp),transform=ax.transAxes,size='smaller')
                title("Spectrum at %s %s" % (ratos(xtora(i)),dectos(ytodec(j))) ) 
                legend(loc='best')
                savefig("%s_%s.%s.png" % (prefix,i,j))
                clf()
            ncarr[i,j] += 1
            print(triplepars, file=f)
        dgc += 1

    f.close()
    print("Total time %f seconds for %i double and %i triple gaussians" % (time.time()-starttime,dgc,tgc))

    return width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2,ncarr

Example 40

Project: pyspeckit
Source File: collapse_gaussfit.py
def collapse_double_gaussfit(cube,axis=2):
    std_coll = cube.std(axis=axis)
    mean_std = median(std_coll.ravel())
    if axis > 0:
        cube = cube.swapaxes(0,axis)
    width_arr1 = zeros(cube.shape[1:])
    width_arr2 = zeros(cube.shape[1:])
    amp_arr1 = zeros(cube.shape[1:])
    amp_arr2 = zeros(cube.shape[1:])
    chi2_arr = zeros(cube.shape[1:])
    offset_arr1 = zeros(cube.shape[1:])
    offset_arr2 = zeros(cube.shape[1:])
    starttime = time.time()
    print(cube.shape)
    print("Fitting a total of %i spectra with peak signal above %f" % ((cube.max(axis=0) > mean_std).sum(),mean_std))
    for i in range(cube.shape[1]):
        t0 = time.time()
        nspec = (cube[:,i,:].max(axis=0) > mean_std).sum()
        print("Working on row %d with %d spectra to fit" % (i,nspec), end=' ')
        for j in range(cube.shape[2]):
            if cube[:,i,j].max() > mean_std:
                pars = return_double_param(cube[:,i,j])
                width_arr1[i,j] = pars[2]
                width_arr2[i,j] = pars[3]
                amp_arr1[i,j] = pars[4]
                amp_arr2[i,j] = pars[5]
                chi2_arr[i,j] = sum(( double_gerr(cube[:,i,j])(pars) )**2) 
                offset_arr1[i,j] = pars[0]
                offset_arr2[i,j] = pars[1]
            else: 
                width_arr1[i,j] = numpy.nan
                width_arr2[i,j] = numpy.nan
                chi2_arr[i,j] = numpy.nan
                offset_arr1[i,j] = numpy.nan
                offset_arr2[i,j] = numpy.nan
        dt = time.time()-t0
        if nspec > 0:
            print("in %f seconds (average: %f)" % (dt,dt/float(nspec)))
    print("Total time %f seconds" % (time.time()-starttime))

    return width_arr1,width_arr2,chi2_arr,offset_arr1,offset_arr2,amp_arr1,amp_arr2

Example 41

def findCircularArcCentrePoint_new(r, x_1, y_1, x_2, y_2, largeArc, sweep, debug=False ):
    '''
    http://www.w3.org/TR/SVG/paths.html#PathDataEllipticalArcCommands
    The elliptical arc command draws a section of an ellipse which meets the following constraints:

    Of the four candidate arc sweeps, two will represent an arc sweep of greater than or equal to 180 degrees (the "large-arc"), and two will represent an arc sweep of less than or equal to 180 degrees (the "small-arc"). 
    If large-arc-flag is '1', then one of the two larger arc sweeps will be chosen; otherwise, if large-arc-flag is '0', one of the smaller arc sweeps will be chosen.
    If sweep-flag is '1', then the arc will be drawn in a "positive-angle" direction (i.e., the ellipse formula x=cx+rx*cos(theta) and y=cy+ry*sin(theta) is evaluated such that theta starts at an angle corresponding to the current point and increases positively until the arc reaches (x,y)). 
    A value of 0 causes the arc to be drawn in a "negative-angle" direction (i.e., theta starts at an angle value corresponding to the current point and decreases until the arc reaches (x,y)).

    Center calculation
    (x_1 - x_c)**2 + (y_1 - y_c)**2 = r**2  (1)
    (x_2 - x_c)**2 + (y_2 - y_c)**2 = r**2  (2)
    giving 2 possible centre points; this is where largeArc and sweep come in,
    using geometry to solve for the centre point...


    '''
    # the law of cosines states c^2 = a^2 + b^2 - 2ab*cos(gamma)
    c,a = r,r
    b = ( ( x_2-x_1 )**2 + ( y_2-y_1 )**2 ) ** 0.5
    if a*b != 0:
        cos_gamma = ( a**2 + b**2 - c**2 ) / ( 2*a*b )
    else:
        return numpy.nan, numpy.nan
    gamma = arccos2( cos_gamma )
    if isnan(gamma):
        return numpy.nan, numpy.nan
    if debug: print('x1,y1 : %1.2f, %1.2f' % (x_1, y_1))
    if debug: print('x2,y2 : %1.2f, %1.2f' % (x_2, y_2))
    if debug: print('large arc : %s' % largeArc)
    if debug: print('sweep     : %s' % sweep ) 
    if debug: print('gamma %3.1f' % (gamma/pi*180))
    
    angle_1_2 = arctan2( y_2 - y_1, x_2 - x_1) #range ``[-pi, pi]``
    # given the two possible center points of
    #c_x = x_1 + r*cos(angle_1_2 + gamma)
    #c_y = y_1 + r*sin(angle_1_2 + gamma)
    #if debug: print('possible c_x,c_y at %1.2f, %1.2f' % (c_x, c_y))
    #c_x_alt = x_1 + r*cos(angle_1_2 - gamma)
    #c_y_alt = y_1 + r*sin(angle_1_2 - gamma)
    #if debug: print('      or c_x,c_y at %1.2f, %1.2f' % (c_x_alt, c_y_alt))

    #A = array([x_1, y_1, 0.0])
    #B = array([x_2, y_2, 0.0])
    #C = array([c_x, c_y, 0.0])
    #if debug: print('cross(A-C, B-A)[2]  :  %s' % cross(A-C, B-A))  #Always positve, must be a result of construction!
    #small_arc_theta_inc = cross(A-C, B-A)[2] > 0  #CW = clock wise
    #large_arc_theta_inc = not small_arc_theta_inc
    #if debug: print('small_arc_theta_inc : %s' % small_arc_theta_inc)
    #if largeArc:
    #    correctCentre = large_arc_theta_inc == sweep
    #else: #small arc
    #    correctCentre = small_arc_theta_inc == sweep
    if largeArc: # from geometric construction (I think)
        addGamma = not sweep
    else:
        addGamma = sweep
    if addGamma:
        c_x = x_1 + r*cos(angle_1_2 + gamma)
        c_y = y_1 + r*sin(angle_1_2 + gamma)
    else:
        c_x = x_1 + r*cos(angle_1_2 - gamma)
        c_y = y_1 + r*sin(angle_1_2 - gamma)
    return c_x, c_y

Example 42

def findCircularArcCentrePoint_old(r, x_1, y_1, x_2, y_2, largeArc, sweep, debug=False ):
    '''
    (x_1 - x_c)**2 + (y_1 - y_c)**2 = r**2  (1)
    (x_2 - x_c)**2 + (y_2 - y_c)**2 = r**2  (2)
    giving 2 possible centre points; this is where largeArc and sweep come in,
    using geometry to solve for the centre point...
    '''
    from numpy import arccos, arctan2, sin, cos, pi
    # the law of cosines states c^2 = a^2 + b^2 - 2ab*cos(gamma)
    c,a = r,r
    b = ( ( x_2-x_1 )**2 + ( y_2-y_1 )**2 ) ** 0.5
    if a*b != 0:
        cos_gamma = ( a**2 + b**2 - c**2 ) / ( 2*a*b )
    else:
        return numpy.nan, numpy.nan
    gamma = arccos2( cos_gamma )
    if isnan(gamma):
        return numpy.nan, numpy.nan
    if debug: print('x1,y1 : %1.2f, %1.2f' % (x_1, y_1))
    if debug: print('x2,y2 : %1.2f, %1.2f' % (x_2, y_2))
    if debug: print('large arc : %s' % largeArc)
    if debug: print('sweep     : %s' % sweep ) 
    if debug: print('gamma %3.1f' % (gamma/pi*180))
    
    angle_1_2 = arctan2( y_2 - y_1, x_2 - x_1) #range ``[-pi, pi]``
    # given the two possible center points of
    c_x = x_1 + r*cos(angle_1_2 + gamma)
    c_y = y_1 + r*sin(angle_1_2 + gamma)
    if debug: print('possible c_x,c_y at %1.2f, %1.2f' % (c_x, c_y))
    c_x_alt = x_1 + r*cos(angle_1_2 - gamma)
    c_y_alt = y_1 + r*sin(angle_1_2 - gamma)
    if debug: print('      or c_x,c_y at %1.2f, %1.2f' % (c_x_alt, c_y_alt))

    angle_1 = arctan2( y_1 - c_y, x_1 - c_x)
    angle_2 = arctan2( y_2 - c_y, x_2 - c_x)
    if debug: print('  angle_1 %3.1f deg' % (angle_1 / pi * 180))
    if debug: print('  angle_2 %3.1f deg' % (angle_2 / pi * 180))
    if not largeArc:
        if abs(angle_1 - angle_2) > pi:
            if angle_1 < angle_2:
                angle_1 = angle_1 + 2*pi
            else:
                angle_2 = angle_2 + 2*pi
    else:
        if abs(angle_1 - angle_2) < pi:
            if angle_1 < angle_2:
                angle_1 = angle_1 + 2*pi
            else:
                angle_2 = angle_2 + 2*pi
    if debug: print('after largeArc flag correction')
    if debug: print('  angle_1 %3.1f deg' % (angle_1 / pi * 180))
    if debug: print('  angle_2 %3.1f deg' % (angle_2 / pi * 180))
    if sweep:
        correctCentre = angle_2 > angle_1
    else:
        correctCentre = angle_2 < angle_1
    if correctCentre:
        return c_x, c_y
    else:
        return c_x_alt,  c_y_alt 

Example 43

Project: mhcflurry
Source File: scoring.py
def make_scores(
        ic50_y,
        ic50_y_pred,
        sample_weight=None,
        threshold_nm=500,
        max_ic50=50000):
    """
    Calculate AUC, F1, and Kendall Tau scores.

    Parameters
    -----------
    ic50_y : float list
        true IC50s (i.e. affinities)

    ic50_y_pred : float list
        predicted IC50s

    sample_weight : float list [optional]

    threshold_nm : float [optional]

    max_ic50 : float [optional]

    Returns
    -----------
    dict with entries "auc", "f1", "tau"
    """

    y_pred = mhcflurry.regression_target.ic50_to_regression_target(
        ic50_y_pred, max_ic50)
    try:
        auc = sklearn.metrics.roc_auc_score(
            ic50_y <= threshold_nm,
            y_pred,
            sample_weight=sample_weight)
    except ValueError:
        auc = numpy.nan
    try:
        f1 = sklearn.metrics.f1_score(
            ic50_y <= threshold_nm,
            ic50_y_pred <= threshold_nm,
            sample_weight=sample_weight)
    except ValueError:
        f1 = numpy.nan
    try:
        tau = scipy.stats.kendalltau(ic50_y_pred, ic50_y)[0]
    except ValueError:
        tau = numpy.nan

    return dict(
        auc=auc,
        f1=f1,
        tau=tau)

Example 44

Project: apogee
Source File: __init__.py
def wv2pix(wv,apStarWavegrid=False):
    """
    NAME:
       wv2pix
    PURPOSE:
       convert wavelength to pixel using interpolated function
    INPUT:
       wv - wavelength (int), range of wavelengths (tuple) or list of wavelengths
            (list/numpy array) in Angstroms
       apStarWavegrid = (False) uses aspcapStarWavegrid by default
    OUTPUT:
       array of pixel(s) corresponding to input wavelength(s)
       nan - indicates input wavelength(s) outside the range
       None - indicates input wavelength type not recognized
       0 - indicates the wavelength can be found but is outside the bounds of
           the aspcapStarWavegrid
    HISTORY:
       2016-10-18 - Written - Price-Jones
    """
    # Check input cases
    if isinstance(wv,(int,float)):
        if wv >= wvs[0] and wv <= wvs[-1]:
            pixels = apStar_pixel_interp(wv)
        else:
            warnings.warn("wavelength outside allowed wavelength range",RuntimeWarning)
            return numpy.nan 
    elif isinstance(wv,tuple):
        if wv[0] >= wvs[0] and wv[1] <= wvs[-1]:
            wvlist = numpy.arange(wv[0],wv[1],wv[2])
            pixels = apStar_pixel_interp(wvlist)
        else:
            warnings.warn("wavelength bounds outside allowed wavelength range",RuntimeWarning)
            return numpy.nan       
    elif isinstance(wv,(list,numpy.ndarray)):
        if isinstance(wv,list):
          wv = numpy.array(wv)
        pixels = numpy.zeros(len(wv))
        valid = (wv>=wvs[0]) & (wv<wvs[-1])
        invalid = (wv<wvs[0]) | (wv>wvs[-1])
        pixels[valid] = apStar_pixel_interp(wv[valid])
        pixels[invalid] = numpy.nan
        if sum(invalid)!=0:
            warnings.warn("wavelength outside allowed wavelength range",RuntimeWarning)
    # If input not recognized inform the user
    elif not isinstance(wv,(int,float,tuple,list,numpy.ndarray)):
        warnings.warn("unrecognized wavelength input",RuntimeWarning)
        return None

    if apStarWavegrid:
        return pixels.astype(int)

    # If on aspcapStarWavegrid, convert appropriately    
    elif not apStarWavegrid:        
        # find where pixel list matches detectors
        blue = numpy.where((pixels >= apStarBlu_lo) & (pixels < apStarBlu_hi))
        green = numpy.where((pixels >= apStarGre_lo) & (pixels < apStarGre_hi))
        red = numpy.where((pixels >= apStarRed_lo) & (pixels < apStarRed_hi))
        # find where pixel list does not match detectors
        if pixels.size > 1:
            nomatch = (numpy.array([i for i in range(len(pixels)) if i not in blue[0] and i not in green[0] and i not in red[0]]),)
            # adjust pixel values to match aspcap wavegrid
            pixels[blue] -= (apStarBlu_lo-aspcapBlu_start)
            pixels[green] -= (apStarGre_lo-aspcapGre_start)
            pixels[red] -= (apStarRed_lo-aspcapRed_start)
        # Case of single wavelength
        elif pixels.size == 1:
            if blue[0].size==1:
                pixels -= (apStarBlu_lo-aspcapBlu_start)
            elif green[0].size==1:
                pixels -= (apStarGre_lo-aspcapGre_start)
            elif red[0].size==1:
                pixels -= (apStarRed_lo-aspcapRed_start)
            elif blue[0].size==0 and green[0].size==0 and red[0].size==0:
                nomatch = ([0],)
                pixels = 0
        return numpy.floor(pixels).astype(int)

Example 45

Project: chainer
Source File: link.py
    def add_param(self, name, shape, dtype=numpy.float32, initializer=None):
        """Registers a parameter to the link.

        The registered parameter is saved and loaded on serialization and
        deserialization, and involved in the optimization. The data and
        gradient of the variable are initialized by NaN arrays.
        If ``initializer`` is not ``None``, the data is initialized by
        ``initializer``.

        If the supplied ``name`` argument corresponds to an uninitialized
        parameter (that is, one that was added with the
        :meth:`add_uninitialized_param` method), ``name`` will be removed
        from the set of uninitialized parameters.

        The parameter is set to an attribute of the link with the given name.

        Args:
            name (str): Name of the parameter. This name is also used as the
                attribute name. Any uninitialized parameters with the same
                name will be removed.
            shape (int or tuple of ints): Shape of the parameter array.
            dtype: Data type of the parameter array.
            initializer(chainer.initializer.Initializer): If it is not
                ``None``, the data is initialized with the given initializer.
                Note that in this case ``dtype`` argument is ignored.

        """
        d = self.__dict__
        if name in d:
            raise AttributeError(
                'cannot register a new parameter %s: attribute exists'
                % name)
        if initializer is None:
            data = self.xp.full(shape, numpy.nan, dtype=dtype)
        else:
            data = initializers.generate_array(initializer, shape, self.xp)
        grad = self.xp.full_like(data, numpy.nan)
        var = variable.Variable(data, volatile='auto', name=name)
        var.grad = grad
        self._params.append(name)
        d[name] = var
        if name in self._uninitialized_params:
            self._uninitialized_params.remove(name)

Example 46

Project: bt
Source File: core.py
    def setup(self, universe):
        """
        Setup strategy with universe. This will speed up future calculations
        and updates.
        """
        # save full universe in case we need it
        self._original_data = universe

        # determine if needs paper trading
        # and setup if so
        if self is not self.parent:
            self._paper_trade = True
            self._paper_amount = 1000000

            paper = deepcopy(self)
            paper.parent = paper
            paper.root = paper
            paper._paper_trade = False
            paper.setup(self._original_data)
            paper.adjust(self._paper_amount)
            self._paper = paper

        # setup universe
        funiverse = universe

        if self._universe_tickers is not None:
            # if we have universe_tickers defined, limit universe to
            # those tickers
            valid_filter = list(set(universe.columns)
                                .intersection(self._universe_tickers))

            funiverse = universe[valid_filter].copy()

            # if we have strat children, we will need to create their columns
            # in the new universe
            if self._has_strat_children:
                for c in self._strat_children:
                    funiverse[c] = np.nan

            # must create to avoid pandas warning
            funiverse = pd.DataFrame(funiverse)

        self._universe = funiverse
        # holds filtered universe
        self._funiverse = funiverse
        self._last_chk = None

        # We're not bankrupt yet
        self.bankrupt = False

        # setup internal data
        self.data = pd.DataFrame(index=funiverse.index,
                                 columns=['price', 'value', 'cash', 'fees'],
                                 data=0.0)

        self._prices = self.data['price']
        self._values = self.data['value']
        self._cash = self.data['cash']
        self._fees = self.data['fees']

        # setup children as well - use original universe here - don't want to
        # pollute with potential strategy children in funiverse
        if self.children is not None:
            [c.setup(universe) for c in self._childrenv]

Example 48

Project: bt
Source File: core.py
    @cy.locals(delta=cy.double, weight=cy.double, base=cy.double)
    def rebalance(self, weight, child, base=np.nan, update=True):
        """
        Rebalance a child to a given weight.

        This is a helper method to simplify code logic. This method is used
        when we want to set the weight of a particular child to a given amount.
        It is similar to allocate, but it calculates the appropriate allocation
        based on the current weight.

        Args:
            * weight (float): The target weight. Usually between -1.0 and 1.0.
            * child (str): child to allocate to - specified by name.
            * base (float): If specified, this is the base amount all weight
                delta calculations will be based off of. This is useful when we
                determine a set of weights and want to rebalance each child
                given these new weights. However, as we iterate through each
                child and call this method, the base (which is by default the
                current value) will change. Therefore, we can set this base to
                the original value before the iteration to ensure the proper
                allocations are made.
            * update (bool): Force update?

        """
        # if weight is 0 - we want to close child
        if weight == 0:
            if child in self.children:
                return self.close(child)
            else:
                return

        # if no base specified use self's value
        if np.isnan(base):
            base = self.value

        # else make sure we have child
        if child not in self.children:
            c = SecurityBase(child)
            c.setup(self._universe)
            # update child to bring up to speed
            c.update(self.now)
            self._add_child(c)

        # allocate to child
        # figure out weight delta
        c = self.children[child]
        delta = weight - c.weight
        c.allocate(delta * base)
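
The base=np.nan default above uses NaN as a "not provided" marker for a float argument, checked with np.isnan (since NaN != NaN, an == test would never match). The same pattern in a minimal standalone sketch (scale is a hypothetical function):

import numpy as np

def scale(x, factor=np.nan):
    if np.isnan(factor):    # NaN default means "fall back to 2.0"
        factor = 2.0
    return x * factor

print(scale(3.0))        # 6.0  -- default applied
print(scale(3.0, 5.0))   # 15.0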

Example 48

Project: pvlib-python
Source File: atmosphere.py
def relativeairmass(zenith, model='kastenyoung1989'):
    '''
    Gives the relative (not pressure-corrected) airmass.

    Gives the airmass at sea-level when given a sun zenith angle (in
    degrees). The ``model`` variable allows selection of different
    airmass models (described below). If ``model`` is not included or is
    not valid, the default model is 'kastenyoung1989'.

    Parameters
    ----------
    zenith : numeric
        Zenith angle of the sun in degrees. Note that some models use
        the apparent (refraction corrected) zenith angle, and some
        models use the true (not refraction-corrected) zenith angle. See
        model descriptions to determine which type of zenith angle is
        required. Apparent zenith angles must be calculated at sea level.

    model : string
        Available models include the following:

        * 'simple' - secant(apparent zenith angle) -
          Note that this gives -inf at zenith=90
        * 'kasten1966' - See reference [1] -
          requires apparent sun zenith
        * 'youngirvine1967' - See reference [2] -
          requires true sun zenith
        * 'kastenyoung1989' - See reference [3] -
          requires apparent sun zenith
        * 'gueymard1993' - See reference [4] -
          requires apparent sun zenith
        * 'young1994' - See reference [5] -
          requires true sun zenith
        * 'pickering2002' - See reference [6] -
          requires apparent sun zenith

    Returns
    -------
    airmass_relative : numeric
        Relative airmass at sea level. Will return NaN values for any
        zenith angle greater than 90 degrees.

    References
    ----------
    [1] Fritz Kasten. "A New Table and Approximation Formula for the
    Relative Optical Air Mass". Technical Report 136, Hanover, N.H.:
    U.S. Army Material Command, CRREL.

    [2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric
    Photometry of the Brighter Planets," The Astronomical Journal, vol.
    72, pp. 945-950, 1967.

    [3] Fritz Kasten and Andrew Young. "Revised optical air mass tables
    and approximation formula". Applied Optics 28:4735-4738

    [4] C. Gueymard, "Critical analysis and performance assessment of
    clear sky solar irradiance models using theoretical and measured
    data," Solar Energy, vol. 51, pp. 121-138, 1993.

    [5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33,
    pp. 1108-1110, Feb 1994.

    [6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20,

    [7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein, "Global
    Horizontal Irradiance Clear Sky Models: Implementation and Analysis"
    Sandia Report, (2012).
    '''

    # need to filter first because python 2.7 does not support raising a
    # negative number to a negative power.
    z = np.where(zenith > 90, np.nan, zenith)
    zenith_rad = np.radians(z)

    model = model.lower()

    if 'kastenyoung1989' == model:
        am = (1.0 / (np.cos(zenith_rad) +
              0.50572*(((6.07995 + (90 - z)) ** - 1.6364))))
    elif 'kasten1966' == model:
        am = 1.0 / (np.cos(zenith_rad) + 0.15*((93.885 - z) ** - 1.253))
    elif 'simple' == model:
        am = 1.0 / np.cos(zenith_rad)
    elif 'pickering2002' == model:
        am = (1.0 / (np.sin(np.radians(90 - z +
              244.0 / (165 + 47.0 * (90 - z) ** 1.1)))))
    elif 'youngirvine1967' == model:
        am = ((1.0 / np.cos(zenith_rad)) *
              (1 - 0.0012*((1.0 / np.cos(zenith_rad)) ** 2) - 1))
    elif 'young1994' == model:
        am = ((1.002432*((np.cos(zenith_rad)) ** 2) +
              0.148386*(np.cos(zenith_rad)) + 0.0096467) /
              (np.cos(zenith_rad) ** 3 +
              0.149864*(np.cos(zenith_rad) ** 2) +
              0.0102963*(np.cos(zenith_rad)) + 0.000303978))
    elif 'gueymard1993' == model:
        am = (1.0 / (np.cos(zenith_rad) +
              0.00176759*(z)*((94.37515 - z) ** - 1.21563)))
    else:
        raise ValueError('%s is not a valid model for relativeairmass' % model)

    if isinstance(zenith, pd.Series):
        am = pd.Series(am, index=zenith.index)

    return am
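
The np.where(zenith > 90, np.nan, zenith) masking at the top of the function, in isolation (a minimal sketch):

import numpy as np

zenith = np.array([30.0, 60.0, 95.0])
z = np.where(zenith > 90, np.nan, zenith)   # out-of-domain angles become NaN
print(z)                                    # [30. 60. nan]
print(1.0 / np.cos(np.radians(z)))          # NaN propagates into the airmass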

Example 49

Project: pvlib-python
Source File: test_pvsystem.py
def test_sapm(sapm_module_params):

    times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
    effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
    temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)

    out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)

    expected = pd.DataFrame(np.array(
      [[  -5.0608322 ,   -4.65037767,           nan,           nan,
                  nan,   -4.91119927,   -4.15367716],
       [   2.545575  ,    2.28773882,   56.86182059,   47.21121608,
         108.00693168,    2.48357383,    1.71782772],
       [   5.65584763,    5.01709903,   54.1943277 ,   42.51861718,
         213.32011294,    5.52987899,    3.48660728],
       [          nan,           nan,           nan,           nan,
                  nan,           nan,           nan],
       [          nan,           nan,           nan,           nan,
                  nan,           nan,           nan]]),
        columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
        index=times)

    assert_frame_equal(out, expected, check_less_precise=4)

    out = pvsystem.sapm(1, 25, sapm_module_params)

    expected = OrderedDict()
    expected['i_sc'] = 5.09115
    expected['i_mp'] = 4.5462909092579995
    expected['v_oc'] = 59.260800000000003
    expected['v_mp'] = 48.315600000000003
    expected['p_mp'] = 219.65677305534581
    expected['i_x'] = 4.9759899999999995
    expected['i_xx'] = 3.1880204359100004

    for k, v in expected.items():
        assert_allclose(out[k], v, atol=1e-4)

    # just make sure it works with a dict input
    pvsystem.sapm(effective_irradiance, temp_cell,
                  sapm_module_params.to_dict())