numpy.log1p

Here are examples of the Python API numpy.log1p taken from open source projects. The number before each source indicates community votes for how useful and appropriate the example is.

223 Examples
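
Before the examples, a quick note on why numpy.log1p exists at all: for |x| much smaller than 1, the naive log(1 + x) loses precision because forming 1 + x in floating point rounds away most of x, while log1p evaluates the same quantity accurately. A minimal sketch:

import numpy as np

x = 1e-10
naive = np.log(1 + x)   # ~1.000000082e-10: wrong in the 8th digit, since 1 + x rounded
accurate = np.log1p(x)  # ~9.9999999995e-11: correct, matching x - x**2/2 + ...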

5 Source : basic.py
with MIT License
from dmitriy-serdyuk

    def impl(self, x):
        # If x is an int8 or uint8, numpy.log1p will compute the result in
        # half-precision (float16), where we want float32.
        x_dtype = str(getattr(x, 'dtype', ''))
        if x_dtype in ('int8', 'uint8'):
            return numpy.log1p(x, sig='f')
        return numpy.log1p(x)

    def grad(self, inputs, gout):
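
An aside on the behavior this snippet works around (a sketch, not from the original project): NumPy picks the float16 loop for int8/uint8 inputs, and the ufunc dtype/signature machinery (spelled sig='f' in the older code above) forces float32 instead.

import numpy as np

x = np.arange(3, dtype=np.int8)
print(np.log1p(x).dtype)                    # float16: default loop chosen for int8 input
print(np.log1p(x, dtype=np.float32).dtype)  # float32: equivalent to the sig='f' override above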

3 Source : least_squares.py
with GNU General Public License v3.0
from adityaprakash-bobby

def cauchy(z, rho, cost_only):
    rho[0] = np.log1p(z)
    if cost_only:
        return
    t = 1 + z
    rho[1] = 1 / t
    rho[2] = -1 / t**2


def arctan(z, rho, cost_only):
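
The rho convention here matches the scipy.optimize.least_squares loss functions: rho[0] is the loss value log(1 + z), rho[1] its first derivative, rho[2] its second. A finite-difference sketch (my own check, not from the project) confirming the two derivative rows:

import numpy as np

z = np.array([0.5, 2.0, 10.0])
t = 1 + z
h = 1e-5
d1 = (np.log1p(z + h) - np.log1p(z - h)) / (2 * h)                 # central difference
d2 = (np.log1p(z + h) - 2 * np.log1p(z) + np.log1p(z - h)) / h**2  # second difference
assert np.allclose(1 / t, d1)
assert np.allclose(-1 / t**2, d2, atol=1e-4)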

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, lambda_):
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(self.a, np.inf)
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, lambda_):
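
This _ppf appears to belong to SciPy's planck (discrete exponential) distribution, whose CDF is F(k) = 1 - exp(-lambda*(k + 1)); inverting gives k = -log(1 - q)/lambda - 1, with log1p(-q) as the stable form of log(1 - q), and the vals1/temp/where step guarding against ceil overshooting by one due to roundoff. A sketch checking the bare closed form (without that guard) against scipy.stats.planck.ppf, for q values away from the boundaries:

import numpy as np
from scipy.stats import planck

lam = 0.3
q = np.array([0.1, 0.5, 0.9, 0.99])
manual = np.ceil(-np.log1p(-q) / lam - 1).clip(0)
assert np.array_equal(manual, planck.ppf(q, lam))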

3 Source : test_loom.py
with GNU General Public License v3.0
from aertslab

def get_gene_expression(loom_file):
    matrix, row_attrs, col_attrs, attrs = loom_file
    lp.create(filename=str(LOOM_PATH), layers=matrix, row_attrs=row_attrs, col_attrs=col_attrs, file_attrs=attrs)
    with lp.connect(LOOM_PATH, mode="r", validate=False) as ds:
        test_loom = Loom(LOOM_PATH, LOOM_PATH, ds, LOOM_FILE_HANDLER)
        np.testing.assert_equal(test_loom.get_gene_expression("Gene_1", True, False), np.log1p(matrix[0]))
        np.testing.assert_equal(test_loom.get_gene_expression("Gene_100", False, False), matrix[99])

3 Source : test_theta.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute

def test_forecaster_with_initial_level():
    """Check prediction performance on airline dataset.

    Performance on this dataset should be reasonably good.

    Raises
    ------
    AssertionError - if point forecasts do not lie close to the test data
    """
    y = np.log1p(load_airline())
    y_train, y_test = temporal_train_test_split(y)
    fh = np.arange(len(y_test)) + 1

    f = ThetaForecaster(initial_level=0.1, sp=12)
    f.fit(y_train)
    y_pred = f.predict(fh=fh)

    np.testing.assert_allclose(y_pred, y_test, rtol=0.05)

3 Source : utils.py
with BSD 3-Clause "New" or "Revised" License
from aristoteleo

def log1p_(adata, X_data):
    if "norm_method" not in adata.uns["pp"].keys():
        return X_data
    else:
        if adata.uns["pp"]["norm_method"] is None:
            if sp.issparse(X_data):
                X_data.data = np.log1p(X_data.data)
            else:
                X_data = np.log1p(X_data)

        return X_data


def inverse_norm(adata, layer_x):

3 Source : _rgr.py
with MIT License
from AtrCheema

    def gmean_diff(self) -> float:
        """Geometric mean difference. First geometric mean is calculated for each of two samples and their difference
        is calculated."""
        sim_log = np.log1p(self.predicted)
        obs_log = np.log1p(self.true)
        return float(np.exp(gmean(sim_log) - gmean(obs_log)))

    def gmrae(self, benchmark: np.ndarray = None) -> float:

3 Source : _rgr.py
with MIT License
from AtrCheema

    def msle(self, weights=None) -> float:
        """
        Mean squared logarithmic error.
        """
        return float(np.average((np.log1p(self.true) - np.log1p(self.predicted)) ** 2, axis=0, weights=weights))

    def norm_euclid_distance(self) -> float:

3 Source : _rgr.py
with MIT License
from AtrCheema

    def rmsle(self) -> float:
        """Root mean square log error.

        This error is less sensitive to [outliers](https://stats.stackexchange.com/q/56658/314919).
        Compared to RMSE, RMSLE only considers the relative error between predicted
        and actual values, and the scale of the error is nullified by the log-transformation.
        Furthermore, RMSLE penalizes underestimation more heavily than overestimation,
        which is especially useful in studies where underestimating the target variable
        is unacceptable but overestimation can be tolerated. [1]

         [1] https://doi.org/10.1016/j.scitotenv.2020.137894
         """
        return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))

    def rmdspe(self) -> float:
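
A tiny numeric check of the asymmetry claim in the docstring (my own sketch): for the same absolute error, underestimating the target produces a larger RMSLE than overestimating it.

import numpy as np

def rmsle(true, predicted):
    return np.sqrt(np.mean((np.log1p(predicted) - np.log1p(true)) ** 2))

true = np.array([100.0])
print(rmsle(true, np.array([50.0])))   # under by 50 -> ~0.683
print(rmsle(true, np.array([150.0])))  # over by 50  -> ~0.402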

3 Source : _discrete_distns.py
with MIT License
from buds-lab

    def _ppf(self, q, p):
        vals = ceil(log1p(-q) / log1p(-p))
        temp = self._cdf(vals-1, p)
        return np.where((temp >= q) & (vals > 0), vals-1, vals)

    def _stats(self, p):

3 Source : _discrete_distns.py
with MIT License
from buds-lab

    def _logsf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-cdf)
                res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(quant + 1, draw + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logcdf(self, k, M, n, N):

3 Source : _discrete_distns.py
with MIT License
from buds-lab

    def _logcdf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            if (quant + 0.5) * (tot + 0.5) > (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-sf)
                res.append(log1p(-exp(self.logsf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(0, quant + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)


hypergeom = hypergeom_gen(name='hypergeom')

3 Source : _discrete_distns.py
with MIT License
from buds-lab

    def _ppf(self, q, lambda_):
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(*(self._get_support(lambda_)))
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)

    def _rvs(self, lambda_):

3 Source : _discrete_distns.py
with MIT License
from buds-lab

    def _rvs(self, alpha):
        E1 = self._random_state.standard_exponential(self._size)
        E2 = self._random_state.standard_exponential(self._size)
        ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
        return ans

    def _pmf(self, x, alpha):

3 Source : utils.py
with MIT License
from cpinte

def Jy_to_Tb(Fnu, nu, pixelscale):
    '''
     Convert flux density in Jy/pixel to brightness temperature [K].
     Fnu [Jy/pixel]
     nu [Hz]
     pixelscale [arcsec]

     Returns T [K]
    '''
    pixel_area = (pixelscale * arcsec) ** 2
    exp_m1 = 1e16 * pixel_area * 2.0 * sc.h / sc.c ** 2 * nu ** 3 / Fnu
    hnu_kT = np.log1p(exp_m1 + 1e-10)

    Tb = sc.h * nu / (hnu_kT * sc.k)

    return Tb


def Wm2_to_Tb(nuFnu, nu, pixelscale):

3 Source : utils.py
with MIT License
from cpinte

def Wm2_to_Tb(nuFnu, nu, pixelscale):
    """Convert flux converted from Wm2/pixel to K using full Planck law.
        Convert Flux density in Jy/beam to brightness temperature [K]
        Flux [W.m-2/pixel]
        nu [Hz]
        bmaj, bmin, pixelscale in [arcsec]
        """
    pixel_area = (pixelscale * arcsec) ** 2
    exp_m1 = pixel_area * 2.0 * sc.h * nu ** 4 / (sc.c ** 2 * nuFnu)
    hnu_kT = np.log1p(exp_m1)

    Tb = sc.h * nu / (sc.k * hnu_kT)

    return Tb


# -- Functions to deal the synthesized beam.
def _beam_area(self):
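
Both converters above invert the Planck law B_nu(T) = (2*h*nu**3/c**2) / (exp(h*nu/(k*T)) - 1) for T, which is why the exp(h*nu/(k*T)) - 1 term (exp_m1) goes through log1p. A standalone sketch of the same inversion in SI units (my own roundtrip check, independent of the pixel-area bookkeeping above):

import numpy as np
import scipy.constants as sc

def planck_Tb(Inu, nu):
    # Solve B_nu(T) = Inu for T: h*nu/(k*T) = log1p(2*h*nu**3 / (c**2 * Inu))
    hnu_kT = np.log1p(2.0 * sc.h * nu**3 / (sc.c**2 * Inu))
    return sc.h * nu / (sc.k * hnu_kT)

nu, T = 230e9, 30.0   # 230 GHz, 30 K
Inu = 2 * sc.h * nu**3 / sc.c**2 / np.expm1(sc.h * nu / (sc.k * T))
assert np.isclose(planck_Tb(Inu, nu), T)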

3 Source : util.py
with MIT License
from czbiohub

def log1p_poisson_around_mean(x):
    """
    Use Taylor expansion of log(1 + y) around y = x to evaluate
    the expected value if y ~ Poisson(x). Note that the central 2nd and 3rd
    moments of Poisson(x) are both equal to x, and that the second and third
    derivatives of log(1 + y) are -(1 + y)**(-2) and 2*(1 + y)**(-3).

    :param x: mean of poisson
    :return: expected value of log(pseudocount + x)
    """
    return np.log1p(x) - x * (1.0 + x) ** (-2) / 2 + x * (1.0 + x) ** (-3) / 3


def expected_sqrt(mean_expression: np.ndarray, cutoff: float = 85.61) -> np.ndarray:
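
A sketch (not from the project) comparing the Taylor approximation against a Monte-Carlo estimate of E[log1p(y)] for y ~ Poisson(x):

import numpy as np

rng = np.random.default_rng(0)
x = 5.0
mc = np.log1p(rng.poisson(x, size=1_000_000)).mean()
taylor = np.log1p(x) - x * (1.0 + x) ** (-2) / 2 + x * (1.0 + x) ** (-3) / 3
print(mc, taylor)  # ~1.714 vs ~1.730: the truncated series overshoots by roughly the neglected 4th-order term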

3 Source : _discrete_distns.py
with Apache License 2.0
from dashanji

    def _ppf(self, q, lambda_):
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(*(self._get_support(lambda_)))
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)

    def _rvs(self, lambda_, size=None, random_state=None):

3 Source : _discrete_distns.py
with Apache License 2.0
from dashanji

    def _rvs(self, alpha, size=None, random_state=None):
        E1 = random_state.standard_exponential(size)
        E2 = random_state.standard_exponential(size)
        ans = ceil(-E1 / log1p(-exp(-E2 / alpha)))
        return ans

    def _pmf(self, x, alpha):

3 Source : element_wise_test.py
with BSD 3-Clause "New" or "Revised" License
from deepanshs

def test_log1p():
    b = np.log1p(a)
    assert np.allclose(b.dependent_variables[0].components[0], np.log1p(data))


# test unit of components

data2 = np.random.rand(15 * 5 * 10).reshape(15, 5, 10) - 0.5

3 Source : hopper.py
with MIT License
from denisyarats

    def touch(self):
        """Returns the signals from two foot touch sensors."""
        return np.log1p(self.named.data.sensordata[['touch_toe',
                                                    'touch_heel']])
    
    def angmomentum(self):

3 Source : snippet.py
with Apache License 2.0
from dockerizeme

def log1p (q):
    return _maybewrap (np.log1p (_usable (q)), q)

# skipped: logaddexp logaddexp2 i0 sinc signbit copysign frexp
# ldexp

def add (left, right):

3 Source : sciann_datagenerator.py
with MIT License
from dtu-act

  def generate_uniform_T_samples(self, num_sample):
    if self.logT is True:
      t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
      t_dom = np.exp(t_dom) - 1.
    else:
      t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
    return t_dom

  def generate_data(self):
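
What the logT branch above does, sketched standalone (t_lo/t_hi stand in for the snippet's Tdomain): sampling uniformly in log1p-space and mapping back yields log-spaced draws, and the log1p/expm1 pair handles a domain that starts at 0 cleanly.

import numpy as np

rng = np.random.default_rng(0)
t_lo, t_hi = 0.0, 10.0
u = rng.uniform(np.log1p(t_lo), np.log1p(t_hi), size=5)
t = np.expm1(u)   # same as np.exp(u) - 1. in the snippet, but exact near 0
assert np.all((t >= t_lo) & (t <= t_hi))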

3 Source : Utils.py
with MIT License
from f90

def normalise_spectrogram(mag, cut_last_freq=True):
    '''
    Normalise audio spectrogram with log-normalisation
    :param mag: Magnitude spectrogram to be normalised
    :param cut_last_freq: Whether to cut highest frequency bin to reach power of 2 in number of bins
    :return: Normalised spectrogram
    '''
    if cut_last_freq:
        # Throw away the last freq bin so the number of freq bins is a power of 2
        out = mag[:-1, :]
    else:
        out = mag  # otherwise `out` would be undefined below

    # Normalize with log1p
    out = np.log1p(out)
    return out

def normalise_spectrogram_torch(mag):

3 Source : single_cell.py
with BSD 2-Clause "Simplified" License
from flatironinstitute

def ln_data(data, **kwargs):
    """
    Transform the expression data by adding one and then taking ln. Ignore any kwargs.

    :param data: InferelatorData [N x G]
    """
    utils.Debug.vprint('Logging data [ln+1]... ')
    data.transform(np.log1p, add_pseudocount=False)


def tf_sqrt_data(data, **kwargs):

3 Source : inference.py
with Apache License 2.0
from ForeverZyh

    def log1p(args: list, node):
        assert len(args) == 1
        if isinstance(args[0].value, Range):
            if args[0].value.left <= -1:
                return Range(left=-OVERFLOW_LIMIT, right=np.log1p(args[0].value.right))
            else:
                return Range(left=np.log1p(args[0].value.left), right=np.log1p(args[0].value.right))
        else:
            return np.log1p(args[0].value)

    @staticmethod

3 Source : label_transforms.py
with BSD 2-Clause "Simplified" License
from havakv

    def fit_transform(self, durations, events):
        train_durations = durations
        durations = durations.astype('float32')
        events = events.astype('float32')
        if self.log_duration:
            durations = np.log1p(durations)
        durations = self.duration_scaler.fit_transform(durations.reshape(-1, 1)).flatten()
        self._inverse_duration_map = {scaled: orig for orig, scaled in zip(train_durations, durations)}
        self._inverse_duration_map = np.vectorize(self._inverse_duration_map.get)
        return durations, events

    def transform(self, durations, events):

3 Source : label_transforms.py
with BSD 2-Clause "Simplified" License
from havakv

    def transform(self, durations, events):
        durations = durations.astype('float32')
        events = events.astype('float32')
        if self.log_duration:
            durations = np.log1p(durations)
        durations = self.duration_scaler.transform(durations.reshape(-1, 1)).flatten()
        return durations, events

    @property

3 Source : data.py
with MIT License
from hmartelb

def amplitude_to_db(mag, amin=1/(2**16), normalize=True):
    mag_db = 20*np.log1p(mag/amin)
    if(normalize):
        mag_db /= 20*np.log1p(1/amin)
    return mag_db

def db_to_amplitude(mag_db, amin=1/(2**16), normalize=True):

3 Source : data.py
with MIT License
from hmartelb

def db_to_amplitude(mag_db, amin=1/(2**16), normalize=True):
    if(normalize):
        mag_db *= 20*np.log1p(1/amin)
    return amin*np.expm1(mag_db/20)

def add_hf(mag, target_shape):
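
Together the two helpers form an exact log-compression roundtrip. A standalone sketch reproducing them (note the original db_to_amplitude mutates its input through *=; this copy avoids that):

import numpy as np

def amplitude_to_db(mag, amin=1/(2**16), normalize=True):
    mag_db = 20 * np.log1p(mag / amin)
    if normalize:
        mag_db /= 20 * np.log1p(1 / amin)
    return mag_db

def db_to_amplitude(mag_db, amin=1/(2**16), normalize=True):
    if normalize:
        mag_db = mag_db * 20 * np.log1p(1 / amin)
    return amin * np.expm1(mag_db / 20)

mag = np.abs(np.random.default_rng(0).normal(size=8))
assert np.allclose(db_to_amplitude(amplitude_to_db(mag)), mag)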

3 Source : target_transform_inverse_example.py
with MIT License
from HunterMcGushion

def log_transform(all_targets):
    all_targets = np.log1p(all_targets)
    return all_targets, np.expm1


def standard_scale(train_inputs, non_train_inputs):
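
Usage sketch for the transform/inverse pair above: the second return value is np.expm1, the exact inverse of np.log1p, so predictions made in log space can be mapped back losslessly.

import numpy as np

y = np.array([0.0, 9.0, 99.0])
y_log, inverse = np.log1p(y), np.expm1   # what log_transform(y) returns
assert np.allclose(inverse(y_log), y)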

3 Source : test_core_transformers.py
with Apache License 2.0
from IBM

    def test_init_fit_predict(self):
        import numpy as np

        import lale.datasets

        ft = FunctionTransformer(func=np.log1p)
        lr = LogisticRegression()
        trainable = ft >> lr
        (train_X, train_y), (test_X, test_y) = lale.datasets.digits_df()
        trained = trainable.fit(train_X, train_y)
        _ = trained.predict(test_X)

    def test_not_callable(self):

3 Source : test_lale_lib_versions.py
with Apache License 2.0
from IBM

    def test_with_defaults(self):
        trainable = FunctionTransformer(func=np.log1p) >> LogisticRegression()
        trained = trainable.fit(self.train_X, self.train_y)
        _ = trained.predict(self.test_X)

    def test_pass_y(self):

3 Source : test_lale_lib_versions.py
with Apache License 2.0
from IBM

    def test_pass_y(self):
        trainable = (
            FunctionTransformer(func=np.log1p, pass_y=False) >> LogisticRegression()
        )
        trained = trainable.fit(self.train_X, self.train_y)
        _ = trained.predict(self.test_X)

    def test_validate(self):

3 Source : test_lale_lib_versions.py
with Apache License 2.0
from IBM

    def test_with_hyperopt(self):
        planned = FunctionTransformer(func=np.log1p) >> LogisticRegression
        trained = planned.auto_configure(
            self.train_X, self.train_y, optimizer=Hyperopt, cv=3, max_evals=3
        )
        _ = trained.predict(self.test_X)


class TestGradientBoostingClassifier(unittest.TestCase):

3 Source : lincs_dl.py
with MIT License
from insilicomedicine

    def transform_dose(self, dose_array):
        # logarithmic scale, s.t. 10 micromoles -> 1 unit
        transformed_dose = torch.from_numpy(np.log1p(dose_array) / np.log(11.))[:, None].float()
        return transformed_dose

    def transform_ptime(self, ptime_array):

3 Source : smooth_sensitivity.py
with MIT License
from isobar-us

def _compute_data_dep_bound_gnmax(sigma, logq, order):
  # Applies Theorem 6 in Appendix without checking that logq satisfies necessary
  # constraints. The pre-conditions must be assured by comparing logq against
  # logq0 by the caller.
  variance = sigma**2
  mu1, mu2 = _compute_mu1_mu2_gnmax(sigma, logq)
  eps1 = mu1 / variance
  eps2 = mu2 / variance

  log1q = np.log1p(-math.exp(logq))  # log1q = log(1-q)
  log_a = (order - 1) * (
      log1q - (np.log1p(-math.exp((logq + eps2) * (1 - 1 / mu2)))))
  log_b = (order - 1) * (eps1 - logq / (mu1 - 1))

  return np.logaddexp(log1q + log_a, logq + log_b) / (order - 1)


def _compute_rdp_gnmax(sigma, logq, order):
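
The log1p(-exp(logq)) idiom above evaluates log(1 - q) directly from log q; it stays accurate when q is far below machine epsilon, where the naive 1 - q rounds to exactly 1. A small sketch:

import numpy as np

logq = -40.0            # q = e**-40 ~ 4.2e-18, well below float64 epsilon
q = np.exp(logq)
print(np.log(1 - q))    # 0.0: the subtraction 1 - q rounded to 1.0
print(np.log1p(-q))     # ~-4.248e-18, the correct value of log(1 - q)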

3 Source : aggregate.py
with MIT License
from ivirshup

def comp_stats(comps):
    stats = pd.DataFrame(
        {
            "n_clusts": [len(c) for c in comps],
            "intersect": [len(c.intersect) for c in comps],
            "union": [len(c.union) for c in comps],
        }
    )
    # TODO: Add some summary of param ranges
    # if param_stats:

    for col in stats:
        stats[f"log1p_{col}"] = np.log1p(stats[col])
    return stats

3 Source : clustree.py
with MIT License
from ivirshup

def set_edge_alpha(g):
    """Create edge_alpha attribute for edges based on normalized log scaled weights."""
    edge_weights = pd.Series(
        {(rec[0], rec[1]): float(rec[2]) for rec in g.edges.data("weight")}
    )
    np.log1p(edge_weights, edge_weights.values)  # inplace log
    nx.set_edge_attributes(
        g, (edge_weights / edge_weights.max()).to_dict(), "edge_alpha"
    )


def clustree(

3 Source : test_data_utils.py
with MIT License
from jiwoncpark

    def test_plus_1_log(self):
        """Test the torch log(1+X) vs. numpy

        """
        actual = data_utils.plus_1_log(self.img_torch)
        expected = np.log1p(self.img_numpy)
        np.testing.assert_array_almost_equal(actual, expected, err_msg='test_plus_1_log')

    def test_rescale_01(self):

3 Source : test_xy_data.py
with MIT License
from jiwoncpark

    def test_X_transformation_log(self):
        """Test if the images transform as expected, with log(1+X)

        """
        train_data = XYData(True, self.Y_cols, 'FloatTensor', define_src_pos_wrt_lens=True, rescale_pixels=False, log_pixels=True, add_pixel_noise=False, eff_exposure_time={'TDLMC_F160W': self.original_exptime}, train_Y_mean=self.train_Y_mean, train_Y_std=self.train_Y_std, train_baobab_cfg_path=self.train_baobab_cfg_path, val_baobab_cfg_path=self.val_baobab_cfg_path, for_cosmology=False)
        actual_img, _ = train_data[0]
        expected_img = self.img_0
        expected_img = np.log1p(expected_img)
        np.testing.assert_array_almost_equal(actual_img, expected_img, err_msg='test_X_transformation_log')

    def test_X_transformation_rescale(self):

3 Source : distributions.py
with MIT License
from ktraunmueller

    def _pdf(self, x, c):
        cx = c*x
        logex2 = where((c == 0)*(x == x),0.0,log1p(-cx))
        logpex2 = where((c == 0)*(x == x),-x,logex2/c)
        pex2 = exp(logpex2)
        # Handle special cases
        logpdf = where((cx == 1) | (cx == -inf),-inf,-pex2+logpex2-logex2)
        putmask(logpdf, (c == 1) & (x == 1), 0.0)
        return exp(logpdf)

    def _cdf(self, x, c):

3 Source : distributions.py
with MIT License
from ktraunmueller

    def _ppf(self, q, lambda_):
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(self.a, np.inf)
        temp = self._cdf(vals1, lambda_)
        return where(temp >= q, vals1, vals)

    def _stats(self, lambda_):

3 Source : test_array.py
with Apache License 2.0
from lasersonlab

    def test_log1p(self, x, xd):
        log1pnps = np.asarray(np.log1p(xd))
        log1pnp = np.log1p(x)
        assert_allclose(log1pnps, log1pnp)

    def test_sum(self, x, xd):

3 Source : regressor.py
with BSD 3-Clause "New" or "Revised" License
from LaureBerti

def LT_log_transform_skew_features(dataset):

    numeric_feats = dataset.dtypes[dataset.dtypes != "object"].index

    Y = dataset.select_dtypes(['object'])

    skewed_feats = dataset[numeric_feats].apply(
        lambda x: skew(x.dropna()))  # compute skewness

    skewed_feats = skewed_feats[skewed_feats >= 0.75]

    skewed_feats = skewed_feats.index

    dataset[skewed_feats] = np.log1p(dataset[skewed_feats])

    return dataset[skewed_feats].join(Y)


class Regressor():
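
The effect the helper above relies on, isolated in a constructed example (my own sketch) where the inverse relationship is exact: a column generated as expm1 of Gaussian noise has the skewness of a shifted lognormal, and log1p recovers the Gaussian, whose skewness is near zero and thus far below the 0.75 selection threshold.

import numpy as np
from scipy.stats import skew

rng = np.random.default_rng(0)
x = np.expm1(rng.normal(size=10_000))   # shifted lognormal: heavily right-skewed
print(skew(x))             # large and positive (population value ~6.2)
print(skew(np.log1p(x)))   # ~0: log1p exactly undoes expm1, recovering the Gaussian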

3 Source : _arith.py
with MIT License
from michaelnowotny

def log1p(a: ndarray):
    """
    Return the natural logarithm of one plus the input array,
    element-wise.
    """

    return _unary_function(a, af_func=af.log1p, np_func=np.log1p)


def log10(a: ndarray):

3 Source : hopper.py
with MIT License
from nicklashansen

  def touch(self):
    """Returns the signals from two foot touch sensors."""
    return np.log1p(self.named.data.sensordata[['touch_toe', 'touch_heel']])


class Hopper(base.Task):

3 Source : functions.py
with GNU General Public License v3.0
from nrc-cnrc

def log1p(x):
    """
    Returns the natural logarithm of x plus 1, where x can be float, complex,
    gummy, or jummy.
    """
    return _bcallg(np.log1p,x)

def logaddexp(x1,x2):

3 Source : _discrete_distns.py
with MIT License
from osamhack2021

    def _logcdf(self, x, n, p):
        k = floor(x)
        cdf = self._cdf(k, n, p)
        cond = cdf > 0.5

        def f1(k, n, p):
            return np.log1p(-special.betainc(k + 1, n, 1 - p))

        def f2(k, n, p):
            return np.log(cdf)

        with np.errstate(divide='ignore'):
            return _lazywhere(cond, (x, n, p), f=f1, f2=f2)

    def _sf(self, x, n, p):

3 Source : _discrete_distns.py
with MIT License
from osamhack2021

    def _logsf(self, k, M, n, N):
        res = []
        for quant, tot, good, draw in zip(*np.broadcast_arrays(k, M, n, N)):
            if (quant + 0.5) * (tot + 0.5) < (good - 0.5) * (draw - 0.5):
                # Fewer terms to sum if we calculate log(1-cdf)
                res.append(log1p(-exp(self.logcdf(quant, tot, good, draw))))
            else:
                # Integration over probability mass function using logsumexp
                k2 = np.arange(quant + 1, draw + 1)
                res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)

    def _logcdf(self, k, M, n, N):
