Here are examples of the Python API numpy.logaddexp, taken from open source projects.
12 Examples
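numpy.logaddexp(a, b) computes log(exp(a) + exp(b)) without overflowing when a or b is large, which makes it the standard building block for adding probabilities stored in log space. A minimal sketch before the real-world examples (the probability values are made up for illustration):

    import numpy as np

    log_p = np.log(0.5)
    log_q = np.log(0.25)
    total = np.logaddexp(log_p, log_q)      # log(0.5 + 0.25), computed in log space
    print(np.isclose(total, np.log(0.75)))  # True
    print(np.logaddexp(1000.0, 1000.0))     # ~1000.6931; np.exp(1000) would overflow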
Example 1
Project: chainer Source File: hierarchical_softmax.py
def _forward_cpu_one(self, x, t, W):
    begin = self.begins[t]
    end = self.begins[t + 1]

    w = W[self.paths[begin:end]]
    wxy = w.dot(x) * self.codes[begin:end]
    loss = numpy.logaddexp(0.0, -wxy)  # == log(1 + exp(-wxy))
    return numpy.sum(loss)
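The comment spells out the identity: numpy.logaddexp(0, -wxy) is the logistic loss log(1 + exp(-wxy)), and it stays finite where the naive formula overflows. A quick check (the wxy values are made up):

    import numpy as np

    wxy = np.array([-800.0, 0.0, 800.0])
    stable = np.logaddexp(0.0, -wxy)        # [800., log(2), ~0.]
    with np.errstate(over='ignore'):
        naive = np.log(1.0 + np.exp(-wxy))  # [inf, log(2), 0.] -- exp(800) overflows
    print(stable, naive)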
Example 2
Project: chainer Source File: test_log_softmax.py
def check_forward(self, x_data, use_cudnn=True):
    x = chainer.Variable(x_data)
    y = functions.log_softmax(x, use_cudnn)
    self.assertEqual(y.data.dtype, self.dtype)

    log_z = numpy.ufunc.reduce(
        numpy.logaddexp, self.x, axis=1, keepdims=True)
    y_expect = self.x - log_z

    testing.assert_allclose(
        y_expect, y.data, **self.check_forward_options)
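logaddexp is a binary ufunc, so reducing with it along an axis is a numerically stable log-sum-exp; that is how the test builds the log partition term log_z. Calling reduce on the ufunc directly is equivalent and perhaps more familiar (random data here just for illustration):

    import numpy as np

    x = np.random.randn(3, 5)
    log_z = np.logaddexp.reduce(x, axis=1, keepdims=True)  # stable log(sum(exp(x)))
    ref = np.log(np.exp(x).sum(axis=1, keepdims=True))     # naive route, fine for small x
    print(np.allclose(log_z, ref))                         # True
    log_softmax = x - log_z                                # rows of exp(...) sum to 1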
Example 3
Project: lifetimes Source File: estimation.py
@staticmethod
def _negative_log_likelihood(params, freq, rec, T, penalizer_coef):
    # npany is numpy.any imported under an alias; log and logaddexp come from numpy too
    if npany(asarray(params) <= 0.):
        return np.inf

    r, alpha, s, beta = params
    x = freq
    r_s_x = r + s + x

    A_1 = special.gammaln(r + x) - special.gammaln(r) + r * log(alpha) + s * log(beta)
    log_A_0 = ParetoNBDFitter._log_A_0(params, freq, rec, T)
    # logaddexp adds the two likelihood terms while both stay in log space
    A_2 = logaddexp(-(r + x) * log(alpha + T) - s * log(beta + T),
                    log(s) + log_A_0 - log(r_s_x))

    penalizer_term = penalizer_coef * log(params).sum()
    return -(A_1 + A_2).sum() + penalizer_term
Example 4
Project: kameleon-mcmc Source File: InfluenceCombination.py
def log_pdf(self, X):
    GenericTests.check_type(X, 'X', numpy.ndarray, 2)
    # this also enforces correct data ranges
    if X.dtype != numpy.bool8:
        raise ValueError("X must be a bool8 numpy array")
    if not X.shape[1] == self.dimension:
        raise ValueError("Dimension of X does not match own dimension")

    result = zeros(len(X))
    for i in range(len(X)):
        result[i] = inner(self.biasx, X[i]) + \
            sum([logaddexp(0, inner(self.W[j], X[i]) + self.biash[j])
                 for j in range(self.num_hidden_units)])
    return result
Example 5
def predict_log_proba(self, X):
    """Predict class log-probabilities for X.

    The predicted class log-probabilities of an input sample is computed as
    the log of the mean predicted class probabilities of the base
    estimators in the ensemble.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.

    Returns
    -------
    p : array of shape = [n_samples, n_classes]
        The class log-probabilities of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
    """
    check_is_fitted(self, "classes_")
    if hasattr(self.base_estimator_, "predict_log_proba"):
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} "
                             "and input n_features is {1} "
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs)

        all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_log_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce
        log_proba = all_log_proba[0]
        for j in range(1, len(all_log_proba)):
            log_proba = np.logaddexp(log_proba, all_log_proba[j])

        log_proba -= np.log(self.n_estimators)
        return log_proba
    else:
        return np.log(self.predict_proba(X))
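The reduce step is an average in log space: pairwise logaddexp accumulates log(sum of the per-chunk probabilities), and subtracting log(n_estimators) turns that log-sum into the log of the mean probability. A toy check with two hand-written log-probability arrays (plain arrays, not estimator objects):

    import numpy as np

    lp1 = np.log(np.array([[0.7, 0.3]]))  # log-probas from a first (fake) estimator
    lp2 = np.log(np.array([[0.5, 0.5]]))  # ... and from a second one
    log_proba = np.logaddexp(lp1, lp2) - np.log(2)
    print(np.allclose(np.exp(log_proba), [[0.6, 0.4]]))  # True: mean of the two rows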
Example 6
Project: lifetimes Source File: estimation.py
@staticmethod
def _loglikelihood(params, x, tx, T):
    alpha, beta, gamma, delta = params

    beta_ab = special.betaln(alpha, beta)
    beta_gd = special.betaln(gamma, delta)

    indiv_loglike = (special.betaln(alpha + x, beta + T - x) - beta_ab +
                     special.betaln(gamma, delta + T) - beta_gd)

    recency_T = T - tx - 1
    J = np.arange(max(recency_T) + 1)

    @np.vectorize
    def _sum(x, tx, recency_T):
        j = J[:recency_T + 1]
        return log(
            np.sum(exp(special.betaln(alpha + x, beta + tx - x + j) - beta_ab +
                       special.betaln(gamma + 1, delta + tx + j) - beta_gd)))

    s = _sum(x, tx, recency_T)
    indiv_loglike = logaddexp(indiv_loglike, s)
    return indiv_loglike
Example 7
Project: cvxpy Source File: logistic.py
@Elementwise.numpy_numeric
def numeric(self, values):
    """Evaluates e^x elementwise, adds 1, and takes the log.
    """
    return np.logaddexp(0, values[0])
Example 8
def test_logaddexp():
    binary(np.logaddexp)
Example 9
def __call__(self, y_pred):
    return numpy.sum(self.sample_weight * numpy.logaddexp(0, -self.y_signed * y_pred))
Example 10
def __call__(self, y_pred):
    result = numpy.sum(self.sig_w * numpy.logaddexp(0, -y_pred))
    result += numpy.sum(self.bck_w * numpy.exp(0.5 * y_pred))
    return result
Example 11
def loglike(theta, c1, c2):
    return np.logaddexp(logcirc(theta, c1), logcirc(theta, c2))
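Here logaddexp combines two component log-densities into log(p1 + p2), i.e. an unnormalized two-component mixture evaluated entirely in log space. logcirc is not shown above; the sketch below substitutes a hypothetical Gaussian log density just to make the pattern runnable:

    import numpy as np

    def logcirc(theta, c):  # hypothetical stand-in, not the original function
        return -0.5 * (theta - c) ** 2 - 0.5 * np.log(2 * np.pi)

    def loglike(theta, c1, c2):
        # log(p1(theta) + p2(theta)) without ever forming the raw densities
        return np.logaddexp(logcirc(theta, c1), logcirc(theta, c2))

    print(loglike(0.3, 0.0, 1.0))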
Example 12
Project: scikit-kge Source File: hole.py
def _gradients(self, xys):
    ss, ps, os, ys = unzip_triples(xys, with_ys=True)

    yscores = ys * self._scores(ss, ps, os)
    self.loss = np.sum(np.logaddexp(0, -yscores))
    #preds = af.Sigmoid.f(yscores)
    fs = -(ys * af.Sigmoid.f(-yscores))[:, np.newaxis]
    #self.loss -= np.sum(np.log(preds))

    ridx, Sm, n = grad_sum_matrix(ps)
    gr = Sm.dot(fs * ccorr(self.E[ss], self.E[os])) / n
    gr += self.rparam * self.R[ridx]

    eidx, Sm, n = grad_sum_matrix(list(ss) + list(os))
    ge = Sm.dot(np.vstack((
        fs * ccorr(self.R[ps], self.E[os]),
        fs * cconv(self.E[ss], self.R[ps])
    ))) / n
    ge += self.rparam * self.E[eidx]

    return {'E': (ge, eidx), 'R': (gr, ridx)}