Here are examples of the Python API numpy.average taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
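Before the project examples, a minimal sketch of how numpy.average itself behaves (the arrays and weights below are made up for illustration; weighted averaging and the axis/returned keywords are the features the examples rely on):

import numpy as np

values = np.array([[1.0, 2.0],
                   [3.0, 4.0]])
weights = np.array([0.25, 0.75])

# Unweighted average over all elements (same as values.mean()).
print(np.average(values))                            # 2.5
# Weighted average along axis 0; len(weights) must match that axis.
print(np.average(values, axis=0, weights=weights))   # [2.5 3.5]
# returned=True additionally yields the sum of the weights used.
avg, wsum = np.average(values, axis=0, weights=weights, returned=True)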
953 Examples
Source : calculate_P_bootstrap_significance.py
with MIT License
from abaheti95
def compute_delta(scores1, scores2):
    return np.average(scores1) - np.average(scores2)
    # Note: the lines below are unreachable as extracted; they look like a
    # binarized variant of the same delta merged in by the snippet extraction.
    scores_1 = np.zeros_like(scores1)
    scores_1[scores1 == 1] = 1
    scores_2 = np.zeros_like(scores2)
    scores_2[scores2 == 1] = 1
    return np.average(scores_1) - np.average(scores_2)

# Computes the significance between models A and B
def significance_test(A, B):
Source : fps.py
with Apache License 2.0
from abhiTronix
def average_fps(self):
    """
    Calculates and returns the average FPS.
    """
    self.__terminate = True
    if not (self.__timer is None):
        self.__timer.join()
        self.__timer = None
    av_fps = np.average(self.__fps) if self.__fps else 0.0
    return av_fps
Source : common.py
with MIT License
from aeon0
def slot_has_item(slot_img: np.ndarray) -> bool:
    """
    Check if a specific slot in the inventory has an item or not based on color
    :param slot_img: Image of the slot
    :return: Bool if there is an item or not
    """
    slot_img = cv2.cvtColor(slot_img, cv2.COLOR_BGR2HSV)
    avg_brightness = np.average(slot_img[:, :, 2])
    return avg_brightness > 16.0

def close(img: np.ndarray = None) -> np.ndarray:
Source : anya.py
with MIT License
from aeon0
def wait_for_loading_screen(timeout):
    start = time.time()
    while time.time() - start < timeout:
        img = grab()
        is_loading_black_roi = np.average(img[:700, 0:250]) < 4.0
        if is_loading_black_roi:
            return True
    return False

class AnyaShopper:
Source : inventory_collection.py
with MIT License
from aeon0
def _is_slot_empty(img, treshold=16.0):
    slot_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    avg_brightness = np.average(slot_img[:, :, 2])
    return avg_brightness > treshold

class InventoryCollection:
Source : preproc.py
with GNU General Public License v3.0
from AFM-analysis
def preproc_correct_force_offset(apret):
    """Correct the force offset with an average baseline value
    """
    idp = poc.compute_poc(force=apret["force"],
                          method="deviation_from_baseline")
    if idp:
        apret["force"] = apret["force"] - np.average(apret["force"][:idp])
    else:
        apret["force"] = apret["force"] - apret["force"][0]

@preprocessing_step(
Source : test_indent2.py
with GNU General Public License v3.0
from AFM-analysis
def test_correct_force_offset():
    grp = IndentationGroup(jpkfile)
    idnt = grp[0]
    idnt.apply_preprocessing(["compute_tip_position",
                              "correct_force_offset"])
    idp = idnt.estimate_contact_point_index()
    assert np.allclose(np.average(idnt["force"][:idp]), 0)

@pytest.mark.parametrize(
Source : partner.py
with MIT License
from Akegarasu
def _recognize_has_hint(rp: mathtools.ResizeProxy, icon_img: Image) -> bool:
    bbox = rp.vector4((50, 0, 58, 8), 540)
    hint_mark_color = (127, 67, 255)
    hint_mark_img = icon_img.crop(bbox)
    hint_mask = imagetools.constant_color_key(
        imagetools.cv_image(hint_mark_img), hint_mark_color
    )
    return np.average(hint_mask) > 200

def _recognize_has_training(
Source : RNN.py
with Apache License 2.0
from alan-turing-institute
def col_predict(mtabs):
    X_test = list()
    for mt in mtabs:
        x = Table_Encode_WV(micro_table=mt, table_size=FLAGS.micro_table_size, w2v_model=w2v_model,
                            cell_seq_size=FLAGS.cell_seq_size)
        X_test.append(x[0, 0, :, :])
    Y_test = rnn_predict(test_x=X_test, rnn_dir=rnn_dir)
    return np.average(Y_test, axis=0)

rnn_dir = os.path.join(FLAGS.io_dir, 'rnn')
Source : _column_ensemble.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute
def _predict_proba(self, X):
    """Predict class probabilities for X using 'soft' voting."""
    self.check_is_fitted()
    avg = np.average(self._collect_probas(X), axis=0)
    return avg

def _predict(self, X):
Source : preelmo.py
with GNU General Public License v3.0
from allanj
def parse_sentence(elmo, words, mode: str = "average"):
    vectors = elmo.embed_sentence(words)
    if mode == "average":
        return np.average(vectors, 0)
    elif mode == 'weighted_average':
        return np.swapaxes(vectors, 0, 1)
    elif mode == 'last':
        return vectors[-1, :, :]
    elif mode == 'all':
        return vectors
    else:
        return vectors

def load_elmo():
Source : voting_classifier.py
with MIT License
from alvarobartt
def _predict_proba(self, X):
    """Predict class probabilities for X in 'soft' voting """
    if self.voting == 'hard':
        raise AttributeError("predict_proba is not available when"
                             " voting=%r" % self.voting)
    check_is_fitted(self, 'estimators_')
    avg = np.average(self._collect_probas(X), axis=0,
                     weights=self._weights_not_none)
    return avg

@property
Source : classification.py
with MIT License
from alvarobartt
def _weighted_sum(sample_score, sample_weight, normalize=False):
    if normalize:
        return np.average(sample_score, weights=sample_weight)
    elif sample_weight is not None:
        return np.dot(sample_score, sample_weight)
    else:
        return sample_score.sum()

def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
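A side note on _weighted_sum above: with normalize=True, np.average with weights equals the weighted dot product divided by the weight sum, which is why the two branches are consistent with each other. A quick self-contained check with made-up scores:

import numpy as np

sample_score = np.array([1.0, 0.0, 1.0])
sample_weight = np.array([2.0, 1.0, 1.0])

normalized = np.average(sample_score, weights=sample_weight)    # 0.75
unnormalized = np.dot(sample_score, sample_weight)              # 3.0
assert np.isclose(normalized, unnormalized / sample_weight.sum())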
Source : reader.py
with MIT License
from AmitMY
def retries(self):
    pairs = {"seen": {}, "unseen": {}}
    for d in self.data:
        pairs["seen" if d.info["seen"] else "unseen"][d.plan] = \
            d.plan_changes - 1 if hasattr(d, "plan_changes") else 1
    sums = {k: np.average(list(v.values())) for k, v in pairs.items()}
    print("sums", sums)
    return sums

def for_manual_evaluation(self):
Source : utilities.py
with GNU General Public License v3.0
from Andrea94c
def plot_X(X, plt_title, plt_path, window_size=30, is_avg=True):
    if len(X) >= window_size:
        df = pd.Series(X)
        scatter_print = X[window_size:]
        to_plot_data = df.rolling(window_size).mean()[window_size:]
        plt.clf()
        plt.plot(range(len(scatter_print)), to_plot_data, label="Moving Average-" + str(window_size))
        if is_avg:
            plt.plot(range(len(scatter_print)), [np.average(scatter_print)] * len(scatter_print), label="avg")
        plt.legend()
        plt.title(plt_title)
        plt.savefig(plt_path)
        plt.clf()

""" This class handle the return to depot for
Source : utils.py
with GNU General Public License v3.0
from AndreaCossu
def compute_average_intermediate_accuracy(folder, intermediate_result_name='intermediate_results.csv'):
    """
    Return average accuracy over all tasks on a specified result folder,
    after training on all tasks.
    """
    cur_file = os.path.join(folder, intermediate_result_name)
    data = pd.read_csv(cur_file)
    data = data[data['training_task'] == data['training_task'].max()]  # choose last task
    data = data[['loss', 'acc']].values
    # both are arrays of 2 elements (loss, acc)
    loss, acc = np.average(data, axis=0)
    loss_std, acc_std = np.std(data, axis=0)
    return loss, acc, loss_std, acc_std

def compute_training_mean_std(
Source : active_plasma_lens.py
with GNU General Public License v3.0
from AngelFP
def _get_optimized_dt(self, beam):
    """ Get tracking time step. """
    # If plasma wakefields are active, use default dt.
    if self.wakefields:
        dt = super()._get_optimized_dt(beam)
    # Otherwise, determine dt from the APL focusing strength.
    else:
        gamma = np.sqrt(1 + beam.px**2 + beam.py**2 + beam.pz**2)
        mean_gamma = np.average(gamma, weights=beam.q)
        w_x = np.sqrt(ct.e*ct.c/ct.m_e * self.foc_strength/mean_gamma)
        T_x = 1/w_x
        dt = 0.1*T_x
    return dt
Source : plasma_stage.py
with GNU General Public License v3.0
from AngelFP
def _get_optimized_dt(self, beam):
    """ Get tracking time step. """
    gamma = np.sqrt(1 + beam.px**2 + beam.py**2 + beam.pz**2)
    mean_gamma = np.average(gamma, weights=beam.q)
    # calculate maximum focusing along stage.
    z = np.linspace(0, self.length, 100)
    n_p = self.density(z)
    w_p = np.sqrt(max(n_p)*ct.e**2/(ct.m_e*ct.epsilon_0))
    max_kx = (ct.m_e/(2*ct.e*ct.c))*w_p**2
    w_x = np.sqrt(ct.e*ct.c/ct.m_e * max_kx/mean_gamma)
    period_x = 1/w_x
    dt = 0.1*period_x
    return dt
Source : particle_bunch.py
with GNU General Public License v3.0
from AngelFP
def get_alternative_6D_matrix(self):
    """
    Returns the 6D matrix of the bunch containing
    (x, x', y, y', xi, dp)
    """
    g = np.sqrt(1 + self.px**2 + self.py**2 + self.pz**2)
    g_avg = np.average(g, weights=self.q)
    b_avg = np.sqrt(1 - g_avg**(-2))
    dp = (g-g_avg)/(g_avg*b_avg)
    p_kin = np.sqrt(g**2 - 1)
    return np.array([self.x, self.px/p_kin, self.y, self.py/p_kin,
                     self.xi, dp]), g_avg

def increase_prop_distance(self, dist):
Source : particle_bunch.py
with GNU General Public License v3.0
from AngelFP
def reposition_xi(self, xi_c):
    """Recenter bunch along xi around the specified xi_c"""
    current_xi_c = np.average(self.xi, weights=self.q)
    dxi = xi_c - current_xi_c
    self.xi += dxi

def get_openpmd_diagnostics_data(self):
Source : bunch_generation.py
with GNU General Public License v3.0
from AngelFP
def get_from_file(file_path, code_name, preserve_prop_dist=False, name=None,
                  **kwargs):
    x, y, z, px, py, pz, q = dr.read_beam(code_name, file_path, **kwargs)
    z_avg = np.average(z, weights=q)
    xi = z - z_avg
    bunch = ParticleBunch(q, x, y, xi, px, py, pz, name=name)
    if preserve_prop_dist:
        bunch.prop_distance = z_avg
    return bunch
Source : ExperimentLogger.py
with MIT License
from antonpuz
def create_batch(self, batch_size):
    if len(self.experience_buffer) < batch_size:
        raise ValueError('Not enough stored batches in experience creator, requested {}, currently buffer of size {}'.format(batch_size, len(self.experience_buffer)))
    if self.graph_creator is not None:
        self.graph_creator.add_score_sample(np.average(self.rewards[0:batch_size]))
    self.rewards = self.rewards[batch_size:]
    to_be_returned = self.experience_buffer[0:batch_size]
    self.experience_buffer = self.experience_buffer[batch_size:]
    self.batch_number += 1
    return to_be_returned

def number_of_batches(self):
Source : logger.py
with MIT License
from apexrl
def record_tabular_misc_stat(key, values, placement="back"):
    if placement == "front":
        prefix = ""
        suffix = key
    else:
        prefix = key
        suffix = ""
    if len(values) > 0:
        record_tabular(prefix + "Average" + suffix, np.average(values))
        record_tabular(prefix + "Std" + suffix, np.std(values))
        record_tabular(prefix + "Median" + suffix, np.median(values))
        record_tabular(prefix + "Min" + suffix, np.min(values))
        record_tabular(prefix + "Max" + suffix, np.max(values))
    else:
        record_tabular(prefix + "Average" + suffix, np.nan)
        record_tabular(prefix + "Std" + suffix, np.nan)
        record_tabular(prefix + "Median" + suffix, np.nan)
        record_tabular(prefix + "Min" + suffix, np.nan)
        record_tabular(prefix + "Max" + suffix, np.nan)
Source : timers.py
with Apache License 2.0
from Ascend
def TM_DISPLAY(remove_first_one=True):
    for key, values in G_TIMERS.items():
        startPos = 0
        if remove_first_one:
            startPos = 1
            if len(values) <= 1:
                continue
        print("{}:{} times, cost time avg:{}".format(key, len(values), np.average(values[startPos:])))

def TM_PICK(name):
Source : statistics.py
with Apache License 2.0
from asreview
def n_keywords(data):
    """Return the number of keywords.

    Arguments
    ---------
    data: asreview.data.ASReviewData
        An ASReviewData object with the records.

    Return
    ------
    int:
        The statistic
    """
    if data.keywords is None:
        return None
    return np.average([len(keywords) for keywords in data.keywords])
Source : wordtwovec.py
with MIT License
from Aunsiels
def embed(self, words: Iterable[str]) -> np.ndarray:
    """given a list of words, find their vector embeddings and return the vector mean"""
    # first find the vector embedding for each word
    vectors = [self.model[word] for word in words if word in self.model]
    if vectors:
        # if there are vector embeddings, take the vector average
        return np.average(vectors, axis=0)
    else:
        # otherwise just return a zero vector
        return np.zeros(self.model.vector_size)

def goodness(self, question_stem: str, choice_text: str) -> float:
Source : action_space.py
with MIT License
from awilliea
def averageBacktest(self, M):
    # Average states within M
    N = []
    observed = []
    for x in M:
        state = x[0]
        if state in observed:
            continue
        observed.append(state)
        paid = []
        reward = []
        for y in M:
            if y[0] == state:
                paid.append(y[3])
                reward.append(y[4])
        N.append([state, x[1], x[2], np.average(paid), np.average(reward)])
    return N
Source : train.py
with Apache License 2.0
from awslabs
def get_scores(g, h, scoring_fn, calc_mrr=False, edge_mask=None):
    scores, mrrs = [], []
    for etype_id, etype in enumerate(g.canonical_etypes):
        if g.num_edges(etype):
            src_type, rel_type, dst_type = etype
            u, v = g.edges(etype=etype)
            etype_ids = torch.full_like(u, etype_id)
            scores.append(scoring_fn(h[src_type][u], h[dst_type][v], etype_ids))
            if calc_mrr:
                mrr_score, _ = mrr(h, (u, v, etype_ids), scoring_fn, masked_edges=edge_mask,
                                   src_type=src_type, dst_type=dst_type)
                mrrs.append(mrr_score)
    mrr_metric = np.average(mrrs) if calc_mrr else None
    return torch.cat(scores), mrr_metric

def train_n_epochs(model, optimizer, features, loss_fn, train_dataloader, validation_dataloader, eval_edge_mask, n_epochs,
Source : data_process.py
with GNU General Public License v3.0
from baoy-nlp
def detail(data_set):
    tgt_len = [len(e.tgt) for e in data_set]
    print('Max target len: %d' % max(tgt_len), file=sys.stderr)
    print('Avg target len: %d' % np.average(tgt_len), file=sys.stderr)
    source_len = [len(e.src) for e in data_set]
    print('Max source len: {}'.format(max(source_len)), file=sys.stderr)
    print('Avg source len: {}'.format(np.average(source_len)), file=sys.stderr)

def data_details(train_list, dev_list, test_list):
Source : insp_plot.py
with GNU General Public License v3.0
from bch0w
def get_histogram_stats(n, bins):
    """
    Get mean, variance and standard deviation from a histogram

    :type n: array or list of arrays
    :param n: values of histogram bins
    :type bins: array
    :param bins: edges of the bins
    """
    mids = 0.5 * (bins[1:] + bins[:-1])
    mean = np.average(mids, weights=n)
    var = np.average((mids - mean) ** 2, weights=n)
    std = np.sqrt(var)
    return mean, var, std

def annotate_txt(ax, txt, anno_location="lower-right", **kwargs):
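For intuition, get_histogram_stats above treats bin counts as weights over bin midpoints, which approximates the statistics of the raw samples up to binning error. A small self-contained check with synthetic data (the distribution and bin count here are arbitrary):

import numpy as np

samples = np.random.default_rng(0).normal(loc=5.0, scale=2.0, size=10_000)
n, bins = np.histogram(samples, bins=100)
mids = 0.5 * (bins[1:] + bins[:-1])
mean = np.average(mids, weights=n)
std = np.sqrt(np.average((mids - mean) ** 2, weights=n))
# Both differences should be small relative to the true scale.
print(mean - samples.mean(), std - samples.std())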
Source : audio_utils.py
with MIT License
from beefoo
def scaleAudioData(arr):
    # get the average
    avg = np.average(arr)
    # scale from 20,20000 to 0,1
    return (avg - 20) / (20000 - 20)

def stretchSound(sound, amount=2.0, fade_out=0.8):
Source : math_utils.py
with MIT License
from beefoo
def weightedMean(values, weights=None):
    count = len(values)
    if count <= 0:
        return 0
    if weights is None:
        weights = [w**2 for w in range(count, 0, -1)]
    return np.average(values, weights=weights)

def weightedShuffle(arr, weights, count=None, seed=3):
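Called without weights, weightedMean above falls back to descending squared weights, which biases the result toward the first values. A short illustration with made-up numbers:

import numpy as np

values = [10.0, 20.0, 30.0]
# Default weights for three values: [3**2, 2**2, 1**2] = [9, 4, 1]
# (9*10 + 4*20 + 1*30) / 14 = 200 / 14 ≈ 14.29
print(np.average(values, weights=[w**2 for w in range(3, 0, -1)]))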
Source : linear_model.py
with MIT License
from birforce
def centered_tss(self):
    model = self.model
    weights = getattr(model, 'weights', None)
    if weights is not None:
        return np.sum(weights * (
            model.endog - np.average(model.endog, weights=weights))**2)
    else:  # this is probably broken for GLS
        centered_endog = model.wendog - model.wendog.mean()
        return np.dot(centered_endog, centered_endog)

@cache_readonly
Source : scatter_plot.py
with MIT License
from blackmints
def find_average_trial(dataset, model, metric="test_rmse"):
    path = find_best_hyper(dataset, model, metric=metric)
    # Choose closest trial to the average
    loss = []
    with open(path + '/raw_results.csv') as f:
        reader = csv.DictReader(f)
        for row in reader:
            loss.append(float(row[metric]))
    avg = np.average(loss)
    idx = np.argmin(np.abs(np.array(loss) - avg))
    path += 'trial_' + str(idx + 1) + '/'
    return path

def draw_confusion_graph(dataset, base_path):
Source : first_swap.py
with MIT License
from buaazyc
def boundary_expend_and_shrink(landmarks, offset=0.97):
    """
    Adjust the landmark positions relative to the landmark center.
    :param landmarks:
    :param offset:
    :return:
    """
    x = np.average(landmarks[:, 0])
    y = np.average(landmarks[:, 1])
    new_landmarks = [(int((x + (p[0] - x) * offset)), int((y + (p[1] - y) * offset))) for p in landmarks]
    return new_landmarks

def get_face_mask(img_shape, landmarks, color=(1, 1, 1)):
Source : models.py
with MIT License
from cambridge-mlg
def predict(self, Xs):
    ms, vs = self._predict(Xs, self.ARGS.num_posterior_samples)
    m = np.average(ms, 0)
    v = np.average(vs + ms**2, 0) - m**2
    return m, v

def calculate_density(self, Xs, Ys):
Source : numerical_column_stats.py
with Apache License 2.0
from capitalone
def _estimate_stats_from_histogram(self):
    # test estimated mean and var
    bin_counts = self._stored_histogram['histogram']['bin_counts']
    bin_edges = self._stored_histogram['histogram']['bin_edges']
    mids = 0.5 * (bin_edges[1:] + bin_edges[:-1])
    mean = np.average(mids, weights=bin_counts)
    var = np.average((mids - mean) ** 2, weights=bin_counts)
    return var

def _total_histogram_bin_variance(self, input_array):
Source : embedder_models.py
with Apache License 2.0
from cisco
def encode(self, text_list):
    token_list = [self._tokenize(text) for text in text_list]
    vector_list = [self.model.encode_sequence_of_tokens(tl) for tl in token_list]
    encoded_vecs = []
    for vl in vector_list:
        if len(vl) == 1:
            encoded_vecs.append(vl[0])
        else:
            encoded_vecs.append(np.average(vl, axis=0))
    return encoded_vecs

def _tokenize(self, text):
Source : plot_cpu_load.py
with MIT License
from CMU-cabot
def sort_pids(data2):
    keys = data2.keys()
    return sorted(keys, reverse=True, key=lambda x: numpy.average(data2[x][0]))

if options.pid:
Source : cdb.py
with MIT License
from CogStack
def _make_stats(self):
    stats = {}
    stats["Number of concepts"] = len(self.cui2names)
    stats["Number of names"] = len(self.name2cuis)
    stats["Number of concepts that received training"] = len([cui for cui in self.cui2count_train if self.cui2count_train[cui] > 0])
    stats["Number of seen training examples in total"] = sum(self.cui2count_train.values())
    stats["Average training examples per concept"] = np.average(
        [self.cui2count_train[cui] for cui in self.cui2count_train if self.cui2count_train[cui] > 0])
    return stats

def print_stats(self) -> None:
Source : vector_context_model.py
with MIT License
from CogStack
def train_using_negative_sampling(self, cui: str) -> None:
    vectors = {}
    # Get vectors for each context type
    for context_type in self.config.linking['context_vector_sizes'].keys():
        size = self.config.linking['context_vector_sizes'][context_type]
        # While it should be size*2 it is already too many negative examples, so we leave it at size
        inds = self.vocab.get_negative_samples(size, ignore_punct_and_num=self.config.linking['negative_ignore_punct_and_num'])
        values = [self.vocab.vec(self.vocab.index2word[ind]) for ind in inds]
        if len(values) > 0:
            vectors[context_type] = np.average(values, axis=0)
        # Debug
        self.log.debug("Updating CUI: %s, with %s negative words", cui, len(inds))
    # Do the update for all context types
    self.cdb.update_context_vector(cui=cui, vectors=vectors, negative=True)
Source : model_free.py
with MIT License
from cool-RR
def get_observation_v(self, observation: Observation,
                      epsilon: Optional[numbers.Real] = None) -> numbers.Real:
    if epsilon is None:
        epsilon = self.epsilon
    q_map = self.get_qs_for_observation(observation)
    return np.average(
        (
            max(q_map.values()),
            np.average(tuple(q_map.values()))
        ),
        weights=(1 - epsilon, epsilon)
    )

def __repr__(self) -> str:
Source : evaluators.py
with GNU General Public License v3.0
from D3lt4lph4
def compute_mean_average_precision(self, average_precisions):
    '''
    Computes the mean average precision over all classes.

    # Arguments:
        average_precisions: A list of all the average precisions for each of the classes.

    # Returns:
        A float, the mean average precision.
    '''
    # The first element is for the background class, so skip it.
    mean_average_precision = np.average(average_precisions[1:])
    return mean_average_precision

def display_results(self):
Source : sds.py
with Apache License 2.0
from danieldeutsch
def compute_final_statistics(stats: Dict[str, float]) -> Dict[str, float]:
    avg_doc_sents = np.average(stats[NUM_DOC_SENTS])
    std_doc_sents = np.std(stats[NUM_DOC_SENTS])
    avg_doc_sent_tokens = np.average(np.array(stats[NUM_DOC_TOKENS]) / np.array(stats[NUM_DOC_SENTS]))
    std_doc_sent_tokens = np.std(np.array(stats[NUM_DOC_TOKENS]) / np.array(stats[NUM_DOC_SENTS]))
    avg_sum_sents = np.average(stats[NUM_SUM_SENTS])
    std_sum_sents = np.std(stats[NUM_SUM_SENTS])
    avg_sum_sent_tokens = np.average(np.array(stats[NUM_SUM_TOKENS]) / np.array(stats[NUM_SUM_SENTS]))
    std_sum_sent_tokens = np.std(np.array(stats[NUM_SUM_TOKENS]) / np.array(stats[NUM_SUM_SENTS]))
    return {
        NUM_INSTANCES: stats[NUM_INSTANCES],
        AVG_DOC_SENTS: f'{avg_doc_sents:.2f} ({std_doc_sents:.2f})',
        AVG_DOC_SENT_TOKENS: f'{avg_doc_sent_tokens:.2f} ({std_doc_sent_tokens:.2f})',
        AVG_SUM_SENTS: f'{avg_sum_sents:.2f} ({std_sum_sents:.2f})',
        AVG_SUM_SENT_TOKENS: f'{avg_sum_sent_tokens:.2f} ({std_sum_sent_tokens:.2f})',
    }

def main(args):
Source : matplotlib_graph.py
with MIT License
from danielwilczak101
def average_config_id(self, function):
    """Graph average line of all config_id's from data stored
    in the database."""
    # Get all the config's
    config_ids = self.database.get_all_config_id()
    stored_list = []
    # Store each list so it can be averaged later
    for config_id in config_ids:
        stored_list.append(function(config_id))
    y = np.average(stored_list, axis=0)
    x = self.database.get_each_generation_number(config_id)
    self.type_of_graph(x, y)

def all_config_id(self, function):
Source : loss.py
with Apache License 2.0
from dashanji
def __call__(self, y_true, raw_predictions, sample_weight):
    """Return the weighted average loss"""
    return np.average(self.pointwise_loss(y_true, raw_predictions),
                      weights=sample_weight)

@abstractmethod
Source : loss.py
with Apache License 2.0
from dashanji
def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
    y_pred = np.average(y_train, weights=sample_weight)
    eps = np.finfo(y_train.dtype).eps
    y_pred = np.clip(y_pred, eps, None)
    return np.log(y_pred)

def update_gradients_and_hessians(self, gradients, hessians, y_true,
Source : loss.py
with Apache License 2.0
from dashanji
def get_baseline_prediction(self, y_train, sample_weight, prediction_dim):
    if prediction_dim > 2:
        raise ValueError(
            "loss='binary_crossentropy' is not defined for multiclass"
            " classification with n_classes=%d, use"
            " loss='categorical_crossentropy' instead" % prediction_dim)
    proba_positive_class = np.average(y_train, weights=sample_weight)
    eps = np.finfo(y_train.dtype).eps
    proba_positive_class = np.clip(proba_positive_class, eps, 1 - eps)
    # log(x / (1 - x)) is the inverse of the sigmoid, i.e. the link function
    # of the Binomial model.
    return np.log(proba_positive_class / (1 - proba_positive_class))

def update_gradients_and_hessians(self, gradients, hessians, y_true,
Source : _voting.py
with Apache License 2.0
from dashanji
def _predict_proba(self, X):
    """Predict class probabilities for X in 'soft' voting."""
    check_is_fitted(self)
    avg = np.average(self._collect_probas(X), axis=0,
                     weights=self._weights_not_none)
    return avg

@property
Source : _classification.py
with Apache License 2.0
from dashanji
def _weighted_sum(sample_score, sample_weight, normalize=False):
    if normalize:
        return np.average(sample_score, weights=sample_weight)
    elif sample_weight is not None:
        return np.dot(sample_score, sample_weight)
    else:
        return sample_score.sum()

@_deprecate_positional_args