numpy.argmax

Here are examples of the Python API numpy.argmax taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
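
Before the project examples, a minimal sketch of the API itself (toy arrays, not from any of the projects below): numpy.argmax returns the index of the maximum value, into the flattened array when no axis is given, or along the given axis, with ties broken by the first occurrence.

import numpy as np

scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])

print(np.argmax(scores))          # 1: index into the flattened array
print(np.argmax(scores, axis=1))  # [1 0]: per-row index of the maximum
print(np.argmax([3, 3, 1]))       # 0: ties go to the first occurrence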

5001 Examples

5 Source : LeNet.py
with MIT License
from DependableSystemsLab

def error_rate(predictions, labels, isPrint=False):
  """Return the error rate based on dense predictions and sparse labels."""
#  if(isPrint):
#    print("pred :", numpy.argmax(predictions, 1))
#    print("label:", labels) 

  res = (numpy.argmax(predictions, 1) == labels)
  indexOfCorrectSample = numpy.where(res == True)[0]

#  print(indexOfCorrectSample)

  return (100.0 - (
      100.0 *
      numpy.sum(numpy.argmax(predictions, 1) == labels) /
      predictions.shape[0])) , indexOfCorrectSample


def main(_):
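
A quick check of the dense-predictions-vs-sparse-labels pattern used above, on made-up arrays:

import numpy

predictions = numpy.array([[0.9, 0.1],
                           [0.2, 0.8],
                           [0.7, 0.3]])
labels = numpy.array([0, 1, 1])

# argmax turns dense per-class scores into predicted class ids.
correct = numpy.argmax(predictions, 1) == labels  # [True, True, False]
print(100.0 - 100.0 * numpy.sum(correct) / predictions.shape[0])  # 33.33...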

3 Source : face.py
with MIT License
from 1024210879

    def identify(self, face):
        if face.embedding is not None:
            predictions = self.model.predict_proba([face.embedding])
            best_class_indices = np.argmax(predictions, axis=1)
            return self.class_names[best_class_indices[0]]


class Encoder:

3 Source : mnist_center_loss.py
with MIT License
from 1024210879

def error_rate(predictions, labels):
    """Return the error rate based on dense predictions and sparse labels."""
    return 100.0 - (
        100.0 *
        np.sum(np.argmax(predictions, 1) == labels) /
        predictions.shape[0])


def main(argv=None):  # pylint: disable=unused-argument

3 Source : eval_util.py
with Apache License 2.0
from 17Skye17

def calculate_hit_at_one(predictions, actuals):
  """Performs a local (numpy) calculation of the hit at one.

  Args:
    predictions: Matrix containing the outputs of the model.
      Dimensions are 'batch' x 'num_classes'.
    actuals: Matrix containing the ground truth labels.
      Dimensions are 'batch' x 'num_classes'.

  Returns:
    float: The average hit at one across the entire batch.
  """
  top_prediction = numpy.argmax(predictions, 1)
  hits = actuals[numpy.arange(actuals.shape[0]), top_prediction]
  return numpy.average(hits)


def calculate_precision_at_equal_recall_rate(predictions, actuals):
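
A toy run of the hit-at-one calculation above, with made-up batch data:

import numpy

predictions = numpy.array([[0.1, 0.8, 0.1],
                           [0.6, 0.3, 0.1]])
actuals = numpy.array([[0, 1, 0],   # top prediction (class 1) hits
                       [0, 0, 1]])  # top prediction (class 0) misses

top_prediction = numpy.argmax(predictions, 1)  # [1 0]
hits = actuals[numpy.arange(actuals.shape[0]), top_prediction]
print(numpy.average(hits))  # 0.5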

3 Source : Fuzzy_PID.py
with MIT License
from 1989Ryan

    def update_K(self, error, d_error):
        self.Kp = self.re[np.argmax(self.membership(error,self.tfm)),\
            np.argmax(self.membership(d_error, self.dtfm))]/6 *(self.Kpmax-self.Kpmin)+self.Kpmin
        self.Kd = self.rde[np.argmax(self.membership(error, self.tfm)),\
            np.argmax(self.membership(d_error, self.dtfm))]/6 *(self.Kdmax-self.Kdmin)+self.Kdmin
        self.alpha = self.a[np.argmax(self.membership(error, self.tfm)),\
            np.argmax(self.membership(d_error, self.dtfm))]
        self.Ki = self.rie[np.argmax(self.membership(error, self.tfm)),\
            np.argmax(self.membership(d_error, self.dtfm))]/4 *(self.Kimax - self.Kimin)+self.Kimin

    def update(self, feedback_value, speed):

3 Source : othello.py
with MIT License
from 2Bear

def pick_move_probabilistically(pi):
    r = random.random()
    s = 0
    for move in range(len(pi)):
        s += pi[move]
        if s >= r:
            return move
    return np.argmax(pi)


def pick_move_greedily(pi):

3 Source : tree.py
with MIT License
from 2Bear

    def select(self, nodes):
        best_nodes_batch = [None] * len(nodes)
        for i, node in enumerate(nodes):
            current = node
            while current.expanded:
                best_edge = np.argmax(current.edge_Q_plus_U)
                if best_edge not in current.child_nodes:
                    current.child_nodes[best_edge] = Node(current, best_edge, -current.player)
                if current.is_terminal:
                    break
                if best_edge == config.pass_move and current.child_nodes[best_edge].legal_moves[config.pass_move] == 1:
                    current.is_terminal = True
                    break
                current = current.child_nodes[best_edge]
            best_nodes_batch[i] = current
        return best_nodes_batch

    def expand_and_evaluate(self, nodes_batch):

3 Source : bounding.py
with MIT License
from 3fon3fonov

    def major_axis_endpoints(self):
        """Return the endpoints of the major axis."""

        i = np.argmax(self.axlens)  # find the major axis
        v = self.paxes[:, i]  # vector from center to major axis endpoint

        return self.ctr - v, self.ctr + v

    def distance(self, x):

3 Source : functions.py
with MIT License
from 3fon3fonov

def get_best_lnl_of_samples(samples,lnl, nsamp):

    best_ln_samp = []
    lnL_best_idx = np.argmax(lnl)
    lnL_best = lnl[lnL_best_idx]


    for i in range(nsamp):

        minlnL = samples[lnL_best_idx,i]
        best_ln_samp.append(minlnL)


    return best_ln_samp,lnL_best #,err1_samp,err2_samp




def cornerplot(obj, level=(100.0-68.3)/2.0, type_plot = 'mcmc', **kwargs):

3 Source : waveform.py
with MIT License
from 3ll3d00d

    def __find_extension(self, x, y):
        upper_y_idx = np.argmax(y)
        upper_x = x[upper_y_idx]
        lower_y = y[upper_y_idx] + self.extensionLimit.value()
        lower_x = x[np.argmax(y >= lower_y)]
        return (lower_x, upper_x), (lower_y, y[upper_y_idx])

    def __set_peaks(self):

3 Source : TSP_GA.py
with Apache License 2.0
from 425776024

    def select_pop(self, pop):
        best_f_index = np.argmax(self.fitness)
        av = np.median(self.fitness, axis=0)
        for i in range(self.pop_size):
            if i != best_f_index and self.fitness[i] < av:
                pi = self.cross(pop[best_f_index], pop[i])
                pi = self.mutate(pi)
                # d1 = self.distance(pi)
                # d2 = self.distance(pop[i])
                # if d1 < d2:
                pop[i, :] = pi[:]

        return pop

    def select_pop2(self, pop):

3 Source : util.py
with GNU General Public License v3.0
from 4rChon

def exploit_max(policy, valid_actions, size):
    # Choose 'best' valid action
    act_id = valid_actions[np.argmax(policy["non_spatial"][-1][valid_actions])]
    target = np.argmax(policy["spatial"][-1])

    # Resize to provided resolution
    # Example:
    #   target = 535 -> 535 // 64 = 8, 535 % 64 = 24
    #   target = [8, 24]
    target = [int(target // size), int(target % size)]

    return act_id, target

3 Source : metrics.py
with MIT License
from 4uiiurz1

def quadratic_weighted_kappa(y_pred, y_true):
    if torch.is_tensor(y_pred):
        y_pred = y_pred.data.cpu().numpy()
    if torch.is_tensor(y_true):
        y_true = y_true.data.cpu().numpy()
    if y_pred.shape[1] == 1:
        y_pred = y_pred[:, 0]
    else:
        y_pred = np.argmax(y_pred, axis=1)
    return metrics.cohen_kappa_score(y_pred, y_true, weights='quadratic')

3 Source : test.py
with MIT License
from 921kiyo

def eval_result(result_tensor, ground_truth, idx2label):

    if not check_confidence_tensor(result_tensor):
        raise InvalidInputError('Result confidence tensor invalid!')

    result = np.argmax(result_tensor,axis=1)
    prediction = (ground_truth==result[0])
    correct_label = idx2label[ground_truth]
    predicted_label = idx2label[result[0]]
    print('predicted: ', predicted_label, ' correct: ', correct_label)
    return prediction, correct_label, predicted_label


def extract_summary_tensors(test_results, label2idx):

3 Source : feature_extraction_functions.py
with MIT License
from a-n-rose

def get_domfreq(y,sr):
    '''
    collecting the frequencies with highest magnitude
    '''
    frequencies, magnitudes = get_freq_mag(y,sr)
    #select only frequencies with largest magnitude, i.e. dominant frequency
    dom_freq_index = [np.argmax(item) for item in magnitudes]
    dom_freq = np.array([frequencies[i][item] for i,item in enumerate(dom_freq_index)])
    #dom_freq -= (np.mean(dom_freq, axis=0) + 1e-8)
    
    return np.array(dom_freq)

#get a collection of frequencies at the same windows as other extraction techniques i.e. 25ms with 10ms shifts (which is standard for much research)
#this can be adjusted here.. this script is prepared for these window settings
#it might work with others but I haven't tested that yet.
def get_freq_mag(y,sr,window_size=None, window_shift=None):

3 Source : mnist_anode.py
with MIT License
from a-norcliffe

def learning_rate_with_decay(batch_size, batch_denom, batches_per_epoch, boundary_epochs, decay_rates):
    initial_learning_rate = args.lr * batch_size / batch_denom

    boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
    vals = [initial_learning_rate * decay for decay in decay_rates]

    def learning_rate_fn(itr):
        lt = [itr < b for b in boundaries] + [True]
        i = np.argmax(lt)
        return vals[i]

    return learning_rate_fn


def one_hot(x, K):
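
The schedule above leans on a common idiom: since True > False, np.argmax over a boolean sequence returns the position of the first True, here the first boundary the iteration count has not yet passed. A minimal sketch with hypothetical boundaries:

import numpy as np

boundaries = [100, 200, 300]       # hypothetical epoch boundaries
vals = [0.1, 0.01, 0.001, 0.0001]  # one rate per interval

def rate_at(itr):
    # The trailing True is a catch-all for iterations past the last boundary.
    lt = [itr < b for b in boundaries] + [True]
    return vals[np.argmax(lt)]

print(rate_at(50), rate_at(250), rate_at(1000))  # 0.1 0.001 0.0001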

3 Source : concentration.py
with MIT License
from aangelopoulos

def get_lhat_conformal_from_table(calib_loss_table, lambdas_table, alpha):
    avg_loss = calib_loss_table.mean(axis=0)
    idx = np.argmax(avg_loss < alpha)
    return lambdas_table[idx]

def test_table(Rhat,delta,bound_fn):

3 Source : risk_histogram.py
with MIT License
from aangelopoulos

def trial_precomputed(example_loss_table, example_height_table, lambdas_example_table, gamma, delta, num_lam, num_calib, batch_size, tlambda, bound_str):
    total=example_loss_table.shape[0]
    perm = torch.randperm(example_loss_table.shape[0])
    example_loss_table = example_loss_table[perm]
    example_height_table = example_height_table[perm]
    calib_losses, val_losses = (example_loss_table[0:num_calib], example_loss_table[num_calib:])
    calib_heights, val_heights = (example_height_table[0:num_calib], example_height_table[num_calib:])

    lhat = get_lhat_from_table(calib_losses, lambdas_example_table, gamma, delta, tlambda, bound_str)

    losses = val_losses[:,np.argmax(lambdas_example_table == lhat)]
    heights = val_heights[:,np.argmax(lambdas_example_table == lhat)]

    return losses.mean(), torch.tensor(heights), lhat

def plot_histograms(df_list,gamma,delta,bounds_to_plot):

3 Source : risk_histogram.py
with MIT License
from aangelopoulos

def trial_precomputed(example_loss_table, example_size_table, lambdas_example_table, gamma, delta, num_lam, num_calib, batch_size, tlambda, bound_str):
    total=example_loss_table.shape[0]
    perm = torch.randperm(example_loss_table.shape[0])
    example_loss_table = example_loss_table[perm]
    example_size_table = example_size_table[perm]
    calib_losses, val_losses = (example_loss_table[0:num_calib], example_loss_table[num_calib:])
    calib_sizes, val_sizes = (example_size_table[0:num_calib], example_size_table[num_calib:])

    lhat_rcps = get_lhat_from_table(calib_losses, lambdas_example_table, gamma, delta, tlambda, bound_str)

    losses_rcps = val_losses[:,np.argmax(lambdas_example_table == lhat_rcps)]
    sizes_rcps = val_sizes[:,np.argmax(lambdas_example_table == lhat_rcps)]

    return losses_rcps.mean(), torch.tensor(sizes_rcps), lhat_rcps

def plot_histograms(df_list,gamma,delta,bounds_to_plot):

3 Source : plot_risk_hoeffding.py
with MIT License
from aangelopoulos

def trial_precomputed(example_loss_table, example_size_table,  gamma, delta, num_calib, num_lam, deltas_precomputed, num_grid_hbb, ub, ub_sigma, lambdas_example_table, epsilon, maxiters, tlambda, bound_str):
    total=example_loss_table.shape[0]
    perm = torch.randperm(example_loss_table.shape[0])
    example_loss_table = example_loss_table[perm]
    example_size_table = example_size_table[perm]
    calib_losses, val_losses = (example_loss_table[0:num_calib], example_loss_table[num_calib:])
    calib_sizes, val_sizes = (example_size_table[0:num_calib], example_size_table[num_calib:])

    lhat_rcps = get_lhat_from_table(calib_losses, lambdas_example_table, gamma, delta, tlambda, bound_str)

    losses_rcps = val_losses[:,np.argmax(lambdas_example_table == lhat_rcps)]
    sizes_rcps = val_sizes[:,np.argmax(lambdas_example_table == lhat_rcps)]

    return lhat_rcps, losses_rcps.mean(), sizes_rcps

def plot_histograms(dfs, gamma, delta, num_calib, output_dir):

3 Source : protein_utils.py
with MIT License
from aangelopoulos

def set_from_probs(probs, preds, dists, lam):
    mask = probs >= lam
    idx_null = mask.sum(axis=2)==0
    max_i = mask.shape[2] - np.argmax(mask[:,:,::-1],axis=2) - 1
    min_i = np.argmax(mask,axis=2)
    maxes = dists[max_i]
    mins = dists[min_i]
    maxes[idx_null] = preds[idx_null]
    mins[idx_null] = preds[idx_null]
    return mins, maxes

def ls_sets_from_probs(ls_probs, ls_preds, ls_dists, lam):
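
The reversed-argmax trick in set_from_probs is worth spelling out: np.argmax over a boolean mask finds the first True along an axis, and applying it to the reversed mask, then mapping the index back, finds the last True. A 1-D sketch on toy data:

import numpy as np

mask = np.array([False, True, True, False, True, False])

first = np.argmax(mask)                       # 1: first True
last = len(mask) - np.argmax(mask[::-1]) - 1  # 4: last True
print(first, last)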

3 Source : risk_histogram.py
with MIT License
from aangelopoulos

def trial_precomputed(example_loss_table, example_size_table, lambdas_example_table, gamma, delta, num_lam, num_calib, batch_size, tlambda, bound_str):
    total=example_loss_table.shape[0]
    perm = torch.randperm(example_loss_table.shape[0])
    example_loss_table = example_loss_table[perm]
    example_size_table = example_size_table[perm]
    calib_losses, val_losses = (example_loss_table[0:num_calib], example_loss_table[num_calib:])
    calib_sizes, val_sizes = (example_size_table[0:num_calib], example_size_table[num_calib:])

    lhat = get_lhat_from_table(calib_losses[:,::-1], lambdas_example_table[::-1], gamma, delta, tlambda, bound_str)

    losses = val_losses[:,np.argmax(lambdas_example_table == lhat)]
    sizes = val_sizes[:,np.argmax(lambdas_example_table == lhat)]

    return losses.mean(), torch.tensor(sizes), lhat

def plot_histograms(df_list,gamma,delta,bounds_to_plot):

3 Source : utils.py
with Apache License 2.0
from abacusai

def get_best_thresh(scores, threshs, data, config, valid=False, margin=0.00):
    objectives = []
    for thresh in threshs:
        valid_objective = get_valid_objective(scores > thresh, data, config, valid=valid, margin=margin)
        objectives.append(valid_objective['objective'])
    return threshs[np.argmax(objectives)], np.max(objectives)

3 Source : util.py
with MIT License
from AbangLZU

def visualize_seg(label_map, mc, one_hot=False):
  if one_hot:
    label_map = np.argmax(label_map, axis=-1)

  out = np.zeros(
      (label_map.shape[0], label_map.shape[1], label_map.shape[2], 3))

  for l in range(1, mc.NUM_CLASS):
    out[label_map==l, :] = mc.CLS_COLOR_MAP[l]

  return out



def bgr_to_rgb(ims):

3 Source : test_smoothing.py
with MIT License
from abel-research

    def test_coincident(self):
        idx_max = np.argmax(self.amp4.vert[:, 1])
        idx_min = np.argmin(self.amp4.vert[:, 1])
        delta = self.amp4.vert[idx_max, 1] - self.amp4.vert[idx_min, 1]
        self.amp4.vert[idx_max, :] = self.amp4.vert[idx_min, :]
        self.amp4.adjustCoincident(beta=1)
        delta2 = self.amp4.vert[idx_max, 1] - self.amp4.vert[idx_min, 1]
        self.assertGreater(delta2, delta*0.99)

3 Source : clauses.py
with GNU General Public License v3.0
from abhijithneilabraham

    def adapt(self, q, inttype=False, summable=False,distinct=False):
        emb = self.get_embeddings([q])
        if distinct:
            self.clause = self.distinct_types[argmax(model.predict(emb))]
        else:
            self.clause = self.types[argmax(model.predict(emb))]

        if summable and inttype and "COUNT" in self.clause:
            self.clause = '''SELECT SUM({}) FROM {}'''

        return self.clause

3 Source : column_types.py
with GNU General Public License v3.0
from abhijithneilabraham

    def adapt(self, x):
        scores = [_overlap(x, v, self.exclude) for v in self.values]
        idx = np.argmax(scores)
        mx = scores[idx]
        if mx == 0:
            return None
        return self.values[idx]


class Date(Integer):

3 Source : langmodel.py
with GNU General Public License v3.0
from abidaks

	def predictWord(self, speech):

		prob = self.speechModel.predict(speech.reshape(1,16000))
		maxProb = max(prob[0])
		probClass = self.classes[np.argmax(prob[0])]
		return probClass, maxProb, prob[0]

3 Source : i_senti_lstm.py
with MIT License
from abrazinskas

    def test(self, **data_source_kwargs):
        """Iterates over data batches, computes and prints accuracy [0, 1]."""
        correct = 0
        total = 0
        itr = self.data_pipeline.iter(**data_source_kwargs)
        for tweets_batch, labels_batch in itr:
            predictions = self.model.predict(tweets_batch)
            correct += np.sum(predictions == np.argmax(labels_batch, axis=1))
            total += len(tweets_batch)
        print ("accuracy: %f" % (float(correct)/total))

3 Source : sharpmask.py
with Apache License 2.0
from aby2s

    def _eval_resnet(self, eval_source):
        self.sess.run([self.placeholder_init_op], feed_dict={self.image_placeholder: eval_source})
        prediction = self.sess.run([self.resnet_output])
        return IM_CLASSES[np.argmax(prediction[0])]

    def _create_dataset(self, data_path, batch_size):

3 Source : imtools.py
with Apache License 2.0
from Accenture

def get_aabb_max_axis(aabb):
    ''' split heuristic to obtain good subdivision in minimal steps '''
    return np.argmax([abs(aabb[2] - aabb[0]), abs(aabb[3] - aabb[1])])


def enclosing_aabb(aabbs):

3 Source : mob.py
with Apache License 2.0
from Accenture

def get_aabb_max_axis(aabb):
    ''' split heuristic to obtain good subdivision in minimal steps '''
    return np.argmax([abs(aabb[2] - aabb[0]), abs(aabb[3] - aabb[1])])


def enclosing_aabb(aabbs, scores=None):

3 Source : print_numbers.py
with Apache License 2.0
from acmi-lab

def oracle_results(acc, mpe,orig_acc):
    idx = np.argmax(orig_acc[:, :, 2], axis=1)
    print(idx)

    max_acc = []
    mpe_at_max_acc = [] 
    for i in range(len(idx)): 
        max_acc.append(acc[i,idx[i]])
        mpe_at_max_acc.append(mpe[i, idx[i]])

    max_acc = np.array(max_acc)
    mpe_at_max_acc = np.array(mpe_at_max_acc)

    return max_acc, mpe_at_max_acc

def converged_results(acc, mpe): 

3 Source : numpy_routines.py
with GNU General Public License v3.0
from ad12

def argmax(x, axis=None):
    """See :func:`numpy.argmax`."""
    return reduce_array_op(np.argmax, x, axis=axis)


@implements(np.sum)

3 Source : utils.py
with BSD 3-Clause "New" or "Revised" License
from adambielski

def hardest_negative(loss_values):
    hard_negative = np.argmax(loss_values)
    return hard_negative if loss_values[hard_negative] > 0 else None


def random_hard_negative(loss_values):

3 Source : run.py
with GNU General Public License v3.0
from AdamStelmaszczyk

def greedy_action(env, model, goal, observation):
    next_q_values = predict(env, model, goals=[goal], observations=[observation])
    return np.argmax(next_q_values)


def epsilon_greedy_action(env, model, goal, observation, epsilon):

3 Source : run.py
with MIT License
from AdamStelmaszczyk

def greedy_action(env, model, observation):
    next_q_values = predict(env, model, observations=[observation])
    return np.argmax(next_q_values)


def epsilon_greedy_action(env, model, observation, epsilon):

3 Source : classification.py
with MIT License
from Adapter-Hub

    def compute_metrics(self, predictions, references):
        if isinstance(predictions, tuple):
            predictions = predictions[0]
        predictions = np.argmax(predictions, axis=1)
        return {"accuracy": (predictions == references).mean()}

    @property

3 Source : multiple_choice.py
with MIT License
from Adapter-Hub

    def compute_metrics(self, predictions, references):
        if isinstance(predictions, tuple):
            predictions = predictions[0]
        predictions = np.argmax(predictions, axis=1)
        return {"accuracy": (predictions == references).mean()}

    def get_prediction_head_config(self):

3 Source : super_glue.py
with MIT License
from Adapter-Hub

    def compute_metrics(self, predictions, references):
        if isinstance(predictions, tuple):
            predictions = predictions[0]
        if self.args.task_name == "multirc":
            predictions = np.argmax(predictions, axis=1)
            predictions = [{"idx": idx, "prediction": pred} for idx, pred in zip(self.dev_split["idx"], predictions)]
        elif self.args.task_name == "record":
            max_preds = {}  # group predictions by question id
            for idx, entity, pred, answers in zip(
                self.dev_split["idx"], self.dev_split["entities"], predictions, self.dev_split["answers"]
            ):
                idx_string = f"{idx['passage']}-{idx['query']}"
                if idx_string not in max_preds or pred[1] > max_preds[idx_string]["logit"]:
                    max_preds[idx_string] = {"idx": idx, "logit": pred[1], "entity": entity, "answers": answers}
            predictions = [{"idx": val["idx"], "prediction_text": val["entity"]} for _, val in max_preds.items()]
            references = [{"idx": val["idx"], "answers": val["answers"]} for _, val in max_preds.items()]
        else:
            predictions = np.argmax(predictions, axis=1)
        return self.metric.compute(predictions=predictions, references=references)

3 Source : network.py
with MIT License
from ADGEfficiency

    def consensus(self):
        """ find the node that mined the block """
        chains = [node.chain for node in self]
        new_chain = chains[np.argmax([len(chain) for chain in chains])]

        for node in self:
            node.chain = new_chain

    def validate_transactions(self, balances, new_transactions):

3 Source : pos_tagger.py
with MIT License
from adhaamehab

def _logits_to_tokens(sequences, index):
    token_sequences = []
    for categorical_sequence in sequences:
            token_sequence = []
            for categorical in categorical_sequence:
                    token_sequence.append(index[np.argmax(categorical)])

            token_sequences.append(token_sequence)

    return token_sequences

    ## One-Hot Encoded tags
def to_categorical(sequences, categories):

3 Source : ImgTransform.py
with Apache License 2.0
from adityaintwala

    def OrientationAngle(img):
        I = img
        I = I - mean(I)     
        sinogram = radon(I)        
        r = array([ImgTransform.rms_flat(line) for line in sinogram.transpose()])
        rotation = argmax(r)
  
        OrientationAngle = rotation - 90
        return OrientationAngle

    @staticmethod

3 Source : test_nanfunctions.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_nanargmax(self):
        tgt = np.argmax(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmax(mat), tgt)

    def test_nansum(self):

3 Source : test_nanops.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_nanargmax(self):
        with warnings.catch_warnings(record=True):
            func = partial(self._argminmax_wrap, func=np.argmax)
            self.check_funs(nanops.nanargmax, func,
                            allow_str=False, allow_obj=False,
                            allow_date=True, allow_tdelta=True)

    def test_nanargmin(self):

3 Source : _distn_infrastructure.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q):
        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
        indx = argmax(sqq >= qq, axis=-1)
        return self.xk[indx]

    def _rvs(self):

3 Source : classification.py
with Apache License 2.0
from advboxes

    def is_adversarial(self, predictions, label):
        """Decides if predictions for an image are adversarial."""
        top1 = np.argmax(predictions)
        return top1 != label


class ConfidentMisclassification(Criterion):

3 Source : classification.py
with Apache License 2.0
from advboxes

    def is_adversarial(self, predictions, label):
        """Decides if predictions for an image are adversarial."""
        top1 = np.argmax(predictions)
        probabilities = softmax(predictions)
        return (np.max(probabilities) >= self.threshold) and (top1 != label)


class TopKMisclassification(Criterion):

3 Source : classification.py
with Apache License 2.0
from advboxes

    def is_adversarial(self, predictions, label):
        """Decides if predictions for an image are adversarial."""
        top1 = np.argmax(predictions)
        return top1 == self.target_class()


class OriginalClassProbability(Criterion):

3 Source : suggest.py
with GNU General Public License v3.0
from aehrc

    def on_trial_complete(self, trial_id: str, result: Optional[Dict] = None, error: bool = False):
        if result:
            if self._mode == "max":
                idx = np.argmax([i[self._metric] for i in self.intermediate_results[trial_id]])
            elif self._mode == "min":
                idx = np.argmin([i[self._metric] for i in self.intermediate_results[trial_id]])
            self._process_result(trial_id, self.intermediate_results[trial_id][idx])
            self.intermediate_results.pop(trial_id, None)
        self._live_trial_mapping.pop(trial_id)
