numpy.random.beta

Here are examples of the Python API numpy.random.beta taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

363 Examples
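
For reference, numpy.random.beta(a, b, size=None) draws samples from the Beta(a, b) distribution on (0, 1); a and b must be positive and may be arrays. A minimal sketch of the call, and of why the symmetric case Beta(alpha, alpha) dominates the examples below:

import numpy as np

lam = np.random.beta(2.0, 5.0)            # single float in (0, 1)
lams = np.random.beta(2.0, 5.0, size=4)   # array of 4 independent draws

# Mixup-style augmentation draws from the symmetric Beta(alpha, alpha):
# alpha < 1 pushes mass toward 0 and 1, alpha = 1 is uniform on [0, 1],
# alpha > 1 concentrates mass around 0.5.
m = np.random.beta(0.2, 0.2)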

3 Source : data_augment.py
with Apache License 2.0
from 1adrianb

    def __call__(self, img):
        mixing_weights = np.float32(
            np.random.dirichlet([self.alpha] * self.width)
        )
        m = np.float32(np.random.beta(self.alpha, self.alpha))
        if self.blended:
            mixed = self._apply_blended(img, mixing_weights, m)
        else:
            mixed = self._apply_basic(img, mixing_weights, m)
        return mixed


def augment_and_mix_transform(config_str, hparams):

3 Source : auto_augment.py
with Apache License 2.0
from 1chimaruGin

    def __call__(self, img):
        mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width))
        m = np.float32(np.random.beta(self.alpha, self.alpha))
        if self.blended:
            mixed = self._apply_blended(img, mixing_weights, m)
        else:
            mixed = self._apply_basic(img, mixing_weights, m)
        return mixed


def augment_and_mix_transform(config_str, hparams):

3 Source : net.py
with MIT License
from acl21

def mixup(inputs, labels):
    """Apply mixup to minibatch (https://arxiv.org/abs/1710.09412)."""
    alpha = cfg.TRAIN.MIXUP_ALPHA
    assert labels.shape[1] == cfg.MODEL.NUM_CLASSES, "mixup labels must be one-hot"
    if alpha > 0:
        m = np.random.beta(alpha, alpha)
        permutation = torch.randperm(labels.shape[0])
        inputs = m * inputs + (1.0 - m) * inputs[permutation, :]
        labels = m * labels + (1.0 - m) * labels[permutation, :]
    return inputs, labels, labels.argmax(1)

3 Source : augmix.py
with MIT License
from alibaba

    def aug(image, preprocess, mixture_width=3, mixture_depth=-1, aug_prob_coeff=-1.0, aug_severity=1):
        from .augmix_impls import augmentations_all

        ws = np.float32(
            np.random.dirichlet([aug_prob_coeff] * mixture_width))
        m = np.float32(np.random.beta(aug_prob_coeff, aug_prob_coeff))

        preprocessed = preprocess(image)
        mix = torch.zeros_like(preprocessed)
        for i in range(mixture_width):
            image_aug = image.copy()
            depth = mixture_depth if mixture_depth > 0 else np.random.randint(1, 4)
            for _ in range(depth):
                op = np.random.choice(augmentations_all)
                image_aug = op(image_aug, aug_severity)
            mix += ws[i] * preprocess(image_aug)

        mixed = (1.0 - m) * preprocessed + m * mix
        return mixed

3 Source : augmentations.py
with GNU General Public License v3.0
from aqntks

def mixup(im, labels, im2, labels2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    return im, labels


def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)

3 Source : transforms.py
with MIT License
from arberzela

def mixup_data(x, y, alpha=1.0, is_cuda=True):
    lam = np.random.beta(alpha, alpha) if alpha > 0. else 1.
    batch_size = x.size()[0]
    index = randperm(batch_size).cuda() if is_cuda else randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam

def mixup_criterion(y_a, y_b, lam):
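
The mixup_criterion helper that closes many of these snippets is cut off by the listing. For reference, the original mixup code (hongyi-zhang/mixup, listed further down) defines it as a closure over the two label sets; a sketch of that common pattern follows, plus the five-argument variant several other repos above use (the second name is chosen here only to avoid a clash):

def mixup_criterion(y_a, y_b, lam):
    # Closure form: given a loss function and predictions, blend the loss
    # against both label sets with weight lam.
    return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)

def mixup_criterion_direct(criterion, pred, y_a, y_b, lam):
    # Direct form with the same arithmetic, seen in other snippets above.
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)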

3 Source : gdumb.py
with MIT License
from arthurdouillard

def apply_cutmix(x, y, alpha=1.0):
    assert (alpha > 0)
    # generate mixed sample
    lam = np.random.beta(alpha, alpha)

    batch_size = x.size()[0]
    index = torch.randperm(batch_size).to(x.device)

    y_a, y_b = y, y[index]
    bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
    x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]

    # adjust lambda to exactly match pixel ratio
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))
    return x, y_a, y_b, lam


def rand_bbox(size, lam):
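
Several CutMix snippets above call rand_bbox, which the listing truncates. A sketch following the official CutMix reference implementation (clovaai/CutMix-PyTorch); the repos above may differ in detail:

import numpy as np

def rand_bbox(size, lam):
    # size is an NCHW shape; the sampled box covers roughly (1 - lam) of the
    # image area. Naming follows the official code (size[2] as W, size[3] as H).
    W, H = size[2], size[3]
    cut_rat = np.sqrt(1.0 - lam)
    cut_w, cut_h = int(W * cut_rat), int(H * cut_rat)

    # Uniformly sample the box centre, then clip the box to the image.
    cx, cy = np.random.randint(W), np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2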

3 Source : cutmix.py
with Apache License 2.0
from Ascend

def cutmix(x, target, beta):
    lam = np.random.beta(beta, beta)
    rand_index = torch.randperm(x.size()[0]).npu()

    target_a = target.clone().detach()
    target_b = target[rand_index].clone().detach()

    bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
    x[:, :, bbx1:bbx2, bby1:bby2] = x[rand_index, :, bbx1:bbx2, bby1:bby2]

    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))
    return x, target_a, target_b, lam


def rand_bbox(size, lam):

3 Source : main.py
with Apache License 2.0
from Ascend

def mixup_data_sup(x, y, alpha=1.0):
    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    batch_size = x.size()[0]
    index = np.random.permutation(batch_size)
    #x, y = x.numpy(), y.numpy()
    #mixed_x = torch.Tensor(lam * x + (1 - lam) * x[index,:])
    mixed_x = lam * x + (1 - lam) * x[index,:]
    #y_a, y_b = torch.Tensor(y).type(torch.LongTensor), torch.Tensor(y[index]).type(torch.LongTensor)
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam

def mixup_criterion(y_a, y_b, lam):

3 Source : utils.py
with Apache License 2.0
from Ascend

def mixup_data(x, y, alpha):

    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    batch_size = x.size()[0]
    index = torch.randperm(batch_size).npu()
    mixed_x = lam * x + (1 - lam) * x[index,:]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def mixup_data_labelled_unlabelled(input_l, input_u, target_l, target_u, mixup_alpha):

3 Source : utils.py
with Apache License 2.0
from Ascend

def mixup_data_labelled_unlabelled(input_l, input_u, target_l, target_u, mixup_alpha):
    
    if mixup_alpha > 0.:
        lam = np.random.beta(mixup_alpha, mixup_alpha)
    else:
        lam = 1.
    
    lam = torch.from_numpy(np.array([lam]).astype('float32')).npu()
    lam = Variable(lam)
    #lam = torch.max(lam, 1-lam)
    #indices = np.random.permutation(out.size(0))
    out = input_l*lam.expand_as(input_l) + input_u*(1-lam.expand_as(input_u))
    target_l = to_one_hot(target_l)
    target = target_l* lam.expand_as(target_l) + target_u*(1 - lam.expand_as(target_u))
    return out, target


def mixup_data_hidden(input, target,  mixup_alpha):

3 Source : utils.py
with Apache License 2.0
from Ascend

def mixup_data_hidden(input, target,  mixup_alpha):
    if mixup_alpha > 0.:
        lam = np.random.beta(mixup_alpha, mixup_alpha)
    else:
        lam = 1.
    lam = torch.from_numpy(np.array([lam]).astype('float32')).npu()
    lam = Variable(lam)
    indices = np.random.permutation(input.size(0))
    #target = to_one_hot(target)
    output = input*lam.expand_as(input) + input[indices]*(1-lam.expand_as(input))
    target_a, target_b = target ,target[indices]
    
    return output, target_a, target_b, lam



def load_data_subset(data_aug, batch_size,workers,dataset, data_target_dir, labels_per_class=100, valid_labels_per_class = 500):

3 Source : mixup.py
with Apache License 2.0
from Ascend

def mixup(alpha, num_classes, data, target):
    with torch.no_grad():
        bs = data.size(0)
        c = np.random.beta(alpha, alpha)

        perm = torch.randperm(bs).cuda()

        md = c * data + (1-c) * data[perm, :]
        mt = c * target + (1-c) * target[perm, :]
        return md, mt


class MixUpWrapper(object):

3 Source : greenbox.py
with MIT License
from asemic-horizon

    def make_sampler(self, left,mode,right,kappa):
        loc = (mode -left)/(right - left)
        al = loc * (kappa - 2) + 1
        be = (1-loc)*(kappa -2) + 1
        std_sampler = lambda size: beta(a = al, b = be, size = size)
        def sample(size):
            """Three-point estimate sampler: takes keyword "size" """
            return left + (right - left) * std_sampler(size)
        return sample
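
A standalone equivalent of what make_sampler builds, with made-up three-point-estimate values (the enclosing Greenbox class is not shown here): the Beta draw on (0, 1) is rescaled to [left, right] and peaks near the mode.

import numpy as np

left, mode, right, kappa = 1.0, 2.0, 5.0, 4.0
loc = (mode - left) / (right - left)
al, be = loc * (kappa - 2) + 1, (1 - loc) * (kappa - 2) + 1
draws = left + (right - left) * np.random.beta(al, be, size=10_000)
# draws lie in [1.0, 5.0] and cluster around the mode, 2.0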

class Greenbox():

3 Source : utils.py
with MIT License
from BIGBALLON

def mixup_data(x, y, alpha, device):
    """Returns mixed inputs, pairs of targets, and lambda"""
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    index = torch.randperm(batch_size).to(device)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def mixup_criterion(criterion, pred, y_a, y_b, lam):

3 Source : cutmix.py
with MIT License
from chandar-lab

    def apply(self, inputs, target):
        lam = np.random.beta(self.beta, self.beta)
        rand_index = torch.randperm(inputs.size()[0]).to(device)
        target_a = target
        target_b = target[rand_index]
        bbx1, bby1, bbx2, bby2 = self.rand_bbox(inputs.size(), lam)
        inputs[:, :, bbx1:bbx2, bby1:bby2] = inputs[rand_index, :, bbx1:bbx2, bby1:bby2]
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (inputs.size()[-1] * inputs.size()[-2]))
        # compute output
        return target_a, target_b, inputs, lam

3 Source : mixup.py
with MIT License
from chandar-lab

def get_lambda(alpha=1.0):
    """
    computes the interpolation policy coefficient in the mixup.
    Args:
        alpha: controls the shape of the Beta distribution.

    Returns:
        lam: a float number in [0, 1] that is the interpolation policy coefficient.
    """
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    return lam
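
The docstring's claim that alpha controls the shape of the Beta distribution can be checked empirically; an illustrative snippet (not from the repo):

import numpy as np

for alpha in (0.2, 1.0, 5.0):
    lams = np.random.beta(alpha, alpha, size=100_000)
    # The symmetric Beta always has mean 0.5; alpha moves mass between the
    # endpoints (alpha < 1) and the centre (alpha > 1).
    print(f"alpha={alpha}: 5%-95% range {np.quantile(lams, [0.05, 0.95])}")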

3 Source : relative_thompson_sampling.py
with GNU General Public License v3.0
from chang-li

    def update(self,r,c,w):
        self.W[r,c] = w
        self.allSamples[r,c,:] = np.random.beta(self.W[r,c],self.W[c,r],self.depth)
        self.allSamples[c,r,:] = np.random.beta(self.W[c,r],self.W[r,c],self.depth)

    def getSamples(self):

3 Source : relative_thompson_sampling.py
with GNU General Public License v3.0
from chang-li

    def relative_sample(self, arm_c):
        rel_theta = np.array([np.random.beta(self.w[i][arm_c], self.w[arm_c][i]) for i in range(self.n_arms)])
        rel_theta[arm_c] = 0.5
        return my_argmax(rel_theta)

    def get_arms(self):

3 Source : augmentations.py
with GNU General Public License v3.0
from Chuxwa

def mixup(im, labels, im2, labels2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    return im, labels


def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)

3 Source : dataset.py
with MIT License
from conor-horgan

    def image_mixup(self, image1, image2, alpha):
        lam = np.random.beta(alpha, alpha)
        image = (lam * image1) + ((1 - lam) * image2)
        return image
    
    def normalise_image(self, image):

3 Source : dataset.py
with MIT License
from conor-horgan

    def mixup_spectrum(self, input_spectrum1, input_spectrum2, output_spectrum1, output_spectrum2, alpha):
        lam = np.random.beta(alpha, alpha)
        input_spectrum = (lam * input_spectrum1) + ((1 - lam) * input_spectrum2)
        output_spectrum = (lam * output_spectrum1) + ((1 - lam) * output_spectrum2)
        return input_spectrum, output_spectrum
            
    def __getitem__(self, index):       

3 Source : train_cifar10.py
with MIT License
from csdongxian

def mixup_data(x, y, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    index = torch.randperm(batch_size).cuda()

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def mixup_criterion(criterion, pred, y_a, y_b, lam):

3 Source : torch_utils.py
with Apache License 2.0
from daisukelab

    def get_lambda(alpha, size, attr_ref_var):
        lambd = np.random.beta(alpha, alpha, size)
        lambd = np.concatenate([lambd[:,None], 1-lambd[:,None]], 1).max(1)
        return attr_ref_var.new_tensor(lambd)

    def transform(self, inputs, targets, train:bool):

3 Source : torch_utils.py
with Apache License 2.0
from daisukelab

    def get_lambda(self, alpha, size, attr_ref_var):
        lambd = np.random.beta(alpha, alpha, size)
        lambd = np.concatenate([lambd[:,None], 1-lambd[:,None]], 1).max(1)
        return attr_ref_var.new_tensor(lambd)

    def transform(self, inputs, targets, train:bool=True):

3 Source : cutmix.py
with MIT License
from data-sachez-2511

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        batch_x, batch_y = batch
        if self.device != batch_x.device:
            self.device = batch_x.device
        assert isinstance(batch_x, torch.Tensor)
        assert isinstance(batch_y, torch.Tensor)
        batch_size = batch_x.size(0)
        if self.on_batch:
            batch_x_, batch_y_ = self._generate_batch_sample(batch_x, batch_y)
        else:
            batch_x_, batch_y_ = self._generate_dataset_sample(batch_size, trainer.train_dataloader)
        alpha = torch.from_numpy(np.random.beta(self.alpha, self.alpha, batch_size)).to(self.device)
        for i in range(batch_size):
            x, y = self.__mix(batch_x[i], batch_x_[i], batch_y[i], batch_y_[i], alpha[i])
            batch_x[i] = x
            batch_y[i] = y

3 Source : mixup.py
with MIT License
from data-sachez-2511

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        batch_x, batch_y = batch
        if self.device != batch_x.device:
            self.device = batch_x.device
        assert isinstance(batch_x, torch.Tensor)
        assert isinstance(batch_y, torch.Tensor)
        batch_size = batch_x.size(0)
        if self.on_batch:
            batch_x_, batch_y_ = self._generate_batch_sample(batch_x, batch_y)
        else:
            batch_x_, batch_y_ = self._generate_dataset_sample(batch_size, trainer.train_dataloader)
        alpha = torch.from_numpy(np.random.beta(self.alpha, self.alpha, batch_size)).to(self.device)
        batch_x_ = batch_x * self._unsqueeze(alpha, 3, -1) + batch_x_ * self._unsqueeze((1 - alpha), 3, -1)
        batch_y_ = batch_y * self._unsqueeze(alpha, 1, -1) + batch_y_ * self._unsqueeze((1 - alpha), 1, -1)
        for i in range(batch_size):
            batch_x[i] = batch_x_[i]
            batch_y[i] = batch_y_[i]

3 Source : policies.py
with GNU General Public License v3.0
from ddbourgin

    def _select_arm(self, bandit, context):
        if not self.is_initialized:
            self._initialize_prior(bandit)

        # draw a sample from the current model posterior
        posterior_sample = np.random.beta(self.alphas, self.betas)

        # greedily select an action based on this sample
        return np.argmax(posterior_sample)

    def _update_params(self, arm_id, rwd, context):
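
The _update_params method is truncated above. For a Beta-Bernoulli Thompson sampler the conjugate update is standard, so a plausible self-contained sketch looks like this (the repo's actual class differs in structure):

import numpy as np

class BetaBernoulliTS:
    # Minimal Thompson sampler over K arms (illustrative sketch only).
    def __init__(self, K):
        self.alphas = np.ones(K)
        self.betas = np.ones(K)

    def select_arm(self):
        # Draw one posterior sample per arm, act greedily on the samples.
        return int(np.argmax(np.random.beta(self.alphas, self.betas)))

    def update(self, arm_id, rwd):
        # Conjugate update: a success (rwd=1) bumps alpha, a failure bumps beta.
        self.alphas[arm_id] += rwd
        self.betas[arm_id] += 1 - rwd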

3 Source : lightningclassifier.py
with Apache License 2.0
from Diyago

def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam

3 Source : mixtureModels.py
with GNU General Public License v3.0
from djbolder

def crPlusOneFactorLGD(N,M,w,p,c,v,gBar,xi,alpha):
    a1,b1 = calibrateBeta(gBar,xi)
    LGD = np.random.beta(a1,b1,[M,N])
    S = np.random.gamma(v, 1/v, [M]) 
    wS =  np.transpose(np.tile(1-w + w*S,[N,1]))
    pS = np.tile(p,[M,1])*wS
    H = np.random.poisson(pS,[M,N])
    lossIndicator = 1*np.greater_equal(H,1)
    lossDistribution = np.sort(np.dot(LGD*lossIndicator,c),axis=None)
    el,ul,var,es=util.computeRiskMeasures(M,lossDistribution,alpha)
    return el,ul,var,es   
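
crPlusOneFactorLGD relies on calibrateBeta(gBar, xi), which is not shown. Assuming gBar is the target mean LGD and xi a dispersion (volatility) parameter, a common method-of-moments calibration looks like the sketch below; the repo's helper may parameterize differently.

import numpy as np

def calibrate_beta_moments(mean, std):
    # Method-of-moments fit of Beta(a, b) to a target mean and standard
    # deviation (requires std**2 < mean * (1 - mean)).
    t = mean * (1 - mean) / std**2 - 1
    return mean * t, (1 - mean) * t

a1, b1 = calibrate_beta_moments(0.45, 0.25)    # e.g. mean LGD 45%, vol 25%
LGD = np.random.beta(a1, b1, size=(1000, 100))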

def crPlusMultifactor(N,M,wMat,p,c,aVec,alpha,rId):

3 Source : test_arr_estimators.py
with MIT License
from encryptogroup

def test_fingerprint():
    np.testing.assert_array_equal(get_fingerprint([0, 0, 0, 1]), [0, 1, 0, 1])
    np.testing.assert_array_equal(get_fingerprint([0, 1, 4, 3, 0, 0, 2, 4]), [0, 3, 1, 1])

    t_data0 = np.ceil(beta(1, 1, 500) * 20)
    t_data1 = np.ceil(beta(1, 3, 2000) * 500)

    fp0 = get_fingerprint(t_data0)
    assert sum([i * x for i, x in enumerate(fp0)]) == 500

    fp1 = get_fingerprint(t_data1)
    assert sum([i * x for i, x in enumerate(fp1)]) == 2000


# Takes about 3 seconds
@pytest.mark.skip()

3 Source : utils_ssl.py
with MIT License
from EricArazo

def mixup_data(x, y, alpha=1.0, device='cuda'):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if device=='cuda':
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam

def loss_mixup_reg_ep(preds, labels, targets_a, targets_b, device, lam, args):

3 Source : noisy_mixup.py
with GNU General Public License v3.0
from erichson

def _noise(x, add_noise_level=0.0, mult_noise_level=0.0, sparsity_level=0.0):
    add_noise = 0.0
    mult_noise = 1.0
    with torch.cuda.device(0):
        if add_noise_level > 0.0:
            add_noise = add_noise_level * np.random.beta(2, 5) * torch.cuda.FloatTensor(x.shape).normal_()
            #torch.clamp(add_noise, min=-(2*var), max=(2*var), out=add_noise) # clamp
        if mult_noise_level > 0.0:
            mult_noise = mult_noise_level * np.random.beta(2, 5) * (2*torch.cuda.FloatTensor(x.shape).uniform_()-1) + 1 
    return mult_noise * x + add_noise      

def do_noisy_mixup(x, y, alpha=0.0, add_noise_level=0.0, mult_noise_level=0.0):

3 Source : noisy_mixup.py
with GNU General Public License v3.0
from erichson

def do_noisy_mixup(x, y, alpha=0.0, add_noise_level=0.0, mult_noise_level=0.0):
    lam = np.random.beta(alpha, alpha) if alpha > 0.0 else 1.0
    index = torch.randperm(x.size()[0]).cuda()
    mixed_x = lam * x + (1 - lam) * x[index]
    y_a, y_b = y, y[index]
    return _noise(mixed_x, add_noise_level=add_noise_level, mult_noise_level=mult_noise_level), y_a, y_b, lam

def mixup_criterion(criterion, pred, y_a, y_b, lam):

3 Source : collators.py
with MIT License
from Erlemar

def mixup(batch: Dict[str, torch.Tensor], alpha: float) -> Dict[str, Union[torch.Tensor, float]]:
    image = batch['image']
    target = batch['target']
    indices = torch.randperm(image.shape[0])
    shuffled_data = image[indices]
    shuffled_target = target[indices]
    # TODO compare sampling from numpy and pytorch. from torch.distributions import beta
    lam = np.random.beta(alpha, alpha)
    image = image * lam + shuffled_data * (1 - lam)

    return {'image': image, 'target': target, 'shuffled_target': shuffled_target, 'lam': lam}
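
The TODO above asks how NumPy sampling compares with PyTorch; a minimal sketch of the torch.distributions equivalent, which keeps the coefficient as a tensor under torch's RNG state:

import torch
from torch.distributions.beta import Beta

alpha = 0.4
lam = Beta(alpha, alpha).sample()          # scalar tensor, same law as np.random.beta(alpha, alpha)
lams = Beta(alpha, alpha).sample((16,))    # one coefficient per sample in a batch of 16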


def cutmix(batch: Dict[str, torch.Tensor], alpha: float) -> Dict[str, Union[torch.Tensor, float]]:

3 Source : mixup.py
with MIT License
from FengHZ

def mixup_data(image, label, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = image.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_image = lam * image + (1 - lam) * image[index, :]
    label_a, label_b = label, label[index]
    return mixed_image, label_a, label_b, lam


def mixup_criterion(criterion, prediction, label_a, label_b, lam):

3 Source : densenet.py
with MIT License
from FengHZ

    def mixup_data(image, label, alpha=1.0, use_cuda=True):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            lam = 1

        batch_size = image.size(0)
        if use_cuda:
            index = torch.randperm(batch_size).cuda()
        else:
            index = torch.randperm(batch_size)

        mixed_image = lam * image + (1 - lam) * image[index, :]
        label_a, label_b = label, label[index]
        return mixed_image, label_a, label_b, lam


densenet_dict = {

3 Source : preactresnet.py
with MIT License
from FengHZ

    def mixup_data(image, label, alpha=1.0, use_cuda=True):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            lam = 1

        batch_size = image.size(0)
        if use_cuda:
            index = torch.randperm(batch_size).cuda()
        else:
            index = torch.randperm(batch_size)

        mixed_image = lam * image + (1 - lam) * image[index, :]
        label_a, label_b = label, label[index]
        return mixed_image, label_a, label_b, lam


preactresnet_dict = {

3 Source : wideresnet.py
with MIT License
from FengHZ

    def mixup_data(image, label, alpha=1.0, use_cuda=True):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        if alpha > 0:
            lam = np.random.beta(alpha, alpha)
        else:
            lam = 1

        batch_size = image.size(0)
        if use_cuda:
            index = torch.randperm(batch_size).cuda()
        else:
            index = torch.randperm(batch_size)

        mixed_image = lam * image + (1 - lam) * image[index, :]
        label_a, label_b = label, label[index]
        return mixed_image, label_a, label_b, lam


if __name__ == "__main__":

3 Source : agent.py
with Apache License 2.0
from fetchai

    def sample(self) -> int:
        """
        Sample from the bandit.

        :return: the sampled value
        """
        return round(np.random.beta(self.beta_a, self.beta_b))

    def update(self, outcome: float) -> None:

3 Source : rl_agent.py
with Apache License 2.0
from fetchai

    def sample(self) -> int:
        """
        Sample from the bandit.

        :return: the sampled value
        """
        return round(np.random.beta(self.beta_a, self.beta_b))

    def update(self, outcome: bool) -> None:

3 Source : train_C2L_dense121.py
with MIT License
from funnyzhou

def mixup_data(x, y, alpha=1.0, index=None, lam=None, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if lam is None:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = lam

    lam = max(lam, 1-lam)
    batch_size = x.size()[0]
    if index is None:
        index = torch.randperm(batch_size).cuda()
    else:
        index = index

    mixed_x = lam * x + (1 - lam) * x[index, :]
    mixed_y = lam * y + (1 - lam) * y[index]
    return mixed_x, mixed_y, lam, index

def main():

3 Source : cutmix.py
with MIT License
from GenDisc

def cutmix(x, target, beta):
    lam = np.random.beta(beta, beta)
    rand_index = torch.randperm(x.size()[0]).cuda()

    target_a = target.clone().detach()
    target_b = target[rand_index].clone().detach()

    bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
    x[:, :, bbx1:bbx2, bby1:bby2] = x[rand_index, :, bbx1:bbx2, bby1:bby2]

    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))
    return x, target_a, target_b, lam


def rand_bbox(size, lam):

3 Source : random.py
with GNU General Public License v3.0
from gerberlab

    def sample(a: Union[float, np.ndarray], b: Union[float, np.ndarray], 
        size: int=None) -> Union[float, np.ndarray]:
        '''Sample from a beta random distribution. This can be vectorized

        Parameters
        ----------
        a, b : np.ndarray, float
            These are the a and b parameters of the distribution

        Returns
        -------
        np.ndarray, float
        '''
        return npr.beta(a=a, b=b, size=size)
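
A quick illustration of the vectorized behaviour the docstring describes, calling the underlying NumPy function directly (illustrative values):

import numpy as np
import numpy.random as npr

draws = npr.beta(a=np.array([0.5, 2.0, 8.0]), b=np.array([0.5, 2.0, 2.0]))  # one draw per (a, b) pair, shape (3,)
grid = npr.beta(a=2.0, b=5.0, size=(4, 3))                                  # scalar params broadcast to shape (4, 3)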


class sics(_BaseSample):

3 Source : mixup.py
with Apache License 2.0
from google-research

def mixup_lam_idx(batch_size, alpha, use_cuda=True):
    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    return lam, index

def mixup_criterion(y_a, y_b, lam):

3 Source : utils.py
with MIT License
from haifangong

def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def mixup_criterion(y_a, y_b, lam):

3 Source : data_augmentation.py
with Apache License 2.0
from hankyul2

def make_random_mask(H, W):
    ratio = np.random.beta(1.0, 1.0)
    h, w = int(math.sqrt(1 - ratio) * H), int(math.sqrt(1 - ratio) * W)
    row, col = random.randint(0, H - h), random.randint(0, W - w)
    mask = torch.ones((H, W))
    mask[row:row + h, col:col + w] = 0
    ratio = 1 - (h * w) / (H * W)
    return mask, ratio


def cutmix(x, y):

3 Source : aug_mixup.py
with MIT License
from HanxunH

def mixup(data, targets, alpha):
    indices = torch.randperm(data.size(0))
    shuffled_data = data[indices]
    shuffled_targets = targets[indices]

    lam = np.random.beta(alpha, alpha)
    lam = max(lam, 1. - lam)
    assert 0.0 <= lam <= 1.0, lam
    data = data * lam + shuffled_data * (1 - lam)

    return data, targets, shuffled_targets, lam


class CrossEntropyMixUpLabelSmooth(torch.nn.Module):

3 Source : dirichlet.py
with MIT License
from hanyas

    def rvs(self, size=1, truncate=True):
        # stick-breaking construction
        betas = npr.beta(self.gammas[:-1], self.deltas[:-1])
        betas = np.hstack((betas, 1.))

        probs = np.zeros((self.K, ))
        probs[0] = betas[0]
        probs[1:] = betas[1:] * np.cumprod(1.0 - betas[:-1])

        return probs
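
The rvs above is a truncated stick-breaking (GEM) construction: each Beta draw breaks off a fraction of the remaining stick, and forcing the final fraction to 1 makes the weights sum to one. A standalone illustration using the Dirichlet-process special case Beta(1, gamma), rather than the per-component gammas/deltas of the snippet:

import numpy as np

def stick_breaking(gamma, K):
    # Break off a Beta(1, gamma) fraction of the remaining stick K times.
    betas = np.random.beta(1.0, gamma, size=K)
    betas[-1] = 1.0    # close the stick at truncation level K
    return betas * np.concatenate(([1.0], np.cumprod(1.0 - betas[:-1])))

probs = stick_breaking(gamma=2.0, K=10)
assert np.isclose(probs.sum(), 1.0)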

    def mean(self):

3 Source : utils.py
with BSD 3-Clause "New" or "Revised" License
from hongyi-zhang

def mixup_lam_idx(batch_size, alpha, use_cuda=True):
    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    return lam, index    

def mixup_criterion(y_a, y_b, lam):

See More Examples