numpy.ceil

Here are examples of the Python API numpy.ceil, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

2676 Examples
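Before the project examples, a minimal sketch of what numpy.ceil does and of the int(np.ceil(n / b)) ceiling-division idiom that most of the snippets below rely on (the values are illustrative):

import numpy as np

# np.ceil rounds each element up to the nearest integer; the result is a float array
print(np.ceil([1.2, -1.2, 3.0]))  # [ 2. -1.  3.]

# the recurring idiom: how many batches of size b are needed to cover n items?
n, b = 103, 16
n_batches = int(np.ceil(n / b))  # 7; int() is needed because np.ceil returns a float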

3 Source : meters.py
with MIT License
from 2han9x1a0release

    def get_iter_stats(self, cur_epoch, cur_iter):
        mem_usage = gpu_mem_usage()
        iter_stats = {
            "_type": "test_iter",
            "epoch": "{}/{}".format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
            "iter": "{}/{}".format(cur_iter + 1, self.max_iter),
            "time_avg": self.iter_timer.average_time,
            "time_diff": self.iter_timer.diff,
            "top1_err": self.mb_top1_err.get_win_median(),
            "top5_err": self.mb_top5_err.get_win_median(),
            "mem": int(np.ceil(mem_usage)),
        }
        return iter_stats

    def log_iter_stats(self, cur_epoch, cur_iter):

3 Source : meters.py
with MIT License
from 2han9x1a0release

    def get_epoch_stats(self, cur_epoch):
        top1_err = self.num_top1_mis / self.num_samples
        top5_err = self.num_top5_mis / self.num_samples
        self.min_top1_err = min(self.min_top1_err, top1_err)
        self.min_top5_err = min(self.min_top5_err, top5_err)
        mem_usage = gpu_mem_usage()
        stats = {
            "_type": "test_epoch",
            "epoch": "{}/{}".format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
            "time_avg": self.iter_timer.average_time,
            "top1_err": top1_err,
            "top5_err": top5_err,
            "min_top1_err": self.min_top1_err,
            "min_top5_err": self.min_top5_err,
            "mem": int(np.ceil(mem_usage)),
        }
        return stats

    def log_epoch_stats(self, cur_epoch):

3 Source : seg_model.py
with GNU General Public License v3.0
from a514514772

def outS(i):
    i = int(i)
    i = (i + 1) // 2  # floor division: the original relied on Python 2 integer division
    i = int(np.ceil((i + 1) / 2.0))
    i = (i + 1) // 2
    return i


def conv3x3(in_planes, out_planes, stride=1):

3 Source : image_geom.py
with Apache License 2.0
from achao2013

def impad_to_multiple(img, divisor, pad_val=0):
    """Pad an image to ensure each edge to be multiple to some number.

    Args:
        img (ndarray): Image to be padded.
        divisor (int): Padded image edges will be multiples of the divisor.
        pad_val (Number | Sequence[Number]): Same as :func:`impad`.

    Returns:
        ndarray: The padded image.
    """
    pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
    pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
    return impad(img, shape=(pad_h, pad_w), pad_val=pad_val)


def cutout(img, shape, pad_val=0):
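A quick check of the round-up-to-a-multiple arithmetic in impad_to_multiple above (the shape and divisor values are made up):

import numpy as np

h, w, divisor = 375, 500, 32
pad_h = int(np.ceil(h / divisor)) * divisor  # 384, the smallest multiple of 32 >= 375
pad_w = int(np.ceil(w / divisor)) * divisor  # 512, the smallest multiple of 32 >= 500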

3 Source : plot.py
with MIT License
from AcutronicRobotics

def smooth_reward_curve(x, y):
    halfwidth = int(np.ceil(len(x) / 60))  # Halfwidth of our smoothing convolution
    k = halfwidth
    xsmoo = x
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
        mode='same')
    return xsmoo, ysmoo


def load_results(file):

3 Source : run.py
with MIT License
from acvictor

def subsample(y, fps_from = 100.0, fps_to = 30):
	factor = int(np.ceil(fps_from / fps_to))
	# Subsample the points
	new_y = np.zeros((int(y.shape[0] / factor), 20, 2))  # (timesteps, 20, 2): 20 (x, y) keypoints per frame
	for idx in range(new_y.shape[0]):
		if not (idx * factor > y.shape[0] - 1):
			new_y[idx, :, 0] = y[idx * factor, 0:20]
			new_y[idx, :, 1] = y[idx * factor, 20:]
		else:
			break
	new_y = [np.array(each) for each in new_y.tolist()]
	return new_y

def drawLips(keypoints, new_img, c = (255, 255, 255), th = 1, show = False):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, n, p):
        vals = ceil(special.bdtrik(q, n, p))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.bdtr(vals1, n, p)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, n, p, moments='mv'):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, n, p):
        vals = ceil(special.nbdtrik(q, n, p))
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, n, p)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, n, p):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, p):
        vals = ceil(log(1.0-q)/log(1-p))
        temp = self._cdf(vals-1, p)
        return np.where((temp >= q) & (vals > 0), vals-1, vals)

    def _stats(self, p):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, mu):
        vals = ceil(special.pdtrik(q, mu))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.pdtr(vals1, mu)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, mu):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, lambda_):
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(self.a, np.inf)
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, lambda_):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, lambda_, N):
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, lambda_, N):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, low, high):
        vals = ceil(q * (high - low) + low) - 1
        vals1 = (vals - 1).clip(low, high)
        temp = self._cdf(vals1, low, high)
        return np.where(temp >= q, vals1, vals)

    def _stats(self, low, high):

3 Source : _discrete_distns.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def _ppf(self, q, a):
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        vals1 = vals - 1
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)

    def _stats(self, a):
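The _ppf methods above all follow one pattern: invert the CDF analytically, take ceil to land on the smallest integer whose CDF reaches q, then evaluate the CDF one step below because floating-point rounding can push ceil one step too high. A minimal standalone sketch of the idea for the geometric distribution (not scipy's actual code):

import numpy as np

def geom_ppf(q, p):
    # invert F(k) = 1 - (1-p)**k  =>  k >= log(1-q) / log(1-p)
    vals = np.ceil(np.log1p(-q) / np.log1p(-p))
    # back off one step wherever the CDF already reaches q there
    vals1 = np.maximum(vals - 1, 1)
    cdf = 1 - (1 - p) ** vals1
    return np.where(cdf >= q, vals1, vals)

print(geom_ppf(np.array([0.1, 0.5, 0.9, 0.99]), p=0.3))  # [ 1.  2.  7. 13.]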

3 Source : util.py
with MIT License
from afourast

def gaussian_impulse(sigma, size=None):
    import scipy.ndimage
    if size is None:
        size = int(4 * np.ceil(sigma) + 1)  # np.ceil returns a float; the size must be an int
    a = np.zeros((size, size))
    if size > 0:
        a[size // 2, size // 2] = 1.  # integer division for a valid index
    G = scipy.ndimage.gaussian_filter(a, sigma)
    G /= np.sum(G)
    return G

# def catnew(xs, axis = -1):
#   return np.concatenate([np.array(x)[np.newaxis,...] for x in xs], axis = axis)


def istup(x):

3 Source : pyatsa.py
with MIT License
from agroimpacts

def cloud_height_ranges(h_high, h_low):
    """
    Takes two arrays of the maximum and minimum cloud heights, returning a
    list of arrays, the same length as the time series, containing the range
    of cloud heights used to compute the cloud shadow masks.

    Returns: Difference between the highest potential height and the lowest, in pixel units.
    """
    h_range_lengths = np.ceil((h_high-h_low)/3.0)
    h_ranges = []
    for i, x in enumerate(h_range_lengths):
        h_ranges.append(np.arange(x)*3+h_low[i])
    return h_ranges


def shadow_shift_coords(h_ranges, angles):
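A hypothetical call with two scenes (made-up heights, in pixel units): each range steps by 3 from the low height, with np.ceil((high - low) / 3) steps:

import numpy as np

h_high = np.array([10.0, 7.0])
h_low = np.array([1.0, 4.0])
print(cloud_height_ranges(h_high, h_low))  # [array([1., 4., 7.]), array([4.])]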

3 Source : apply_filter.py
with MIT License
from ahn-github

def filter_show(filters, nx=4, show_num=16):
    """
    c.f. https://gist.github.com/aidiary/07d530d5e08011832b12#file-draw_weight-py
    """
    FN, C, FH, FW = filters.shape
    ny = int(np.ceil(show_num / nx))

    fig = plt.figure()
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

    for i in range(show_num):
        ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
        ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')


network = SimpleConvNet(input_dim=(1,28,28), 

3 Source : visualize_filter.py
with MIT License
from ahn-github

def filter_show(filters, nx=8, margin=3, scale=10):
    """
    c.f. https://gist.github.com/aidiary/07d530d5e08011832b12#file-draw_weight-py
    """
    FN, C, FH, FW = filters.shape
    ny = int(np.ceil(FN / nx))

    fig = plt.figure()
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

    for i in range(FN):
        ax = fig.add_subplot(ny, nx, i+1, xticks=[], yticks=[])
        ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
    plt.show()


network = SimpleConvNet()

3 Source : hendrycks_robustness.py
with MIT License
from aiaudit-org

def clipped_zoom(img, zoom_factor):
    h = img.shape[0]
    # ceil crop height (= crop width)
    ch = int(np.ceil(h / zoom_factor))

    top = (h - ch) // 2
    img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
    # trim off any extra pixels
    trim_top = (img.shape[0] - h) // 2

    return img[trim_top:trim_top + h, trim_top:trim_top + h]


# /////////////// End Distortion Helpers ///////////////


# /////////////// Distortions ///////////////

class Distortions:

3 Source : fid_tf.py
with MIT License
from AilsaF

def get_inception_activations(inps):
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    act = np.zeros([inps.shape[0], 2048], dtype=np.float32)
    for i in range(n_batches):
        inp = inps[i * BATCH_SIZE: (i + 1) * BATCH_SIZE] / 255. * 2 - 1
        act[i * BATCH_SIZE: i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(activations, feed_dict={
            inception_images: inp})
    return act


def activations2distance(act1, act2):

3 Source : inception_score_tf.py
with MIT License
from AilsaF

def get_inception_probs(inps):
    n_batches = int(np.ceil(float(inps.shape[0]) / BATCH_SIZE))
    preds = np.zeros([inps.shape[0], 1000], dtype = np.float32)
    for i in range(n_batches):
        inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
        preds[i * BATCH_SIZE : i * BATCH_SIZE + min(BATCH_SIZE, inp.shape[0])] = session.run(logits,{inception_images: inp})[:, :1000]
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds


def preds2score(preds, splits=10):

3 Source : net_blocks.py
with MIT License
from aimagelab

def bilinear_init(kernel_size=4):
    # Following Caffe's BilinearUpsamplingFiller
    # https://github.com/BVLC/caffe/pull/2213/files
    import numpy as np
    width = kernel_size
    height = kernel_size
    f = int(np.ceil(width / 2.))
    cc = (2 * f - 1 - f % 2) / (2.*f)
    weights = torch.zeros((height, width))
    for y in range(height):
        for x in range(width):
            weights[y, x] = (1 - np.abs(x / f - cc)) * (1 - np.abs(y / f - cc))

    return weights
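A quick sanity check of the kernel bilinear_init produces (assuming torch is imported; with kernel_size=4, f = ceil(4/2) = 2 and cc = 0.75, so the kernel is the outer product of [0.25, 0.75, 0.75, 0.25] with itself):

w = bilinear_init(4)
print(w[0])  # tensor([0.0625, 0.1875, 0.1875, 0.0625])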

3 Source : net_blocks.py
with MIT License
from akanazawa

def bilinear_init(kernel_size=4):
    # Following Caffe's BilinearUpsamplingFiller
    # https://github.com/BVLC/caffe/pull/2213/files
    import numpy as np
    width = kernel_size
    height = kernel_size
    f = int(np.ceil(width / 2.))
    cc = (2 * f - 1 - f % 2) / (2.*f)
    weights = torch.zeros((height, width))
    for y in range(height):
        for x in range(width):
            weights[y, x] = (1 - np.abs(x / f - cc)) * (1 - np.abs(y / f - cc))

    return weights


if __name__ == '__main__':

3 Source : test_split.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute

def _get_n_incomplete_windows(window_length, step_length) -> int:
    return int(
        np.ceil(
            _coerce_duration_to_int(duration=window_length, freq="D")
            / _coerce_duration_to_int(duration=step_length, freq="D")
        )
    )


@pytest.mark.parametrize("y", TEST_YS)

3 Source : test_naive.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute

def test_strategy_last_seasonal(fh, sp):
    """Test last strategy on seasonal data."""
    f = NaiveForecaster(strategy="last", sp=sp)
    f.fit(y_train)
    y_pred = f.predict(fh)

    # check predicted index
    _assert_correct_pred_time_index(y_pred.index, y_train.index[-1], fh)

    # check values
    fh = check_fh(fh)  # get well formatted fh
    reps = int(np.ceil(max(fh) / sp))
    expected = np.tile(y_train.iloc[-sp:], reps=reps)[fh - 1]
    np.testing.assert_array_equal(y_pred, expected)


@pytest.mark.parametrize("fh", TEST_OOS_FHS)

3 Source : catch22.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute

    def _IN_AutoMutualInfoStats_40_gaussian_fmmi(X_ac):
        # First minimum of the automutual information function.
        tau = int(min(40, np.ceil(len(X_ac) / 2)))

        diffs = np.zeros(tau - 1)
        prev = -0.5 * np.log(1 - np.power(X_ac[1], 2))
        for i in range(len(diffs)):
            corr = -0.5 * np.log(1 - np.power(X_ac[i + 2], 2))
            diffs[i] = corr - prev
            prev = corr

        for i in range(len(diffs) - 1):
            if diffs[i] * diffs[i + 1] < 0 and diffs[i] < 0:
                return i + 1

        return tau

    @staticmethod

3 Source : panel.py
with BSD 3-Clause "New" or "Revised" License
from alan-turing-institute

def _make_classification_y(
    n_instances=20, n_classes=2, return_numpy=True, random_state=None
):
    if not n_instances > n_classes:
        raise ValueError("n_instances must be bigger than n_classes")
    rng = check_random_state(random_state)
    n_repeats = int(np.ceil(n_instances / n_classes))
    y = np.tile(np.arange(n_classes), n_repeats)[:n_instances]
    rng.shuffle(y)
    if return_numpy:
        return y
    else:
        return pd.Series(y)


def make_classification_problem(

3 Source : util.py
with MIT License
from alexanderimanicowenrivers

def make_batches(size, batch_size):
    nb_batch = int(np.ceil(size / float(batch_size)))
    res = [(i * batch_size, min(size, (i + 1) * batch_size)) for i in range(0, nb_batch)]
    return res


class IndexGenerator:
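A hypothetical call showing the half-open index ranges make_batches yields; np.ceil guarantees a final short batch rather than silently dropping the remainder:

print(make_batches(10, 3))  # [(0, 3), (3, 6), (6, 9), (9, 10)]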

3 Source : utils.py
with MIT License
from alexanderkroner

    def write_summary(self, mean_train_loss, mean_valid_loss):
        train_time = str(timedelta(seconds=np.ceil(self._train_time)))
        valid_time = str(timedelta(seconds=np.ceil(self._valid_time)))

        train_information = (mean_train_loss, train_time)
        valid_information = (mean_valid_loss, valid_time)

        train_output = "\n\tTrain loss: %.6f (%s)" % train_information
        valid_output = "\tValid loss: %.6f (%s)" % valid_information

        print(train_output, flush=True)
        print(valid_output, flush=True)

        self._flush()

3 Source : ToolBlockAudio.py
with MIT License
from alexanderlerch

def ToolBlockAudio(x, iBlockLength, iHopLength, f_s):

    iNumBlocks = np.ceil(x.shape[0] / iHopLength).astype(int)

    # time stamp vector
    t = np.arange(0, iNumBlocks) * iHopLength / f_s + iBlockLength / (2*f_s)

    # pad with block length zeros just to make sure it runs for weird inputs, too
    afAudioPadded = np.concatenate((x, np.zeros([iBlockLength+iHopLength, ])), axis=0)

    return np.vstack([np.array(afAudioPadded[n*iHopLength:n*iHopLength+iBlockLength]) for n in range(iNumBlocks)]), t
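A hypothetical call illustrating the block count: np.ceil(10000 / 512) gives 20 blocks, and the zero-padding above lets the final partial block still be sliced at full length:

import numpy as np

x = np.random.randn(10000)
blocks, t = ToolBlockAudio(x, iBlockLength=1024, iHopLength=512, f_s=44100)
print(blocks.shape, t.shape)  # (20, 1024) (20,)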

3 Source : trn.py
with MIT License
from alexandonian

    def _prepare_module(self):
        depth = int(np.ceil((self.num_inputs - self.relation_size) / (self.relation_size - 1)))
        num_inputs_final = self.num_inputs + depth * (1 - self.relation_size)
        self.relations = nn.ModuleList([
            Relation(self.relation_size,
                     self.in_features,
                     self.in_features)
            for _ in range(depth)])
        self.linears = nn.ModuleList([
            nn.Linear(self.in_features,
                      self.out_features)
            for _ in range(depth)])
        self.final_linear = nn.Linear(self.in_features, self.out_features)
        self.final_relation = Relation(num_inputs_final, self.in_features, self.out_features)

    def forward(self, input):

3 Source : visualize.py
with MIT License
from alexsax

def smooth_reward_curve(x, y):
    # Halfwidth of our smoothing convolution
    halfwidth = min(31, int(np.ceil(len(x) / 30)))
    k = halfwidth
    xsmoo = x[k:-k]
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='valid') / \
        np.convolve(np.ones_like(y), np.ones(2 * k + 1), mode='valid')
    downsample = max(int(np.floor(len(xsmoo) / 1e3)), 1)
    return xsmoo[::downsample], ysmoo[::downsample]


def fix_point(x, y, interval):

3 Source : fGWAS.py
with MIT License
from AlexTISYoung

def compute_batch_boundaries(snp_ids,batch_size):
    nsnp = snp_ids.shape[0]
    n_blocks = int(np.ceil(float(nsnp)/float(batch_size)))
    block_bounds = np.zeros((n_blocks,2),dtype=int)
    start = 0
    for i in range(n_blocks-1):
        block_bounds[i,0] = start
        block_bounds[i,1] = start+batch_size
        start += batch_size
    block_bounds[n_blocks-1,:] = np.array([start,nsnp])
    return block_bounds

def process_batch(snp_ids, y, pheno_ids, pargts_f, gts_f, fit_null=False, tau=None, sigma2=None, null_alpha=None, covar=None, parsum=False,

3 Source : contour.py
with MIT License
from alvarobartt

    def _process_linewidths(self):
        linewidths = self.linewidths
        Nlev = len(self.levels)
        if linewidths is None:
            tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
        else:
            if not cbook.iterable(linewidths):
                linewidths = [linewidths] * Nlev
            else:
                linewidths = list(linewidths)
                if len(linewidths) < Nlev:
                    nreps = int(np.ceil(Nlev / len(linewidths)))
                    linewidths = linewidths * nreps
                if len(linewidths) > Nlev:
                    linewidths = linewidths[:Nlev]
            tlinewidths = [(w,) for w in linewidths]
        return tlinewidths

    def _process_linestyles(self):

3 Source : test_rbm.py
with MIT License
from alvarobartt

def test_partial_fit():
    X = Xdigits.copy()
    rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
                       batch_size=20, random_state=9)
    n_samples = X.shape[0]
    n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
    batch_slices = np.array_split(X, n_batches)

    for i in range(7):
        for batch in batch_slices:
            rbm.partial_fit(batch)

    assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
    assert_array_equal(X, Xdigits)


def test_transform():

3 Source : cb_run.py
with MIT License
from amiratag

def sess_run(model, variable, images, labels=None, batch_size=BATCH_SIZE):
    '''Divides inputs into smaller chunks and performs sess.run'''
    output = []    
    num_batches = int(np.ceil(len(images) / batch_size))
    for batch in range(num_batches):
        batch_idxs = np.arange(
            batch * batch_size, min((batch+1) * batch_size, len(images))
        )
        input_dic = {model.input: images[batch_idxs]}
        if labels is not None:
            input_dic[model.y_input] = labels[batch_idxs]
        output.append(model.sess.run(variable, input_dic))
    try:
        return np.concatenate(output, 0)
    except:
        return np.array(output)
    
    
def value(model, images, labels, metric='accuracy', batch_size=BATCH_SIZE):

3 Source : utils_classif.py
with MIT License
from andresperezlopez

def get_num_instances_per_file(f_name, patch_len=25, patch_hop=12):
    """
    Return the number of context_windows or instances generated out of a given file
    """
    shape = get_shape(os.path.join(f_name.replace('.data', '.shape')))
    file_frames = float(shape[0])
    return np.maximum(1, int(np.ceil((file_frames-patch_len)/patch_hop)))


def get_feature_size_per_file(f_name):

3 Source : evaluation_metrics.py
with MIT License
from andresperezlopez

    def f1_overall_1sec(self, O, T):
        new_size = int(np.ceil(O.shape[0] / self._block_size))
        O_block = np.zeros((new_size, O.shape[1]))
        T_block = np.zeros((new_size, O.shape[1]))
        for i in range(0, new_size):
            O_block[i, :] = np.max(O[int(i * self._block_size):int(i * self._block_size + self._block_size - 1), :], axis=0)
            T_block[i, :] = np.max(T[int(i * self._block_size):int(i * self._block_size + self._block_size - 1), :], axis=0)
        return self.f1_overall_framewise(O_block, T_block)

    def er_overall_1sec(self, O, T):

3 Source : evaluation_metrics.py
with MIT License
from andresperezlopez

def f1_overall_1sec(O, T, block_size):
    if len(O.shape) == 3:
        O, T = reshape_3Dto2D(O), reshape_3Dto2D(T)
    new_size = int(np.ceil(O.shape[0] / block_size))
    O_block = np.zeros((new_size, O.shape[1]))
    T_block = np.zeros((new_size, O.shape[1]))
    for i in range(0, new_size):
        O_block[i, :] = np.max(O[int(i * block_size):int(i * block_size + block_size - 1), :], axis=0)
        T_block[i, :] = np.max(T[int(i * block_size):int(i * block_size + block_size - 1), :], axis=0)
    return f1_overall_framewise(O_block, T_block)


def er_overall_1sec(O, T, block_size):

3 Source : grid.py
with GNU General Public License v3.0
from AntSimi

    def get_step_in_km(self, lat, wave_length):
        step_y_km = self.ystep * distance(0, 0, 0, 1) / 1000
        step_x_km = self.xstep * distance(0, lat, 1, lat) / 1000
        min_wave_length = max(step_x_km, step_y_km) * 2
        if wave_length < min_wave_length:
            logger.error(
                "wave_length too short for resolution, must be > %d km",
                ceil(min_wave_length),
            )
            raise Exception()
        return step_x_km, step_y_km

    def estimate_kernel_shape(self, lat, wave_length, order):

3 Source : vid2vid_model_G.py
with GNU General Public License v3.0
from anubhav0fnu

    def update_training_batch(self, ratio): # increase number of backpropagated frames and number of frames in each GPU
        nfb = self.n_frames_bp
        nfl = self.n_frames_load
        if nfb < nfl:
            nfb = min(self.opt.max_frames_backpropagate, 2**ratio)
            self.n_frames_bp = nfl // int(np.ceil(float(nfl) / nfb))
            print('-------- Updating number of backpropagated frames to %d ----------' % self.n_frames_bp)

        if self.n_frames_per_gpu < self.opt.max_frames_per_gpu:
            self.n_frames_per_gpu = min(self.n_frames_per_gpu*2, self.opt.max_frames_per_gpu)
            self.n_frames_load = self.n_gpus * self.n_frames_per_gpu
            print('-------- Updating number of frames per gpu to %d ----------' % self.n_frames_per_gpu)

3 Source : parameterization.py
with Apache License 2.0
from aplbrain

    def __init__(self, parameterization_dict, job, batch_size, **kwargs):
        super().__init__(parameterization_dict, job)
        self.batch_size = batch_size
        self.param_grid = parameterize(parameterization_dict)
        self.num_of_batches = int(np.ceil(len(self.param_grid) / self.batch_size))
        self.batch_index = 0
        self.update(None)

    def update(self, results):

3 Source : utils.py
with MIT License
from arcchang1236

def get_its(n_batch_train, n_batch_test, n_train, n_test):
    train_its = int(np.ceil(n_train / n_batch_train))
    test_its = int(np.ceil(n_test / n_batch_test))
    train_epoch = train_its * n_batch_train
    logging.info("Train epoch size: {}".format(train_epoch))
    return train_its, test_its


def int_shape(x):

3 Source : utils.py
with BSD 3-Clause "New" or "Revised" License
from aristoteleo

def Jacobian_rkhs_gaussian_parallel(x, vf_dict, cores=None):
    n = len(x)
    if cores is None:
        cores = mp.cpu_count()
    n_j_per_core = int(np.ceil(n / cores))
    xx = []
    for i in range(0, n, n_j_per_core):
        xx.append(x[i : i + n_j_per_core])
    # with mp.Pool(cores) as p:
    #    ret = p.starmap(Jacobian_rkhs_gaussian, zip(xx, itertools.repeat(vf_dict)))
    with ThreadPool(cores) as p:
        ret = p.starmap(Jacobian_rkhs_gaussian, zip(xx, itertools.repeat(vf_dict)))
    ret = [np.transpose(r, axes=(2, 0, 1)) for r in ret]
    ret = np.transpose(np.vstack(ret), axes=(1, 2, 0))
    return ret


def Jacobian_numerical(f: Callable, input_vector_convention: str = "row"):

3 Source : smpl.py
with MIT License
from Arthur151

    def forward(self, batch_size=128, **kwargs):
        person_num = len(kwargs['poses'])
        if person_num>batch_size and batch_size>0:
            result_dict_list = []
            for inds in range(int(np.ceil(person_num/float(batch_size)))):
                batch_data = {k:v[inds*batch_size:(inds+1)*batch_size] if isinstance(v,torch.Tensor) else v for k,v in kwargs.items()}
                result_dict_list.append(self.single_forward(**batch_data))
            result_dict = {}
            for k in result_dict_list[0].keys():
                result_dict[k] = torch.cat([rdict[k] for rdict in result_dict_list], 0).contiguous()
            return result_dict
        return self.single_forward(**kwargs)

    def single_forward(self, betas=None, poses=None,

3 Source : transforms.py
with Apache License 2.0
from Ascend

    def __init__(self, config, device_num):
        self.config = config
        self.seed = 0
        self.size_list = []
        self.resize_rate = config.resize_rate
        self.dataset_size = config.dataset_size
        self.size_dict = {}
        self.seed_num = int(1e6)
        self.seed_list = self.generate_seed_list(seed_num=self.seed_num)
        self.resize_count_num = int(np.ceil(self.dataset_size / self.resize_rate))
        self.device_num = device_num
        self.anchor_scales = config.anchor_scales
        self.num_classes = config.num_classes
        self.max_box = config.max_box
        self.label_smooth = config.label_smooth
        self.label_smooth_factor = config.label_smooth_factor

    def generate_seed_list(self, init_seed=1234, seed_num=int(1e6), seed_range=(1, 1000)):

3 Source : sabl_head.py
with Apache License 2.0
from Ascend

    def side_aware_split(self, feat):
        """Split side-aware features aligned with orders of bucketing
        targets."""
        l_end = int(np.ceil(self.up_reg_feat_size / 2))
        r_start = int(np.floor(self.up_reg_feat_size / 2))
        feat_fl = feat[:, :l_end]
        feat_fr = feat[:, r_start:].flip(dims=(1, ))
        feat_fl = feat_fl.contiguous()
        feat_fr = feat_fr.contiguous()
        feat = torch.cat([feat_fl, feat_fr], dim=-1)
        return feat

    def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
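Worked numbers for the ceil/floor split above (up_reg_feat_size = 7 is a made-up value; the middle column lands in both halves):

import numpy as np

up_reg_feat_size = 7
l_end = int(np.ceil(up_reg_feat_size / 2))     # 4: left half is columns [0, 4)
r_start = int(np.floor(up_reg_feat_size / 2))  # 3: right half is columns [3, 7), flipped
# the concatenated result has 4 + 4 = 8 columns; column 3 is shared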

3 Source : net_symbols.py
with MIT License
from ascust

def upsample(data, name, scale, num_filter, workspace=512):
    p = int(np.ceil((scale-1)/2.0))
    weight = mx.sym.Variable('upsampling_{}'.format(name), init=mx.init.Bilinear(), lr_mult=0)
    return mx.symbol.Deconvolution(data=data, kernel=(scale*2-scale%2,scale*2-scale%2), stride=(scale, scale), num_filter=num_filter, pad=(p, p), 
        workspace=workspace, num_group=num_filter, no_bias=True, weight=weight)


def crop(data, ref):

3 Source : pacman_util.py
with MIT License
from ASzot

def pix_to_target(next_states):
    target = []
    assert next_states.shape[-1] == 3

    for pixel in next_states.reshape(-1, 3):
        target.append(pixel_to_categorical[tuple([np.ceil(pixel[0]), np.ceil(pixel[1]), np.ceil(pixel[2])])])
    return target

def target_to_pix(imagined_states):

3 Source : bin_thresholds.py
with Apache License 2.0
from automl

def get_lce_bins(train_info, key="TRAIN_LOSS_lc", max_bins=9):

    train_size = len(train_info)
    losses = sorted([i[key][-1] for i in train_info])
    n = min(max_bins, max(1, train_size // 5))
    if n == 1:
        return [losses[train_size // 2]]
    bin_size = int(np.ceil(train_size / n))
    indices = range(bin_size, train_size, bin_size)
    return [losses[i] for i in indices]


def get_bins(zero_cost, train_size, ss_type, dataset):
