numpy.amax

Here are examples of the Python API numpy.amax taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

106 Examples
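
Before the project examples, here is a minimal, self-contained sketch of the call itself (the array values are illustrative):

import numpy as np

a = np.array([[1, 5, 2],
              [8, 3, 4]])

print(np.amax(a))                         # 8: maximum over the flattened array
print(np.amax(a, axis=0))                 # [8 5 4]: column-wise maxima
print(np.amax(a, axis=1, keepdims=True))  # [[5] [8]]: row maxima, shape (2, 1)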

Example 1

Project: minpy Source File: mlp_numpy_cpu.py
def softmax_loss_gradient(activation, one_hot):
    if False:
        n = activation.shape[0]
        m = np.amax(activation, axis=1, keepdims=True)
        probs = activation - m
        exp = np.exp(probs)
        loss = -np.sum(probs * one_hot - np.log(
            np.sum(exp, axis=1, keepdims=True))) / n
        g = -1 / n * (np.ones_like(activation) * one_hot - np.broadcast_to(
            1 / np.sum(exp, axis=1, keepdims=True), activation.shape) * exp)
        g = g * (1 - (np.broadcast_to(m, activation.shape) == activation))
        return g
    else:
        probs = activation - np.amax(activation, axis=1, keepdims=True)
        e = np.exp(probs)
        p = e / np.sum(e, axis=1, keepdims=True)
        q = p - one_hot
        return q
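
Subtracting the row-wise np.amax before exponentiating is the standard max-shift for numerical stability; a quick sketch (values illustrative) confirming the shift does not change the resulting probabilities:

import numpy as np

activation = np.array([[2.0, 1.0, 0.1]])
shifted = activation - np.amax(activation, axis=1, keepdims=True)

p_raw = np.exp(activation) / np.sum(np.exp(activation), axis=1, keepdims=True)
p_shifted = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
assert np.allclose(p_raw, p_shifted)  # the shift cancels in the ratio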

Example 2

Project: msmbuilder Source File: test_contactfeaturizers.py
def test_logistics():
    trajectories = MinimalFsPeptide().get_cached().trajectories
    logisticcontactfeaturizer = LogisticContactFeaturizer()
    logistics = logisticcontactfeaturizer.transform(trajectories)

    assert logistics[0].shape[1] == 171
    assert np.amax(logistics[0]) < 1.0
    assert np.amin(logistics[0]) > 0.0

Example 3

Project: scipy Source File: test_kdtree.py
    def test_m_nearest(self):
        x = self.x
        m = self.m
        dd, ii = self.kdtree.query(x, m)
        d = np.amax(dd)
        i = ii[np.argmax(dd)]
        assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
        eps = 1e-8
        assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m)

Example 4

Project: camr Source File: parser.py
    def get_best_act(self,scores,actions):
        best_label_index = None
        best_act_ind = np.argmax([np.amax(s) for s in scores])
        best_act = actions[best_act_ind]
        if best_act['type'] in ACTION_WITH_EDGE or best_act['type'] in ACTION_WITH_TAG:
            best_label_index = scores[best_act_ind].argmax()
        return best_act_ind, best_label_index
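
The np.amax-inside-argmax pattern picks the action whose best label score is highest; a hypothetical sketch with toy score arrays:

import numpy as np

scores = [np.array([0.1, 0.7]), np.array([0.9, 0.2, 0.4]), np.array([0.3])]
best_act_ind = np.argmax([np.amax(s) for s in scores])
print(best_act_ind)                   # 1: the second action holds the top score
print(scores[best_act_ind].argmax())  # 0: the best label within that action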

Example 5

Project: thunder Source File: images.py
    def max_projection(self, axis=2):
        """
        Compute maximum projections of images along a dimension.

        Parameters
        ----------
        axis : int, optional, default = 2
            Which axis to compute projection along.
        """
        if axis >= size(self.value_shape):
            raise Exception('Axis for projection (%s) exceeds '
                            'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))

        new_value_shape = list(self.value_shape)
        del new_value_shape[axis]
        return self.map(lambda x: amax(x, axis), value_shape=new_value_shape)
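
For intuition, this is the plain-NumPy reduction the map above applies to each image volume (the toy array is an assumption):

import numpy as np

volume = np.arange(24).reshape(2, 3, 4)  # a toy 3-D image stack
projection = np.amax(volume, axis=2)     # collapse the last axis, keep the brightest value
print(projection.shape)                  # (2, 3)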

Example 6

Project: qutip Source File: test_fileio.py
    def testRWRealExp(self):
        "Read and write real valued exp formatted data"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N))

        file_data_store("test.dat", data, "real", "exp")
        data2 = file_data_read("test.dat", ",")
        # make sure the deviation is small:
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 7

Project: ssp Source File: plot.py
    def __init__(self, fig, data, pcm):
        FramePlot.__init__(self, fig, data)
        e = ssp.Energy(data)
        self.max = 10*np.log10(np.amax(e))
        self.min = 10*np.log10(np.amin(e))
        self.axT.plot(10*np.log10(e)-self.max)
        self.axT.set_xlim(0, self.data.shape[0]-1)
        self.axT.set_ylim(np.max([self.min-self.max, -90]), 0)
        self.axT.set_ylabel('Level (dB)')
        self.axT.grid(True)
        self._plotF(0)

Example 8

Project: landlab Source File: generate_overland_flow_Bates.py
Function: calc_time_step
    def calc_time_step(self):

        # Adaptive time stepper from Bates et al., 2010 and de Almeida et al.,
        # 2012
        self.dt = self.alpha * self._grid.dx / np.sqrt(self.g * np.amax(
            self._grid.at_node['surface_water__depth']))

        return self.dt
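
Plugging illustrative numbers into the same stability condition (alpha, dx, g, and the depth field here are assumptions, not landlab defaults):

import numpy as np

alpha, dx, g = 0.7, 5.0, 9.81                 # illustrative values
depth = np.array([0.02, 0.35, 1.2, 0.8])      # surface_water__depth at nodes
dt = alpha * dx / np.sqrt(g * np.amax(depth))
print(dt)  # ~1.02: the deepest node limits the time step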

Example 9

Project: iaf Source File: data.py
def create_semisupervised(x, key_y, n_labeled, shuffle=True):
    if shuffle:
        G.ndict.shuffle(x)
    n_classes = np.amax(x[key_y])+1
    if n_labeled % n_classes != 0:
        raise ValueError("Cannot create stratified semi-supervised set since n_labeled (desired number of labeled samples) is not divisible by n_classes (number of classes)")
    n_labels_per_class = n_labeled // n_classes
    x_l = {j: [0]*n_classes for j in x}  # labeled
    x_u = {j: [0]*n_classes for j in x}  # unlabeled
    for i in range(n_classes):
        idx = x[key_y] == i
        for j in x:
            x_l[j][i] = x[j][idx][:n_labels_per_class]
            x_u[j][i] = x[j][idx][n_labels_per_class:]
    x_l = {i: np.concatenate(x_l[i]) for i in x}
    x_u = {i: np.concatenate(x_u[i]) for i in x}
    if shuffle:
        G.ndict.shuffle(x_l)
        G.ndict.shuffle(x_u)
    return x_l, x_u

Example 10

Project: spinalcordtoolbox Source File: sct_extract_spinal_levels.py
def find_mid_point_vertebral_level(data):

    vertebral_levels = np.zeros(int(np.amax(data)))
    for i in range((int(np.amin(data))+1),(int(np.amax(data))+1)):
    
        # find the coordinates of voxels in each level
        x,y,z = np.where(data==i)
        z = np.sort(z)
        vertebral_levels[i-1] = np.amin(z) + round((np.amax(z)-np.amin(z))/2)
    return vertebral_levels

Example 11

Project: async-deep-rl Source File: emulator.py
    def process_frame_pool(self):
        """ Preprocess frame pool """
        
        img = None
        if BLEND_METHOD == "max_pool":
            img = np.amax(self.frame_pool, axis=0)
        
        #img resize(img[:210, :], (84, 84))
        img = cv2.resize(img[:210, :], (84, 84), 
            interpolation=cv2.INTER_LINEAR)
        
        img = img.astype(np.float32)
        img *= (1.0/255.0)
        
        return img

Example 12

Project: Neural-Style-Transfer Source File: color_transfer.py
Function: load_mask
def load_mask(mask_path, shape):
    mask = imread(mask_path, mode="L") # Grayscale mask load
    width, height, _ = shape
    mask = imresize(mask, (width, height), interp='bicubic').astype('float32')

    # Perform binarization of mask
    mask[mask <= 127] = 0
    mask[mask > 127] = 255

    mask_max = np.amax(mask)
    mask /= mask_max

    return mask

Example 13

Project: tfdeploy Source File: tfdeploy.py
Function: max
@Operation.factory(attrs=("keep_dims",))
def Max(a, reduction_indices, keep_dims):
    """
    Max reduction op.
    """
    return np.amax(a, axis=tuple(reduction_indices), keepdims=keep_dims),
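
A quick demonstration of what the tuple axis and keepdims arguments do to the output shape (array contents illustrative):

import numpy as np

a = np.arange(24).reshape(2, 3, 4)
out = np.amax(a, axis=(0, 2), keepdims=True)
print(out.shape)    # (1, 3, 1): reduced axes are kept as size-1 dimensions
print(out.ravel())  # [15 19 23]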

Example 14

Project: bruges Source File: energy_test.py
Function: test_amplitude
    def test_amplitude(self):
        """
        Tests the basic algorithm returns the right amplitude
        location.
        """
        amplitude = energy(self.data, self.n_samples)
        max_amp = amax(amplitude)

        ms_sin = 0.5  # The MS energy of a sin wave
        self.assertAlmostEqual(ms_sin, max_amp, places=3)

        # Check that it is in the right location
        self.assertAlmostEqual(max_amp, amplitude[501], places=3)

Example 15

Project: qutip Source File: pulsegen.py
    def get_guess_pulse_scale(self):
        scale = 0.0
        if self.guess_pulse is not None:
            scale = max(np.amax(self.guess_pulse) - np.amin(self.guess_pulse),
                        np.amax(self.guess_pulse))
        return scale

Example 16

Project: chaco Source File: zoom_overlay.py
    def _selection_update_handler(self, value):
        if value is not None and self.destination is not None:
            r = self.destination.index_mapper.range
            start, end = amin(value), amax(value)
            r.low = start
            r.high = end

        self.source.request_redraw()
        self.destination.request_redraw()
        return

Example 17

Project: qutip Source File: test_fileio.py
    def testRWComplexDefault(self):
        "Read and write complex valued default formatted data"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N)) + 1j * (1 - 2 * scipy.rand(N, N))

        file_data_store("test.dat", data)
        data2 = file_data_read("test.dat")
        # make sure the deviation is small:
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 18

Project: landlab Source File: generate_overland_flow_deAlmeida.py
Function: calc_time_step
    def calc_time_step(self):
        """Calculate time step.

        Adaptive time stepper from Bates et al., 2010 and de Almeida
        et al., 2012
        """
        self.dt = (self.alpha * self._grid.dx / np.sqrt(self.g * np.amax(
            self._grid.at_node['surface_water__depth'])))

        return self.dt

Example 19

Project: qutip Source File: test_fileio.py
    def testRWComplexDecimal(self):
        "Read and write complex valued decimal formatted data"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N)) + 1j * (1 - 2 * scipy.rand(N, N))

        file_data_store("test.dat", data, "complex", "decimal")
        data2 = file_data_read("test.dat", ",")
        # make sure the deviation is small:
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 20

Project: nupic Source File: contour.py
Function: print_label
    def print_label(self, linecontour,labelwidth):
        "if contours are too short, don't plot a label"
        lcsize = len(linecontour)
        if lcsize > 10 * labelwidth:
            return 1

        xmax = np.amax(linecontour[:,0])
        xmin = np.amin(linecontour[:,0])
        ymax = np.amax(linecontour[:,1])
        ymin = np.amin(linecontour[:,1])

        lw = labelwidth
        if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
            return 1
        else:
            return 0

Example 21

Project: music_genre_classifier Source File: tempo.py
Function: peak_detect
def peak_detect(data):
    max_val = numpy.amax(abs(data))
    peak_ndx = numpy.where(data == max_val)
    if len(peak_ndx[0]) == 0:  # if nothing found then the max must be negative
        peak_ndx = numpy.where(data == -max_val)
    return peak_ndx
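
A short trace of the negative-peak fallback with a toy signal (the array is an assumption):

import numpy as np

data = np.array([0.1, -0.9, 0.4])
max_val = np.amax(abs(data))       # 0.9, taken from the magnitude
print(np.where(data == max_val))   # empty: the strongest sample is negative
print(np.where(data == -max_val))  # (array([1]),): found via the fallback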

Example 22

Project: bdol-ml Source File: MLP.py
Function: soft_max
def softmax(X):
  # Use the log-sum-exp trick for numerical stability
  m = np.atleast_2d(np.amax(X, axis=1)).T
  y_exp = np.exp(X-m)

  s = np.atleast_2d(np.sum(y_exp, axis=1)).T

  return y_exp/s
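
To see why the trick matters, compare against the naive computation on large logits (the values are illustrative):

import numpy as np

X = np.array([[1000.0, 1001.0, 1002.0]])
with np.errstate(over='ignore'):
    naive = np.exp(X)             # overflows to inf without the shift
print(np.isinf(naive).any())      # True

m = np.atleast_2d(np.amax(X, axis=1)).T
stable = np.exp(X - m) / np.sum(np.exp(X - m), axis=1, keepdims=True)
print(stable)                     # well-defined probabilities that sum to 1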

Example 23

Project: qutip Source File: test_eigenstates.py
def test_diagHamiltonian1():
    """
    Diagonalization of random two-level system
    """

    H = scipy.rand() * sigmax() + scipy.rand() * sigmay() +\
        scipy.rand() * sigmaz()

    evals, ekets = H.eigenstates()

    for n in range(len(evals)):
        # assert that max(H * ket - e * ket) is small
        assert_equal(amax(
            abs((H * ekets[n] - evals[n] * ekets[n]).full())) < 1e-10, True)

Example 24

Project: thunder Source File: images.py
    def max_min_projection(self, axis=2):
        """
        Compute maximum-minimum projection along a dimension.

        This computes the sum of the maximum and minimum values.

        Parameters
        ----------
        axis : int, optional, default = 2
            Which axis to compute projection along.
        """
        if axis >= size(self.value_shape):
            raise Exception('Axis for projection (%s) exceeds '
                            'image dimensions (%s-%s)' % (axis, 0, size(self.value_shape)-1))

        new_value_shape = list(self.value_shape)
        del new_value_shape[axis]
        return self.map(lambda x: amax(x, axis) + amin(x, axis), value_shape=new_value_shape)

Example 25

Project: qutip Source File: test_fileio.py
    def testRWComplexExp(self):
        "Read and write complex valued exp formatted data"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N)) + 1j * (1 - 2 * scipy.rand(N, N))

        file_data_store("test.dat", data, "complex", "exp")
        data2 = file_data_read("test.dat", ",")
        # make sure the deviation is small:
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 26

Project: deep_recommend_system Source File: xent_op_test.py
  def _npXent(self, features, labels, dim=-1):
    if dim == -1:
      dim = len(features.shape) - 1
    one_only_on_dim = list(features.shape)
    one_only_on_dim[dim] = 1
    e = np.exp(features - np.reshape(np.amax(features, axis=dim), one_only_on_dim))
    probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
    bp = (probs - labels)
    l = -np.sum(labels * np.log(probs + 1.0e-20), axis=dim)
    return l, bp

Example 27

Project: deep_recommend_system Source File: reduction_ops_test.py
Function: compare
  def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
    np_ans = x
    if reduction_axes is None:
      np_ans = np.amax(np_ans, keepdims=keep_dims)
    else:
      for ra in reduction_axes[::-1]:
        np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
    with self.test_session(use_gpu=use_gpu):
      if reduction_axes is not None:
        reduction_axes = np.array(reduction_axes).astype(np.int32)
      tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
      out = tf_ans.eval()
    self.assertAllClose(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)
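
The loop over reduction_axes[::-1] relies on the fact that, with keepdims=True, reducing one axis at a time matches a single tuple-axis reduction; a quick check of that equivalence:

import numpy as np

x = np.random.rand(2, 3, 4)
step = x
for ra in (0, 2)[::-1]:  # reduce axes in reverse order, as the test does
    step = np.amax(step, axis=ra, keepdims=True)
direct = np.amax(x, axis=(0, 2), keepdims=True)
assert np.allclose(step, direct)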

Example 28

Project: scipy-tutorial-2014 Source File: eyesize.py
Function: estimate
    def estimate(self):

### "segmented"
      color_region_growing = sitk.VectorConfidenceConnectedImageFilter()
      color_region_growing.SetNumberOfIterations(4)
      color_region_growing.SetMultiplier(5.3)
      color_region_growing.SetInitialNeighborhoodRadius(2)
      color_region_growing.SetReplaceValue(255)
      color_region_growing.AddSeed(self.seed_point)
      eyes_segmented = color_region_growing.Execute(self.input_image)

### "radius"
      distance_filter = sitk.SignedMaurerDistanceMapImageFilter()
      distance_filter.SetInsideIsPositive(True)
      distance_map = distance_filter.Execute(eyes_segmented)
      radius_estimate = np.amax(sitk.GetArrayFromImage(distance_map))

      return eyes_segmented,radius_estimate

Example 29

Project: PyKrige Source File: core.py
def calculate_variogram_model(lags, semivariance, variogram_model, variogram_function, weight):
    """Function that fits a variogram model when parameters are not specified."""

    if variogram_model == 'linear':
        x0 = [(np.amax(semivariance) - np.amin(semivariance))/(np.amax(lags) - np.amin(lags)),
              np.amin(semivariance)]
        bnds = ((0.0, 1000000000.0), (0.0, np.amax(semivariance)))
    elif variogram_model == 'power':
        x0 = [(np.amax(semivariance) - np.amin(semivariance))/(np.amax(lags) - np.amin(lags)),
              1.1, np.amin(semivariance)]
        bnds = ((0.0, 1000000000.0), (0.01, 1.99), (0.0, np.amax(semivariance)))
    else:
        x0 = [np.amax(semivariance), 0.5*np.amax(lags), np.amin(semivariance)]
        bnds = ((0.0, 10*np.amax(semivariance)), (0.0, np.amax(lags)), (0.0, np.amax(semivariance)))

    res = minimize(variogram_function_error, x0, args=(lags, semivariance, variogram_function, weight),
                   method='SLSQP', bounds=bnds)

    return res.x
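
For the linear model, the initial slope guess is simply the semivariance range over the lag range; with illustrative arrays:

import numpy as np

lags = np.array([1.0, 2.0, 3.0, 4.0])
semivariance = np.array([0.5, 0.9, 1.3, 1.6])
slope0 = (np.amax(semivariance) - np.amin(semivariance)) / (np.amax(lags) - np.amin(lags))
nugget0 = np.amin(semivariance)
print(slope0, nugget0)  # 0.3666... 0.5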

Example 30

Project: qutip Source File: test_fileio.py
    def testRWRealDecimal(self):
        "Read and write real valued decimal formatted data"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N))

        file_data_store("test.dat", data, "real", "decimal")
        data2 = file_data_read("test.dat", ",")
        # make sure the deviation is small:
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 31

Project: camr Source File: parser.py
    def get_best_act_constraint(self,scores,actions,argset):
        best_label_index = None
        best_act_ind = np.argmax([np.amax(s) for s in scores])
        if actions[best_act_ind]['type'] in ACTION_WITH_EDGE:
            best_label_index = scores[best_act_ind].argmax()
            # best label violates the constraint
            while best_label_index in argset:
                scores[best_act_ind][best_label_index] = -float('inf')
                best_act_ind = np.argmax([np.amax(s) for s in scores])
                if actions[best_act_ind]['type'] in ACTION_WITH_EDGE or actions[best_act_ind]['type'] in ACTION_WITH_TAG:
                    best_label_index = scores[best_act_ind].argmax()
                else:
                    best_label_index = None
        elif actions[best_act_ind]['type'] in ACTION_WITH_TAG:
            best_label_index = scores[best_act_ind].argmax()
        return best_act_ind, best_label_index

Example 32

Project: tfdeploy Source File: tfdeploy.py
@Operation.factory
def SegmentMax(a, ids):
    """
    Segmented max op.
    """
    func = lambda idxs: np.amax(a[idxs], axis=0)
    return seg_map(func, a, ids),
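
seg_map is a tfdeploy-internal helper; a plain-NumPy sketch of the same segmented reduction, assuming sorted, contiguous segment ids as TensorFlow requires:

import numpy as np

a = np.array([1.0, 5.0, 2.0, 8.0, 3.0])
ids = np.array([0, 0, 1, 1, 2])
seg_max = np.array([np.amax(a[ids == i]) for i in np.unique(ids)])
print(seg_max)  # [5. 8. 3.]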

Example 33

Project: minpy Source File: mlp_numpy_cpu.py
Function: soft_max
def softmax(activation, one_hot):
    n = activation.shape[0]
    probs = activation - np.amax(activation, axis=1, keepdims=True)
    loss = -np.sum(probs * one_hot - np.log(
        np.sum(np.exp(probs), axis=1, keepdims=True))) / n
    return loss

Example 34

Project: IkaLog Source File: special_weapon.py
    def _is_my_special_weapon(self, context, img_special_bgr):
        img = img_special_bgr[:, :150]

        img_s = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 2]
        img_s[matcher.MM_WHITE()(img) > 127] = 127

        img_s_hist = cv2.calcHist(img_s[:, :], [0], None, [5], [0, 256])
        img_s_hist_black = float(np.amax(img_s_hist[0:1]))
        img_s_hist_non_black = float(np.amax(img_s_hist[3:4]))
        return img_s_hist_black < img_s_hist_non_black

Example 35

Project: chaco Source File: formatters.py
Function: compute_offset
    def _compute_offset(self, ticks):
        first, last = ticks[0], ticks[-1]
        data_range = ticks[-1] - ticks[0]
        range_oom = int(ceil(log10(data_range)))
        pow_of_ten = 10 ** range_oom
        if all(asarray(ticks) < 0):
            return ceil(amax(ticks) / pow_of_ten) * pow_of_ten
        else:
            return floor(amin(ticks) / pow_of_ten) * pow_of_ten

Example 36

Project: sima Source File: ROI.py
    @property
    def im_shape(self):
        if self._im_shape is not None:
            return self._im_shape
        if self._mask is not None:
            z = len(self._mask)
            y = np.amax([x.shape[0] for x in self._mask])
            x = np.amax([x.shape[1] for x in self._mask])
            return (z, y, x)
        return None

Example 37

Project: bayespy Source File: categorical.py
    def random(self, *phi, plates=None):
        """
        Draw a random sample from the distribution.
        """
        logp = phi[0]
        logp -= np.amax(logp, axis=-1, keepdims=True)
        p = np.exp(logp)
        return random.categorical(p, size=plates)

Example 38

Project: qutip Source File: test_fileio.py
    def testRWRealDefault(self):
        "Read and write real valued default formatted data"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N))

        file_data_store("test.dat", data)
        data2 = file_data_read("test.dat")
        # make sure the deviation is small:
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 39

Project: deep_recommend_system Source File: softmax_op_test.py
  def _npSoftmax(self, features, dim=-1, log=False):
    if dim == -1:
      dim = len(features.shape) - 1
    one_only_on_dim = list(features.shape)
    one_only_on_dim[dim] = 1
    e = np.exp(features - np.reshape(np.amax(features, axis=dim), one_only_on_dim))
    softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
    if log:
      return np.log(softmax)
    else:
      return softmax

Example 40

Project: homer Source File: staffboundary.py
def boundary_cost(page, staff):
    if staff == 0:
        y0 = 0
    else:
        staff_y_above = page.staves()[staff-1, :, 1]
        y0 = max(0, np.amax(staff_y_above) + page.staff_dist*2)
    if staff == len(page.staves()):
        y1 = page.orig_size[0]
    else:
        staff_y_below = page.staves()[staff, :, 1]
        y1 = min(page.orig_size[0], np.amin(staff_y_below) - page.staff_dist*2)

    if y0 >= y1:
        # Use staff medians instead of extrema, should have at least
        # staff_dist*4 amount of space
        if staff > 0:
            y0 = max(0, np.median(staff_y_above).astype(int)
                            + page.staff_dist*2)
        if staff < len(page.staves()):
            y1 = min(page.orig_size[0],
                     np.median(staff_y_below).astype(int) - page.staff_dist*2)
    # Try to find a horizontal line that doesn't touch any dark pixels
    proj = page.img[y0:y1].get().sum(axis=1)
    slices, num_slices = util.label_1d(proj == 0)
    if slices.any():
        slice_size = np.bincount(slices)
        slice_num = np.argmax(slice_size[1:]) + 1
        slice_pixels, = np.where(slices == slice_num)
        slice_y = y0 + int(np.mean(slice_pixels))
        return np.array([[0, slice_y], [page.orig_size[1], slice_y]])
    y0 /= DT_SCALE
    y1 /= DT_SCALE
    xstep = ystep = page.staff_thick
    x0 = 0
    x1 = 2048
    edge_costs = boundary_cost_kernel(page.distance_transform,
                                      int(y0), ystep, int(y1),
                                      int(x0), xstep, int(x1)).get()
    if staff == 0:
        start_y = edge_costs.shape[1] - 1
    elif staff == len(page.staves()):
        start_y = 0
    else:
        start_y = edge_costs.shape[1] // 2
    path = shortest_path(edge_costs, start_y)
    path[:, 0] = DT_SCALE * (x0 + xstep * path[:, 0])
    path[:, 1] = DT_SCALE * (y0 + ystep * path[:, 1])
    if path[-1, 0] < page.orig_size[1]:
        path = np.concatenate((path, [[page.orig_size[1],
                    DT_SCALE * (y0 + ystep * start_y)]]))
    return path

Example 41

Project: pgmpy Source File: mplp.py
    def _is_converged(self, dual_threshold=None, integrality_gap_threshold=None):
        """
        This method checks the integrality gap to ensure either:
            * we have found a near-exact solution, or
            * we are stuck at a local minimum.

        Parameters
        ----------
        dual_threshold: double
                        This sets the minimum width between the dual objective decrements. If the decrement is less
                        than the threshold, we assume we are stuck at a local minimum.

        integrality_gap_threshold: double
                                   This sets the threshold for the integrality gap below which we say that the solution
                                   is satisfactory.

        References
        ----------
        code presented by Sontag in 2012 here: http://cs.nyu.edu/~dsontag/code/README_v2.html
        """
        # Find the new objective after the message updates
        new_dual_lp = sum([np.amax(self.objective[obj].values) for obj in self.objective])

        # Update the dual_gap as the difference between the dual objective of the previous and the current iteration.
        self.dual_gap = abs(self.dual_lp - new_dual_lp)

        # Update the integrality_gap as the difference between our best result vs the dual objective of the lp.
        self.integrality_gap = abs(self.dual_lp - self.best_int_objective)

        # As the decrement of the dual_lp gets very low, we assume that we might be stuck at a local minimum.
        if dual_threshold and self.dual_gap < dual_threshold:
            return True
        # Check the threshold for the integrality gap
        elif integrality_gap_threshold and self.integrality_gap < integrality_gap_threshold:
            return True
        else:
            self.dual_lp = new_dual_lp
            return False

Example 42

Project: mondrianforest Source File: mondrianforest_utils.py
def compute_metrics_classification(y_test, pred_prob, do_not_compute_log_prob=False):
    acc, log_prob = 0.0, 0.0
    for n, y in enumerate(y_test):
        tmp = pred_prob[n, :]
        #pred = np.argmax(tmp)
        pred = random.choice(np.argwhere(tmp == np.amax(tmp)).flatten())    # randomly break ties
        acc += (pred == y)
        if not do_not_compute_log_prob:
            log_tmp_pred = math.log(tmp[y]) 
            try:
                assert not np.isinf(abs(log_tmp_pred))
            except AssertionError:
                print('abs(log_tmp_pred) = inf in compute_metrics_classification; tmp =')
                print(tmp)
                raise
            log_prob += log_tmp_pred
    acc /= (n + 1)
    if not do_not_compute_log_prob:
        log_prob /= (n + 1)
    else:
        log_prob = -np.inf
    metrics = {'acc': acc, 'log_prob': log_prob}
    return metrics
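
The tie-breaking idiom on its own, with a toy probability vector:

import numpy as np
import random

tmp = np.array([0.2, 0.4, 0.4])
ties = np.argwhere(tmp == np.amax(tmp)).flatten()
print(ties)                 # [1 2]: two classes share the top probability
pred = random.choice(ties)  # np.argmax alone would always pick index 1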

Example 43

Project: scipy Source File: polyint.py
def krogh_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for polynomial interpolation.

    See `KroghInterpolator` for more details.

    Parameters
    ----------
    xi : array_like
        Known x-coordinates.
    yi : array_like
        Known y-coordinates, of shape ``(xi.size, R)``.  Interpreted as
        vectors of length R, or scalars if R=1.
    x : array_like
        Point or points at which to evaluate the derivatives.
    der : int or list, optional
        How many derivatives to extract; None for all potentially
        nonzero derivatives (that is a number equal to the number
        of points), or a list of derivatives to extract. This number
        includes the function value as 0th derivative.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    Returns
    -------
    d : ndarray
        If the interpolator's values are R-dimensional then the
        returned array will be the number of derivatives by N by R.
        If `x` is a scalar, the middle dimension will be dropped; if
        the `yi` are scalars then the last dimension will be dropped.

    See Also
    --------
    KroghInterpolator

    Notes
    -----
    Construction of the interpolating polynomial is a relatively expensive
    process. If you want to evaluate it repeatedly consider using the class
    KroghInterpolator (which is what this function uses).

    """
    P = KroghInterpolator(xi, yi, axis=axis)
    if der == 0:
        return P(x)
    elif _isscalar(der):
        return P.derivative(x,der=der)
    else:
        return P.derivatives(x,der=np.amax(der)+1)[der]
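
A hedged usage sketch of the der-as-list path (the sample data is an assumption): passing der=[0, 1] makes the function compute np.amax(der)+1 = 2 derivative orders and then select both:

import numpy as np
from scipy.interpolate import krogh_interpolate

xi = np.array([0.0, 1.0, 2.0])
yi = xi**2                                  # sample y = x^2
out = krogh_interpolate(xi, yi, x=1.5, der=[0, 1])
print(out)  # [2.25 3.  ]: f(1.5) and f'(1.5) for the fitted quadratic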

Example 44

Project: scipy Source File: matfuncs.py
def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])

    """
    A = _asarray_square(A)

    def rounded_sign(x):
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:

    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=0)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0,S0)+S0)
        errest = norm(dot(Pp,Pp)-Pp,1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest

Example 45

Project: qutip Source File: test_eigenstates.py
def test_diagHamiltonian2():
    """
    Diagonalization of composite systems
    """

    H1 = scipy.rand() * sigmax() + scipy.rand() * sigmay() +\
        scipy.rand() * sigmaz()
    H2 = scipy.rand() * sigmax() + scipy.rand() * sigmay() +\
        scipy.rand() * sigmaz()

    H = tensor(H1, H2)

    evals, ekets = H.eigenstates()

    for n in range(len(evals)):
        # assert that max(H * ket - e * ket) is small
        assert_equal(amax(
            abs((H * ekets[n] - evals[n] * ekets[n]).full())) < 1e-10, True)

    N1 = 10
    N2 = 2

    a1 = tensor(destroy(N1), qeye(N2))
    a2 = tensor(qeye(N1), destroy(N2))
    H = scipy.rand() * a1.dag() * a1 + scipy.rand() * a2.dag() * a2 + \
        scipy.rand() * (a1 + a1.dag()) * (a2 + a2.dag())
    evals, ekets = H.eigenstates()

    for n in range(len(evals)):
        # assert that max(H * ket - e * ket) is small
        assert_equal(amax(
            abs((H * ekets[n] - evals[n] * ekets[n]).full())) < 1e-10, True)

Example 46

Project: qutip Source File: test_fileio.py
    def testRWSeparatorDetection(self):
        "Read and write with automatic separator detection"

        # create some random data
        N = 10
        data = (1 - 2 * scipy.rand(N, N)) + 1j * (1 - 2 * scipy.rand(N, N))

        # comma separated values
        file_data_store("test.dat", data, "complex", "exp", ",")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data - data2))) < 1e-8)

        # semicolon separated values
        file_data_store("test.dat", data, "complex", "exp", ";")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data - data2))) < 1e-8)

        # tab separated values
        file_data_store("test.dat", data, "complex", "exp", "\t")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data - data2))) < 1e-8)

        # space separated values
        file_data_store("test.dat", data, "complex", "exp", " ")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data - data2))) < 1e-8)

        # mixed-whitespace separated values
        file_data_store("test.dat", data, "complex", "exp", " \t ")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data - data2))) < 1e-8)
        os.remove("test.dat")

Example 47

Project: pgmpy Source File: mplp.py
Function: init
    def __init__(self, model):
        if not isinstance(model, MarkovModel):
            raise TypeError('Only MarkovModel is supported')

        super(Mplp, self).__init__(model)
        self.model = model

        # S = \{c \cap c^{'} : c, c^{'} \in C, c \cap c^{'} \neq \emptyset\}
        self.intersection_set_variables = set()
        # We generate the intersections of all pairs of edges to form S
        for edge_pair in it.combinations(model.edges(), 2):
            self.intersection_set_variables.add(frozenset(edge_pair[0]) & frozenset(edge_pair[1]))

        # The corresponding optimization problem = \min_{\delta}{dual_lp(\delta)} where:
        # dual_lp(\delta) = \sum_{i \in V}{max_{x_i}(Objective[nodes])} + \sum_{f \in F}{max_{x_f}(Objective[factors])}
        # Objective[nodes] = \theta_i(x_i) + \sum_{f \mid i \in f}{\delta_{fi}(x_i)}
        # Objective[factors] = \theta_f(x_f) - \sum_{i \in f}{\delta_{fi}(x_i)}
        # In a way Objective stores the corresponding optimization problem for all the nodes and the factors.

        # Form Objective and cluster_set in the form of a dictionary.
        self.objective = {}
        self.cluster_set = {}
        for factor in model.get_factors():
            scope = frozenset(factor.scope())
            self.objective[scope] = factor
            # For every factor consisting of more than a single node, we initialize a cluster.
            if len(scope) > 1:
                self.cluster_set[scope] = self.Cluster(self.intersection_set_variables, factor)

        # dual_lp(\delta) is the dual linear program
        self.dual_lp = sum([np.amax(self.objective[obj].values) for obj in self.objective])

        # Best integral value of the primal objective is stored here
        self.best_int_objective = 0

        # Assignment of the nodes that results in the "maximum" integral value of the primal objective
        self.best_assignment = {}
        # Results of the "maximum" integral value of the primal objective.
        self.best_decoded_result = {}
        # This sets the minimum width between the dual objective decrements. Default value = 0.0002. This can be
        # changed in the map_query() method.
        self.dual_threshold = 0.0002
        # This sets the threshold for the integrality gap below which we say that the solution is satisfactory.
        # Default value = 0.0002. This can be changed in the map_query() method.
        self.integrality_gap_threshold = 0.0002

Example 48

Project: pgmpy Source File: mplp.py
    def _get_triplet_scores(self, triangles_list):
        """
        Returns the score of each of the triplets found in the current model

        Parameters
        ----------
        triangles_list: list
                        The list of variables forming the triangles to be updated. It is of the form of
                        [['var_5', 'var_8', 'var_7'], ['var_4', 'var_5', 'var_7']]

        Return: {frozenset({'var_8', 'var_5', 'var_7'}): 5.024, frozenset({'var_5', 'var_4', 'var_7'}): 10.23}
        """
        triplet_scores = {}
        for triplet in triangles_list:

            # Find the intersection sets of the current triplet
            triplet_intersections = [intersect for intersect in it.combinations(triplet, 2)]

            # Independent maximization
            ind_max = sum([np.amax(self.objective[frozenset(intersect)].values) for intersect in triplet_intersections])

            # Joint maximization
            joint_max = self.objective[frozenset(triplet_intersections[0])]
            for intersect in triplet_intersections[1:]:
                joint_max += self.objective[frozenset(intersect)]
            joint_max = np.amax(joint_max.values)
            # score = Independent maximization solution - Joint maximization solution
            score = ind_max - joint_max
            triplet_scores[frozenset(triplet)] = score

        return triplet_scores

Example 49

Project: category2vec Source File: cat2vec.py
    def sanity_check(self):
        veclens = empty(self.cat_len, dtype=REAL)
        for i in range(self.cat_len):
            veclens[i] = np_norm(self.cats[i])
        max_len = amax(veclens)
        logger.info("max vector length: %f" % max_len)
        if max_len > self.sane_vec_len:
            return False, "insane max vector length > %f" % (self.sane_vec_len)
        if self.sg:
            return True, None
        rand_indices = random.randint(len(self.w2v.vocab),size=10)
        sim_top10_avg = 0
        for idx in rand_indices:
            w = self.w2v.index2word[idx]
            sim_words = self.w2v.most_similar(positive=[w],topn=10)
            sim_top10_avg += sim_words[9][1]
        sim_top10_avg /= len(rand_indices)
        logger.info("average similarity: %f"% sim_top10_avg)
        if sim_top10_avg > self.sane_max_sim10:
            return False, "insane average similarity > %f" % (self.sane_max_sim10)
        return True, None

Example 50

Project: SaltwashAR Source File: neuraldata.py
def _get_input():
    
    # load audio data (wav format)
    rate, data = wavfile.read(WAV_FILE)
    data = data / (2.**15)

    # get start and end position of word (word will be either Yes or No)
    threshold_level = np.amax(data) / THRESHOLD_DIVISION
    print("Threshold level: {}".format(threshold_level))

    start_pos = 0
    end_pos = 0
    
    prev_val = 0
    for idx, val in enumerate(data):
            
        if start_pos == 0 and val >= threshold_level:
            start_pos = idx
            
        if prev_val >= threshold_level and val < threshold_level:
            end_pos = idx

        prev_val = val

    print "Start position: {}".format(start_pos)
    print "End position: {}".format(end_pos)

    # get top peak in each slice
    slice_size = (end_pos - start_pos) // NUMBER_OF_SLICES
    print("Slice size: {}".format(slice_size))

    top_peaks = []

    for i in range(NUMBER_OF_SLICES):
        end_pos = start_pos + slice_size

        slice = data[start_pos:end_pos]
        top_peaks.append(np.amax(slice))

        start_pos = end_pos

    print "Top peaks: {}".format(top_peaks)

    # obtain input data as percentages e.g. [47, 100, 74, 70, 32, 16, 35, 41]
    # top peak in each slice will be a percentage of overall top peak
    input = []
    overall_top_peak = np.amax(top_peaks)

    for val in top_peaks:
        input_item = int((val / overall_top_peak) * 100)
        input.append(input_item)
     
    print "Input: {}".format(input)

    return input