numpy.sign

Here are examples of the Python API numpy.sign, taken from open source projects.

158 Examples

Example 51

Project: msmbuilder-legacy Source File: utils.py
def construct_right_eigenvectors(left_eigenvectors, populations, num_macrostates):
    """Calculate normalized right eigenvectors from left eigenvectors and populations."""
    right_eigenvectors = left_eigenvectors.copy()
    for i in range(num_macrostates):
        right_eigenvectors[:, i] /= populations
        right_eigenvectors[:, i] *= np.sign(right_eigenvectors[0, i])
        right_eigenvectors[:, i] /= np.sqrt(dot(right_eigenvectors[:, i] * populations, right_eigenvectors[:, i]))

    return right_eigenvectors
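
A minimal sketch, not from msmbuilder, of the convention the np.sign line enforces: eigenvectors are only determined up to sign, so multiplying each column by the sign of its first entry makes the output deterministic.

import numpy as np

v = np.array([-0.6, 0.8])        # an eigenvector with arbitrary sign
v_fixed = v * np.sign(v[0])      # flip so the first entry is non-negative
# v_fixed == [0.6, -0.8]; starting from -v gives the same result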

Example 52

Project: phy Source File: panzoom.py
Function: on_mouse_wheel
    def on_mouse_wheel(self, event):
        """Zoom with the mouse wheel."""
        # NOTE: not called on OS X because of touchpad
        if event.modifiers:
            return
        dx = np.sign(event.delta[1]) * self._wheel_coeff
        # Zoom toward the mouse pointer.
        x0, y0 = self._normalize(event.pos)
        self.zoom_delta((dx, dx), (x0, y0))

Example 53

Project: mpldatacursor Source File: datacursor.py
    def _adjust_alignment(self, annotation):
        """
        Make text alignment match the specified offsets (this allows easier
        changing of relative position of text box...)
        """
        try:
            # annotation.xytext is deprecated in recent versions
            dx, dy = annotation.xyann
        except AttributeError:
            # but xyann doesn't exist in older versions...
            dx, dy = annotation.xytext

        horizontal = {1:'left', 0:'center', -1:'right'}[np.sign(dx)]
        vertical = {1:'bottom', 0:'center', -1:'top'}[np.sign(dy)]
        if not self._user_set_ha:
            annotation.set_horizontalalignment(horizontal)
        if not self._user_set_va:
            annotation.set_verticalalignment(vertical)
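
A quick illustration of the lookup above with hypothetical offsets: np.sign returns -1, 0, or 1, which hash equal to the integer dict keys.

import numpy as np

horizontal = {1: 'left', 0: 'center', -1: 'right'}
for dx in (-12.0, 0.0, 7.5):
    print(dx, horizontal[np.sign(dx)])
# -12.0 right / 0.0 center / 7.5 left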

Example 54

Project: neural-network-animation Source File: scale.py
Function: transform_non_affine
    def transform_non_affine(self, a):
        sign = np.sign(a)
        masked = ma.masked_inside(a,
                                  -self.linthresh,
                                  self.linthresh,
                                  copy=False)
        log = sign * self.linthresh * (
            self._linscale_adj +
            ma.log(np.abs(masked) / self.linthresh) / self._log_base)
        if masked.mask.any():
            return ma.where(masked.mask, a * self._linscale_adj, log)
        else:
            return log
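
A self-contained sketch of the same symlog idea, simplified (natural log, linear-scale adjustment fixed at 1, nothing taken from matplotlib): np.sign preserves the sign while the logarithm acts on the magnitude.

import numpy as np

def symlog(a, linthresh=1.0):
    # Linear inside [-linthresh, linthresh], logarithmic (with the
    # original sign restored) outside.
    a = np.asarray(a, dtype=float)
    out = a.copy()
    outside = np.abs(a) > linthresh
    out[outside] = np.sign(a[outside]) * linthresh * (
        1.0 + np.log(np.abs(a[outside]) / linthresh))
    return out

print(symlog([-100.0, -0.5, 0.0, 0.5, 100.0]))
# [-5.605... -0.5  0.  0.5  5.605...]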

Example 55

Project: neural-network-animation Source File: scale.py
Function: transform_non_affine
    def transform_non_affine(self, a):
        sign = np.sign(a)
        masked = ma.masked_inside(a, -self.invlinthresh,
                                  self.invlinthresh, copy=False)
        exp = sign * self.linthresh * (
            ma.power(self.base, (sign * (masked / self.linthresh))
            - self._linscale_adj))
        if masked.mask.any():
            return ma.where(masked.mask, a / self._linscale_adj, exp)
        else:
            return exp

Example 56

Project: lxmls-toolkit Source File: rnn.py
    def derivate_activation(self, z, function_name):
        '''
        Return the derivative of the given activation function,
        computed from the activation's output z.
        '''
        if function_name == 'logistic':
            dx = z * (1. - z)
        elif function_name == 'tanh':
            dx = (1. - z * z)
        elif function_name == 'relu':
            dx = (np.sign(z)+1)/2.
        else:
            raise NotImplementedError("Unknown activation %s" % function_name)
        return dx
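
The relu branch works because np.sign(z) is -1 for negative activations and 1 for positive ones, so (np.sign(z) + 1)/2 is the 0/1 step function, with 0.5 assigned at z == 0. A quick check:

import numpy as np

z = np.array([-2.0, 0.0, 3.0])
print((np.sign(z) + 1) / 2.)   # [0.  0.5 1. ]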

Example 57

Project: keras-steering-angle-visualizations Source File: run.py
def grad_cam_loss(x, angle):
    if angle > 5.0 * scipy.pi / 180.0:
        return x
    elif angle < -5.0 * scipy.pi / 180.0:
        return -x
    else:
        return tf.inv(x) * np.sign(angle)

Example 58

Project: rayopt Source File: elements.py
    def excidence(self, mu):
        i = self.incidence
        if mu == 1:
            return i
        r = 0, 0, 1
        a = abs(mu)*i[2]
        g = -a + np.sign(mu)*np.sqrt(a**2 - mu**2 + 1)
        e = abs(mu)*i + g[None]*r
        return e

Example 59

Project: ML-and-DM-in-action Source File: SVM.py
Function: classify
	def classify(self,inX):
		if len(self._alphaXy) == 0:
			raise  ValueError("You must train the model")
		length = len(self._X[0])
		if len(inX) != length:
			raise  ValueError("inX must have %d unit"%(length))
		temp = [self._alphaXy[i]*self._kernel(inX,self._X[i],self._optType,self._sigma)
		        for i in range(len(self._X))]
		temp = sum(temp) + self._b
		res = numpy.sign(temp)
		return res

Example 60

Project: qstrader Source File: position.py
    def update_market_value(self, bid, ask):
        """
        The market value is tricky to calculate as we only have
        access to the top of the order book through Interactive
        Brokers, which means that the true redemption price is
        unknown until executed.

        However, it can be estimated via the mid-price of the
        bid-ask spread. Once the market value is calculated it
        allows calculation of the unrealised and realised profit
        and loss of any transactions.
        """
        midpoint = (bid + ask) / 2
        self.market_value = self.quantity * midpoint * sign(self.net)
        self.unrealised_pnl = self.market_value - self.cost_basis
        self.realised_pnl = self.market_value + self.net_incl_comm

Example 61

Project: PyEMMA Source File: units.py
def bytes_to_string(num, suffix='B'):
    """
    Returns the size of num (bytes) in a human readable form up to Yottabytes (YB).
    :param num: The size of interest in bytes.
    :param suffix: A suffix, default 'B' for 'bytes'.
    :return: a human readable representation of a size in bytes
    """
    extensions = ["%s%s" % (x, suffix) for x in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']]
    if num == 0:
        return "0%s" % extensions[0]
    else:
        n_bytes = float(abs(num))
        place = int(math.floor(math.log(n_bytes, 1024)))
        return "%.1f%s" % (np.sign(num) * (n_bytes / pow(1024, place)), extensions[place])
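
Here np.sign(num) restores the sign that abs(num) stripped before taking the logarithm. A usage sketch, with outputs worked out from the code above:

>>> bytes_to_string(-2048)
'-2.0KB'
>>> bytes_to_string(3 * 1024**3)
'3.0GB'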

Example 62

Project: nipy Source File: test_screen.py
def pca_pos(data4d):
    """ Flips signs equal over volume for PCA

    Needed because Windows appears to generate random signs for PCA components
    across PCA runs on the same data.
    """
    signs = np.sign(data4d[0, 0, 0, :])
    return data4d * signs
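
A minimal sketch of the broadcast, using a synthetic array rather than nipy data: each component along the last axis is multiplied by the sign of one reference voxel, so components whose reference entry was negative get flipped.

import numpy as np

data4d = np.random.RandomState(0).randn(2, 2, 2, 3)
signs = np.sign(data4d[0, 0, 0, :])    # one sign per component
fixed = data4d * signs                 # broadcasts over the last axis
assert np.all(fixed[0, 0, 0, :] >= 0)  # the reference voxel is now non-negative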

Example 63

Project: ML-and-DM-in-action Source File: adaboost.py
Function: classify
	def classify(self,inX):
		length = len(self._classifyList)
		if length == 0:
			raise ValueError("You must train the model")
		temp = [self._alpha[index]*self._classifyList[index].classify(inX)
		        for index in range(length)]
		temp = sum(temp)
		return numpy.sign(temp)

Example 64

Project: brainx Source File: util.py
def all_positive(adjacency_matrix):
    """ checks if edge values in adjacency matrix are all positive
    or positive and negative 
    Returns
    -------
    all_positive : bool
        True if all values are >=0
        False if values < 0
    """
    # add 1 so 0-> 1(True) , -1 -> 0 False
    signs = set(np.sign(adjacency_matrix).ravel() + 1)
    return bool(sorted(signs)[0])
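
A quick check with hypothetical matrices: adding 1 maps the signs -1/0/1 to 0/1/2, so the minimum of the set is falsy exactly when some entry is negative.

import numpy as np

print(all_positive(np.array([[0., 1.], [2., 3.]])))    # True
print(all_positive(np.array([[0., -1.], [2., 3.]])))   # False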

Example 65

Project: nupic Source File: ticker.py
Function: call
    def __call__(self, x, pos=None):
        'Return the format for tick val *x* at position *pos*'
        vmin, vmax = self.axis.get_view_interval()
        d = abs(vmax - vmin)
        b=self._base
        if x == 0.0:
            return '0'
        sign = np.sign(x)
        # only label the decades
        fx = math.log(abs(x))/math.log(b)
        isDecade = self.is_decade(fx)
        if not isDecade and self.labelOnlyBase: s = ''
        elif x>10000: s= '%1.0e'%x
        elif x<1: s =  '%1.0e'%x
        else        : s =  self.pprint_val(x,d)
        if sign == -1:
            s =  '-%s' % s

        return self.fix_minus(s)

Example 66

Project: neupy Source File: discrete_hopfield_network.py
    def predict(self, input_data, n_times=None):
        self.discrete_validation(input_data)
        input_data = format_data(bin2sign(input_data), is_feature1d=False)

        if self.mode == 'async':
            if n_times is None:
                n_times = self.n_times

            _, n_features = input_data.shape
            output_data = input_data

            for _ in range(n_times):
                position = random.randint(0, n_features - 1)
                raw_new_value = output_data.dot(self.weight[:, position])
                output_data[:, position] = sign(raw_new_value)
        else:
            output_data = input_data.dot(self.weight)

        return step_function(output_data).astype(int)

Example 67

Project: sparkit-learn Source File: test_truncated_svd.py
def match_sign(a, b):
    a_sign = np.sign(a)
    b_sign = np.sign(b)
    if np.array_equal(a_sign, -b_sign):
        return -b
    elif np.array_equal(a_sign, b_sign):
        return b
    else:
        raise AssertionError("inconsistent matching of sign")

Example 68

Project: mne-python Source File: transforms.py
def _sh_real_to_complex(shs, order):
    """Convert real spherical harmonic pair to complex.

    Parameters
    ----------
    shs : ndarray, shape (2, ...)
        The real spherical harmonics at ``[order, -order]``.
    order : int
        Order (usually 'm') of multipolar moment.

    Returns
    -------
    sh : array-like, shape (...)
        The complex version of the spherical harmonics.
    """
    if order == 0:
        return shs[0]
    else:
        return (shs[0] + 1j * np.sign(order) * shs[1]) / np.sqrt(2.)

Example 69

Project: nipy Source File: family.py
    def devresid(self, Y, mu):
        """
        Binomial deviance residual

        INPUTS:
           Y     -- response variable
           mu    -- mean parameter

        OUTPUTS: resid
           resid -- deviance residuals

        """

        mu = self.link.clean(mu)
        return np.sign(Y - mu) * np.sqrt(-2 * (Y * np.log(mu / self.n) + (self.n - Y) * np.log(1 - mu / self.n)))

Example 70

Project: ift6390_machine_learning_fundamentals Source File: distortion.py
Function: init
    def __init__(self, index, gradient, pixel):
        self.index = index
        self.sgn = numpy.sign(gradient)
        self.abs_gradient = numpy.abs(gradient)
        self.distortion = 0.
        self.max_dist = pixel if self.sgn==-1 else 1 - pixel  # to keep the value between 0 and 1
        self.gradient_cost = self.get_gradient_cost()

Example 71

Project: scikit-learn Source File: test_samples_generator.py
def test_make_classification_informative_features():
    """Test the construction of informative features in make_classification

    Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
    fully-specified `weights`.
    """
    # Create very separate clusters; check that vertices are unique and
    # correspond to classes
    class_sep = 1e6
    make = partial(make_classification, class_sep=class_sep, n_redundant=0,
                   n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)

    for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
                                                         (2, [1/3] * 3, 1),
                                                         (2, [1/4] * 4, 1),
                                                         (2, [1/2] * 2, 2),
                                                         (2, [3/4, 1/4], 2),
                                                         (10, [1/3] * 3, 10)
                                                         ]:
        n_classes = len(weights)
        n_clusters = n_classes * n_clusters_per_class
        n_samples = n_clusters * 50

        for hypercube in (False, True):
            X, y = make(n_samples=n_samples, n_classes=n_classes,
                        weights=weights, n_features=n_informative,
                        n_informative=n_informative,
                        n_clusters_per_class=n_clusters_per_class,
                        hypercube=hypercube, random_state=0)

            assert_equal(X.shape, (n_samples, n_informative))
            assert_equal(y.shape, (n_samples,))

            # Cluster by sign, viewed as strings to allow uniquing
            signs = np.sign(X)
            signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
            unique_signs, cluster_index = np.unique(signs,
                                                    return_inverse=True)

            assert_equal(len(unique_signs), n_clusters,
                         "Wrong number of clusters, or not in distinct "
                         "quadrants")

            clusters_by_class = defaultdict(set)
            for cluster, cls in zip(cluster_index, y):
                clusters_by_class[cls].add(cluster)
            for clusters in clusters_by_class.values():
                assert_equal(len(clusters), n_clusters_per_class,
                             "Wrong number of clusters per class")
            assert_equal(len(clusters_by_class), n_classes,
                         "Wrong number of classes")

            assert_array_almost_equal(np.bincount(y) / len(y) // weights,
                                      [1] * n_classes,
                                      err_msg="Wrong number of samples "
                                              "per class")

            # Ensure on vertices of hypercube
            for cluster in range(len(unique_signs)):
                centroid = X[cluster_index == cluster].mean(axis=0)
                if hypercube:
                    assert_array_almost_equal(np.abs(centroid),
                                              [class_sep] * n_informative,
                                              decimal=0,
                                              err_msg="Clusters are not "
                                                      "centered on hypercube "
                                                      "vertices")
                else:
                    assert_raises(AssertionError,
                                  assert_array_almost_equal,
                                  np.abs(centroid),
                                  [class_sep] * n_informative,
                                  decimal=0,
                                  err_msg="Clusters should not be centered "
                                          "on hypercube vertices")

    assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
                  n_clusters_per_class=1)
    assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
                  n_clusters_per_class=2)

Example 72

Project: mpop Source File: assemble_segments.py
Function: polygon
def polygon(area_corners, segment_corners):
    """Get the intersection polygon between two areas.
    """
    area_boundaries = [Arc(area_corners[0], area_corners[1]),
                       Arc(area_corners[1], area_corners[2]),
                       Arc(area_corners[2], area_corners[3]),
                       Arc(area_corners[3], area_corners[0])]
    segment_boundaries = [Arc(segment_corners[0], segment_corners[1]),
                          Arc(segment_corners[1], segment_corners[2]),
                          Arc(segment_corners[2], segment_corners[3]),
                          Arc(segment_corners[3], segment_corners[0])]

    angle1 = area_boundaries[0].angle(area_boundaries[1])
    angle2 = segment_boundaries[0].angle(segment_boundaries[1])
    if np.sign(angle1) != np.sign(angle2):
        segment_corners.reverse()
        segment_boundaries = [Arc(segment_corners[0], segment_corners[1]),
                              Arc(segment_corners[1], segment_corners[2]),
                              Arc(segment_corners[2], segment_corners[3]),
                              Arc(segment_corners[3], segment_corners[0])]
    poly = []

    boundaries = area_boundaries
    other_boundaries = segment_boundaries

    b__ = None

    for b__ in boundaries:
        if point_inside(b__.start, segment_corners):
            poly.append(b__.start)
            break
        else:
            inter = get_first_intersection(b__, other_boundaries)
            if inter is not None:
                poly.append(inter)
                break
    if len(poly) == 0:
        return None
    while len(poly) < 2 or poly[0] != poly[-1]:
        inter, b2_ = get_next_intersection(poly[-1], b__, other_boundaries)
        if inter is None:
            poly.append(b__.end)
            idx = (boundaries.index(b__) + 1) % len(boundaries)
            b__ = boundaries[idx]
        else:
            poly.append(inter)
            b__ = b2_
            boundaries, other_boundaries = other_boundaries, boundaries
    return poly[:-1]

Example 73

Project: scikit-learn Source File: isotonic.py
Function: check_increasing
def check_increasing(x, y):
    """Determine whether y is monotonically correlated with x.

    y is found increasing or decreasing with respect to x based on a Spearman
    correlation test.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
            Training data.

    y : array-like, shape=(n_samples,)
        Training target.

    Returns
    -------
    `increasing_bool` : boolean
        Whether the relationship is increasing or decreasing.

    Notes
    -----
    The Spearman correlation coefficient is estimated from the data, and the
    sign of the resulting estimate is used as the result.

    In the event that the 95% confidence interval based on Fisher transform
    spans zero, a warning is raised.

    References
    ----------
    Fisher transformation. Wikipedia.
    https://en.wikipedia.org/wiki/Fisher_transformation
    """

    # Calculate Spearman rho estimate and set return accordingly.
    rho, _ = spearmanr(x, y)
    increasing_bool = rho >= 0

    # Run Fisher transform to get the rho CI, but handle rho=+/-1
    if rho not in [-1.0, 1.0]:
        F = 0.5 * math.log((1. + rho) / (1. - rho))
        F_se = 1 / math.sqrt(len(x) - 3)

        # Use a 95% CI, i.e., +/-1.96 S.E.
        # https://en.wikipedia.org/wiki/Fisher_transformation
        rho_0 = math.tanh(F - 1.96 * F_se)
        rho_1 = math.tanh(F + 1.96 * F_se)

        # Warn if the CI spans zero.
        if np.sign(rho_0) != np.sign(rho_1):
            warnings.warn("Confidence interval of the Spearman "
                          "correlation coefficient spans zero. "
                          "Determination of ``increasing`` may be "
                          "suspect.")

    return increasing_bool

Example 74

Project: scikit-learn Source File: _scipy_sparse_lsqr_backport.py
def _sym_ortho(a, b):
    """
    Stable implementation of Givens rotation.

    Notes
    -----
    The routine 'SymOrtho' was added for numerical stability. This is
    recommended by S.-C. Choi in [1]_.  It removes the unpleasant potential of
    ``1/eps`` in some important places (see, for example text following
    "Compute the next plane rotation Qk" in minres.py).

    References
    ----------
    .. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
           and Least-Squares Problems", Dissertation,
           http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf

    """
    if b == 0:
        return np.sign(a), 0, abs(a)
    elif a == 0:
        return 0, np.sign(b), abs(b)
    elif abs(b) > abs(a):
        tau = a / b
        s = np.sign(b) / sqrt(1 + tau * tau)
        c = s * tau
        r = b / s
    else:
        tau = b / a
        c = np.sign(a) / sqrt(1+tau*tau)
        s = c * tau
        r = a / c
    return c, s, r
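
A sanity check of the rotation, with arbitrary values and assuming the module's np/sqrt imports are in scope: c and s form a unit vector and rotate (a, b) onto (r, 0).

c, s, r = _sym_ortho(3.0, -4.0)
print(c**2 + s**2)        # ~1.0
print(c*3.0 + s*(-4.0))   # ~5.0, equal to r
print(-s*3.0 + c*(-4.0))  # ~0.0, the rotated-away component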

Example 75

Project: pyspeckit Source File: baseline.py
    def plot_baseline(self, annotate=True, baseline_fit_color='orange',
                      use_window_limits=None, linewidth=1, alpha=0.75,
                      plotkwargs={}, **kwargs):
        """
        Overplot the baseline fit

        Parameters
        ----------
        annotate : bool
            Display the fit parameters for the best-fit baseline on the
            top-left of the plot
        baseline_fit_color : matplotlib color
            What color to use for overplotting the line
            (default is slightly transparent orange)
        use_window_limits : None or bool
            Keep the current window or expand the plot limits?  If left as None,
            will use `self.use_window_limits`

        Other Parameters
        ----------------
        linewidth : number
        alpha : float [0-1]
        plotkwargs : dict
            Are passed to matplotlib's plot function
        """

        # clear out the errorplot.  This should not be relevant...
        if self.Spectrum.plotter.errorplot is not None:
            for p in self.Spectrum.plotter.errorplot:
                if isinstance(p,matplotlib.collections.PolyCollection):
                    if p in self.Spectrum.plotter.axis.collections:
                        self.Spectrum.plotter.axis.collections.remove(p)
                if isinstance(p,matplotlib.lines.Line2D):
                    if p in self.Spectrum.plotter.axis.lines:
                        self.Spectrum.plotter.axis.lines.remove(p)

        # if we subtract the baseline, replot the now-subtracted data with rescaled Y axes
        if self.subtracted:
            if self.Spectrum.plotter.axis is not None:
                for p in self.Spectrum.plotter.axis.lines:
                    self.Spectrum.plotter.axis.lines.remove(p)
            plotmask = self.OKmask*False # include nothing...
            plotmask[self.xmin:self.xmax] = self.OKmask[self.xmin:self.xmax] # then include everything OK in range
            self.Spectrum.plotter.ymin = abs(self.Spectrum.data[plotmask].min())*1.1*np.sign(self.Spectrum.data[plotmask].min())
            self.Spectrum.plotter.ymax = abs(self.Spectrum.data[plotmask].max())*1.1*np.sign(self.Spectrum.data[plotmask].max())
            # don't change the zoom (by default)!
            uwl = use_window_limits if use_window_limits is not None else self.use_window_limits
            self.Spectrum.plotter.plot(use_window_limits=uwl)
        else: # otherwise just overplot the fit
            self.Spectrum.plotter.axis.set_autoscale_on(False)
            for p in self._plots:
                # remove the old baseline plots
                if p in self.Spectrum.plotter.axis.lines:
                    self.Spectrum.plotter.axis.lines.remove(p)
            self._plots += self.Spectrum.plotter.axis.plot(
                    self.Spectrum.xarr,
                    self.basespec,
                    color=baseline_fit_color,
                    linewidth=linewidth,
                    alpha=alpha,
                    **plotkwargs)

        if annotate:
            self.annotate() # refreshes automatically
        elif self.Spectrum.plotter.autorefresh:
            self.Spectrum.plotter.refresh()

Example 76

Project: pyresample Source File: spherical_geometry.py
def intersection_polygon(area_corners, segment_corners):
    """Get the intersection polygon between two areas.
    """
    area_boundaries = [Arc(area_corners[0], area_corners[1]),
                       Arc(area_corners[1], area_corners[2]),
                       Arc(area_corners[2], area_corners[3]),
                       Arc(area_corners[3], area_corners[0])]
    segment_boundaries = [Arc(segment_corners[0], segment_corners[1]),
                          Arc(segment_corners[1], segment_corners[2]),
                          Arc(segment_corners[2], segment_corners[3]),
                          Arc(segment_corners[3], segment_corners[0])]

    angle1 = area_boundaries[0].angle(area_boundaries[1])
    angle2 = segment_boundaries[0].angle(segment_boundaries[1])
    if np.sign(angle1) != np.sign(angle2):
        segment_corners.reverse()
        segment_boundaries = [Arc(segment_corners[0], segment_corners[1]),
                              Arc(segment_corners[1], segment_corners[2]),
                              Arc(segment_corners[2], segment_corners[3]),
                              Arc(segment_corners[3], segment_corners[0])]
    poly = []

    boundaries = area_boundaries
    other_boundaries = segment_boundaries

    b__ = None

    for b__ in boundaries:
        if point_inside(b__.start, segment_corners):
            poly.append(b__.start)
            break
        else:
            inter = get_first_intersection(b__, other_boundaries)
            if inter is not None:
                poly.append(inter)
                break
    if len(poly) == 0:
        return None
    while len(poly) < 2 or poly[0] != poly[-1]:
        inter, b2_ = get_next_intersection(poly[-1], b__, other_boundaries)
        if inter is None:
            poly.append(b__.end)
            idx = (boundaries.index(b__) + 1) % len(boundaries)
            b__ = boundaries[idx]
        else:
            poly.append(inter)
            b__ = b2_
            boundaries, other_boundaries = other_boundaries, boundaries
    return poly[:-1]

Example 77

Project: polylearn Source File: test_polynomial_network.py
Function: check_warm_start
def check_warm_start(degree):
    y = np.sign(_lifted_predict(U[:degree], X))
    # Result should be the same if:
    # (a) running 10 iterations

    common_settings = dict(fit_lower=None, degree=degree, n_components=2,
                           random_state=0)
    clf_10 = PolynomialNetworkRegressor(max_iter=10, warm_start=False,
                                        **common_settings)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        clf_10.fit(X, y)

    # (b) running 5 iterations and 5 more
    clf_5_5 = PolynomialNetworkRegressor(max_iter=5, warm_start=True,
                                         **common_settings)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        clf_5_5.fit(X, y)
        U_fit = clf_5_5.U_.copy()
        clf_5_5.fit(X, y)

    # (c) running 5 iterations when starting from previous point.
    clf_5 = PolynomialNetworkRegressor(max_iter=5, warm_start=True,
                                       **common_settings)
    clf_5.U_ = U_fit
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        clf_5.fit(X, y)

    assert_array_almost_equal(clf_10.U_, clf_5_5.U_)
    assert_array_almost_equal(clf_10.U_, clf_5.U_)

    # Prediction results should also be the same if:
    # (note: could not get this test to work for the exact P_.)
    # This test is very flimsy!

    y = np.sign(_lifted_predict(U[:degree], X))

    beta_low = 0.51
    beta = 0.5
    beta_hi = 0.49

    common_settings = dict(degree=degree, n_components=n_components,
                           tol=1e-3, random_state=0)
    ref = PolynomialNetworkRegressor(beta=beta, **common_settings)
    ref.fit(X, y)
    y_pred_ref = ref.predict(X)

    # # (a) starting from lower beta, increasing and refitting
    from_low = PolynomialNetworkRegressor(beta=beta_low, warm_start=True,
                                          **common_settings)
    from_low.fit(X, y)
    from_low.set_params(beta=beta)
    from_low.fit(X, y)
    y_pred_low = from_low.predict(X)

    # (b) starting from higher beta, decreasing and refitting
    from_hi = PolynomialNetworkRegressor(beta=beta_hi, warm_start=True,
                                         **common_settings)
    from_hi.fit(X, y)
    from_hi.set_params(beta=beta)
    from_hi.fit(X, y)
    y_pred_hi = from_hi.predict(X)

    decimal = 3
    assert_array_almost_equal(y_pred_low, y_pred_ref, decimal=decimal)
    assert_array_almost_equal(y_pred_hi, y_pred_ref, decimal=decimal)

Example 78

Project: scipy Source File: matfuncs.py
def signm(A, disp=True):
    """
    Matrix sign function.

    Extension of the scalar sign(x) to matrices.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix at which to evaluate the sign function
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    signm : (N, N) ndarray
        Value of the sign function at `A`
    errest : float
        (if disp == False)

        1-norm of the estimated error, ||err||_1 / ||A||_1

    Examples
    --------
    >>> from scipy.linalg import signm, eigvals
    >>> a = [[1,2,3], [1,2,1], [1,1,1]]
    >>> eigvals(a)
    array([ 4.12488542+0.j, -0.76155718+0.j,  0.63667176+0.j])
    >>> eigvals(signm(a))
    array([-1.+0.j,  1.+0.j,  1.+0.j])

    """
    A = _asarray_square(A)

    def rounded_sign(x):
        rx = np.real(x)
        if rx.dtype.char == 'f':
            c = 1e3*feps*amax(x)
        else:
            c = 1e3*eps*amax(x)
        return sign((absolute(rx) > c) * rx)
    result, errest = funm(A, rounded_sign, disp=0)
    errtol = {0:1e3*feps, 1:1e3*eps}[_array_precision[result.dtype.char]]
    if errest < errtol:
        return result

    # Handle signm of defective matrices:

    # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp.,
    # 8:237-250,1981" for how to improve the following (currently a
    # rather naive) iteration process:

    # a = result # sometimes iteration converges faster but where??

    # Shifting to avoid zero eigenvalues. How to ensure that shifting does
    # not change the spectrum too much?
    vals = svd(A, compute_uv=0)
    max_sv = np.amax(vals)
    # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1]
    # c = 0.5/min_nonzero_sv
    c = 0.5/max_sv
    S0 = A + c*np.identity(A.shape[0])
    prev_errest = errest
    for i in range(100):
        iS0 = inv(S0)
        S0 = 0.5*(S0 + iS0)
        Pp = 0.5*(dot(S0,S0)+S0)
        errest = norm(dot(Pp,Pp)-Pp,1)
        if errest < errtol or prev_errest == errest:
            break
        prev_errest = errest
    if disp:
        if not isfinite(errest) or errest >= errtol:
            print("signm result may be inaccurate, approximate err =", errest)
        return S0
    else:
        return S0, errest

Example 79

Project: RasterFairy Source File: coonswarp.py
def leftOrRight(p,l1,l2):
    return np.sign((l2[0] - l1[0]) * (p[1] - l1[1]) - (l2[1] - l1[1]) * (p[0] - l1[0]))
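
The expression is the z-component of the cross product of (l2 - l1) and (p - l1), so np.sign reports which side of the directed line l1 -> l2 the point p lies on. With hypothetical points (and numpy imported as np):

print(leftOrRight((0, 1), (0, 0), (1, 0)))    # 1: p is left of the line
print(leftOrRight((0, -1), (0, 0), (1, 0)))   # -1: p is right of the line
print(leftOrRight((2, 0), (0, 0), (1, 0)))    # 0: p is on the line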

Example 80

Project: scipy Source File: common.py
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.

        .. versionadded:: 0.11.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.

        .. versionadded:: 0.15.0
    b : array-like, optional
        Scaling factor for exp(`a`) must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.

        .. versionadded:: 0.12.0
    return_sign : bool, optional
        If this is set to True, the result will be a pair containing sign
        information; if False, results that are negative will be returned
        as NaN. Default is False (no sign information).

        .. versionadded:: 0.16.0

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
        is returned.
    sgn : ndarray
        If return_sign is True, this will be an array of floating-point
        numbers matching res and +1, 0, or -1 depending on the sign
        of the result. If False, only one result is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    Numpy has a logaddexp function which is very similar to `logsumexp`, but
    only handles two arguments. `logaddexp.reduce` is similar to this
    function, but may be less stable.

    Examples
    --------
    >>> from scipy.misc import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107

    With weights

    >>> a = np.arange(10)
    >>> b = np.arange(10, 0, -1)
    >>> logsumexp(a, b=b)
    9.9170178533034665
    >>> np.log(np.sum(b*np.exp(a)))
    9.9170178533034647

    Returning a sign flag

    >>> logsumexp([1,2],b=[1,-1],return_sign=True)
    (1.5413248546129181, -1.0)

    Notice that `logsumexp` does not directly support masked arrays. To use it
    on a masked array, convert the mask into zero weights:

    >>> a = np.ma.array([np.log(2), 2, np.log(3)],
    ...                  mask=[False, True, False])
    >>> b = (~a.mask).astype(int)
    >>> logsumexp(a.data, b=b), np.log(5)
    (1.6094379124341005, 1.6094379124341005)

    """
    a = _asarray_validated(a, check_finite=False)
    if b is not None:
        a, b = broadcast_arrays(a,b)
        if np.any(b == 0):
            a = a + 0.  # promote to at least float
            a[b == 0] = -np.inf

    a_max = amax(a, axis=axis, keepdims=True)

    if a_max.ndim > 0:
        a_max[~isfinite(a_max)] = 0
    elif not isfinite(a_max):
        a_max = 0

    if b is not None:
        b = asarray(b)
        tmp = b * exp(a - a_max)
    else:
        tmp = exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = sign(s)
            s *= sgn  # /= makes more sense but we need zero -> zero
        out = log(s)

    if not keepdims:
        a_max = squeeze(a_max, axis=axis)
    out += a_max

    if return_sign:
        return out, sgn
    else:
        return out

Example 81

Project: scipy Source File: zeros.py
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None):
    """
    Find a zero using the Newton-Raphson or secant method.

    Find a zero of the function `func` given a nearby starting point `x0`.
    The Newton-Raphson method is used if the derivative `fprime` of `func`
    is provided, otherwise the secant method is used.  If the second order
    derivative `fprime2` of `func` is provided, parabolic Halley's method
    is used.

    Parameters
    ----------
    func : function
        The function whose zero is wanted. It must be a function of a
        single variable of the form f(x,a,b,c...), where a,b,c... are extra
        arguments that can be passed in the `args` parameter.
    x0 : float
        An initial estimate of the zero that should be somewhere near the
        actual zero.
    fprime : function, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
    args : tuple, optional
        Extra arguments to be used in the function call.
    tol : float, optional
        The allowable error of the zero value.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : function, optional
        The second order derivative of the function when available and
        convenient. If it is None (default), then the normal Newton-Raphson
        or the secant method is used. If it is given, parabolic Halley's
        method is used.

    Returns
    -------
    zero : float
        Estimated location where function is zero.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeroes in n dimensions.

    Notes
    -----
    The convergence rate of the Newton-Raphson method is quadratic,
    the Halley method is cubic, and the secant method is
    sub-quadratic.  This means that if the function is well behaved
    the actual error in the estimated zero is approximately the square
    (cube for Halley) of the requested tolerance up to roundoff
    error. However, the stopping criterion used here is the step size
    and there is no guarantee that a zero has been found. Consequently
    the result should be verified. Safer algorithms are brentq,
    brenth, ridder, and bisect, but they all require that the root
    first be bracketed in an interval where the function changes
    sign. The brentq algorithm is recommended for general use in one
    dimensional problems when such an interval has been found.

    """
    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    if maxiter < 1:
        raise ValueError("maxiter must be greater than 0")
    if fprime is not None:
        # Newton-Raphson method
        # Multiply by 1.0 to convert to floating point.  We don't use float(x0)
        # so it still works if x0 is complex.
        p0 = 1.0 * x0
        fder2 = 0
        for iter in range(maxiter):
            myargs = (p0,) + args
            fder = fprime(*myargs)
            if fder == 0:
                msg = "derivative was zero."
                warnings.warn(msg, RuntimeWarning)
                return p0
            fval = func(*myargs)
            if fprime2 is not None:
                fder2 = fprime2(*myargs)
            if fder2 == 0:
                # Newton step
                p = p0 - fval / fder
            else:
                # Parabolic Halley's method
                discr = fder ** 2 - 2 * fval * fder2
                if discr < 0:
                    p = p0 - fder / fder2
                else:
                    p = p0 - 2*fval / (fder + sign(fder) * sqrt(discr))
            if abs(p - p0) < tol:
                return p
            p0 = p
    else:
        # Secant method
        p0 = x0
        if x0 >= 0:
            p1 = x0*(1 + 1e-4) + 1e-4
        else:
            p1 = x0*(1 + 1e-4) - 1e-4
        q0 = func(*((p0,) + args))
        q1 = func(*((p1,) + args))
        for iter in range(maxiter):
            if q1 == q0:
                if p1 != p0:
                    msg = "Tolerance of %s reached" % (p1 - p0)
                    warnings.warn(msg, RuntimeWarning)
                return (p1 + p0)/2.0
            else:
                p = p1 - q1*(p1 - p0)/(q1 - q0)
            if abs(p - p1) < tol:
                return p
            p0 = p1
            q0 = q1
            p1 = p
            q1 = func(*((p1,) + args))
    msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
    raise RuntimeError(msg)

Example 82

Project: bt Source File: algos.py
Function: call
    def __call__(self, target):
        tw = target.temp['weights']
        all_keys = set(list(target.children.keys()) + list(tw.keys()))

        for k in all_keys:
            tgt = tw[k] if k in tw else 0.
            cur = target.children[k].weight if k in target.children else 0.
            delta = tgt - cur

            # check if we need to limit
            if self.global_limit:
                if abs(delta) > self.limit:
                    tw[k] = cur + (self.limit * np.sign(delta))
            else:
                # make sure we have a limit defined in case of limit dict
                if k in self.limit:
                    lmt = self.limit[k]
                    if abs(delta) > lmt:
                        tw[k] = cur + (lmt * np.sign(delta))

        return True

Example 83

Project: pyspeckit Source File: measurements.py
    def bracket_root(self, f, x_guess, atol = 1e-4):
        """
        Bracket root by finding points where function goes from positive to negative.
        """

        f1 = f(x_guess)
        f2 = f(x_guess + 1)
        df = f2 - f1

        # Determine whether increasing or decreasing x_guess will lead us to zero
        if (f1 > 0 and df < 0) or (f1 < 0 and df > 0): sign = 1
        else: sign = -1

        # Find root bracketing points
        xpre = x_guess
        xnow = x_guess + sign
        fpre = f1
        fnow = f(xnow)
        while (np.sign(fnow) == np.sign(fpre)):
            xpre = xnow
            xnow += sign * 0.1
            fpre = f(xpre)
            fnow = f(xnow)

        x1 = min(xnow, xpre)
        x2 = max(xnow, xpre)

        if not np.all([np.sign(fpre), np.sign(fnow)]):
            x1 -= 1e-4
            x2 += 1e-4

        return x1, x2
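
The np.sign comparison in the loop is the standard bracketing test: a root lies between two points when the function values there have opposite signs. A minimal standalone sketch:

import numpy as np

f = lambda x: x**2 - 2.0
a, b = 1.0, 2.0
print(np.sign(f(a)) != np.sign(f(b)))   # True: sqrt(2) lies between a and b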

Example 84

Project: pymeasure Source File: experiment.py
Function: get_array
def get_array(start, stop, step):
    """Returns a numpy array from start to stop"""
    step = np.sign(stop-start)*abs(step)
    return np.arange(start, stop+step, step)
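
Because np.sign(stop - start) fixes the direction of the step, the caller can pass the step with either sign. Usage sketch (note the endpoint is included):

>>> get_array(0, 1, 0.25)
array([0.  , 0.25, 0.5 , 0.75, 1.  ])
>>> get_array(5, 0, 1)    # step direction corrected to -1
array([5., 4., 3., 2., 1., 0.])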

Example 85

Project: scipy Source File: _spectral.py
def _root_df_sane(func, x0, args=(), ftol=1e-8, fatol=1e-300, maxfev=1000,
                  fnorm=None, callback=None, disp=False, M=10, eta_strategy=None,
                  sigma_eps=1e-10, sigma_0=1.0, line_search='cruz', **unknown_options):
    r"""
    Solve nonlinear equation with the DF-SANE method

    Options
    -------
    ftol : float, optional
        Relative norm tolerance.
    fatol : float, optional
        Absolute norm tolerance.
        Algorithm terminates when ``||func(x)|| < fatol + ftol ||func(x_0)||``.
    fnorm : callable, optional
        Norm to use in the convergence check. If None, 2-norm is used.
    maxfev : int, optional
        Maximum number of function evaluations.
    disp : bool, optional
        Whether to print convergence process to stdout.
    eta_strategy : callable, optional
        Choice of the ``eta_k`` parameter, which gives slack for growth
        of ``||F||**2``.  Called as ``eta_k = eta_strategy(k, x, F)`` with
        `k` the iteration number, `x` the current iterate and `F` the current
        residual. Should satisfy ``eta_k > 0`` and ``sum(eta, k=0..inf) < inf``.
        Default: ``||F||**2 / (1 + k)**2``.
    sigma_eps : float, optional
        The spectral coefficient is constrained to ``sigma_eps < sigma < 1/sigma_eps``.
        Default: 1e-10
    sigma_0 : float, optional
        Initial spectral coefficient.
        Default: 1.0
    M : int, optional
        Number of iterates to include in the nonmonotonic line search.
        Default: 10
    line_search : {'cruz', 'cheng'}
        Type of line search to employ. 'cruz' is the original one defined in
        [Martinez & Raydan. Math. Comp. 75, 1429 (2006)], 'cheng' is
        a modified search defined in [Cheng & Li. IMA J. Numer. Anal. 29, 814 (2009)].
        Default: 'cruz'

    References
    ----------
    .. [1] "Spectral residual method without gradient information for solving
           large-scale nonlinear systems of equations." W. La Cruz,
           J.M. Martinez, M. Raydan. Math. Comp. **75**, 1429 (2006).
    .. [2] W. La Cruz, Opt. Meth. Software, 29, 24 (2014).
    .. [3] W. Cheng, D.-H. Li. IMA J. Numer. Anal. **29**, 814 (2009).

    """
    _check_unknown_options(unknown_options)

    if line_search not in ('cheng', 'cruz'):
        raise ValueError("Invalid value %r for 'line_search'" % (line_search,))

    nexp = 2

    if eta_strategy is None:
        # Different choice from [1], as their eta is not invariant
        # vs. scaling of F.
        def eta_strategy(k, x, F):
            # Obtain squared 2-norm of the initial residual from the outer scope
            return f_0 / (1 + k)**2

    if fnorm is None:
        def fnorm(F):
            # Obtain squared 2-norm of the current residual from the outer scope
            return f_k**(1.0/nexp)

    def fmerit(F):
        return np.linalg.norm(F)**nexp

    nfev = [0]
    f, x_k, x_shape, f_k, F_k, is_complex = _wrap_func(func, x0, fmerit, nfev, maxfev, args)

    k = 0
    f_0 = f_k
    sigma_k = sigma_0

    F_0_norm = fnorm(F_k)

    # For the 'cruz' line search
    prev_fs = collections.deque([f_k], M)

    # For the 'cheng' line search
    Q = 1.0
    C = f_0

    converged = False
    message = "too many function evaluations required"

    while True:
        F_k_norm = fnorm(F_k)

        if disp:
            print("iter %d: ||F|| = %g, sigma = %g" % (k, F_k_norm, sigma_k))

        if callback is not None:
            callback(x_k, F_k)

        if F_k_norm < ftol * F_0_norm + fatol:
            # Converged!
            message = "successful convergence"
            converged = True
            break

        # Control spectral parameter, from [2]
        if abs(sigma_k) > 1/sigma_eps:
            sigma_k = 1/sigma_eps * np.sign(sigma_k)
        elif abs(sigma_k) < sigma_eps:
            sigma_k = sigma_eps

        # Line search direction
        d = -sigma_k * F_k

        # Nonmonotone line search
        eta = eta_strategy(k, x_k, F_k)
        try:
            if line_search == 'cruz':
                alpha, xp, fp, Fp = _nonmonotone_line_search_cruz(f, x_k, d, prev_fs, eta=eta)
            elif line_search == 'cheng':
                alpha, xp, fp, Fp, C, Q = _nonmonotone_line_search_cheng(f, x_k, d, f_k, C, Q, eta=eta)
        except _NoConvergence:
            break

        # Update spectral parameter
        s_k = xp - x_k
        y_k = Fp - F_k
        sigma_k = np.vdot(s_k, s_k) / np.vdot(s_k, y_k)

        # Take step
        x_k = xp
        F_k = Fp
        f_k = fp

        # Store function value
        if line_search == 'cruz':
            prev_fs.append(fp)

        k += 1

    x = _wrap_result(x_k, is_complex, shape=x_shape)
    F = _wrap_result(F_k, is_complex)

    result = OptimizeResult(x=x, success=converged,
                            message=message,
                            fun=F, nfev=nfev[0], nit=k)

    return result

Example 86

Project: biggus Source File: test__Elementwise.py
Function: test_sign
    def test_sign(self):
        self._test(np.sign)

Example 87

Project: pyflux Source File: egarch.py
Function: mean_prediction
    def _mean_prediction(self, lmda, Y, scores, h, t_params):
        """ Creates an h-step ahead mean prediction
        
        Parameters
        ----------
        
        lmda : np.array
            The past predicted values
        
        Y : np.array
            The past data
        
        scores : np.array
            The past scores
        
        h : int
            How many steps ahead for the prediction
        
        t_params : np.array
            A vector of (transformed) latent variables
        
        Returns
        ----------
        h-length vector of mean predictions
        """     

        # Create arrays to iterate over
        lmda_exp = lmda.copy()
        scores_exp = scores.copy()
        Y_exp = Y.copy()

        # Loop over h time periods          
        for t in range(0, h):
            new_value = t_params[0]

            if self.p != 0:
                for j in range(1, self.p+1):
                    new_value += t_params[j]*lmda_exp[-j]

            if self.q != 0:
                for k in range(1, self.q+1):
                    new_value += t_params[k+self.p]*scores_exp[-k]

            if self.leverage is True:
                new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-t_params[-1]))*(scores_exp[-1]+1)

            lmda_exp = np.append(lmda_exp, [new_value]) # For indexing consistency
            scores_exp = np.append(scores_exp, [0]) # expectation of score is zero
            Y_exp = np.append(Y_exp, [t_params[-1]])

        return lmda_exp

Example 88

Project: iris Source File: _ff.py
    def _det_border(self, field_dim, halo_dim):
        # Update field coordinates for a variable resolution LBC file where
        # the resolution of the very edge (within the rim width) is assumed to
        # be same as the halo.
        def range_order(range1, range2, resolution):
            # Handles whether increasing/decreasing ranges.
            if np.sign(resolution) > 0:
                lower = range1
                upper = range2
            else:
                upper = range1
                lower = range2
            return lower, upper

        # Ensure that the resolution is the same on both edges.
        res_low = field_dim[1] - field_dim[0]
        res_high = field_dim[-1] - field_dim[-2]
        if not np.allclose(res_low, res_high):
            msg = ('The x or y coordinates of your boundary condition field '
                   'may be incorrect, not having taken into account the '
                   'boundary size.')
            warnings.warn(msg)
        else:
            range2 = field_dim[0] - res_low
            range1 = field_dim[0] - halo_dim * res_low
            lower, upper = range_order(range1, range2, res_low)
            extra_before = np.linspace(lower, upper, halo_dim)

            range1 = field_dim[-1] + res_high
            range2 = field_dim[-1] + halo_dim * res_high
            lower, upper = range_order(range1, range2, res_high)
            extra_after = np.linspace(lower, upper, halo_dim)

            field_dim = np.concatenate([extra_before, field_dim, extra_after])
        return field_dim

Example 89

Project: GPy Source File: brownian.py
Function: k
    def K(self,X,X2=None):
        if X2 is None:
            X2 = X
        return self.variance*np.where(np.sign(X)==np.sign(X2.T),np.fmin(np.abs(X),np.abs(X2.T)), 0.)

Example 90

Project: pystruct Source File: svm.py
Function: predict
    def predict(self, X):
        return np.sign(self.decision_function(X))

Example 91

Project: PRST Source File: __init__.py
Function: sign
    def sign(u):
        return np.sign(u.val)

Example 92

Project: GPy Source File: brownian.py
Function: update_gradients_full
    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None:
            X2 = X
        self.variance.gradient = np.sum(dL_dK * np.where(np.sign(X)==np.sign(X2.T),np.fmin(np.abs(X),np.abs(X2.T)), 0.))

Example 93

Project: statsmodels Source File: norms.py
Function: psi_deriv
    def psi_deriv(self, z):
        t1, t2, t3 = self._subset(z)
        return t1 + t3 * (self.a*np.sign(z)*z)/(np.fabs(z)*(self.c-self.b))

Example 94

Project: PYPOWER Source File: toggle_iflims.py
def userfcn_iflims_formulation(om, *args):
    """This is the 'formulation' stage userfcn callback that defines the
    user costs and constraints for interface flow limits. It expects to
    find an 'if' field in the ppc stored in om, as described above. The
    optional args are not currently used.
    """
    ## initialize some things
    ppc = om.get_ppc()
    baseMVA, bus, branch = ppc['baseMVA'], ppc['bus'], ppc['branch']
    ifmap = ppc['if']['map']
    iflims = ppc['if']['lims']

    ## form B matrices for DC model
    _, Bf, _, Pfinj = makeBdc(baseMVA, bus, branch)
    n = Bf.shape[1]                    ## dim of theta

    ## form constraints
    ifidx = unique(iflims[:, 0])   ## interface number list
    nifs = len(ifidx)              ## number of interfaces
    Aif = lil_matrix((nifs, n))
    lif = zeros(nifs)
    uif = zeros(nifs)
    for k in range(nifs):
        ## extract branch indices
        br = ifmap[ifmap[:, 0] == ifidx[k], 1]
        if len(br) == 0:
            stderr.write('userfcn_iflims_formulation: interface %d has no in-service branches\n' % k)

        d = sign(br)
        br = abs(br)
        Ak = sparse((1, n))              ## Ak = sum( d(i) * Bf(i, :) )
        bk = 0                           ## bk = sum( d(i) * Pfinj(i) )
        for i in range(len(br)):
            Ak = Ak + d[i] * Bf[br[i], :]
            bk = bk + d[i] * Pfinj[br[i]]

        Aif[k, :] = Ak
        lif[k] = iflims[k, 1] / baseMVA - bk
        uif[k] = iflims[k, 2] / baseMVA - bk

    ## add interface constraint
    om.add_constraints('iflims',  Aif, lif, uif, ['Va'])      ## nifs

    return om

Example 95

Project: pyNastran Source File: intersect.py
Function: intersect
    def intersect(self, e1, e2, element1, element2, nodes, n):
        """
        http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/pubs/tritri.pdf
        """
        n2 = n[e2]
        #print("nodes.shape =", nodes.shape)
        pt = nodes[element2[0], :]
        d2 = -dot(n2, pt)  # vo2 - node 0 on element 2
        #dvi = []
        #for i in range(3):
            #ei = element1[i]
            #dvii = dot(n2, nodes[ei, :]) + d2
            #dvi.append(dvii)
        #print("    dvi = %s" % dvi)
        #e1 = elements1
        dvi2 = dot(n2, nodes[element1, :].T) + d2

        sdvi = sign(dvi2)
        sign_range = sdvi.max() - sdvi.min()
        if allclose(dvi2.min(), 0.) or sign_range == 2.:
            print("     element2 = ", element2[0])
            print("     ", pt)
            print("     d2", d2)
            print("     dvi = %s" % dvi2)
            print("     sign_range = %s" % sign_range)
            is_intersection = True
            raise NotImplementedError()
        else:
            is_intersection = False


        #print("    n2=%s" % (n2))
        return is_intersection

Example 96

Project: chainer Source File: basic_math.py
    def backward_cpu(self, x, gy):
        return utils.force_array(numpy.sign(x[0]) * gy[0]),

Example 97

Project: pybrain Source File: bicycle.py
    def step(self):
        # Unpack the state and actions.
        # -----------------------------
        # Want to ignore the previous value of omegadd; it could only cause a
        # bug if we assign to it.

        (theta, thetad, omega, omegad, _,
                xf, yf, xb, yb, psi) = self.sensors
        (T, d) = self.actions

        # For recordkeeping.
        # ------------------
        if self._save_wheel_contact_trajectories:
            self.xfhist.append(xf)
            self.yfhist.append(yf)
            self.xbhist.append(xb)
            self.ybhist.append(yb)

        # Intermediate time-dependent quantities.
        # ---------------------------------------
        # Avoid divide-by-zero, just as Randlov did.
        if theta == 0:
            rf = 1e8
            rb = 1e8
            rCM = 1e8
        else:
            rf = self.L / np.abs(sin(theta))
            rb = self.L / np.abs(tan(theta))
            rCM = sqrt((self.L - self.c)**2 + self.L**2 / tan(theta)**2)

        phi = omega + np.arctan(d / self.h)

        # Equations of motion.
        # --------------------
        # Second derivative of angular acceleration:
        omegadd = 1 / self.Itot * (self.M * self.h * self.g * sin(phi)
                - cos(phi) * (self.Idc * self.sigmad * thetad
                    + sign(theta) * self.v**2 * (
                        self.Md * self.r * (1.0 / rf + 1.0 / rb)
                        + self.M * self.h / rCM)))
        thetadd = (T - self.Idv * self.sigmad * omegad) / self.Idl

        # Integrate equations of motion using Euler's method.
        # ---------------------------------------------------
        # yt+1 = yt + yd * dt.
        # Must update omega based on PREVIOUS value of omegad.
        omegad += omegadd * self.time_step
        omega += omegad * self.time_step
        thetad += thetadd * self.time_step
        theta += thetad * self.time_step

        # Handlebars can't be turned more than 80 degrees.
        theta = np.clip(theta, -1.3963, 1.3963)

        # Wheel ('tyre') contact positions.
        # ---------------------------------

        # Front wheel contact position.
        front_temp = self.v * self.time_step / (2 * rf)
        # See Randlov's code.
        if front_temp > 1:
            front_temp = sign(psi + theta) * 0.5 * np.pi
        else:
            front_temp = sign(psi + theta) * arcsin(front_temp)
        xf += self.v * self.time_step * -sin(psi + theta + front_temp)
        yf += self.v * self.time_step * cos(psi + theta + front_temp)

        # Rear wheel.
        back_temp = self.v * self.time_step / (2 * rb)
        # See Randlov's code.
        if back_temp > 1:
            back_temp = np.sign(psi) * 0.5 * np.pi
        else:
            back_temp = np.sign(psi) * np.arcsin(back_temp)
        xb += self.v * self.time_step * -sin(psi + back_temp)
        yb += self.v * self.time_step * cos(psi + back_temp)

        # Preventing numerical drift.
        # ---------------------------
        # Copying what Randlov did.
        current_wheelbase = sqrt((xf - xb)**2 + (yf - yb)**2)
        if np.abs(current_wheelbase - self.L) > 0.01:
            relative_error = self.L / current_wheelbase - 1.0
            xb += (xb - xf) * relative_error
            yb += (yb - yf) * relative_error

        # Update heading, psi.
        # --------------------
        delta_y = yf - yb
        if (xf == xb) and delta_y < 0.0:
            psi = np.pi
        else:
            if delta_y > 0.0:
                psi = arctan((xb - xf) / delta_y)
            else:
                psi = sign(xb - xf) * 0.5 * np.pi - arctan(delta_y / (xb - xf))

        self.sensors = np.array([theta, thetad, omega, omegad, omegadd,
                xf, yf, xb, yb, psi])

Example 98

Project: python-acoustics Source File: iec_61260_1_2014.py
Function: nominal_center_frequency
def _nominal_center_frequency(center, fraction):
    """Nominal frequency according to standard.
    
    :param center: Exact mid-frequency to be rounded.
    :param fraction: Bandwidth designator or fraction.
    """
    def _roundn(x, n):
        return round(x, -int(np.floor(np.sign(x) * np.log10(abs(x)))) + n)
    
    b = fraction
    x = center
    
    # Section E.1: 1/1-octaves
    if b == 1:
        n = index_of_frequency(x, b)
        if -6 <= n < 5: # Correspond to indices when n=0 corresponds to 1000 Hz
            return acoustics.standards.iec_61672_1_2013.NOMINAL_OCTAVE_CENTER_FREQUENCIES[n+6]
        elif n >= 5:
            return 2.0 * _nominal_center_frequency(exact_center_frequency(n-1, b), b) # WARNING: Unclear in standard!
        else:
            return 1./2.0 * _nominal_center_frequency(exact_center_frequency(n+1, b), b) # WARNING: Unclear in standard!
        
    # Section E.2: 1/2-octaves
    elif b == 2:
        return _roundn(x, 2)

    # Section E.1: 1/3-octaves
    elif b == 3:
        n = index_of_frequency(x, b)
        
        if -20 <= n < 14: # Correspond to indices when n=0 corresponds to 1000 Hz
            return acoustics.standards.iec_61672_1_2013.NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES[n+20]
        elif n >= 14:
            return 10.*_nominal_center_frequency(exact_center_frequency(n-10, b), b) # WARNING: Unclear in standard!
        else:
            return 1./10.*_nominal_center_frequency(exact_center_frequency(n+10, b), b) # WARNING: Unclear in standard!
    
    # Section E3.3: 1/4 to 1/24-octaves, inclusive
    elif 4 <= b <= 24:
        msd = x // 10.0**np.floor(np.log10(x))
        if msd < 5:
            return _roundn(x, 2) # E3.2
        else:
            return _roundn(x, 1) # E3.3
   
    # Section E3.5: > 1/24-octaves
    elif b > 24:
        raise NotImplementedError("b > 24 is not implemented")
    else:
        raise ValueError("Wrong value for b")
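
The inner helper _roundn uses np.sign with np.log10 to round to a fixed number of significant digits (n + 1 of them for positive x). A standalone check, redefining the helper at top level for illustration:

import numpy as np

def _roundn(x, n):
    return round(x, -int(np.floor(np.sign(x) * np.log10(abs(x)))) + n)

print(_roundn(3141.59, 2))   # 3140.0 (three significant digits)
print(_roundn(314.159, 1))   # 310.0  (two significant digits)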

Example 99

Project: PYPOWER Source File: toggle_iflims.py
def userfcn_iflims_int2ext(results, *args):
    """This is the 'int2ext' stage userfcn callback that converts everything
    back to external indexing and packages up the results. It expects to
    find an 'if' field in the C{results} dict as described for ppc above.
    It also expects the results to contain solved branch flows and linear
    constraints named 'iflims' which are used to populate output fields
    in C{results['if']}. The optional args are not currently used.
    """
    ## get internal ifmap
    ifmap = results['if']['map']
    iflims = results['if']['lims']

    ##-----  convert stuff back to external indexing  -----
    results['if']['map'] = results['order']['ext']['ifmap']

    ##-----  results post-processing  -----
    ifidx = unique(iflims[:, 0])   ## interface number list
    nifs = len(ifidx)           ## number of interfaces
    results['if']['P'] = zeros(nifs)
    for k in range(nifs):
        ## extract branch indices
        br = ifmap[ifmap[:, 0] == ifidx[k], 1]
        d = sign(br)
        br = abs(br)
        results['if']['P'][k] = sum( d * results['branch'][br, PF] )

    if 'mu' not in results['if']:
        results['if']['mu'] = {}
    results['if']['mu']['l'] = results['lin']['mu']['l']['iflims']
    results['if']['mu']['u'] = results['lin']['mu']['u']['iflims']

    return results

Example 100

Project: OpenNFB Source File: waterfall.py
Function: on_mouse_wheel
    def on_mouse_wheel(self, event):
        dx = np.sign(event.delta[1]) * .05
        self.update()