autograd.numpy.any

Here are examples of the Python API autograd.numpy.any, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

7 Examples
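
Before the project snippets, a minimal standalone sketch of autograd.numpy.any (invented for illustration, not taken from any project below): it is a plain boolean reduction, so its result carries no gradient, and in autograd code it is typically used for control flow rather than differentiation.

# np.any reduces a boolean array to True/False, optionally along an axis.
import autograd.numpy as np

folded_bool = np.array([[False, False], [True, False]])
print(np.any(folded_bool))          # True: at least one element is True
print(np.any(folded_bool, axis=0))  # [ True False]: reduce over rows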

3 Source: numeric_array_patterns.py
with Apache License 2.0
from rgiordan

    def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        folded_bool = np.atleast_1d(folded_bool)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if free:
            return self.__free_folded_indices[folded_bool]
        else:
            return self.__nonfree_folded_indices[folded_bool]

    def freeing_jacobian(self, folded_val, sparse=True):
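
The early return above is a common idiom: np.any over the boolean mask decides whether any indices are requested at all before the more expensive validation and lookup run. A self-contained illustration with a hypothetical helper (not part of the project above):

# Hypothetical sketch of the early-exit idiom: skip all work when the
# mask selects nothing, otherwise return the positions of True entries.
import autograd.numpy as np

def masked_indices(folded_bool):
    folded_bool = np.atleast_1d(folded_bool)
    if not np.any(folded_bool):        # nothing selected: return early
        return np.array([], dtype=int)
    return np.nonzero(folded_bool)[0]

print(masked_indices([False, False]))  # []
print(masked_indices([False, True]))   # [1]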

3 Source: simplex_patterns.py
with Apache License 2.0
from rgiordan

    def validate_folded(self, folded_val, validate_value=None):
        shape_ok, err_msg = self._validate_folded_shape(folded_val)
        if not shape_ok:
            raise ValueError(err_msg)
        if validate_value is None:
            validate_value = self.default_validate
        if validate_value:
            if np.any(folded_val < 0):
                return False, 'Some values are negative.'
            simplex_sums = np.sum(folded_val, axis=-1)
            if np.any(np.abs(simplex_sums - 1) > 1e-12):
                return False, 'The simplexes do not sum to one.'
        return True, ''

    def fold(self, flat_val, free=None, validate_value=None):
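
The two np.any checks above encode the simplex constraints: no negative entries, and each row summing to one within tolerance. A quick numeric check with invented values:

# Rows of a simplex array must be nonnegative and sum to one.
import autograd.numpy as np

vals = np.array([[0.25, 0.75],
                 [0.50, 0.60]])   # second row sums to 1.1
print(np.any(vals < 0))                                   # False
print(np.any(np.abs(np.sum(vals, axis=-1) - 1) > 1e-12))  # True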

0 Source: wrapper.py
with Apache License 2.0
from PennyLaneAI

def tensor_wrapper(obj):
    """Decorator that wraps callable objects and classes so that they both accept
    a ``requires_grad`` keyword argument, as well as returning a PennyLane
    :class:`~.tensor`.

    Only if the decorated object returns an ``ndarray`` is the
    output converted to a :class:`~.tensor`; this avoids superfluous conversion
    of scalars and other native Python types.

    .. note::

        This wrapper does *not* enable autodifferentiation of the wrapped function,
        it merely adds support for :class:`~pennylane.numpy.tensor` output.

    Args:
        obj: a callable object or class

    **Example**

    By default, the ``ones`` function provided by Autograd
    constructs standard ``ndarray`` objects, and does not
    permit a ``requires_grad`` argument:

    >>> from autograd.numpy import ones
    >>> ones([2, 2])
    array([[1., 1.],
           [1., 1.]])
    >>> ones([2, 2], requires_grad=True)
    TypeError: ones() got an unexpected keyword argument 'requires_grad'

    ``tensor_wrapper`` both enables construction of :class:`~pennylane.numpy.tensor`
    objects and converts the output.

    >>> from pennylane import numpy as np
    >>> ones = np.tensor_wrapper(ones)
    >>> ones([2, 2], requires_grad=True)
    tensor([[1., 1.],
            [1., 1.]], requires_grad=True)
    """

    @functools.wraps(obj)
    def _wrapped(*args, **kwargs):
        """Wrapped NumPy function"""

        tensor_kwargs = {}

        if "requires_grad" in kwargs:
            tensor_kwargs["requires_grad"] = kwargs.pop("requires_grad")
        else:
            tensor_args = list(extract_tensors(args))

            if tensor_args:
                # Unless the user specifies otherwise, if all tensors in the argument
                # list are non-trainable, the output is also non-trainable.
                # Equivalently: if any tensor is trainable, the output is also trainable.
                # NOTE: Use of Python's ``any`` results in an infinite recursion,
                # and I'm not sure why. Using ``np.any`` works fine.
                tensor_kwargs["requires_grad"] = _np.any([i.requires_grad for i in tensor_args])

        # evaluate the original object
        res = obj(*args, **kwargs)

        if isinstance(res, _np.ndarray):
            # convert the output to a PennyLane tensor only
            # if the object returned an ndarray
            res = tensor(res, **tensor_kwargs)

        return res

    return _wrapped


def wrap_arrays(old, new):
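
The np.any call in _wrapped is what propagates trainability: the output is trainable exactly when at least one input tensor is. A short demo, assuming PennyLane is installed (pnp.add is the Autograd function after wrapping):

# Any trainable input makes the wrapped function's output trainable.
from pennylane import numpy as pnp

a = pnp.tensor([1.0, 2.0], requires_grad=False)
b = pnp.tensor([3.0, 4.0], requires_grad=True)

print(pnp.add(a, a).requires_grad)  # False: all inputs non-trainable
print(pnp.add(a, b).requires_grad)  # True: one trainable input suffices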

0 Source: pattern_containers.py
with Apache License 2.0
from rgiordan

    def flat_indices(self, folded_bool, free=None):
        free = self._free_with_default(free)
        valid, msg = self.validate_folded(folded_bool, validate_value=False)
        if not valid:
            raise ValueError(msg)

        indices = []
        pattern_flat_length = self.__base_pattern.flat_length(free=free)
        offset = 0
        for item in itertools.product(*self.__array_ranges):
            if np.any(folded_bool[item]):
                pattern_indices = self.__base_pattern.flat_indices(
                    folded_bool[item], free=free)
                if len(pattern_indices) > 0:
                    indices.append(pattern_indices + offset)
            offset += pattern_flat_length
        if len(indices) > 0:
            return np.hstack(indices)
        else:
            return np.array([], dtype=int)


register_pattern_json(PatternDict)
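
Here np.any filters whole sub-pattern blocks: blocks whose mask is entirely False are skipped, and a running offset shifts each kept block's local indices into the flat vector. A toy version of the loop, with block_len and mask invented for illustration:

# Toy block-offset loop: skip all-False blocks, shift kept indices.
import itertools
import autograd.numpy as np

block_len = 3                                    # flat length per block
mask = np.array([[False, True], [True, False]])  # which blocks are selected
indices, offset = [], 0
for item in itertools.product(range(2), range(2)):
    if np.any(mask[item]):
        indices.append(np.arange(block_len, dtype=int) + offset)
    offset += block_len
print(np.hstack(indices))  # [3 4 5 6 7 8]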

0 Source: psdmatrix_patterns.py
with Apache License 2.0
from rgiordan

    def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if not free:
            folded_indices = self.fold(
                np.arange(self.flat_length(False), dtype=int),
                validate_value=False, free=False)
            return folded_indices[folded_bool]
        else:
            # This indicates that each folded value depends on each
            # free value. I think this is not true, but getting the
            # exact pattern may be complicated and will probably not
            # make much of a difference in practice.
            if np.any(folded_bool):
                return np.arange(self.flat_length(True), dtype=int)
            else:
                return np.array([], dtype=int)



register_pattern_json(PSDSymmetricMatrixPattern)
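
The conservative free branch above returns every free index once anything is selected. For context, a d x d PSD matrix has d*(d+1)//2 free values, assuming the usual triangular (Cholesky-style) free parameterization; that assumption is mine, not stated in the snippet:

# Free length of a d x d PSD matrix under a triangular parameterization:
# one free value per lower-triangular entry.
import numpy as np

d = 3
free_length = d * (d + 1) // 2
print(free_length)                        # 6
print(np.arange(free_length, dtype=int))  # [0 1 2 3 4 5]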

0 Source: simplex_patterns.py
with Apache License 2.0
from rgiordan

    def flat_indices(self, folded_bool, free=None):
        # If no indices are specified, save time and return an empty array.
        if not np.any(folded_bool):
            return np.array([], dtype=int)

        free = self._free_with_default(free)
        shape_ok, err_msg = self._validate_folded_shape(folded_bool)
        if not shape_ok:
            raise ValueError(err_msg)
        if not free:
            folded_indices = self.fold(
                np.arange(self.flat_length(False), dtype=int),
                validate_value=False, free=False)
            return folded_indices[folded_bool]
        else:
            # Every element of a particular simplex depends on all
            # the free values for that simplex.

            # The simplex is the last index, which moves the fastest.
            indices = []
            offset = 0
            free_simplex_length = self.__simplex_size - 1
            array_ranges = (range(n) for n in self.__array_shape)
            for ind in itertools.product(*array_ranges):
                if np.any(folded_bool[ind]):
                    free_inds = np.arange(
                        offset * free_simplex_length,
                        (offset + 1) * free_simplex_length,
                        dtype=int)
                    indices.append(free_inds)
                offset += 1
            if len(indices) > 0:
                return np.hstack(indices)
            else:
                return np.array([], dtype=int)


register_pattern_json(SimplexArrayPattern)
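
Each simplex of size k contributes k - 1 free values laid out contiguously, so the simplex visited at position offset owns the free index range [offset*(k-1), (offset+1)*(k-1)). The arithmetic, spelled out with invented sizes:

# Free index ranges per simplex: k - 1 contiguous free values each.
import numpy as np

simplex_size = 4
free_simplex_length = simplex_size - 1
for offset in range(3):
    lo = offset * free_simplex_length
    hi = (offset + 1) * free_simplex_length
    print(offset, np.arange(lo, hi, dtype=int))
# 0 [0 1 2]
# 1 [3 4 5]
# 2 [6 7 8]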

0 Source: generic.py
with MIT License
from wesselb

def any(a: Numeric, axis: Union[Int, None] = None, squeeze: bool = True):
    return anp.any(a, axis=axis, keepdims=not squeeze)


@dispatch
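
The squeeze flag above simply inverts NumPy's keepdims: squeeze=True drops the reduced axis, squeeze=False keeps it with size 1. A quick shape check:

# keepdims=False (squeeze=True) drops the reduced axis;
# keepdims=True (squeeze=False) keeps it as a size-1 axis.
import autograd.numpy as anp

a = anp.array([[True, False], [False, False]])
print(anp.any(a, axis=0, keepdims=False).shape)  # (2,)
print(anp.any(a, axis=0, keepdims=True).shape)   # (1, 2)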