numpy.find_common_type

Here are examples of the Python API numpy.find_common_type, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

56 Examples
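
A quick refresher before the examples: np.find_common_type(array_types, scalar_types) returns the smallest dtype to which every entry of array_types can safely be cast; entries of scalar_types only upcast the result when their kind (e.g. complex versus float) outranks the array kinds. Note that numpy.find_common_type was deprecated in NumPy 1.25 and removed in NumPy 2.0; for the common scalar_types=[] case, numpy.result_type or numpy.promote_types is the replacement. A minimal sketch:

import numpy as np

np.find_common_type([np.int32, np.float32], [])     # dtype('float64')
np.find_common_type([np.float32], [np.int64])       # dtype('float32'): scalar kind does not outrank float
np.find_common_type([np.float32], [np.complex128])  # dtype('complex128'): scalar kind outranks float

# Modern replacement for the scalar_types=[] case:
np.result_type(np.int32, np.float32)                # dtype('float64')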

3 Source : interface.py
with GNU General Public License v3.0
from adityaprakash-bobby

def _get_dtype(operators, dtypes=None):
    if dtypes is None:
        dtypes = []
    for obj in operators:
        if obj is not None and hasattr(obj, 'dtype'):
            dtypes.append(obj.dtype)
    return np.find_common_type(dtypes, [])


class _SumLinearOperator(LinearOperator):

3 Source : nn.py
with MIT License
from BYUCamachoLab

def cartesian_product(arrays):
    la = len(arrays)
    dtype = np.find_common_type([a.dtype for a in arrays], [])
    arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
    for i, a in enumerate(np.ix_(*arrays)):
        arr[..., i] = a
    return arr.reshape(-1, la)
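
A minimal usage sketch (assumes numpy is imported as np, as in the snippet):

a = np.array([1, 2])
b = np.array([10.0, 20.0])
cartesian_product([a, b])
# array([[ 1., 10.],
#        [ 1., 20.],
#        [ 2., 10.],
#        [ 2., 20.]])
# int64 and float64 promote to float64, so the integer inputs come back as floats.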


# ---------------------------------------------------------------------------- #
# Strip waveguide
# ---------------------------------------------------------------------------- #


def straightWaveguide(wavelength, width, thickness, sw_angle=90, derivative=None):

3 Source : floating.py
with GNU General Public License v3.0
from dnn-security

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # for now only handle other floating types
        if not all(isinstance(t, FloatingDtype) for t in dtypes):
            return None
        np_dtype = np.find_common_type(
            # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype]" has no
            # attribute "numpy_dtype"
            [t.numpy_dtype for t in dtypes],  # type: ignore[union-attr]
            [],
        )
        if np.issubdtype(np_dtype, np.floating):
            return FLOAT_STR_TO_DTYPE[str(np_dtype)]
        return None


def coerce_to_array(

3 Source : curve.py
with MIT License
from jan-mue

    def __init__(self, center=Point(0, 0, 0), radius=1, **kwargs):
        if radius == 0:
            raise ValueError("Sphere radius cannot be 0.")

        c = -center.normalized_array
        m = np.eye(center.shape[0], dtype=np.find_common_type([c.dtype, type(radius)], []))
        m[-1, :] = c
        m[:, -1] = c
        m[-1, -1] = c[:-1].dot(c[:-1]) - radius ** 2

        # normalize with abs(det(m))
        m = m / radius ** (2 / 3)

        kwargs["copy"] = False
        super().__init__(m, **kwargs)

    @property

3 Source : interface.py
with MIT License
from ktraunmueller

def _get_dtype(operators, dtypes=[]):
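    # NB: the mutable default argument is shared across calls; the
    # interface.py variant shown earlier avoids this with dtypes=None.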
    for obj in operators:
        if obj is not None and hasattr(obj, 'dtype'):
            dtypes.append(obj.dtype)
    return np.find_common_type(dtypes, [])


class _SumLinearOperator(LinearOperator):

3 Source : floating.py
with BSD 3-Clause "New" or "Revised" License
from leobago

    def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
        # for now only handle other floating types
        if not all(isinstance(t, FloatingDtype) for t in dtypes):
            return None
        np_dtype = np.find_common_type(
            [t.numpy_dtype for t in dtypes], []  # type: ignore[union-attr]
        )
        if np.issubdtype(np_dtype, np.floating):
            return FLOAT_STR_TO_DTYPE[str(np_dtype)]
        return None

    def __from_arrow__(

3 Source : LinearOperator.py
with GNU Lesser General Public License v3.0
from PyLops

def _get_dtype(operators, dtypes=None):
    if dtypes is None:
        dtypes = []
    opdtypes = []
    for obj in operators:
        if obj is not None and hasattr(obj, "dtype"):
            opdtypes.append(obj.dtype)
    return np.find_common_type(opdtypes, dtypes)
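
Note the difference from the SciPy helper above: here the caller-supplied dtypes are passed as the scalar-types argument, so they only upcast the result when their kind outranks the operator dtypes. For example:

np.find_common_type([np.float32], [np.float64])    # dtype('float32'): same kind, scalar loses
np.find_common_type([np.float32], [np.complex64])  # dtype('complex64'): higher kind, scalar wins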


class _ScaledLinearOperator(spLinearOperator):

3 Source : np_conserved.py
with GNU General Public License v3.0
from tenpy

def _find_calc_dtype(a_dtype, b_dtype):
    """return (calc_dtype, res_dtype) suitable for BLAS calculations."""
    res_dtype = np.find_common_type([a_dtype, b_dtype], [])
    _, calc_dtype, _ = BLAS.find_best_blas_type(dtype=res_dtype)
    return calc_dtype, res_dtype
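
A hedged sketch of what this pair of calls yields (find_best_blas_type lives in scipy.linalg.blas):

import numpy as np
from scipy.linalg.blas import find_best_blas_type

res_dtype = np.find_common_type([np.float32, np.complex64], [])  # dtype('complex64')
prefix, calc_dtype, _ = find_best_blas_type(dtype=res_dtype)
# prefix == 'c', calc_dtype == dtype('complex64'): single-precision complex BLAS routines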


@use_cython

3 Source : mpo.py
with GNU General Public License v3.0
from tenpy

    def __init__(self, bra, H, ket, cache=None, **init_env_data):
        self.H = H
        super().__init__(bra, ket, cache, **init_env_data)
        self.dtype = np.find_common_type([bra.dtype, ket.dtype, H.dtype], [])

    def init_first_LP_last_RP(self,

0 Source : test_numerictypes.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_scalar_loses1(self):
        res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
        assert_(res == 'f4')

    def test_scalar_loses2(self):

0 Source : test_numerictypes.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_scalar_loses2(self):
        res = np.find_common_type(['f4', 'f4'], ['i8'])
        assert_(res == 'f4')

    def test_scalar_wins(self):

0 Source : test_numerictypes.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_scalar_wins(self):
        res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
        assert_(res == 'c8')

    def test_scalar_wins2(self):

0 Source : test_numerictypes.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_scalar_wins2(self):
        res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
        assert_(res == 'f8')

    def test_scalar_wins3(self):  # doesn't go up to 'f16' on purpose

0 Source : test_numerictypes.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_scalar_wins3(self):  # doesn't go up to 'f16' on purpose
        res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
        assert_(res == 'f8')

class TestMultipleFields(object):

0 Source : test_regression.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def test_find_common_type_boolean(self):
        # Ticket #1695
        assert_(np.find_common_type([], ['?', '?']) == '?')

    def test_empty_mul(self):

0 Source : matfuncs.py
with GNU General Public License v3.0
from adityaprakash-bobby

    def __init__(self, *args, **kwargs):
        self._structure = kwargs.get('structure', None)
        for A in args:
            if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
                raise ValueError(
                        'For now, the ProductOperator implementation is '
                        'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            for A in args:
                for d in A.shape:
                    if d != n:
                        raise ValueError(
                                'The square matrices of the ProductOperator '
                                'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        self.dtype = np.find_common_type([x.dtype for x in args], [])
        self._operator_sequence = args

    def _matvec(self, x):

0 Source : sputils.py
with GNU General Public License v3.0
from adityaprakash-bobby

def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------

    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.bool_'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    """

    t = _upcast_memo.get(hash(args))
    if t is not None:
        return t

    upcast = np.find_common_type(args, [])

    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[hash(args)] = t
            return t

    raise TypeError('no supported conversion for types: %r' % (args,))


def upcast_char(*args):

0 Source : distance.py
with GNU General Public License v3.0
from adityaprakash-bobby

def _nbool_correspond_all(u, v, w=None):
    if u.dtype == v.dtype == bool and w is None:
        not_u = ~u
        not_v = ~v
        nff = (not_u & not_v).sum()
        nft = (not_u & v).sum()
        ntf = (u & not_v).sum()
        ntt = (u & v).sum()
    else:
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        not_u = 1.0 - u
        not_v = 1.0 - v
        if w is not None:
            not_u = w * not_u
            u = w * u
        nff = (not_u * not_v).sum()
        nft = (not_u * v).sum()
        ntf = (u * not_v).sum()
        ntt = (u * v).sum()
    return (nff, nft, ntf, ntt)
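
Here [int] sits in the array-types slot so that boolean inputs are promoted to a numeric dtype before the arithmetic above, while the actual input dtypes act as scalar types and only upcast further when their kind outranks integer. For example:

np.find_common_type([int], [np.bool_, np.bool_])  # dtype(int), e.g. int64 on Linux/macOS
np.find_common_type([int], [np.float32])          # dtype('float64'): float kind outranks int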


def _nbool_correspond_ft_tf(u, v, w=None):

0 Source : distance.py
with GNU General Public License v3.0
from adityaprakash-bobby

def _nbool_correspond_ft_tf(u, v, w=None):
    if u.dtype == v.dtype == bool and w is None:
        not_u = ~u
        not_v = ~v
        nft = (not_u & v).sum()
        ntf = (u & not_v).sum()
    else:
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        not_u = 1.0 - u
        not_v = 1.0 - v
        if w is not None:
            not_u = w * not_u
            u = w * u
        nft = (not_u * v).sum()
        ntf = (u * not_v).sum()
    return (nft, ntf)


def _validate_cdist_input(XA, XB, mA, mB, n, metric_name, **kwargs):

0 Source : distance.py
with GNU General Public License v3.0
from adityaprakash-bobby

def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) ndarray, bool
        Input 1-D array.
    v : (N,) ndarray, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.dice([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.dice([1, 0, 0], [1, 1, 0])
    0.3333333333333333
    >>> distance.dice([1, 0, 0], [2, 0, 0])
    -0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        ntt = (u & v).sum()
    else:
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        if w is None:
            ntt = (u * v).sum()
        else:
            ntt = (u * v * w).sum()
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))


def rogerstanimoto(u, v, w=None):

0 Source : encoders.py
with Apache License 2.0
from aws

    def inverse_transform(self, X):
        """Convert the data back to the original representation.

        In slots where the encoding is that of an unrecognised category, the
        output of the inverse transform is np.nan for float or complex arrays,
        and None otherwise.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.

        Notes
        -----
        Most of the logic is copied from sklearn.preprocessing.OrdinalEncoder.inverse_transform. The difference is in
        handling unknown values.

        """
        check_is_fitted(self, "categories_")
        X = check_array(X, dtype="numeric", force_all_finite="allow-nan" if self.unknown_as_nan else True)

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        # validate shape of passed X
        msg = "Shape of the passed X data is not correct. Expected {0} " "columns, got {1}."
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        found_unknown = {}
        for i in range(n_features):
            labels = X[:, i].astype("int64", copy=False)
            known_mask = np.isfinite(X[:, i]) if self.unknown_as_nan else (labels != self.categories_[i].shape[0])
            labels *= known_mask
            X_tr[:, i] = self.categories_[i][labels]
            if not np.all(known_mask):
                found_unknown[i] = ~known_mask

        # if unknown are found cast to an object array and transform the missing values to None
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, unknown_mask in found_unknown.items():
                X_tr[unknown_mask, idx] = None

        X_tr[:, self.feature_idxs_no_categories_] = None

        return X_tr
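
The dtype-combination step in isolation, with illustrative category arrays:

categories = [np.array([1, 2, 3]), np.array([0.5, 1.5])]
np.find_common_type([c.dtype for c in categories], [])  # dtype('float64')

# String categories are typically held as object arrays, and object wins the promotion:
categories = [np.array(['a', 'b'], dtype=object), np.array([0.5, 1.5])]
np.find_common_type([c.dtype for c in categories], [])  # dtype('O')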


class WOEAsserts(Enum):

0 Source : categorical_encoders.py
with Apache License 2.0
from awslabs

    def inverse_transform(self, X):
        """Convert the back data to the original representation.
        
        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.
        
        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.
        
        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        
        """
        check_is_fitted(self, 'categories_')
        X = check_array(X, accept_sparse='csr')
        
        n_samples, _ = X.shape
        n_features = len(self.categories_)
        if self.drop is None:
            n_transformed_features = sum(len(cats)
                                         for cats in self.categories_)
        else:
            n_transformed_features = sum(len(cats) - 1
                                         for cats in self.categories_)
        
        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))
        
        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)
        j = 0
        found_unknown = {}
        
        for i in range(n_features):
            if self.drop is None:
                cats = self.categories_[i]
            else:
                cats = np.delete(self.categories_[i], self.drop_idx_[i])
            n_categories = len(cats)
        
            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self.drop_idx_[i]]
                j += n_categories
                continue
            sub = X[:, j:j + n_categories]  # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(sub.argmax(axis=1)).flatten()
            X_tr[:, i] = cats[labels]
            if self.handle_unknown == 'ignore':
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    found_unknown[i] = unknown
            # drop will either be None or handle_unknown will be error. If
            # self.drop is not None, then we can safely assume that all of
            # the nulls in each column are the dropped value
            elif self.drop is not None:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    X_tr[dropped, i] = self.categories_[i][self.drop_idx_[i]]
            
            j += n_categories
        
        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)
        
            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None
        
        return X_tr
    
    def get_feature_names(self, input_features=None):

0 Source : categorical_encoders.py
with Apache License 2.0
from awslabs

    def inverse_transform(self, X):
        """Convert the data back to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.
        
        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.
        
        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        
        """
        check_is_fitted(self, 'categories_')
        X = check_array(X, accept_sparse='csr')
        
        n_samples, _ = X.shape
        n_features = len(self.categories_)
        
        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))
        
        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)
        
        for i in range(n_features):
            possible_categories = np.append(self.categories_[i], None)
            labels = X[:, i].astype('int64', copy=False)
            X_tr[:, i] = self.categories_[i][labels]
        
        return X_tr

0 Source : test_numerictypes.py
with Apache License 2.0
from dashanji

    def test_scalar_wins3(self):  # doesn't go up to 'f16' on purpose
        res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
        assert_(res == 'f8')

class TestMultipleFields:

0 Source : dtype.py
with Apache License 2.0
from dashanji

    def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
        # TODO for now only handle SparseDtypes and numpy dtypes => extend
        # with other compatible extension dtypes
        if any(
            isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
            for x in dtypes
        ):
            return None

        fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
        fill_value = fill_values[0]

        # np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all-NA case too.
        if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
            warnings.warn(
                "Concatenating sparse arrays with multiple fill "
                f"values: '{fill_values}'. Picking the first and "
                "converting the rest.",
                PerformanceWarning,
                stacklevel=6,
            )

        np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
        return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)

0 Source : _encoders.py
with Apache License 2.0
from dashanji

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)
        if self.drop_idx_ is None:
            n_transformed_features = sum(len(cats)
                                         for cats in self.categories_)
        else:
            n_transformed_features = sum(
                len(cats) - 1 if to_drop is not None else len(cats)
                for cats, to_drop in zip(self.categories_, self.drop_idx_)
            )

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        j = 0
        found_unknown = {}

        for i in range(n_features):
            if self.drop_idx_ is None or self.drop_idx_[i] is None:
                cats = self.categories_[i]
            else:
                cats = np.delete(self.categories_[i], self.drop_idx_[i])
            n_categories = len(cats)

            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self.drop_idx_[i]]
                j += n_categories
                continue
            sub = X[:, j:j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(sub.argmax(axis=1)).flatten()
            X_tr[:, i] = cats[labels]
            if self.handle_unknown == 'ignore':
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    found_unknown[i] = unknown
            # drop will either be None or handle_unknown will be error. If
            # self.drop_idx_ is not None, then we can safely assume that all of
            # the nulls in each column are the dropped value
            elif self.drop_idx_ is not None:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    X_tr[dropped, i] = self.categories_[i][self.drop_idx_[i]]

            j += n_categories

        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

    def get_feature_names(self, input_features=None):

0 Source : _encoders.py
with Apache License 2.0
from dashanji

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        for i in range(n_features):
            labels = X[:, i].astype('int64', copy=False)
            X_tr[:, i] = self.categories_[i][labels]

        return X_tr

0 Source : dtype.py
with GNU General Public License v3.0
from dnn-security

    def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
        # TODO for now only handle SparseDtypes and numpy dtypes => extend
        # with other compatible extension dtypes
        if any(
            isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
            for x in dtypes
        ):
            return None

        fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
        fill_value = fill_values[0]

        # np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all NA case too.
        if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
            warnings.warn(
                "Concatenating sparse arrays with multiple fill "
                f"values: '{fill_values}'. Picking the first and "
                "converting the rest.",
                PerformanceWarning,
                stacklevel=6,
            )

        np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
        return SparseDtype(np.find_common_type(np_dtypes, []), fill_value=fill_value)

0 Source : c_parser_wrapper.py
with GNU General Public License v3.0
from dnn-security

def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
    """
    Concatenate chunks of data read with low_memory=True.

    The tricky part is handling Categoricals, where different chunks
    may have different inferred categories.
    """
    names = list(chunks[0].keys())
    warning_columns = []

    result = {}
    for name in names:
        arrs = [chunk.pop(name) for chunk in chunks]
        # Check each arr for consistent types.
        dtypes = {a.dtype for a in arrs}
        # TODO: shouldn't we exclude all EA dtypes here?
        numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
        if len(numpy_dtypes) > 1:
            # error: Argument 1 to "find_common_type" has incompatible type
            # "Set[Any]"; expected "Sequence[Union[dtype[Any], None, type,
            # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
            # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]]"
            common_type = np.find_common_type(
                numpy_dtypes,  # type: ignore[arg-type]
                [],
            )
            # error: Non-overlapping equality check (left operand type: "dtype[Any]",
            # right operand type: "Type[object]")
            if common_type == object:  # type: ignore[comparison-overlap]
                warning_columns.append(str(name))

        dtype = dtypes.pop()
        if is_categorical_dtype(dtype):
            result[name] = union_categoricals(arrs, sort_categories=False)
        else:
            if isinstance(dtype, ExtensionDtype):
                # TODO: concat_compat?
                array_type = dtype.construct_array_type()
                # error: Argument 1 to "_concat_same_type" of "ExtensionArray"
                # has incompatible type "List[Union[ExtensionArray, ndarray]]";
                # expected "Sequence[ExtensionArray]"
                result[name] = array_type._concat_same_type(
                    arrs  # type: ignore[arg-type]
                )
            else:
                result[name] = np.concatenate(arrs)

    if warning_columns:
        warning_names = ",".join(warning_columns)
        warning_message = " ".join(
            [
                f"Columns ({warning_names}) have mixed types."
                f"Specify dtype option on import or set low_memory=False."
            ]
        )
        warnings.warn(warning_message, DtypeWarning, stacklevel=8)
    return result


def ensure_dtype_objs(dtype):

0 Source : _encoders.py
with GNU General Public License v3.0
from gustavowillam

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)
        if self.drop is None:
            n_transformed_features = sum(len(cats)
                                         for cats in self.categories_)
        else:
            n_transformed_features = sum(len(cats) - 1
                                         for cats in self.categories_)

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        j = 0
        found_unknown = {}

        for i in range(n_features):
            if self.drop is None:
                cats = self.categories_[i]
            else:
                cats = np.delete(self.categories_[i], self.drop_idx_[i])
            n_categories = len(cats)

            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self.drop_idx_[i]]
                j += n_categories
                continue
            sub = X[:, j:j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(_argmax(sub, axis=1)).flatten()
            X_tr[:, i] = cats[labels]
            if self.handle_unknown == 'ignore':
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    found_unknown[i] = unknown
            # drop will either be None or handle_unknown will be error. If
            # self.drop is not None, then we can safely assume that all of
            # the nulls in each column are the dropped value
            elif self.drop is not None:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    X_tr[dropped, i] = self.categories_[i][self.drop_idx_[i]]

            j += n_categories

        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

    def get_feature_names(self, input_features=None):

0 Source : _encoders.py
with GNU General Public License v3.0
from gustavowillam

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape \
                (n_samples, n_encoded_features)
            The transformed data.

        Returns
        -------
        X_tr : ndarray of shape (n_samples, n_features)
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)
        if self.drop_idx_ is None:
            n_transformed_features = sum(len(cats)
                                         for cats in self.categories_)
        else:
            n_transformed_features = sum(
                len(cats) - 1 if to_drop is not None else len(cats)
                for cats, to_drop in zip(self.categories_, self.drop_idx_)
            )

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        j = 0
        found_unknown = {}

        for i in range(n_features):
            if self.drop_idx_ is None or self.drop_idx_[i] is None:
                cats = self.categories_[i]
            else:
                cats = np.delete(self.categories_[i], self.drop_idx_[i])
            n_categories = len(cats)

            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self.drop_idx_[i]]
                j += n_categories
                continue
            sub = X[:, j:j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(sub.argmax(axis=1)).flatten()
            X_tr[:, i] = cats[labels]
            if self.handle_unknown == 'ignore':
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    found_unknown[i] = unknown
            else:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    if self.drop_idx_ is None:
                        all_zero_samples = np.flatnonzero(dropped)
                        raise ValueError(
                            f"Samples {all_zero_samples} can not be inverted "
                            "when drop=None and handle_unknown='error' "
                            "because they contain all zeros")
                    # we can safely assume that all of the nulls in each column
                    # are the dropped value
                    X_tr[dropped, i] = self.categories_[i][
                        self.drop_idx_[i]
                    ]

            j += n_categories

        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

    def get_feature_names(self, input_features=None):

0 Source : _encoders.py
with GNU General Public License v3.0
from gustavowillam

    def inverse_transform(self, X):
        """
        Convert the data back to the original representation.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The transformed data.

        Returns
        -------
        X_tr : ndarray of shape (n_samples, n_features)
            Inverse transformed array.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        found_unknown = {}

        for i in range(n_features):
            labels = X[:, i].astype('int64', copy=False)
            if self.handle_unknown == 'use_encoded_value':
                unknown_labels = labels == self.unknown_value
                X_tr[:, i] = self.categories_[i][np.where(
                    unknown_labels, 0, labels)]
                found_unknown[i] = unknown_labels
            else:
                X_tr[:, i] = self.categories_[i][labels]

        # insert None values for unknown values
        if found_unknown:
            X_tr = X_tr.astype(object, copy=False)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

0 Source : _encoders.py
with GNU General Public License v3.0
from HHHHhgqcdxhg

    def inverse_transform(self, X):
        """Convert the back data to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.

        """
        # if self._legacy_mode:
        #     raise ValueError("only supported for categorical features")

        check_is_fitted(self, 'categories_')
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)
        n_transformed_features = sum([len(cats) for cats in self.categories_])

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        j = 0
        found_unknown = {}

        for i in range(n_features):
            n_categories = len(self.categories_[i])
            sub = X[:, j:j + n_categories]

            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(_argmax(sub, axis=1)).flatten()
            X_tr[:, i] = self.categories_[i][labels]

            if self.handle_unknown == 'ignore':
                # ignored unknown categories: we have a row of all zeros
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                if unknown.any():
                    found_unknown[i] = unknown

            j += n_categories

        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

    def get_feature_names(self, input_features=None):

0 Source : _encoders.py
with GNU General Public License v3.0
from HHHHhgqcdxhg

    def inverse_transform(self, X):
        """Convert the data back to the original representation.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.

        """
        check_is_fitted(self, 'categories_')
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        for i in range(n_features):
            labels = X[:, i].astype('int64')
            X_tr[:, i] = self.categories_[i][labels]

        return X_tr

0 Source : core.py
with Apache License 2.0
from jcmgray

def _find_common_dtype(array_types, scalar_types):
    return np.find_common_type(array_types, scalar_types).name
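
Same promotion rules as the examples above, but returning the dtype's string name:

import numpy as np

_find_common_dtype([np.float32, np.complex64], [])  # 'complex64'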


def find_common_dtype(*xs):

0 Source : multiply.py
with BSD 2-Clause "Simplified" License
from jonathf

def multiply(
    x1: PolyLike,
    x2: PolyLike,
    out: Optional[ndpoly] = None,
    where: numpy.typing.ArrayLike = True,
    **kwargs: Any,
) -> ndpoly:
    """
    Multiply arguments element-wise.

    Args:
        x1, x2:
            Input arrays to be multiplied. If ``x1.shape != x2.shape``, they
            must be broadcastable to a common shape (which becomes the shape of
            the output).
        out:
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or
            `None`, a freshly-allocated array is returned. A tuple (possible
            only as a keyword argument) must have length equal to the number of
            outputs.
        where:
            This condition is broadcast over the input. At locations where the
            condition is True, the `out` array will be set to the ufunc result.
            Elsewhere, the `out` array will retain its original value. Note
            that if an uninitialized `out` array is created via the default
            ``out=None``, locations within it where the condition is False will
            remain uninitialized.
        kwargs:
            Keyword args passed to numpy.ufunc.

    Returns:
        The product of `x1` and `x2`, element-wise. This is a scalar if
        both `x1` and `x2` are scalars.

    Examples:
        >>> poly = numpy.arange(9.0).reshape((3, 3))
        >>> q0q1q2 = numpoly.variable(3)
        >>> numpoly.multiply(poly, q0q1q2)
        polynomial([[0.0, q1, 2.0*q2],
                    [3.0*q0, 4.0*q1, 5.0*q2],
                    [6.0*q0, 7.0*q1, 8.0*q2]])

    """
    x1, x2 = numpoly.align_indeterminants(x1, x2)
    dtype = numpy.find_common_type([x1.dtype, x2.dtype], [])
    shape = numpy.broadcast_shapes(x1.shape, x2.shape)

    where = numpy.asarray(where)
    exponents = numpy.unique(
        numpy.tile(x1.exponents, (len(x2.exponents), 1)) +
        numpy.repeat(x2.exponents, len(x1.exponents), 0), axis=0)
    out_ = numpoly.ndpoly(
        exponents=exponents,
        shape=shape,
        names=x1.indeterminants,
        dtype=dtype,
    ) if out is None else out

    seen = set()
    for expon1, coeff1 in zip(x1.exponents, x1.coefficients):
        for expon2, coeff2 in zip(x2.exponents, x2.coefficients):
            key = (expon1+expon2+x1.KEY_OFFSET).ravel()
            key = key.view(f"U{len(expon1)}").item()
            if key in seen:
                out_.values[key] += numpy.multiply(
                    coeff1, coeff2, where=where, **kwargs)
            else:
                numpy.multiply(coeff1, coeff2, out=out_.values[key],
                               where=where, **kwargs)
            seen.add(key)

    if out is None:
        out_ = numpoly.clean_attributes(out_)
    return out_

0 Source : test_numerictypes.py
with MIT License
from ktraunmueller

    def test_scalar_loses1(self):
        res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
        assert_(res == 'f4')
    def test_scalar_loses2(self):

0 Source : test_numerictypes.py
with MIT License
from ktraunmueller

    def test_scalar_loses2(self):
        res = np.find_common_type(['f4', 'f4'], ['i8'])
        assert_(res == 'f4')
    def test_scalar_wins(self):

0 Source : test_numerictypes.py
with MIT License
from ktraunmueller

    def test_scalar_wins(self):
        res = np.find_common_type(['f4', 'f4', 'i2'], ['c8'])
        assert_(res == 'c8')
    def test_scalar_wins2(self):

0 Source : test_numerictypes.py
with MIT License
from ktraunmueller

    def test_scalar_wins2(self):
        res = np.find_common_type(['u4', 'i4', 'i4'], ['f4'])
        assert_(res == 'f8')
    def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose

0 Source : test_numerictypes.py
with MIT License
from ktraunmueller

    def test_scalar_wins3(self): # doesn't go up to 'f16' on purpose
        res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
        assert_(res == 'f8')

class TestMultipleFields(TestCase):

0 Source : ops.py
with MIT License
from ktraunmueller

def _arith_method_SERIES(op, name, str_rep=None, fill_zeros=None,
                         default_axis=None, **eval_kwargs):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    def na_op(x, y):
        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          raise_on_error=True, **eval_kwargs)
        except TypeError:
            if isinstance(y, (pa.Array, pd.Series)):
                dtype = np.find_common_type([x.dtype, y.dtype], [])
                result = np.empty(x.size, dtype=dtype)
                mask = notnull(x) & notnull(y)
                result[mask] = op(x[mask], y[mask])
            else:
                result = pa.empty(len(x), dtype=x.dtype)
                mask = notnull(x)
                result[mask] = op(x[mask], y)

            result, changed = com._maybe_upcast_putmask(result, -mask, pa.NA)

        result = com._fill_zeros(result, y, fill_zeros)
        return result

    def wrapper(left, right, name=name):

        if isinstance(right, pd.DataFrame):
            return NotImplemented
        time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name)

        if time_converted is None:
            lvalues, rvalues = left, right
            dtype = None
            wrap_results = lambda x: x
        elif time_converted == NotImplemented:
            return NotImplemented
        else:
            lvalues = time_converted.lvalues
            rvalues = time_converted.rvalues
            dtype = time_converted.dtype
            wrap_results = time_converted.wrap_results

        if isinstance(rvalues, pd.Series):
            join_idx, lidx, ridx = left.index.join(rvalues.index, how='outer',
                                                   return_indexers=True)
            rindex = rvalues.index
            name = _maybe_match_name(left, rvalues)
            lvalues = getattr(lvalues, 'values', lvalues)
            rvalues = getattr(rvalues, 'values', rvalues)
            if left.index.equals(rindex):
                index = left.index
            else:
                index = join_idx

                if lidx is not None:
                    lvalues = com.take_1d(lvalues, lidx)

                if ridx is not None:
                    rvalues = com.take_1d(rvalues, ridx)

            arr = na_op(lvalues, rvalues)

            return left._constructor(wrap_results(arr), index=index,
                                     name=name, dtype=dtype)
        else:
            # scalars
            if hasattr(lvalues, 'values'):
                lvalues = lvalues.values
            return left._constructor(wrap_results(na_op(lvalues, rvalues)),
                                     index=left.index, name=left.name,
                                     dtype=dtype)
    return wrapper


def _comp_method_SERIES(op, name, str_rep=None, masker=False):

0 Source : blockarray.py
with BSD 3-Clause "New" or "Revised" License
from lanl

    def array(
        cls, alst: List[Union[np.ndarray, JaxArray]], dtype: Optional[np.dtype] = None
    ) -> BlockArray:
        """Construct a :class:`.BlockArray` from a list or tuple of existing array-like.

        Args:
            alst: Initializers for array components.
                Can be :class:`numpy.ndarray` or
                :class:`jax.interpreters.xla.DeviceArray`
            dtype: Data type of array. If none, dtype is derived from
                dtype of initializers

        Returns:
            :class:`.BlockArray` initialized from `alst` tuple
        """

        if isinstance(alst, (tuple, list)) is False:
            raise TypeError("Input to `array` must be a list or tuple of existing arrays")

        if dtype is None:
            present_types = jax.tree_flatten(jax.tree_map(lambda x: x.dtype, alst))[0]
            dtype = np.find_common_type(present_types, [])

        # alst can be a list/tuple of arrays, or a list/tuple containing list/tuples of arrays
        # consider alst to be a tree where leaves are arrays (possibly abstract arrays)
        # use tree_map to find the shape of each leaf
        # `shapes` will be a tuple of ints and tuples containing ints (possibly nested further)

        # ensure any scalar leaves are converted to (1,) arrays
        def shape_atleast_1d(x):
            return x.shape if x.shape != () else (1,)

        shapes = tuple(
            jax.tree_map(shape_atleast_1d, alst, is_leaf=lambda x: not isinstance(x, (list, tuple)))
        )

        _aval = _AbstractBlockArray(shapes, dtype)
        data_ravel = jnp.hstack(jax.tree_map(lambda x: x.ravel(), jax.tree_flatten(alst)[0]))
        return cls(_aval, data_ravel)

    @classmethod

0 Source : distance.py
with MIT License
from osamhack2021

def _nbool_correspond_ft_tf(u, v, w=None):
    if u.dtype == v.dtype == bool and w is None:
        not_u = ~u
        not_v = ~v
        nft = (not_u & v).sum()
        ntf = (u & not_v).sum()
    else:
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        not_u = 1.0 - u
        not_v = 1.0 - v
        if w is not None:
            not_u = w * not_u
            u = w * u
        nft = (not_u * v).sum()
        ntf = (u * not_v).sum()
    return (nft, ntf)


def _validate_cdist_input(XA, XB, mA, mB, n, metric_info, **kwargs):

0 Source : distance.py
with MIT License
from osamhack2021

def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input 1-D array.
    v : (N,) array_like, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.dice([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.dice([1, 0, 0], [1, 1, 0])
    0.3333333333333333
    >>> distance.dice([1, 0, 0], [2, 0, 0])
    -0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        ntt = (u & v).sum()
    else:
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        if w is None:
            ntt = (u * v).sum()
        else:
            ntt = (u * v * w).sum()
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))


def rogerstanimoto(u, v, w=None):

0 Source : _encoders.py
with MIT License
from PacktPublishing

    def inverse_transform(self, X):
        """Convert the back data to the original representation.

        In case unknown categories are encountered (all zeros in the
        one-hot encoding), ``None`` is used to represent this category.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.

        """
        # if self._legacy_mode:
        #     raise ValueError("only supported for categorical features")

        check_is_fitted(self, 'categories_')
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)
        if self.drop is None:
            n_transformed_features = sum(len(cats)
                                         for cats in self.categories_)
        else:
            n_transformed_features = sum(len(cats) - 1
                                         for cats in self.categories_)

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_transformed_features:
            raise ValueError(msg.format(n_transformed_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        j = 0
        found_unknown = {}

        for i in range(n_features):
            if self.drop is None:
                cats = self.categories_[i]
            else:
                cats = np.delete(self.categories_[i], self.drop_idx_[i])
            n_categories = len(cats)

            # Only happens if there was a column with a unique
            # category. In this case we just fill the column with this
            # unique category value.
            if n_categories == 0:
                X_tr[:, i] = self.categories_[i][self.drop_idx_[i]]
                j += n_categories
                continue
            sub = X[:, j:j + n_categories]
            # for sparse X argmax returns 2D matrix, ensure 1D array
            labels = np.asarray(_argmax(sub, axis=1)).flatten()
            X_tr[:, i] = cats[labels]
            if self.handle_unknown == 'ignore':
                unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
                # ignored unknown categories: we have a row of all zero
                if unknown.any():
                    found_unknown[i] = unknown
            # drop will either be None or handle_unknown will be error. If
            # self.drop is not None, then we can safely assume that all of
            # the nulls in each column are the dropped value
            elif self.drop is not None:
                dropped = np.asarray(sub.sum(axis=1) == 0).flatten()
                if dropped.any():
                    X_tr[dropped, i] = self.categories_[i][self.drop_idx_[i]]

            j += n_categories

        # if ignored are found: potentially need to upcast result to
        # insert None values
        if found_unknown:
            if X_tr.dtype != object:
                X_tr = X_tr.astype(object)

            for idx, mask in found_unknown.items():
                X_tr[mask, idx] = None

        return X_tr

    def get_feature_names(self, input_features=None):

0 Source : _encoders.py
with MIT License
from PacktPublishing

    def inverse_transform(self, X):
        """Convert the data back to the original representation.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
            The transformed data.

        Returns
        -------
        X_tr : array-like, shape [n_samples, n_features]
            Inverse transformed array.

        """
        check_is_fitted(self, 'categories_')
        X = check_array(X, accept_sparse='csr')

        n_samples, _ = X.shape
        n_features = len(self.categories_)

        # validate shape of passed X
        msg = ("Shape of the passed X data is not correct. Expected {0} "
               "columns, got {1}.")
        if X.shape[1] != n_features:
            raise ValueError(msg.format(n_features, X.shape[1]))

        # create resulting array of appropriate dtype
        dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
        X_tr = np.empty((n_samples, n_features), dtype=dt)

        for i in range(n_features):
            labels = X[:, i].astype('int64', copy=False)
            X_tr[:, i] = self.categories_[i][labels]

        return X_tr

    def _more_tags(self):

0 Source : test_numerictypes.py
with Apache License 2.0
from pierreant

    def test_scalar_wins3(self):  # doesn't go up to 'f16' on purpose
        res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
        assert_(res == 'f8')

class TestMultipleFields(TestCase):

0 Source : np_conserved.py
with GNU General Public License v3.0
from tenpy

    def astype(self, dtype, copy=True):
        """Return copy with new dtype, upcasting all blocks in ``_data``.

        Parameters
        ----------
        dtype : convertible to a np.dtype
            The new data type.
            If None, deduce the new dtype as common type of ``self._data``.
        copy : bool
            Whether to make a copy of the blocks even if the type didn't change.

        Returns
        -------
        copy : :class:`Array`
            Deep copy of self with new dtype.
        """
        cp = self.copy(deep=False)  # manual deep copy: don't copy every block twice
        cp._qdata = cp._qdata.copy()
        if dtype is None:
            dtype = np.find_common_type([d.dtype for d in self._data], [])
        cp.dtype = dtype = np.dtype(dtype)
        if copy or dtype != self.dtype:
            cp._data = [d.astype(dtype, copy=copy) for d in self._data]
        return cp

    def ipurge_zeros(self, cutoff=QCUTOFF, norm_order=None):

0 Source : np_conserved.py
with GNU General Public License v3.0
from tenpy

def grid_outer(grid, grid_legs, qtotal=None, grid_labels=None):
    """Given an np.array of npc.Arrays, return the corresponding higher-dimensional Array.

    Parameters
    ----------
    grid : array_like of {:class:`Array` | None}
        The grid gives the first part of the axes of the resulting array.
        Entries have to have all the same shape and charge-data, giving the remaining axes.
        ``None`` entries in the grid are interpreted as zeros.
    grid_legs : list of :class:`LegCharge`
        One LegCharge for each dimension of the grid along the grid.
    qtotal : charge
        The total charge of the Array.
        By default (``None``), derive it out from a non-trivial entry of the grid.
    grid_labels : list of {str | None}
        One label associated to each of the grid axes. ``None`` for non-named labels.

    Returns
    -------
    res : :class:`Array`
        An Array with shape ``grid.shape + nontrivial_grid_entry.shape``.
        Constructed such that ``res[idx] == grid[idx]`` for any index ``idx`` of the `grid`
        the `grid` entry is not trivial (``None``).

    See also
    --------
    detect_grid_outer_legcharge : can calculate one missing :class:`LegCharge` of the grid.


    Examples
    --------
    A typical use-case for this function is the generation of an MPO.
    Say you have npc.Arrays ``Splus, Sminus, Sz, Id``, each with legs ``[phys.conj(), phys]``.
    Further, you have to define appropriate LegCharges `l_left` and `l_right`.
    Then one 'matrix' of the MPO for a nearest neighbour Heisenberg Hamiltonian could look like:

    >>> s = tenpy.networks.site.SpinHalfSite(conserve='Sz')
    >>> Id, Splus, Sminus, Sz = s.Id, s.Sp, s.Sm, s.Sz
    >>> J = 1.
    >>> leg_wR = npc.LegCharge.from_qflat(s.leg.chinfo,
    ...                                   [op.qtotal for op in [Id, Splus, Sminus, Sz, Id]],
    ...                                   qconj=-1)
    >>> W_mpo = npc.grid_outer([[Id, Splus, Sminus, Sz, None],
    ...                         [None, None, None, None, J*0.5*Sminus],
    ...                         [None, None, None, None, J*0.5*Splus],
    ...                         [None, None, None, None, J*Sz],
    ...                         [None, None, None, None, Id]],
    ...                        grid_legs=[leg_wR.conj(), leg_wR],
    ...                        grid_labels=['wL', 'wR'])
    >>> W_mpo.shape
    (5, 5, 2, 2)
    >>> W_mpo.get_leg_labels()
    ['wL', 'wR', 'p', 'p*']
    """
    grid_shape, entries = _nontrivial_grid_entries(grid)
    if len(grid_shape) != len(grid_legs):
        raise ValueError("wrong number of grid_legs")
    if grid_shape != tuple([l.ind_len for l in grid_legs]):
        raise ValueError("grid shape incompatible with grid_legs")
    idx, entry = entries[0]  # first non-trivial entry
    chinfo = entry.chinfo
    dtype = np.find_common_type([e.dtype for _, e in entries], [])
    legs = list(grid_legs) + entry.legs
    labels = entry._labels[:]
    if grid_labels is None:
        grid_labels = [None] * len(grid_shape)
    labels = grid_labels + labels
    if qtotal is None:
        # figure out qtotal from first non-zero entry
        grid_charges = [l.get_charge(l.get_qindex(i)[0]) for i, l in zip(idx, grid_legs)]
        qtotal = chinfo.make_valid(np.sum(grid_charges + [entry.qtotal], axis=0))
    else:
        qtotal = chinfo.make_valid(qtotal)
    res = Array(legs, dtype, qtotal, labels)
    # main work: iterate over all non-trivial entries to fill `res`.
    for idx, entry in entries:
        res[idx] = entry  # insert the values with Array.__setitem__ partial slicing.
        if labels is not None and entry._labels != labels:
            labels = None
    res.test_sanity()
    return res


def detect_grid_outer_legcharge(grid, grid_legs, qtotal=None, qconj=1, bunch=False):
