Example no. 1
    def test_astype_object(self):
        tdi = pd.TimedeltaIndex(['1 Day', '3 Hours'])
        arr = TimedeltaArray(tdi)
        asobj = arr.astype('O')
        assert isinstance(asobj, np.ndarray)
        assert asobj.dtype == 'O'
        assert list(asobj) == list(tdi)
Example no. 2
    def test_to_pytimedelta(self, timedelta_index):
        tdi = timedelta_index
        arr = TimedeltaArray(tdi)

        expected = tdi.to_pytimedelta()
        result = arr.to_pytimedelta()

        tm.assert_numpy_array_equal(result, expected)
Example no. 3
    def test_total_seconds(self, timedelta_index):
        tdi = timedelta_index
        arr = TimedeltaArray(tdi)

        expected = tdi.total_seconds()
        result = arr.total_seconds()

        tm.assert_numpy_array_equal(result, expected.values)
Example no. 4
    def test_min_max_empty(self, skipna):
        arr = TimedeltaArray._from_sequence([])
        result = arr.min(skipna=skipna)
        assert result is pd.NaT

        result = arr.max(skipna=skipna)
        assert result is pd.NaT
Example no. 5
    def test_astype_int(self, dtype):
        arr = TimedeltaArray._from_sequence([pd.Timedelta('1H'),
                                             pd.Timedelta('2H')])
        result = arr.astype(dtype)

        if np.dtype(dtype).kind == 'u':
            expected_dtype = np.dtype('uint64')
        else:
            expected_dtype = np.dtype('int64')
        expected = arr.astype(expected_dtype)

        assert result.dtype == expected_dtype
        tm.assert_numpy_array_equal(result, expected)
Example no. 6
    def test_min_max(self):
        arr = TimedeltaArray._from_sequence([
            '3H', '3H', 'NaT', '2H', '5H', '4H',
        ])

        result = arr.min()
        expected = pd.Timedelta('2H')
        assert result == expected

        result = arr.max()
        expected = pd.Timedelta('5H')
        assert result == expected

        result = arr.min(skipna=False)
        assert result is pd.NaT

        result = arr.max(skipna=False)
        assert result is pd.NaT
Example no. 7
    def test_take_fill_valid(self, timedelta_index):
        tdi = timedelta_index
        arr = TimedeltaArray(tdi)

        td1 = pd.Timedelta(days=1)
        result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
        assert result[0] == td1

        now = pd.Timestamp.now()
        with pytest.raises(ValueError):
            # fill_value Timestamp invalid
            arr.take([0, 1], allow_fill=True, fill_value=now)

        with pytest.raises(ValueError):
            # fill_value Period invalid
            arr.take([0, 1], allow_fill=True, fill_value=now.to_period('D'))
Example no. 8
    def test_min_max(self):
        arr = TimedeltaArray._from_sequence([
            '3H',
            '3H',
            'NaT',
            '2H',
            '5H',
            '4H',
        ])

        result = arr.min()
        expected = pd.Timedelta('2H')
        assert result == expected

        result = arr.max()
        expected = pd.Timedelta('5H')
        assert result == expected

        result = arr.min(skipna=False)
        assert result is pd.NaT

        result = arr.max(skipna=False)
        assert result is pd.NaT
Example no. 9
def maybe_upcast_datetimelike_array(obj: ArrayLike) -> ArrayLike:
    """
    If we have an ndarray that is either datetime64 or timedelta64, wrap in EA.

    Parameters
    ----------
    obj : ndarray or ExtensionArray

    Returns
    -------
    ndarray or ExtensionArray
    """
    if isinstance(obj, np.ndarray):
        if obj.dtype.kind == "m":
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(obj)
        if obj.dtype.kind == "M":
            from pandas.core.arrays import DatetimeArray

            return DatetimeArray._from_sequence(obj)

    return obj
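
A short usage sketch of the helper above, assuming it is in scope together with NumPy and pandas:

import numpy as np

# timedelta64 ndarray -> wrapped in a TimedeltaArray
td_values = np.array([0, 3_600_000_000_000], dtype="m8[ns]")
print(type(maybe_upcast_datetimelike_array(td_values)).__name__)  # TimedeltaArray

# datetime64 ndarray -> wrapped in a DatetimeArray
dt_values = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
print(type(maybe_upcast_datetimelike_array(dt_values)).__name__)  # DatetimeArray

# any other input is returned unchanged
ints = np.array([1, 2, 3], dtype="i8")
assert maybe_upcast_datetimelike_array(ints) is ints
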
Example no. 10
    def _maybe_mask_result(self, result, mask, other, op_name: str):
        """
        Parameters
        ----------
        result : array-like
        mask : array-like bool
        other : scalar or array-like
        op_name : str
        """
        # if we have a float operand we are by-definition
        # a float result
        # or our op is a divide
        if (is_float_dtype(other)
                or is_float(other)) or (op_name in ["rtruediv", "truediv"]):
            result[mask] = np.nan
            return result

        if result.dtype == "timedelta64[ns]":
            from pandas.core.arrays import TimedeltaArray

            result[mask] = iNaT
            return TimedeltaArray._simple_new(result)

        return type(self)(result, mask, copy=False)
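
A simplified, standalone sketch (NumPy only, not the pandas internals) of the masking rule the method above applies: float results are masked with NaN, timedelta64[ns] results with NaT.

import numpy as np

def mask_result_sketch(result, mask, float_result=False):
    # float operand or a truediv op -> float result, masked with NaN
    if float_result:
        out = result.astype("float64")
        out[mask] = np.nan
        return out
    # timedelta64[ns] result -> masked positions become NaT
    if result.dtype == "timedelta64[ns]":
        out = result.copy()
        out[mask] = np.timedelta64("NaT", "ns")
        return out
    # anything else is re-wrapped together with its mask in the real method
    return result

res = np.array([10, 20, 30], dtype="timedelta64[ns]")
print(mask_result_sketch(res, np.array([False, True, False])))
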
Example no. 11
def array(
        data,  # type: Sequence[object]
        dtype=None,  # type: Optional[Union[str, np.dtype, ExtensionDtype]]
        copy=True,  # type: bool
):
    # type: (...) -> ExtensionArray
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Or use the dedicated constructor for the array you're expecting, and
    wrap that in a PandasArray

    >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Examples
    --------
    If a dtype is not specified, `data` is passed through to
    :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.

    >>> pd.array([1, 2])
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int64

    Or the NumPy dtype can be specified

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    Because omitting the `dtype` passes the data through to NumPy,
    a mixture of valid integers and NA will return a floating-point
    NumPy array.

    >>> pd.array([1, 2, np.nan])
    <PandasArray>
    [1.0,  2.0, nan]
    Length: 3, dtype: float64

    To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
    the dtype:

    >>> pd.array([1, 2, np.nan], dtype='Int64')
    <IntegerArray>
    [1, 2, NaN]
    Length: 3, dtype: Int64

    Pandas will infer an ExtensionArray for some types of data:

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array,
        ExtensionArray,
        IntervalArray,
        PandasArray,
        DatetimeArray,
        TimedeltaArray,
    )
    from pandas.core.internals.arrays import extract_array

    if lib.is_scalar(data):
        msg = ("Cannot pass scalar '{}' to 'pandas.array'.")
        raise ValueError(msg.format(data))

    data = extract_array(data, extract_numpy=True)

    if dtype is None and isinstance(data, ExtensionArray):
        dtype = data.dtype

    # this returns None for not-found dtypes.
    if isinstance(dtype, compat.string_types):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=False)
        if inferred_dtype == 'period':
            try:
                return period_array(data, copy=copy)
            except tslibs.IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == 'interval':
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass

        elif inferred_dtype.startswith('datetime'):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass

        elif inferred_dtype.startswith('timedelta'):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        # TODO(BooleanArray): handle this type

    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
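
A small usage sketch of the inference-with-fallback path above, assuming a pandas release contemporary with this snippet (the exact fallback container has varied across versions):

import pandas as pd

# Homogeneous Period scalars are inferred as a PeriodArray ...
same_freq = pd.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")])
print(type(same_freq).__name__)   # PeriodArray

# ... while mixed frequencies hit the IncompatibleFrequency branch, which is
# swallowed so that an object-dtype array is returned instead of raising.
mixed_freq = pd.array([pd.Period("2000", freq="D"), pd.Period("2000", freq="M")])
print(mixed_freq.dtype)           # object
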
Example no. 12
    def test_from_sequence_dtype(self):
        msg = r"Only timedelta64\[ns\] dtype is valid"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence([], dtype=object)
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray([], dtype=object)
Example no. 13
    def test_from_sequence_dtype(self):
        msg = r"Only timedelta64\[ns\] dtype is valid"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence([], dtype=object)
Example no. 14
    def wrapper(left, right):
        if isinstance(right, ABCDataFrame):
            return NotImplemented

        left, right = _align_method_SERIES(left, right)
        res_name = get_op_result_name(left, right)
        right = maybe_upcast_for_op(right, left.shape)

        if is_categorical_dtype(left):
            raise TypeError("{typ} cannot perform the operation "
                            "{op}".format(typ=type(left).__name__, op=str_rep))

        elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):
            from pandas.core.arrays import DatetimeArray

            result = dispatch_to_extension_op(op, DatetimeArray(left), right)
            return construct_result(left,
                                    result,
                                    index=left.index,
                                    name=res_name)

        elif is_extension_array_dtype(left) or (is_extension_array_dtype(right)
                                                and not is_scalar(right)):
            # GH#22378 disallow scalar to exclude e.g. "category", "Int64"
            result = dispatch_to_extension_op(op, left, right)
            return construct_result(left,
                                    result,
                                    index=left.index,
                                    name=res_name)

        elif is_timedelta64_dtype(left):
            from pandas.core.arrays import TimedeltaArray

            result = dispatch_to_extension_op(op, TimedeltaArray(left), right)
            return construct_result(left,
                                    result,
                                    index=left.index,
                                    name=res_name)

        elif is_timedelta64_dtype(right):
            # We should only get here with non-scalar values for right
            #  upcast by maybe_upcast_for_op
            assert not isinstance(right, (np.timedelta64, np.ndarray))

            result = op(left._values, right)

            # We do not pass dtype to ensure that the Series constructor
            #  does inference in the case where `result` has object-dtype.
            return construct_result(left,
                                    result,
                                    index=left.index,
                                    name=res_name)

        elif isinstance(right, (ABCDatetimeArray, ABCDatetimeIndex)):
            result = op(left._values, right)
            return construct_result(left,
                                    result,
                                    index=left.index,
                                    name=res_name)

        lvalues = left.values
        rvalues = right
        if isinstance(rvalues, (ABCSeries, ABCIndexClass)):
            rvalues = rvalues._values

        with np.errstate(all="ignore"):
            result = na_op(lvalues, rvalues)
        return construct_result(left,
                                result,
                                index=left.index,
                                name=res_name,
                                dtype=None)
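
A small public-API example of the timedelta64 branch above: the Series' values are wrapped in a TimedeltaArray and the op is dispatched to the extension array.

import pandas as pd

ser = pd.Series(pd.timedelta_range("1 Day", periods=3))

# hits the is_timedelta64_dtype(left) branch of wrapper()
result = ser + pd.Timedelta("12H")
print(result)
# 0   1 days 12:00:00
# 1   2 days 12:00:00
# 2   3 days 12:00:00
# dtype: timedelta64[ns]
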
Example no. 15
    def test_from_sequence_dtype(self):
        msg = "dtype .*object.* cannot be converted to timedelta64"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence([], dtype=object)
Example no. 16
    def test_from_sequence_dtype(self):
        msg = "dtype .*object.* cannot be converted to timedelta64"
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence([], dtype=object)
Example no. 17
    def test_non_nano(self, unit, reso):
        arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
        tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)

        assert tda.dtype == arr.dtype
        assert tda[0]._reso == reso
Example no. 18
def array(data: Sequence[object],
          dtype: Optional[Union[str, np.dtype, ExtensionDtype]] = None,
          copy: bool = True,
          ) -> ABCExtensionArray:
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Or use the dedicated constructor for the array you're expecting, and
    wrap that in a PandasArray

    >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``PandasArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['01:00:00', '02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, `data` is passed through to
    :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.

    >>> pd.array([1, 2])
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int64

    Or the NumPy dtype can be specified

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    Because omitting the `dtype` passes the data through to NumPy,
    a mixture of valid integers and NA will return a floating-point
    NumPy array.

    >>> pd.array([1, 2, np.nan])
    <PandasArray>
    [1.0,  2.0, nan]
    Length: 3, dtype: float64

    To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
    the dtype:

    >>> pd.array([1, 2, np.nan], dtype='Int64')
    <IntegerArray>
    [1, 2, NaN]
    Length: 3, dtype: Int64

    Pandas will infer an ExtensionArray for some types of data:

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array, ExtensionArray, IntervalArray, PandasArray,
        DatetimeArray,
        TimedeltaArray,
    )
    from pandas.core.internals.arrays import extract_array

    if lib.is_scalar(data):
        msg = (
            "Cannot pass scalar '{}' to 'pandas.array'."
        )
        raise ValueError(msg.format(data))

    data = extract_array(data, extract_numpy=True)

    if dtype is None and isinstance(data, ExtensionArray):
        dtype = data.dtype

    # this returns None for not-found dtypes.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=False)
        if inferred_dtype == 'period':
            try:
                return period_array(data, copy=copy)
            except tslibs.IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == 'interval':
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass

        elif inferred_dtype.startswith('datetime'):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass

        elif inferred_dtype.startswith('timedelta'):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        # TODO(BooleanArray): handle this type

    # Pandas overrides NumPy for
    #   1. datetime64[ns]
    #   2. timedelta64[ns]
    # so that a DatetimeArray is returned.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
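
A brief sketch of the datetime64[ns]/timedelta64[ns] override near the end of the function, assuming a pandas release contemporary with this snippet (newer releases infer nullable arrays for some of these inputs):

import numpy as np
import pandas as pd

# The is_timedelta64_ns_dtype check means an explicit m8[ns] request is
# answered with a TimedeltaArray, not a PandasArray.
values = np.array([0, 3_600_000_000_000], dtype="m8[ns]")
print(type(pd.array(values, dtype="timedelta64[ns]")).__name__)  # TimedeltaArray

# Other NumPy dtypes still fall through to PandasArray in this version.
print(type(pd.array(np.array([1.5, 2.5]))).__name__)             # PandasArray
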
Example no. 19
    def test_non_array_raises(self):
        with pytest.raises(ValueError, match="list"):
            TimedeltaArray([1, 2, 3])
Example no. 20
        ),
        # tz-aware stays tz-aware
        (
            DatetimeArray(
                np.array(["2000-01-01T06:00:00", "2000-01-02T06:00:00"],
                         dtype="M8[ns]"),
                dtype=DatetimeTZDtype(tz="US/Central"),
            ),
            np.array([
                pd.Timestamp("2000-01-01", tz="US/Central"),
                pd.Timestamp("2000-01-02", tz="US/Central"),
            ]),
        ),
        # Timedelta
        (
            TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
            np.array([0, 3600000000000], dtype="m8[ns]"),
        ),
    ],
)
def test_to_numpy(array, expected, index_or_series):
    box = index_or_series
    thing = box(array)

    if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
        pytest.skip(f"No index type for {array.dtype}")

    result = thing.to_numpy()
    tm.assert_numpy_array_equal(result, expected)

Example no. 21
    def test_other_type_raises(self):
        with pytest.raises(ValueError, match="dtype bool cannot be converted"):
            TimedeltaArray(np.array([1, 2, 3], dtype="bool"))
Example no. 22
class TestPeriodIndexArithmetic:
    # ---------------------------------------------------------------
    # __add__/__sub__ with PeriodIndex
    # PeriodIndex + other is defined for integers and timedelta-like others
    # PeriodIndex - other is defined for integers, timedelta-like others,
    #   and PeriodIndex (with matching freq)

    def test_parr_add_iadd_parr_raises(self, box_with_array):
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = pd.period_range("1/6/2000", freq="D", periods=5)
        # TODO: parametrize over boxes for other?

        rng = tm.box_expected(rng, box_with_array)
        # An earlier implementation of PeriodIndex addition performed
        # a set operation (union).  This has since been changed to
        # raise a TypeError. See GH#14164 and GH#13077 for historical
        # reference.
        with pytest.raises(TypeError):
            rng + other

        with pytest.raises(TypeError):
            rng += other

    def test_pi_sub_isub_pi(self):
        # GH#20049
        # For historical reference see GH#14164, GH#13077.
        # PeriodIndex subtraction originally performed set difference,
        # then changed to raise TypeError before being implemented in GH#20049
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = pd.period_range("1/6/2000", freq="D", periods=5)

        off = rng.freq
        expected = pd.Index([-5 * off] * 5)
        result = rng - other
        tm.assert_index_equal(result, expected)

        rng -= other
        tm.assert_index_equal(rng, expected)

    def test_pi_sub_pi_with_nat(self):
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = rng[1:].insert(0, pd.NaT)
        assert other[1:].equals(rng[1:])

        result = rng - other
        off = rng.freq
        expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
        tm.assert_index_equal(result, expected)

    def test_parr_sub_pi_mismatched_freq(self, box_with_array):
        rng = pd.period_range("1/1/2000", freq="D", periods=5)
        other = pd.period_range("1/6/2000", freq="H", periods=5)
        # TODO: parametrize over boxes for other?

        rng = tm.box_expected(rng, box_with_array)
        with pytest.raises(IncompatibleFrequency):
            rng - other

    @pytest.mark.parametrize("n", [1, 2, 3, 4])
    def test_sub_n_gt_1_ticks(self, tick_classes, n):
        # GH 23878
        p1_d = "19910905"
        p2_d = "19920406"
        p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
        p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))

        expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
            [p1_d], freq=p1.freq.base)

        tm.assert_index_equal((p2 - p1), expected)

    @pytest.mark.parametrize("n", [1, 2, 3, 4])
    @pytest.mark.parametrize(
        "offset, kwd_name",
        [
            (pd.offsets.YearEnd, "month"),
            (pd.offsets.QuarterEnd, "startingMonth"),
            (pd.offsets.MonthEnd, None),
            (pd.offsets.Week, "weekday"),
        ],
    )
    def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
        # GH 23878
        kwds = {kwd_name: 3} if kwd_name is not None else {}
        p1_d = "19910905"
        p2_d = "19920406"
        freq = offset(n, normalize=False, **kwds)
        p1 = pd.PeriodIndex([p1_d], freq=freq)
        p2 = pd.PeriodIndex([p2_d], freq=freq)

        result = p2 - p1
        expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
            [p1_d], freq=freq.base)

        tm.assert_index_equal(result, expected)

    # -------------------------------------------------------------
    # Invalid Operations

    @pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
    @pytest.mark.parametrize("op",
                             [operator.add, ops.radd, operator.sub, ops.rsub])
    def test_parr_add_sub_float_raises(self, op, other, box_with_array):
        dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
        pi = dti.to_period("D")
        pi = tm.box_expected(pi, box_with_array)
        with pytest.raises(TypeError):
            op(pi, other)

    @pytest.mark.parametrize(
        "other",
        [
            pd.Timestamp.now(),
            pd.Timestamp.now().to_pydatetime(),
            pd.Timestamp.now().to_datetime64(),
        ],
    )
    def test_parr_add_sub_datetime_scalar(self, other, box_with_array):
        # GH#23215
        rng = pd.period_range("1/1/2000", freq="D", periods=3)
        rng = tm.box_expected(rng, box_with_array)

        with pytest.raises(TypeError):
            rng + other
        with pytest.raises(TypeError):
            other + rng
        with pytest.raises(TypeError):
            rng - other
        with pytest.raises(TypeError):
            other - rng

    # -----------------------------------------------------------------
    # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]

    def test_parr_add_sub_dt64_array_raises(self, box_with_array):
        rng = pd.period_range("1/1/2000", freq="D", periods=3)
        dti = pd.date_range("2016-01-01", periods=3)
        dtarr = dti.values

        rng = tm.box_expected(rng, box_with_array)

        with pytest.raises(TypeError):
            rng + dtarr
        with pytest.raises(TypeError):
            dtarr + rng

        with pytest.raises(TypeError):
            rng - dtarr
        with pytest.raises(TypeError):
            dtarr - rng

    def test_pi_add_sub_td64_array_non_tick_raises(self):
        rng = pd.period_range("1/1/2000", freq="Q", periods=3)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        with pytest.raises(IncompatibleFrequency):
            rng + tdarr
        with pytest.raises(IncompatibleFrequency):
            tdarr + rng

        with pytest.raises(IncompatibleFrequency):
            rng - tdarr
        with pytest.raises(TypeError):
            tdarr - rng

    def test_pi_add_sub_td64_array_tick(self):
        # PeriodIndex + Timedelta-like is allowed only with
        #   tick-like frequencies
        rng = pd.period_range("1/1/2000", freq="90D", periods=3)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values

        expected = pd.period_range("12/31/1999", freq="90D", periods=3)
        result = rng + tdi
        tm.assert_index_equal(result, expected)
        result = rng + tdarr
        tm.assert_index_equal(result, expected)
        result = tdi + rng
        tm.assert_index_equal(result, expected)
        result = tdarr + rng
        tm.assert_index_equal(result, expected)

        expected = pd.period_range("1/2/2000", freq="90D", periods=3)

        result = rng - tdi
        tm.assert_index_equal(result, expected)
        result = rng - tdarr
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            tdarr - rng

        with pytest.raises(TypeError):
            tdi - rng

    # -----------------------------------------------------------------
    # operations with array/Index of DateOffset objects

    @pytest.mark.parametrize("box", [np.array, pd.Index])
    def test_pi_add_offset_array(self, box):
        # GH#18849
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
        offs = box([
            pd.offsets.QuarterEnd(n=1, startingMonth=12),
            pd.offsets.QuarterEnd(n=-2, startingMonth=12),
        ])
        expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])

        with tm.assert_produces_warning(PerformanceWarning):
            res = pi + offs
        tm.assert_index_equal(res, expected)

        with tm.assert_produces_warning(PerformanceWarning):
            res2 = offs + pi
        tm.assert_index_equal(res2, expected)

        unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        # addition/subtraction ops with incompatible offsets should issue
        # a PerformanceWarning and _then_ raise a TypeError.
        with pytest.raises(IncompatibleFrequency):
            with tm.assert_produces_warning(PerformanceWarning):
                pi + unanchored
        with pytest.raises(IncompatibleFrequency):
            with tm.assert_produces_warning(PerformanceWarning):
                unanchored + pi

    @pytest.mark.parametrize("box", [np.array, pd.Index])
    def test_pi_sub_offset_array(self, box):
        # GH#18824
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
        other = box([
            pd.offsets.QuarterEnd(n=1, startingMonth=12),
            pd.offsets.QuarterEnd(n=-2, startingMonth=12),
        ])

        expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])

        with tm.assert_produces_warning(PerformanceWarning):
            res = pi - other
        tm.assert_index_equal(res, expected)

        anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])

        # addition/subtraction ops with anchored offsets should issue
        # a PerformanceWarning and _then_ raise a TypeError.
        with pytest.raises(IncompatibleFrequency):
            with tm.assert_produces_warning(PerformanceWarning):
                pi - anchored
        with pytest.raises(IncompatibleFrequency):
            with tm.assert_produces_warning(PerformanceWarning):
                anchored - pi

    def test_pi_add_iadd_int(self, one):
        # Variants of `one` for #19012
        rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
        result = rng + one
        expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
        tm.assert_index_equal(result, expected)
        rng += one
        tm.assert_index_equal(rng, expected)

    def test_pi_sub_isub_int(self, one):
        """
        PeriodIndex.__sub__ and __isub__ with several representations of
        the integer 1, e.g. int, np.int64, np.uint8, ...
        """
        rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
        result = rng - one
        expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
        tm.assert_index_equal(result, expected)
        rng -= one
        tm.assert_index_equal(rng, expected)

    @pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
    def test_pi_sub_intlike(self, five):
        rng = period_range("2007-01", periods=50)

        result = rng - five
        exp = rng + (-five)
        tm.assert_index_equal(result, exp)

    def test_pi_sub_isub_offset(self):
        # offset
        # DateOffset
        rng = pd.period_range("2014", "2024", freq="A")
        result = rng - pd.offsets.YearEnd(5)
        expected = pd.period_range("2009", "2019", freq="A")
        tm.assert_index_equal(result, expected)
        rng -= pd.offsets.YearEnd(5)
        tm.assert_index_equal(rng, expected)

        rng = pd.period_range("2014-01", "2016-12", freq="M")
        result = rng - pd.offsets.MonthEnd(5)
        expected = pd.period_range("2013-08", "2016-07", freq="M")
        tm.assert_index_equal(result, expected)

        rng -= pd.offsets.MonthEnd(5)
        tm.assert_index_equal(rng, expected)

    def test_pi_add_offset_n_gt1(self, box_transpose_fail):
        # GH#23215
        # add offset to PeriodIndex with freq.n > 1
        box, transpose = box_transpose_fail

        per = pd.Period("2016-01", freq="2M")
        pi = pd.PeriodIndex([per])

        expected = pd.PeriodIndex(["2016-03"], freq="2M")

        pi = tm.box_expected(pi, box, transpose=transpose)
        expected = tm.box_expected(expected, box, transpose=transpose)

        result = pi + per.freq
        tm.assert_equal(result, expected)

        result = per.freq + pi
        tm.assert_equal(result, expected)

    def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
        # GH#23215
        # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
        pi = pd.PeriodIndex(["2016-01"], freq="2M")
        expected = pd.PeriodIndex(["2016-04"], freq="2M")

        # FIXME: with transposing these tests fail
        pi = tm.box_expected(pi, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)

        result = pi + to_offset("3M")
        tm.assert_equal(result, expected)

        result = to_offset("3M") + pi
        tm.assert_equal(result, expected)

    # ---------------------------------------------------------------
    # __add__/__sub__ with integer arrays

    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    @pytest.mark.parametrize("op", [operator.add, ops.radd])
    def test_pi_add_intarray(self, int_holder, op):
        # GH#19959
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
        other = int_holder([4, -1])

        result = op(pi, other)
        expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("NaT")])
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("int_holder", [np.array, pd.Index])
    def test_pi_sub_intarray(self, int_holder):
        # GH#19959
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("NaT")])
        other = int_holder([4, -1])

        result = pi - other
        expected = pd.PeriodIndex([pd.Period("2014Q1"), pd.Period("NaT")])
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            other - pi

    # ---------------------------------------------------------------
    # Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
    # TODO: Some of these are misnomers because of non-Tick DateOffsets

    def test_pi_add_timedeltalike_minute_gt1(self, three_days):
        # GH#23031 adding a time-delta-like offset to a PeriodArray whose
        # tick-like frequency has n != 1 (freq="2D" here).  A more general
        # case is tested below in test_pi_add_timedeltalike_tick_gt1, but
        # here we write out the expected result more explicitly.
        other = three_days
        rng = pd.period_range("2014-05-01", periods=3, freq="2D")

        expected = pd.PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"],
                                  freq="2D")

        result = rng + other
        tm.assert_index_equal(result, expected)

        result = other + rng
        tm.assert_index_equal(result, expected)

        # subtraction
        expected = pd.PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"],
                                  freq="2D")
        result = rng - other
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            other - rng

    @pytest.mark.parametrize("freqstr",
                             ["5ns", "5us", "5ms", "5s", "5T", "5h", "5d"])
    def test_pi_add_timedeltalike_tick_gt1(self, three_days, freqstr):
        # GH#23031 adding a time-delta-like offset to a PeriodArray that has
        # tick-like frequency with n != 1
        other = three_days
        rng = pd.period_range("2014-05-01", periods=6, freq=freqstr)

        expected = pd.period_range(rng[0] + other, periods=6, freq=freqstr)

        result = rng + other
        tm.assert_index_equal(result, expected)

        result = other + rng
        tm.assert_index_equal(result, expected)

        # subtraction
        expected = pd.period_range(rng[0] - other, periods=6, freq=freqstr)
        result = rng - other
        tm.assert_index_equal(result, expected)

        with pytest.raises(TypeError):
            other - rng

    def test_pi_add_iadd_timedeltalike_daily(self, three_days):
        # Tick
        other = three_days
        rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
        expected = pd.period_range("2014-05-04", "2014-05-18", freq="D")

        result = rng + other
        tm.assert_index_equal(result, expected)

        rng += other
        tm.assert_index_equal(rng, expected)

    def test_pi_sub_isub_timedeltalike_daily(self, three_days):
        # Tick-like 3 Days
        other = three_days
        rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
        expected = pd.period_range("2014-04-28", "2014-05-12", freq="D")

        result = rng - other
        tm.assert_index_equal(result, expected)

        rng -= other
        tm.assert_index_equal(rng, expected)

    def test_pi_add_sub_timedeltalike_freq_mismatch_daily(self, not_daily):
        other = not_daily
        rng = pd.period_range("2014-05-01", "2014-05-15", freq="D")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=D\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other

    def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
        other = two_hours
        rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        expected = pd.period_range("2014-01-01 12:00",
                                   "2014-01-05 12:00",
                                   freq="H")

        result = rng + other
        tm.assert_index_equal(result, expected)

        rng += other
        tm.assert_index_equal(rng, expected)

    def test_pi_add_timedeltalike_mismatched_freq_hourly(self, not_hourly):
        other = not_hourly
        rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=H\\)"

        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other

        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other

    def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
        other = two_hours
        rng = pd.period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="H")
        expected = pd.period_range("2014-01-01 08:00",
                                   "2014-01-05 08:00",
                                   freq="H")

        result = rng - other
        tm.assert_index_equal(result, expected)

        rng -= other
        tm.assert_index_equal(rng, expected)

    def test_add_iadd_timedeltalike_annual(self):
        # offset
        # DateOffset
        rng = pd.period_range("2014", "2024", freq="A")
        result = rng + pd.offsets.YearEnd(5)
        expected = pd.period_range("2019", "2029", freq="A")
        tm.assert_index_equal(result, expected)
        rng += pd.offsets.YearEnd(5)
        tm.assert_index_equal(rng, expected)

    def test_pi_add_sub_timedeltalike_freq_mismatch_annual(
            self, mismatched_freq):
        other = mismatched_freq
        rng = pd.period_range("2014", "2024", freq="A")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=A-DEC\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other

    def test_pi_add_iadd_timedeltalike_M(self):
        rng = pd.period_range("2014-01", "2016-12", freq="M")
        expected = pd.period_range("2014-06", "2017-05", freq="M")

        result = rng + pd.offsets.MonthEnd(5)
        tm.assert_index_equal(result, expected)

        rng += pd.offsets.MonthEnd(5)
        tm.assert_index_equal(rng, expected)

    def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(
            self, mismatched_freq):
        other = mismatched_freq
        rng = pd.period_range("2014-01", "2016-12", freq="M")
        msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng + other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng += other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng - other
        with pytest.raises(IncompatibleFrequency, match=msg):
            rng -= other

    def test_parr_add_sub_td64_nat(self, box_transpose_fail):
        # GH#23320 special handling for timedelta64("NaT")
        box, transpose = box_transpose_fail

        pi = pd.period_range("1994-04-01", periods=9, freq="19D")
        other = np.timedelta64("NaT")
        expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")

        obj = tm.box_expected(pi, box, transpose=transpose)
        expected = tm.box_expected(expected, box, transpose=transpose)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError):
            other - obj

    @pytest.mark.parametrize(
        "other",
        [
            np.array(["NaT"] * 9, dtype="m8[ns]"),
            TimedeltaArray._from_sequence(["NaT"] * 9),
        ],
    )
    def test_parr_add_sub_tdt64_nat_array(self, box_df_fail, other):
        # FIXME: DataFrame fails because when operating column-wise
        #  timedelta64 entries become NaT and are treated like datetimes
        box = box_df_fail

        pi = pd.period_range("1994-04-01", periods=9, freq="19D")
        expected = pd.PeriodIndex(["NaT"] * 9, freq="19D")

        obj = tm.box_expected(pi, box)
        expected = tm.box_expected(expected, box)

        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        with pytest.raises(TypeError):
            other - obj
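
A brief public-API illustration of the rules summarised in the class-level comment of Example no. 22: integers and tick-compatible timedeltas are accepted, anything else raises IncompatibleFrequency.

import pandas as pd

rng = pd.period_range("2014-05-01", periods=3, freq="D")

# integers shift by the index's own frequency
print(rng + 1)

# a timedelta-like is accepted when it is a multiple of the tick-like freq ...
print(rng + pd.Timedelta("2D"))

# ... and rejected otherwise
try:
    rng + pd.Timedelta("1H")
except Exception as err:  # IncompatibleFrequency (a ValueError subclass)
    print(type(err).__name__)
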
Example no. 23
    def test_setitem_clears_freq(self):
        a = TimedeltaArray(pd.timedelta_range("1H", periods=2, freq="H"))
        a[0] = pd.Timedelta("1H")
        assert a.freq is None
Example no. 24
    def test_empty_td64(self):
        shape = (3, 9)
        result = TimedeltaArray._empty(shape, dtype="m8[ns]")
        assert isinstance(result, TimedeltaArray)
        assert result.shape == shape
Example no. 25
    def wrapper(self, other, axis=None):
        # Validate the axis parameter
        if axis is not None:
            self._get_axis_number(axis)

        res_name = get_op_result_name(self, other)
        other = lib.item_from_zerodim(other)

        # TODO: shouldn't we be applying finalize whenever
        #  not isinstance(other, ABCSeries)?
        finalizer = (
            lambda x: x.__finalize__(self)
            if isinstance(other, (np.ndarray, ABCIndexClass))
            else x
        )

        if isinstance(other, list):
            # TODO: same for tuples?
            other = np.asarray(other)

        if isinstance(other, ABCDataFrame):  # pragma: no cover
            # Defer to DataFrame implementation; fail early
            return NotImplemented

        elif isinstance(other, ABCSeries) and not self._indexed_same(other):
            raise ValueError("Can only compare identically-labeled Series objects")

        elif (
            is_list_like(other)
            and len(other) != len(self)
            and not isinstance(other, frozenset)
        ):
            # TODO: why are we treating len-1 frozenset differently?
            raise ValueError("Lengths must match to compare")

        if is_categorical_dtype(self):
            # Dispatch to Categorical implementation; CategoricalIndex
            # behavior is non-canonical GH#19513
            res_values = dispatch_to_extension_op(op, self, other)

        elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self):
            # Dispatch to DatetimeIndex to ensure identical
            # Series/Index behavior
            from pandas.core.arrays import DatetimeArray

            res_values = dispatch_to_extension_op(op, DatetimeArray(self), other)

        elif is_timedelta64_dtype(self):
            from pandas.core.arrays import TimedeltaArray

            res_values = dispatch_to_extension_op(op, TimedeltaArray(self), other)

        elif is_extension_array_dtype(self) or (
            is_extension_array_dtype(other) and not is_scalar(other)
        ):
            # Note: the `not is_scalar(other)` condition rules out
            #  e.g. other == "category"
            res_values = dispatch_to_extension_op(op, self, other)

        elif is_scalar(other) and isna(other):
            # numpy does not like comparisons vs None
            if op is operator.ne:
                res_values = np.ones(len(self), dtype=bool)
            else:
                res_values = np.zeros(len(self), dtype=bool)

        else:
            lvalues = extract_array(self, extract_numpy=True)
            rvalues = extract_array(other, extract_numpy=True)

            with np.errstate(all="ignore"):
                res_values = na_op(lvalues, rvalues)
            if is_scalar(res_values):
                raise TypeError(
                    "Could not compare {typ} type with Series".format(typ=type(other))
                )

        result = self._constructor(res_values, index=self.index)
        # rename is needed in case res_name is None and result.name
        #  is not.
        return finalizer(result).rename(res_name)
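
A short example of the scalar-NA branch above: comparisons against a missing scalar never match, except under !=.

import numpy as np
import pandas as pd

ser = pd.Series([1, 2, 3])

# is_scalar(other) and isna(other): == is all-False, != is all-True
print((ser == np.nan).tolist())   # [False, False, False]
print((ser != np.nan).tolist())   # [True, True, True]
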
Example no. 26
    (pd.SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),

    # tz-naive datetime
    (DatetimeArray(np.array(['2000', '2001'], dtype='M8[ns]')),
     np.array(['2000', '2001'], dtype='M8[ns]')),

    # tz-aware stays tz-aware
    (DatetimeArray(np.array(['2000-01-01T06:00:00',
                             '2000-01-02T06:00:00'],
                            dtype='M8[ns]'),
                   dtype=DatetimeTZDtype(tz='US/Central')),
     np.array([pd.Timestamp('2000-01-01', tz='US/Central'),
               pd.Timestamp('2000-01-02', tz='US/Central')])),

    # Timedelta
    (TimedeltaArray(np.array([0, 3600000000000], dtype='i8'), freq='H'),
     np.array([0, 3600000000000], dtype='m8[ns]')),
])
@pytest.mark.parametrize('box', [pd.Series, pd.Index])
def test_to_numpy(array, expected, box):
    thing = box(array)

    if array.dtype.name in ('Int64', 'Sparse[int64, 0]') and box is pd.Index:
        pytest.skip("No index type for {}".format(array.dtype))

    result = thing.to_numpy()
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("arr", [
Example no. 27
def array(
    data: Union[Sequence[object], AnyArrayLike],
    dtype: Optional[Dtype] = None,
    copy: bool = True,
) -> "ExtensionArray":
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        :class:`int`                   :class:`pandas.arrays.IntegerArray`
        :class:`str`                   :class:`pandas.arrays.StringArray`
        :class:`bool`                  :class:`pandas.arrays.BooleanArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

        .. versionchanged:: 1.0.0

           Pandas infers nullable-integer dtype for integer data,
           string dtype for string data, and nullable-boolean dtype
           for boolean data.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``PandasArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, pandas will infer the best dtype from the values.
    See the description of `dtype` for the types pandas infers for.

    >>> pd.array([1, 2])
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64

    >>> pd.array([1, 2, np.nan])
    <IntegerArray>
    [1, 2, <NA>]
    Length: 3, dtype: Int64

    >>> pd.array(["a", None, "c"])
    <StringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    If pandas does not infer a dedicated extension type a
    :class:`arrays.PandasArray` is returned.

    >>> pd.array([1.1, 2.2])
    <PandasArray>
    [1.1, 2.2]
    Length: 2, dtype: float64

    As mentioned in the "Notes" section, new extension types may be added
    in the future (by pandas or 3rd party libraries), causing the return
    value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
    as a NumPy dtype if you need to ensure there's no future change in
    behavior.

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array,
        BooleanArray,
        IntegerArray,
        IntervalArray,
        PandasArray,
        DatetimeArray,
        TimedeltaArray,
        StringArray,
    )

    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)

    if dtype is None and isinstance(
            data, (ABCSeries, ABCIndexClass, ABCExtensionArray)):
        dtype = data.dtype

    data = extract_array(data, extract_numpy=True)

    # this returns None for not-found dtypes.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        cls = cast(ExtensionDtype, dtype).construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            try:
                return period_array(data, copy=copy)
            except IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == "interval":
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass

        elif inferred_dtype.startswith("datetime"):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass

        elif inferred_dtype.startswith("timedelta"):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "string":
            return StringArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, copy=copy)

    # Pandas overrides NumPy for
    #   1. datetime64[ns]
    #   2. timedelta64[ns]
    # so that a DatetimeArray is returned.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
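
A quick check of the 1.0-era inference described in the ``versionchanged`` note above; on older releases these inputs would come back as a PandasArray instead.

import pandas as pd

print(pd.array([1, 2, None]).dtype)         # Int64
print(pd.array(["a", None, "c"]).dtype)     # string
print(pd.array([True, False, None]).dtype)  # boolean
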
Example no. 28
    def tda(self, unit):
        arr = np.arange(5, dtype=np.int64).view(f"m8[{unit}]")
        return TimedeltaArray._simple_new(arr, dtype=arr.dtype)
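
A minimal sketch mirroring the fixture above, assuming a pandas version new enough to support non-nanosecond timedelta resolutions (the same assumption the fixture makes):

import numpy as np
from pandas.core.arrays import TimedeltaArray

arr = np.arange(5, dtype=np.int64).view("m8[s]")       # second-resolution data
tda = TimedeltaArray._simple_new(arr, dtype=arr.dtype)

print(tda.dtype)   # timedelta64[s]
print(tda[-1])     # 0 days 00:00:04
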