Example #1
    def test_unstack(self, obj):
        # GH-13287: can't use base test, since building the expected fails.
        data = DatetimeArray._from_sequence(['2000', '2001', '2002', '2003'],
                                            tz='US/Central')
        index = pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]),
                                           names=['a', 'b'])

        if obj == "series":
            ser = pd.Series(data, index=index)
            expected = pd.DataFrame({
                "A": data.take([0, 1]),
                "B": data.take([2, 3])
            }, index=pd.Index(['a', 'b'], name='b'))
            expected.columns.name = 'a'

        else:
            ser = pd.DataFrame({"A": data, "B": data}, index=index)
            expected = pd.DataFrame(
                {("A", "A"): data.take([0, 1]),
                 ("A", "B"): data.take([2, 3]),
                 ("B", "A"): data.take([0, 1]),
                 ("B", "B"): data.take([2, 3])},
                index=pd.Index(['a', 'b'], name='b')
            )
            expected.columns.names = [None, 'a']

        result = ser.unstack(0)
        self.assert_equal(result, expected)
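For orientation (an illustrative sketch, not part of the test above): unstacking a tz-aware Series over the first level of a two-level MultiIndex pivots that level into columns, and each resulting column keeps the datetime64[ns, US/Central] dtype once the GH-13287 behavior is in place.

import pandas as pd

idx = pd.MultiIndex.from_product([["A", "B"], ["a", "b"]], names=["a", "b"])
ser = pd.Series(
    pd.to_datetime(["2000", "2001", "2002", "2003"]).tz_localize("US/Central"),
    index=idx,
)
wide = ser.unstack(0)   # columns "A" and "B" come from level "a"
print(wide.dtypes)      # both columns should report datetime64[ns, US/Central]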
Example #2
    def test_min_max_empty(self, skipna, tz):
        arr = DatetimeArray._from_sequence([], tz=tz)
        result = arr.min(skipna=skipna)
        assert result is pd.NaT

        result = arr.max(skipna=skipna)
        assert result is pd.NaT
Example #3
    def test_from_pandas_array(self):
        arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9

        result = DatetimeArray._from_sequence(arr, freq='infer')

        expected = pd.date_range('1970-01-01', periods=5, freq='H')._data
        tm.assert_datetime_array_equal(result, expected)
Example #4
    def test_astype_int(self, dtype):
        arr = DatetimeArray._from_sequence([pd.Timestamp('2000'),
                                            pd.Timestamp('2001')])
        result = arr.astype(dtype)

        if np.dtype(dtype).kind == 'u':
            expected_dtype = np.dtype('uint64')
        else:
            expected_dtype = np.dtype('int64')
        expected = arr.astype(expected_dtype)

        assert result.dtype == expected_dtype
        tm.assert_numpy_array_equal(result, expected)
Example #5
def ensure_wrapped_if_datetimelike(arr):
    """
    Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray.
    """
    if isinstance(arr, np.ndarray):
        if arr.dtype.kind == "M":
            from pandas.core.arrays import DatetimeArray

            return DatetimeArray._from_sequence(arr)

        elif arr.dtype.kind == "m":
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(arr)

    return arr
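As a quick sketch of the behavior this helper exists for (illustrative only; the helper's own import path varies across pandas versions, so only the array constructors are exercised here): a plain datetime64[ns] or timedelta64[ns] ndarray is wrapped in the corresponding extension array, while anything else would be passed through unchanged.

import numpy as np
from pandas.core.arrays import DatetimeArray, TimedeltaArray

dt_vals = np.array(["2000-01-01", "2000-01-02"], dtype="datetime64[ns]")
td_vals = np.array([1, 2], dtype="timedelta64[ns]")

wrapped_dt = DatetimeArray._from_sequence(dt_vals)    # dtype.kind == "M" branch
wrapped_td = TimedeltaArray._from_sequence(td_vals)   # dtype.kind == "m" branch

assert isinstance(wrapped_dt, DatetimeArray)
assert isinstance(wrapped_td, TimedeltaArray)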
Example #6
    def test_median_empty(self, skipna, tz):
        dtype = DatetimeTZDtype(
            tz=tz) if tz is not None else np.dtype("M8[ns]")
        arr = DatetimeArray._from_sequence([], dtype=dtype)
        result = arr.median(skipna=skipna)
        assert result is pd.NaT

        arr = arr.reshape(0, 3)
        result = arr.median(axis=0, skipna=skipna)
        expected = type(arr)._from_sequence([pd.NaT, pd.NaT, pd.NaT],
                                            dtype=arr.dtype)
        tm.assert_equal(result, expected)

        result = arr.median(axis=1, skipna=skipna)
        expected = type(arr)._from_sequence([pd.NaT], dtype=arr.dtype)
        tm.assert_equal(result, expected)
Example #7
    def arr1d(self, tz_naive_fixture):
        tz = tz_naive_fixture
        dtype = DatetimeTZDtype(
            tz=tz) if tz is not None else np.dtype("M8[ns]")
        arr = DatetimeArray._from_sequence(
            [
                "2000-01-03",
                "2000-01-03",
                "NaT",
                "2000-01-02",
                "2000-01-05",
                "2000-01-04",
            ],
            dtype=dtype,
        )
        return arr

    def test_mean_empty(self, arr1d, skipna):
        arr = arr1d[:0]

        assert arr.mean(skipna=skipna) is NaT

        arr2d = arr.reshape(0, 3)
        result = arr2d.mean(axis=0, skipna=skipna)
        expected = DatetimeArray._from_sequence([NaT, NaT, NaT], dtype=arr.dtype)
        tm.assert_datetime_array_equal(result, expected)

        result = arr2d.mean(axis=1, skipna=skipna)
        expected = arr  # i.e. 1D, empty
        tm.assert_datetime_array_equal(result, expected)

        result = arr2d.mean(axis=None, skipna=skipna)
        assert result is NaT
Example #9
    def test_fillna_preserves_tz(self, method):
        dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")
        arr = DatetimeArray(dti, copy=True)
        arr[2] = pd.NaT

        fill_val = dti[1] if method == "pad" else dti[3]
        expected = DatetimeArray._from_sequence(
            [dti[0], dti[1], fill_val, dti[3], dti[4]], freq=None, tz="US/Central"
        )

        result = arr.fillna(method=method)
        tm.assert_extension_array_equal(result, expected)

        # assert that arr and dti were not modified in-place
        assert arr[2] is pd.NaT
        assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")
Example #10
    def test_fillna_preserves_tz(self, method):
        dti = pd.date_range('2000-01-01', periods=5, freq='D', tz='US/Central')
        arr = DatetimeArray(dti, copy=True)
        arr[2] = pd.NaT

        fill_val = dti[1] if method == 'pad' else dti[3]
        expected = DatetimeArray._from_sequence(
            [dti[0], dti[1], fill_val, dti[3], dti[4]],
            freq=None, tz='US/Central'
        )

        result = arr.fillna(method=method)
        tm.assert_extension_array_equal(result, expected)

        # assert that arr and dti were not modified in-place
        assert arr[2] is pd.NaT
        assert dti[2] == pd.Timestamp('2000-01-03', tz='US/Central')
Example #11
    def test_astype_int(self, dtype):
        arr = DatetimeArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")])
        with tm.assert_produces_warning(FutureWarning):
            # astype(int..) deprecated
            result = arr.astype(dtype)

        if np.dtype(dtype).kind == "u":
            expected_dtype = np.dtype("uint64")
        else:
            expected_dtype = np.dtype("int64")

        with tm.assert_produces_warning(FutureWarning):
            # astype(int..) deprecated
            expected = arr.astype(expected_dtype)

        assert result.dtype == expected_dtype
        tm.assert_numpy_array_equal(result, expected)
Example #12
    def test_astype_int(self, dtype):
        arr = DatetimeArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")])

        if np.dtype(dtype).kind == "u":
            expected_dtype = np.dtype("uint64")
        else:
            expected_dtype = np.dtype("int64")
        expected = arr.astype(expected_dtype)

        warn = None
        if dtype != expected_dtype:
            warn = FutureWarning
        msg = " will return exactly the specified dtype"
        with tm.assert_produces_warning(warn, match=msg):
            result = arr.astype(dtype)

        assert result.dtype == expected_dtype
        tm.assert_numpy_array_equal(result, expected)
Example #13
    def to_timestamp(self, freq=None, how="start"):
        """
        Cast to DatetimeArray/Index.

        Parameters
        ----------
        freq : str or DateOffset, optional
            Target frequency. The default is 'D' for week or longer,
            'S' otherwise.
        how : {'s', 'e', 'start', 'end'}
            Whether to use the start or end of the time period being converted.

        Returns
        -------
        DatetimeArray/Index
        """
        from pandas.core.arrays import DatetimeArray

        how = libperiod._validate_end_alias(how)

        end = how == "E"
        if end:
            if freq == "B":
                # roll forward to ensure we land on B date
                adjust = Timedelta(1, "D") - Timedelta(1, "ns")
                return self.to_timestamp(how="start") + adjust
            else:
                adjust = Timedelta(1, "ns")
                return (self + self.freq).to_timestamp(how="start") - adjust

        if freq is None:
            base, mult = libfrequencies.get_freq_code(self.freq)
            freq = libfrequencies.get_to_timestamp_base(base)
        else:
            freq = Period._maybe_convert_freq(freq)

        base, mult = libfrequencies.get_freq_code(freq)
        new_data = self.asfreq(freq, how=how)

        new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
        return DatetimeArray._from_sequence(new_data, freq="infer")
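A small usage sketch of how this conversion looks from the index side (PeriodIndex.to_timestamp ultimately reaches this array method; the printed values below are approximate and shown as comments): with how='end', the adjust arithmetic above lands one nanosecond before the next period starts.

import pandas as pd

pidx = pd.period_range("2000-01", periods=3, freq="M")

start = pidx.to_timestamp(how="start")
# roughly: DatetimeIndex(['2000-01-01', '2000-02-01', '2000-03-01'], ...)

end = pidx.to_timestamp(how="end")
# roughly: DatetimeIndex(['2000-01-31 23:59:59.999999999', ...], ...)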
Example #14
    def to_timestamp(self, freq=None, how='start'):
        """
        Cast to DatetimeArray/Index.

        Parameters
        ----------
        freq : str or DateOffset, optional
            Target frequency. The default is 'D' for week or longer,
            'S' otherwise.
        how : {'s', 'e', 'start', 'end'}
            Whether to use the start or end of the time period being converted.

        Returns
        -------
        DatetimeArray/Index
        """
        from pandas.core.arrays import DatetimeArray

        how = libperiod._validate_end_alias(how)

        end = how == 'E'
        if end:
            if freq == 'B':
                # roll forward to ensure we land on B date
                adjust = Timedelta(1, 'D') - Timedelta(1, 'ns')
                return self.to_timestamp(how='start') + adjust
            else:
                adjust = Timedelta(1, 'ns')
                return (self + self.freq).to_timestamp(how='start') - adjust

        if freq is None:
            base, mult = libfrequencies.get_freq_code(self.freq)
            freq = libfrequencies.get_to_timestamp_base(base)
        else:
            freq = Period._maybe_convert_freq(freq)

        base, mult = libfrequencies.get_freq_code(freq)
        new_data = self.asfreq(freq, how=how)

        new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
        return DatetimeArray._from_sequence(new_data, freq='infer')
Example #15
    def test_min_max(self, tz):
        arr = DatetimeArray._from_sequence([
            '2000-01-03',
            '2000-01-03',
            'NaT',
            '2000-01-02',
            '2000-01-05',
            '2000-01-04',
        ], tz=tz)

        result = arr.min()
        expected = pd.Timestamp('2000-01-02', tz=tz)
        assert result == expected

        result = arr.max()
        expected = pd.Timestamp('2000-01-05', tz=tz)
        assert result == expected

        result = arr.min(skipna=False)
        assert result is pd.NaT

        result = arr.max(skipna=False)
        assert result is pd.NaT
Example #16
    def test_min_max(self, tz):
        arr = DatetimeArray._from_sequence([
            '2000-01-03',
            '2000-01-03',
            'NaT',
            '2000-01-02',
            '2000-01-05',
            '2000-01-04',
        ], tz=tz)

        result = arr.min()
        expected = pd.Timestamp('2000-01-02', tz=tz)
        assert result == expected

        result = arr.max()
        expected = pd.Timestamp('2000-01-05', tz=tz)
        assert result == expected

        result = arr.min(skipna=False)
        assert result is pd.NaT

        result = arr.max(skipna=False)
        assert result is pd.NaT
Example #17
def maybe_upcast_datetimelike_array(obj: ArrayLike) -> ArrayLike:
    """
    If we have an ndarray that is either datetime64 or timedelta64, wrap in EA.

    Parameters
    ----------
    obj : ndarray or ExtensionArray

    Returns
    -------
    ndarray or ExtensionArray
    """
    if isinstance(obj, np.ndarray):
        if obj.dtype.kind == "m":
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._from_sequence(obj)
        if obj.dtype.kind == "M":
            from pandas.core.arrays import DatetimeArray

            return DatetimeArray._from_sequence(obj)

    return obj
Example #18
    def test_unstack(self, obj):
        # GH-13287: can't use base test, since building the expected fails.
        dtype = DatetimeTZDtype(tz="US/Central")
        data = DatetimeArray._from_sequence(
            ["2000", "2001", "2002", "2003"],
            dtype=dtype,
        )
        index = pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]),
                                           names=["a", "b"])

        if obj == "series":
            ser = pd.Series(data, index=index)
            expected = pd.DataFrame(
                {
                    "A": data.take([0, 1]),
                    "B": data.take([2, 3])
                },
                index=pd.Index(["a", "b"], name="b"),
            )
            expected.columns.name = "a"

        else:
            ser = pd.DataFrame({"A": data, "B": data}, index=index)
            expected = pd.DataFrame(
                {
                    ("A", "A"): data.take([0, 1]),
                    ("A", "B"): data.take([2, 3]),
                    ("B", "A"): data.take([0, 1]),
                    ("B", "B"): data.take([2, 3]),
                },
                index=pd.Index(["a", "b"], name="b"),
            )
            expected.columns.names = [None, "a"]

        result = ser.unstack(0)
        self.assert_equal(result, expected)
Example #19
    def test_tz_setter_raises(self):
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central"))
        with pytest.raises(AttributeError, match="tz_localize"):
            arr.tz = "UTC"
Example #20
    def test_from_sequence_invalid_type(self):
        mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)])
        with pytest.raises(TypeError, match="Cannot create a DatetimeArray"):
            DatetimeArray._from_sequence(mi)
Example #21
    def test_astype_to_same(self):
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central"))
        result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
        assert result is arr
Example #22
    #
    # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
    td = Timedelta(hours=3, minutes=4)
    assert td // val is expected


@pytest.mark.parametrize(
    "op_name",
    ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
@pytest.mark.parametrize(
    "value",
    [
        DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
        DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
        DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]),
        DatetimeArray._from_sequence(
            ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific")
        ),
        TimedeltaIndex(["1 day", "2 day"], name="x"),
    ],
)
def test_nat_arithmetic_index(op_name, value):
    # see gh-11718
    exp_name = "x"
    exp_data = [NaT] * 2

    if is_datetime64_any_dtype(value.dtype) and "plus" in op_name:
        expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
    else:
        expected = TimedeltaIndex(exp_data, name=exp_name)
Example #23
def array(
    data: Union[Sequence[object], AnyArrayLike],
    dtype: Optional[Dtype] = None,
    copy: bool = True,
) -> "ExtensionArray":
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        :class:`int`                   :class:`pandas.arrays.IntegerArray`
        :class:`str`                   :class:`pandas.arrays.StringArray`
        :class:`bool`                  :class:`pandas.arrays.BooleanArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

        .. versionchanged:: 1.0.0

           Pandas infers nullable-integer dtype for integer data,
           string dtype for string data, and nullable-boolean dtype
           for boolean data.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a  NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``PandasArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, pandas will infer the best dtype from the values.
    See the description of `dtype` for the types pandas infers for.

    >>> pd.array([1, 2])
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64

    >>> pd.array([1, 2, np.nan])
    <IntegerArray>
    [1, 2, <NA>]
    Length: 3, dtype: Int64

    >>> pd.array(["a", None, "c"])
    <StringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    If pandas does not infer a dedicated extension type a
    :class:`arrays.PandasArray` is returned.

    >>> pd.array([1.1, 2.2])
    <PandasArray>
    [1.1, 2.2]
    Length: 2, dtype: float64

    As mentioned in the "Notes" section, new extension types may be added
    in the future (by pandas or 3rd party libraries), causing the return
    value to no longer be a :class:`arrays.PandasArray`. Specify the `dtype`
    as a NumPy dtype if you need to ensure there's no future change in
    behavior.

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array,
        BooleanArray,
        IntegerArray,
        IntervalArray,
        PandasArray,
        DatetimeArray,
        TimedeltaArray,
        StringArray,
    )

    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)

    if dtype is None and isinstance(
            data, (ABCSeries, ABCIndexClass, ABCExtensionArray)):
        dtype = data.dtype

    data = extract_array(data, extract_numpy=True)

    # this returns None for not-found dtypes.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        cls = cast(ExtensionDtype, dtype).construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            try:
                return period_array(data, copy=copy)
            except IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == "interval":
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass

        elif inferred_dtype.startswith("datetime"):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass

        elif inferred_dtype.startswith("timedelta"):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "string":
            return StringArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)

        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, copy=copy)

    # Pandas overrides NumPy for
    #   1. datetime64[ns]
    #   2. timedelta64[ns]
    # so that a DatetimeArray is returned.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
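Two hedged usage sketches of the datetime-related branches above (values chosen for illustration): datetime-like data with no explicit dtype goes through the inference branch that calls DatetimeArray._from_sequence, while an explicit 'datetime64[ns]' dtype takes the override near the end of the function instead of falling back to PandasArray.

import numpy as np
import pandas as pd

# Inference branch: infer_dtype(...) starts with "datetime", so the data is
# routed through DatetimeArray._from_sequence.
inferred = pd.array(np.array(["2015-01-01", "2016-01-01"], dtype="datetime64[ns]"))
print(type(inferred).__name__)   # DatetimeArray

# Explicit-dtype branch: is_datetime64_ns_dtype(dtype) is True, so the
# PandasArray fallback at the bottom is never reached.
explicit = pd.array(["2015", "2016"], dtype="datetime64[ns]")
print(explicit.dtype)            # datetime64[ns]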
Example #24
def array(data: Sequence[object],
          dtype: Optional[Union[str, np.dtype, ExtensionDtype]] = None,
          copy: bool = True,
          ) -> ABCExtensionArray:
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a  NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Or use the dedicated constructor for the array you're expecting, and
    wrap that in a PandasArray

    >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or ``TimedeltaArray``
    rather than a ``PandasArray``. This is for symmetry with the case of
    timezone-aware data, which NumPy does not natively support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['01:00:00', '02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, `data` is passed through to
    :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.

    >>> pd.array([1, 2])
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int64

    Or the NumPy dtype can be specified

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    Because omitting the `dtype` passes the data through to NumPy,
    a mixture of valid integers and NA will return a floating-point
    NumPy array.

    >>> pd.array([1, 2, np.nan])
    <PandasArray>
    [1.0,  2.0, nan]
    Length: 3, dtype: float64

    To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
    the dtype:

    >>> pd.array([1, 2, np.nan], dtype='Int64')
    <IntegerArray>
    [1, 2, NaN]
    Length: 3, dtype: Int64

    Pandas will infer an ExtensionArray for some types of data:

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array, ExtensionArray, IntervalArray, PandasArray,
        DatetimeArray,
        TimedeltaArray,
    )
    from pandas.core.internals.arrays import extract_array

    if lib.is_scalar(data):
        msg = (
            "Cannot pass scalar '{}' to 'pandas.array'."
        )
        raise ValueError(msg.format(data))

    data = extract_array(data, extract_numpy=True)

    if dtype is None and isinstance(data, ExtensionArray):
        dtype = data.dtype

    # this returns None for not-found dtypes.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=False)
        if inferred_dtype == 'period':
            try:
                return period_array(data, copy=copy)
            except tslibs.IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == 'interval':
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass

        elif inferred_dtype.startswith('datetime'):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass

        elif inferred_dtype.startswith('timedelta'):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        # TODO(BooleanArray): handle this type

    # Pandas overrides NumPy for
    #   1. datetime64[ns]
    #   2. timedelta64[ns]
    # so that a DatetimeArray is returned.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
Example #25
    def test_tz_dtype_mismatch_raises(self):
        arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
        with pytest.raises(TypeError, match='data is already tz-aware'):
            sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
Example #26
    #
    # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
    td = Timedelta(hours=3, minutes=4)
    assert td // val is expected


@pytest.mark.parametrize(
    "op_name",
    ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
)
@pytest.mark.parametrize(
    "value",
    [
        DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
        DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
        DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"]),
        DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], tz="US/Pacific"),
        TimedeltaIndex(["1 day", "2 day"], name="x"),
    ],
)
def test_nat_arithmetic_index(op_name, value):
    # see gh-11718
    exp_name = "x"
    exp_data = [NaT] * 2

    if is_datetime64_any_dtype(value.dtype) and "plus" in op_name:
        expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
    else:
        expected = TimedeltaIndex(exp_data, name=exp_name)

    if not isinstance(value, Index):
Example #27
    def test_tz_setter_raises(self):
        arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
        with pytest.raises(AttributeError, match='tz_localize'):
            arr.tz = 'UTC'
Example #28
    def test_astype_to_same(self):
        arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
        result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
        assert result is arr
Example #29
    def test_tz_setter_raises(self):
        arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
        with pytest.raises(AttributeError, match='tz_localize'):
            arr.tz = 'UTC'
Example #30
    def test_tz_dtype_mismatch_raises(self):
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central"))
        with pytest.raises(TypeError, match="data is already tz-aware"):
            sequence_to_dt64ns(arr, dtype=DatetimeTZDtype(tz="UTC"))
Example #31
def array(
        data,  # type: Sequence[object]
        dtype=None,  # type: Optional[Union[str, np.dtype, ExtensionDtype]]
        copy=True,  # type: bool
):
    # type: (...) -> ExtensionArray
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a  NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Or use the dedicated constructor for the array you're expecting, and
    wrap that in a PandasArray

    >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Examples
    --------
    If a dtype is not specified, `data` is passed through to
    :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.

    >>> pd.array([1, 2])
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int64

    Or the NumPy dtype can be specified

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    Because omitting the `dtype` passes the data through to NumPy,
    a mixture of valid integers and NA will return a floating-point
    NumPy array.

    >>> pd.array([1, 2, np.nan])
    <PandasArray>
    [1.0,  2.0, nan]
    Length: 3, dtype: float64

    To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
    the dtype:

    >>> pd.array([1, 2, np.nan], dtype='Int64')
    <IntegerArray>
    [1, 2, NaN]
    Length: 3, dtype: Int64

    Pandas will infer an ExtensionArray for some types of data:

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array,
        ExtensionArray,
        IntervalArray,
        PandasArray,
        DatetimeArray,
        TimedeltaArray,
    )
    from pandas.core.internals.arrays import extract_array

    if lib.is_scalar(data):
        msg = ("Cannot pass scalar '{}' to 'pandas.array'.")
        raise ValueError(msg.format(data))

    data = extract_array(data, extract_numpy=True)

    if dtype is None and isinstance(data, ExtensionArray):
        dtype = data.dtype

    # this returns None for not-found dtypes.
    if isinstance(dtype, compat.string_types):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        cls = dtype.construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=False)
        if inferred_dtype == 'period':
            try:
                return period_array(data, copy=copy)
            except tslibs.IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == 'interval':
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass

        elif inferred_dtype.startswith('datetime'):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass

        elif inferred_dtype.startswith('timedelta'):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)

        # TODO(BooleanArray): handle this type

    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
Example #32
    def test_tz_dtype_matches(self):
        arr = DatetimeArray._from_sequence(
            ["2000"], dtype=DatetimeTZDtype(tz="US/Central"))
        result, _, _ = sequence_to_dt64ns(
            arr, dtype=DatetimeTZDtype(tz="US/Central"))
        tm.assert_numpy_array_equal(arr._data, result)
Example #33
    def test_tz_dtype_matches(self):
        arr = DatetimeArray._from_sequence(['2000'], tz='US/Central')
        result, _, _ = sequence_to_dt64ns(
            arr, dtype=DatetimeTZDtype(tz="US/Central"))
        tm.assert_numpy_array_equal(arr._data, result)