Example #1
def maybe_upcast_for_op(obj, shape: Tuple[int, ...]):
    """
    Cast non-pandas objects to pandas types to unify behavior of arithmetic
    and comparison operations.

    Parameters
    ----------
    obj : object
    shape : tuple[int]

    Returns
    -------
    out : object

    Notes
    -----
    Be careful to call this *after* determining the `name` attribute to be
    attached to the result of the arithmetic operation.
    """
    from pandas.core.arrays import DatetimeArray, TimedeltaArray

    if type(obj) is timedelta:
        # GH#22390  cast up to Timedelta to rely on Timedelta
        # implementation; otherwise operation against numeric-dtype
        # raises TypeError
        return Timedelta(obj)
    elif isinstance(obj, np.datetime64):
        # GH#28080 numpy casts integer-dtype to datetime64 when doing
        #  array[int] + datetime64, which we do not allow
        if isna(obj):
            # Avoid possible ambiguities with pd.NaT
            obj = obj.astype("datetime64[ns]")
            right = np.broadcast_to(obj, shape)
            return DatetimeArray(right)

        return Timestamp(obj)

    elif isinstance(obj, np.timedelta64):
        if isna(obj):
            # wrapping timedelta64("NaT") in Timedelta returns NaT,
            #  which would incorrectly be treated as a datetime-NaT, so
            #  we broadcast and wrap in a TimedeltaArray
            obj = obj.astype("timedelta64[ns]")
            right = np.broadcast_to(obj, shape)
            return TimedeltaArray(right)

        # In particular non-nanosecond timedelta64 needs to be cast to
        #  nanoseconds, or else we get undesired behavior like
        #  np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
        return Timedelta(obj)

    elif isinstance(obj, np.ndarray) and obj.dtype.kind == "m":
        # GH#22390 Unfortunately we need to special-case right-hand
        # timedelta64 dtypes because numpy casts integer dtypes to
        # timedelta64 when operating with timedelta64
        return TimedeltaArray._from_sequence(obj)
    return obj
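
A minimal sketch (not part of the original function) of the behavior the GH#22390 comment describes: dividing a coarse-resolution np.timedelta64 truncates at that resolution, while wrapping in Timedelta first keeps nanosecond precision.

import numpy as np
import pandas as pd

raw = np.timedelta64(3, "D")
# Plain numpy division truncates to whole days here (per the comment above;
# the exact result can depend on the numpy version).
print(raw / 2)
# Wrapping in Timedelta first keeps nanosecond resolution: 1 days 12:00:00
print(pd.Timedelta(raw) / 2)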
Example #2
    def test_take_fill_valid(self, arr1d):
        arr = arr1d
        dti = self.index_cls(arr1d)

        now = Timestamp.now().tz_localize(dti.tz)
        result = arr.take([-1, 1], allow_fill=True, fill_value=now)
        assert result[0] == now

        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            # fill_value Timedelta invalid
            arr.take([-1, 1], allow_fill=True, fill_value=now - now)

        with pytest.raises(TypeError, match=msg):
            # fill_value Period invalid
            arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1"))

        tz = None if dti.tz is not None else "US/Eastern"
        now = Timestamp.now().tz_localize(tz)
        msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
        with pytest.raises(TypeError, match=msg):
            # Timestamp with mismatched tz-awareness
            arr.take([-1, 1], allow_fill=True, fill_value=now)

        value = NaT.value
        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            # require NaT, not iNaT, as it could be confused with an integer
            arr.take([-1, 1], allow_fill=True, fill_value=value)

        value = np.timedelta64("NaT", "ns")
        with pytest.raises(TypeError, match=msg):
            # require appropriate-dtype if we have a NA value
            arr.take([-1, 1], allow_fill=True, fill_value=value)

        if arr.tz is not None:
            # GH#37356
            # Assuming here that arr1d fixture does not include Australia/Melbourne
            value = Timestamp.now().tz_localize("Australia/Melbourne")
            msg = "Timezones don't match. .* != 'Australia/Melbourne'"
            with pytest.raises(ValueError, match=msg):
                # require tz match, not just tzawareness match
                arr.take([-1, 1], allow_fill=True, fill_value=value)
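
A hedged sketch of the public behavior this test exercises: take with allow_fill=True accepts a matching scalar (a Timestamp or NaT for datetime data) and rejects mismatched fill values with TypeError.

import pandas as pd

arr = pd.array(pd.date_range("2020-01-01", periods=3))  # DatetimeArray
filled = arr.take([-1, 1], allow_fill=True, fill_value=pd.Timestamp("2020-06-01"))
print(filled[0])  # 2020-06-01 00:00:00

try:
    arr.take([-1, 1], allow_fill=True, fill_value=pd.Timedelta(days=1))
except TypeError as err:
    print(err)  # Timedelta is not a valid fill_value for datetime data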
Example #3
    def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):

        # GH#42855 handle date here instead of get_slice_bound
        if isinstance(label, date) and not isinstance(label, datetime):
            # Pandas supports slicing with dates, treated as datetimes at midnight.
            # https://github.com/pandas-dev/pandas/issues/31501
            label = Timestamp(label).to_pydatetime()

        label = super()._maybe_cast_slice_bound(label, side, kind=kind)
        self._deprecate_mismatched_indexing(label)
        return self._maybe_cast_for_get_loc(label)
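
A short usage sketch, assuming the GH#31501/GH#42855 behavior described above: slicing a DatetimeIndex-backed Series with plain datetime.date bounds, which are treated as midnight datetimes.

from datetime import date

import pandas as pd

ser = pd.Series(range(4), index=pd.date_range("2020-01-01", periods=4, freq="12H"))
# date bounds are interpreted as midnight of that day
print(ser.loc[date(2020, 1, 1):date(2020, 1, 2)])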
Example #4
def datetime_index(freqstr):
    """
    A fixture to provide DatetimeIndex objects with different frequencies.

    Most DatetimeArray behavior is already tested in DatetimeIndex tests,
    so here we just test that the DatetimeArray behavior matches
    the DatetimeIndex behavior.
    """
    # TODO: non-monotone indexes; NaTs, different start dates, timezones
    dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
    return dti
Example #5
def period_index(freqstr):
    """
    A fixture to provide PeriodIndex objects with different frequencies.

    Most PeriodArray behavior is already tested in PeriodIndex tests,
    so here we just test that the PeriodArray behavior matches
    the PeriodIndex behavior.
    """
    # TODO: non-monotone indexes; NaTs, different start dates
    pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr)
    return pi
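
A brief illustration (my own, not part of the fixtures) of the index/array relationship these fixtures rely on: each index wraps an extension array that is reachable via .array.

import pandas as pd

dti = pd.date_range(start=pd.Timestamp("2000-01-01"), periods=5, freq="D")
pi = pd.period_range(start=pd.Timestamp("2000-01-01"), periods=5, freq="D")
print(type(dti.array).__name__)  # DatetimeArray
print(type(pi.array).__name__)   # PeriodArray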
Example #6
def _wrap_results(result, dtype: DtypeObj, fill_value=None):
    """ wrap our results if needed """
    if result is NaT:
        pass

    elif is_datetime64_any_dtype(dtype):
        if fill_value is None:
            # GH#24293
            fill_value = iNaT
        if not isinstance(result, np.ndarray):
            tz = getattr(dtype, "tz", None)
            assert not isna(fill_value), "Expected non-null fill_value"
            if result == fill_value:
                result = np.nan

            if tz is not None:
                # we get here e.g. via nanmean when we call it on a DTA[tz]
                result = Timestamp(result, tz=tz)
            elif isna(result):
                result = np.datetime64("NaT", "ns")
            else:
                result = np.int64(result).view("datetime64[ns]")
        else:
            # If we have float dtype, taking a view will give the wrong result
            result = result.astype(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            if result == fill_value:
                result = np.nan

            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > np.iinfo(np.int64).max:
                raise ValueError("overflow in timedelta operation")

            result = Timedelta(result, unit="ns")
        else:
            result = result.astype("m8[ns]").view(dtype)

    return result
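
To see this wrapping through the public API, a hedged example: reductions on datetime-like data come back as Timestamp/Timedelta scalars rather than raw integers, which is what _wrap_results arranges.

import pandas as pd

dt_ser = pd.Series(pd.date_range("2020-01-01", periods=3, tz="UTC"))
td_ser = pd.Series(pd.timedelta_range("1 day", periods=3))

print(dt_ser.mean())  # Timestamp('2020-01-02 00:00:00+0000', tz='UTC')
print(td_ser.sum())   # Timedelta('6 days 00:00:00')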
Example #7
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int
        """

        if tolerance is not None:
            # try converting tolerance now, so errors don't get swallowed by
            # the try/except clauses below
            tolerance = self._convert_tolerance(tolerance, np.asarray(key))

        if isinstance(key, datetime):
            # needed to localize naive datetimes
            if key.tzinfo is None:
                key = Timestamp(key, tz=self.tz)
            else:
                key = Timestamp(key).tz_convert(self.tz)
            return Index.get_loc(self, key, method, tolerance)

        elif isinstance(key, timedelta):
            # GH#20464
            raise TypeError(
                f"Cannot index {type(self).__name__} with {type(key).__name__}"
            )

        if isinstance(key, time):
            if method is not None:
                raise NotImplementedError(
                    "cannot yet lookup inexact labels when key is a time object"
                )
            return self.indexer_at_time(key)

        try:
            return Index.get_loc(self, key, method, tolerance)
        except (KeyError, ValueError, TypeError):
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError, OverflowError):
                pass

            try:
                stamp = Timestamp(key)
                if stamp.tzinfo is not None and self.tz is not None:
                    stamp = stamp.tz_convert(self.tz)
                else:
                    stamp = stamp.tz_localize(self.tz)
                return Index.get_loc(self, stamp, method, tolerance)
            except KeyError:
                raise KeyError(key)
            except ValueError as e:
                # list-like tolerance size must match target index size
                if "list-like" in str(e):
                    raise e
                raise KeyError(key)
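
A sketch of the public lookups this method backs (assumed usage; exact behavior depends on the pandas version): string keys are parsed to Timestamps, and a datetime.time key routes to indexer_at_time.

from datetime import time

import pandas as pd

dti = pd.date_range("2020-01-01", periods=8, freq="6H")
print(dti.get_loc("2020-01-01 06:00"))  # 1
print(dti.get_loc(time(6, 0)))          # positions whose wall time is 06:00, e.g. [1 5]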
Example #8
    def test_take_fill_valid(self, timedelta_index):
        tdi = timedelta_index
        arr = TimedeltaArray(tdi)

        td1 = pd.Timedelta(days=1)
        result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
        assert result[0] == td1

        dt_ind = Timestamp(2021, 1, 1, 12)
        value = dt_ind
        msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            # fill_value Timestamp invalid
            arr.take([0, 1], allow_fill=True, fill_value=value)

        value = dt_ind.to_period("D")
        with pytest.raises(TypeError, match=msg):
            # fill_value Period invalid
            arr.take([0, 1], allow_fill=True, fill_value=value)

        value = np.datetime64("NaT", "ns")
        with pytest.raises(TypeError, match=msg):
            # require appropriate-dtype if we have a NA value
            arr.take([-1, 1], allow_fill=True, fill_value=value)
Example #9
def _hash_scalar(val,
                 encoding: str = "utf8",
                 hash_key: str = _default_hash_key) -> np.ndarray:
    """
    Hash scalar value.

    Parameters
    ----------
    val : scalar
    encoding : str, default "utf8"
    hash_key : str, default _default_hash_key

    Returns
    -------
    1d uint64 numpy array of hash value, of length 1
    """

    if isna(val):
        # this is to be consistent with the _hash_categorical implementation
        return np.array([np.iinfo(np.uint64).max], dtype="u8")

    if getattr(val, "tzinfo", None) is not None:
        # for tz-aware datetimes, we need the underlying naive UTC value and
        # not the tz aware object or pd extension type (as
        # infer_dtype_from_scalar would do)
        if not isinstance(val, Timestamp):
            val = Timestamp(val)
        val = val.tz_convert(None)

    dtype, val = infer_dtype_from_scalar(val)
    vals = np.array([val], dtype=dtype)

    return hash_array(vals,
                      hash_key=hash_key,
                      encoding=encoding,
                      categorize=False)
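
The tz normalization described in the comment, sketched with public Timestamp methods: tz_convert(None) yields the naive UTC wall time that then gets hashed.

import pandas as pd

ts = pd.Timestamp("2020-01-01 00:00", tz="US/Eastern")
print(ts.tz_convert(None))  # 2020-01-01 05:00:00, the underlying naive UTC value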
Example #10
    def get_value_maybe_box(self, series, key):
        # needed to localize naive datetimes
        if self.tz is not None:
            key = Timestamp(key)
            if key.tzinfo is not None:
                key = key.tz_convert(self.tz)
            else:
                key = key.tz_localize(self.tz)
        elif not isinstance(key, Timestamp):
            key = Timestamp(key)
        values = self._engine.get_value(com.values_from_object(series), key, tz=self.tz)
        return com.maybe_box(self, values, series, key)
Example #11
def _wrap_results(result, dtype: Dtype, fill_value=None):
    """ wrap our results if needed """
    if is_datetime64_any_dtype(dtype):
        if fill_value is None:
            # GH#24293
            fill_value = iNaT
        if not isinstance(result, np.ndarray):
            tz = getattr(dtype, "tz", None)
            assert not isna(fill_value), "Expected non-null fill_value"
            if result == fill_value:
                result = np.nan
            result = Timestamp(result, tz=tz)
        else:
            # If we have float dtype, taking a view will give the wrong result
            result = result.astype(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            if result == fill_value:
                result = np.nan

            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")

            result = Timedelta(result, unit="ns")
        else:
            result = result.astype("m8[ns]").view(dtype)

    elif isinstance(dtype, PeriodDtype):
        if is_float(result) and result.is_integer():
            result = int(result)
        if is_integer(result):
            result = Period._from_ordinal(result, freq=dtype.freq)
        else:
            raise NotImplementedError(type(result), result)

    return result
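
For the PeriodDtype branch, a hedged public-API illustration: reductions on period data are re-wrapped as Period scalars built from their integer ordinals.

import pandas as pd

ser = pd.Series(pd.period_range("2020-01", periods=5, freq="M"))
print(ser.max())  # Period('2020-05', 'M')
print(ser.min())  # Period('2020-01', 'M')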
Example #12
    def test_fast_unique_multiple_unsortable_runtimewarning(self):
        arr = [np.array(["foo", Timestamp("2000")])]
        with tm.assert_produces_warning(RuntimeWarning):
            lib.fast_unique_multiple(arr, sort=None)
Example #13
class SharedTests:
    index_cls: Type[Union[DatetimeIndex, PeriodIndex, TimedeltaIndex]]

    @pytest.fixture
    def arr1d(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")
        return arr

    def test_compare_len1_raises(self, arr1d):
        # make sure we raise when comparing with different lengths, specific
        #  to the case where one has length-1, which numpy would broadcast
        arr = arr1d
        idx = self.index_cls(arr)

        with pytest.raises(ValueError, match="Lengths must match"):
            arr == arr[:1]

        # test the index classes while we're at it, GH#23078
        with pytest.raises(ValueError, match="Lengths must match"):
            idx <= idx[[0]]

    @pytest.mark.parametrize(
        "result",
        [
            pd.date_range("2020", periods=3),
            pd.date_range("2020", periods=3, tz="UTC"),
            pd.timedelta_range("0 days", periods=3),
            pd.period_range("2020Q1", periods=3, freq="Q"),
        ],
    )
    def test_compare_with_Categorical(self, result):
        expected = pd.Categorical(result)
        assert all(result == expected)
        assert not any(result != expected)

    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("as_index", [True, False])
    def test_compare_categorical_dtype(self, arr1d, as_index, reverse,
                                       ordered):
        other = pd.Categorical(arr1d, ordered=ordered)
        if as_index:
            other = pd.CategoricalIndex(other)

        left, right = arr1d, other
        if reverse:
            left, right = right, left

        ones = np.ones(arr1d.shape, dtype=bool)
        zeros = ~ones

        result = left == right
        tm.assert_numpy_array_equal(result, ones)

        result = left != right
        tm.assert_numpy_array_equal(result, zeros)

        if not reverse and not as_index:
            # Otherwise Categorical raises TypeError bc it is not ordered
            # TODO: we should probably get the same behavior regardless?
            result = left < right
            tm.assert_numpy_array_equal(result, zeros)

            result = left <= right
            tm.assert_numpy_array_equal(result, ones)

            result = left > right
            tm.assert_numpy_array_equal(result, zeros)

            result = left >= right
            tm.assert_numpy_array_equal(result, ones)

    def test_take(self):
        data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9
        np.random.shuffle(data)

        freq = None if self.array_cls is not PeriodArray else "D"

        arr = self.array_cls(data, freq=freq)
        idx = self.index_cls._simple_new(arr)

        takers = [1, 4, 94]
        result = arr.take(takers)
        expected = idx.take(takers)

        tm.assert_index_equal(self.index_cls(result), expected)

        takers = np.array([1, 4, 94])
        result = arr.take(takers)
        expected = idx.take(takers)

        tm.assert_index_equal(self.index_cls(result), expected)

    @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp.now().time])
    def test_take_fill_raises(self, fill_value):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9

        arr = self.array_cls(data, freq="D")

        msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            arr.take([0, 1], allow_fill=True, fill_value=fill_value)

    def test_take_fill(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9

        arr = self.array_cls(data, freq="D")

        result = arr.take([-1, 1], allow_fill=True, fill_value=None)
        assert result[0] is NaT

        result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan)
        assert result[0] is NaT

        result = arr.take([-1, 1], allow_fill=True, fill_value=NaT)
        assert result[0] is NaT

    def test_take_fill_str(self, arr1d):
        # Cast str fill_value matching other fill_value-taking methods
        result = arr1d.take([-1, 1],
                            allow_fill=True,
                            fill_value=str(arr1d[-1]))
        expected = arr1d[[-1, 1]]
        tm.assert_equal(result, expected)

        msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got"
        with pytest.raises(TypeError, match=msg):
            arr1d.take([-1, 1], allow_fill=True, fill_value="foo")

    def test_concat_same_type(self, arr1d):
        arr = arr1d
        idx = self.index_cls(arr)
        idx = idx.insert(0, NaT)
        arr = self.array_cls(idx)

        result = arr._concat_same_type([arr[:-1], arr[1:], arr])
        arr2 = arr.astype(object)
        expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]),
                                  None)

        tm.assert_index_equal(self.index_cls(result), expected)

    def test_unbox_scalar(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")
        result = arr._unbox_scalar(arr[0])
        expected = arr._data.dtype.type
        assert isinstance(result, expected)

        result = arr._unbox_scalar(NaT)
        assert isinstance(result, expected)

        msg = f"'value' should be a {self.scalar_type.__name__}."
        with pytest.raises(ValueError, match=msg):
            arr._unbox_scalar("foo")

    def test_check_compatible_with(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")

        arr._check_compatible_with(arr[0])
        arr._check_compatible_with(arr[:1])
        arr._check_compatible_with(NaT)

    def test_scalar_from_string(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")
        result = arr._scalar_from_string(str(arr[0]))
        assert result == arr[0]

    def test_reduce_invalid(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")

        msg = f"'{type(arr).__name__}' does not implement reduction 'not a method'"
        with pytest.raises(TypeError, match=msg):
            arr._reduce("not a method")

    @pytest.mark.parametrize("method", ["pad", "backfill"])
    def test_fillna_method_doesnt_change_orig(self, method):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")
        arr[4] = NaT

        fill_value = arr[3] if method == "pad" else arr[5]

        result = arr.fillna(method=method)
        assert result[4] == fill_value

        # check that the original was not changed
        assert arr[4] is NaT

    def test_searchsorted(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")

        # scalar
        result = arr.searchsorted(arr[1])
        assert result == 1

        result = arr.searchsorted(arr[2], side="right")
        assert result == 3

        # own-type
        result = arr.searchsorted(arr[1:3])
        expected = np.array([1, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        result = arr.searchsorted(arr[1:3], side="right")
        expected = np.array([2, 3], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        # GH#29884 match numpy convention on whether NaT goes
        #  at the end or the beginning
        result = arr.searchsorted(NaT)
        if np_version_under1p18:
            # Following numpy convention, NaT goes at the beginning
            #  (unlike NaN which goes at the end)
            assert result == 0
        else:
            assert result == 10

    @pytest.mark.parametrize("box", [None, "index", "series"])
    def test_searchsorted_castable_strings(self, arr1d, box, request):
        if isinstance(arr1d, DatetimeArray):
            tz = arr1d.tz
            ts1, ts2 = arr1d[1:3]
            if tz is not None and ts1.tz.tzname(ts1) != ts2.tz.tzname(ts2):
                # If we have e.g. tzutc(), when we cast to string and parse
                #  back we get pytz.UTC, and then consider them different timezones
                #  so incorrectly raise.
                mark = pytest.mark.xfail(
                    reason="timezone comparisons inconsistent")
                request.node.add_marker(mark)

        arr = arr1d
        if box is None:
            pass
        elif box == "index":
            # Test the equivalent Index.searchsorted method while we're here
            arr = self.index_cls(arr)
        else:
            # Test the equivalent Series.searchsorted method while we're here
            arr = pd.Series(arr)

        # scalar
        result = arr.searchsorted(str(arr[1]))
        assert result == 1

        result = arr.searchsorted(str(arr[2]), side="right")
        assert result == 3

        result = arr.searchsorted([str(x) for x in arr[1:3]])
        expected = np.array([1, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

        with pytest.raises(
                TypeError,
                match=re.escape(
                    f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
                    "or array of those. Got 'str' instead."),
        ):
            arr.searchsorted("foo")

        with pytest.raises(
                TypeError,
                match=re.escape(
                    f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', "
                    "or array of those. Got 'StringArray' instead."),
        ):
            arr.searchsorted([str(arr[1]), "baz"])

    def test_getitem_near_implementation_bounds(self):
        # We only check tz-naive for DTA bc the bounds are slightly different
        #  for other tzs
        i8vals = np.asarray([NaT.value + n for n in range(1, 5)], dtype="i8")
        arr = self.array_cls(i8vals, freq="ns")
        arr[0]  # should not raise OutOfBoundsDatetime

        index = pd.Index(arr)
        index[0]  # should not raise OutOfBoundsDatetime

        ser = pd.Series(arr)
        ser[0]  # should not raise OutOfBoundsDatetime

    def test_getitem_2d(self, arr1d):
        # 2d slicing on a 1D array
        expected = type(arr1d)(arr1d._data[:, np.newaxis], dtype=arr1d.dtype)
        result = arr1d[:, np.newaxis]
        tm.assert_equal(result, expected)

        # Lookup on a 2D array
        arr2d = expected
        expected = type(arr2d)(arr2d._data[:3, 0], dtype=arr2d.dtype)
        result = arr2d[:3, 0]
        tm.assert_equal(result, expected)

        # Scalar lookup
        result = arr2d[-1, 0]
        expected = arr1d[-1]
        assert result == expected

    def test_iter_2d(self, arr1d):
        data2d = arr1d._data[:3, np.newaxis]
        arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)
        result = list(arr2d)
        assert len(result) == 3
        for x in result:
            assert isinstance(x, type(arr1d))
            assert x.ndim == 1
            assert x.dtype == arr1d.dtype

    def test_repr_2d(self, arr1d):
        data2d = arr1d._data[:3, np.newaxis]
        arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype)

        result = repr(arr2d)

        if isinstance(arr2d, TimedeltaArray):
            expected = (f"<{type(arr2d).__name__}>\n"
                        "[\n"
                        f"['{arr1d[0]._repr_base()}'],\n"
                        f"['{arr1d[1]._repr_base()}'],\n"
                        f"['{arr1d[2]._repr_base()}']\n"
                        "]\n"
                        f"Shape: (3, 1), dtype: {arr1d.dtype}")
        else:
            expected = (f"<{type(arr2d).__name__}>\n"
                        "[\n"
                        f"['{arr1d[0]}'],\n"
                        f"['{arr1d[1]}'],\n"
                        f"['{arr1d[2]}']\n"
                        "]\n"
                        f"Shape: (3, 1), dtype: {arr1d.dtype}")

        assert result == expected

    def test_setitem(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")

        arr[0] = arr[1]
        expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        expected[0] = expected[1]

        tm.assert_numpy_array_equal(arr.asi8, expected)

        arr[:2] = arr[-2:]
        expected[:2] = expected[-2:]
        tm.assert_numpy_array_equal(arr.asi8, expected)

    @pytest.mark.parametrize(
        "box",
        [
            pd.Index,
            pd.Series,
            np.array,
            list,
            PandasArray,
        ],
    )
    def test_setitem_object_dtype(self, box, arr1d):

        expected = arr1d.copy()[::-1]
        if expected.dtype.kind in ["m", "M"]:
            expected = expected._with_freq(None)

        vals = expected
        if box is list:
            vals = list(vals)
        elif box is np.array:
            # if we do np.array(x).astype(object) then dt64 and td64 cast to ints
            vals = np.array(vals.astype(object))
        elif box is PandasArray:
            vals = box(np.asarray(vals, dtype=object))
        else:
            vals = box(vals).astype(object)

        arr1d[:] = vals

        tm.assert_equal(arr1d, expected)

    def test_setitem_strs(self, arr1d, request):
        # Check that we parse strs in both scalar and listlike
        if isinstance(arr1d, DatetimeArray):
            tz = arr1d.tz
            ts1, ts2 = arr1d[-2:]
            if tz is not None and ts1.tz.tzname(ts1) != ts2.tz.tzname(ts2):
                # If we have e.g. tzutc(), when we cast to string and parse
                #  back we get pytz.UTC, and then consider them different timezones
                #  so incorrectly raise.
                mark = pytest.mark.xfail(
                    reason="timezone comparisons inconsistent")
                request.node.add_marker(mark)

        # Setting list-like of strs
        expected = arr1d.copy()
        expected[[0, 1]] = arr1d[-2:]

        result = arr1d.copy()
        result[:2] = [str(x) for x in arr1d[-2:]]
        tm.assert_equal(result, expected)

        # Same thing but now for just a scalar str
        expected = arr1d.copy()
        expected[0] = arr1d[-1]

        result = arr1d.copy()
        result[0] = str(arr1d[-1])
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize("as_index", [True, False])
    def test_setitem_categorical(self, arr1d, as_index):
        expected = arr1d.copy()[::-1]
        if not isinstance(expected, PeriodArray):
            expected = expected._with_freq(None)

        cat = pd.Categorical(arr1d)
        if as_index:
            cat = pd.CategoricalIndex(cat)

        arr1d[:] = cat[::-1]

        tm.assert_equal(arr1d, expected)

    def test_setitem_raises(self):
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")
        val = arr[0]

        with pytest.raises(IndexError, match="index 12 is out of bounds"):
            arr[12] = val

        with pytest.raises(TypeError, match="value should be a.* 'object'"):
            arr[0] = object()

        msg = "cannot set using a list-like indexer with a different length"
        with pytest.raises(ValueError, match=msg):
            # GH#36339
            arr[[]] = [arr[1]]

        msg = "cannot set using a slice indexer with a different length than"
        with pytest.raises(ValueError, match=msg):
            # GH#36339
            arr[1:1] = arr[:3]

    @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
    def test_setitem_numeric_raises(self, arr1d, box):
        # We don't cast e.g. int64 to our own dtype for setitem

        msg = (f"value should be a '{arr1d._scalar_type.__name__}', "
               "'NaT', or array of those. Got")
        with pytest.raises(TypeError, match=msg):
            arr1d[:2] = box([0, 1])

        with pytest.raises(TypeError, match=msg):
            arr1d[:2] = box([0.0, 1.0])

    def test_inplace_arithmetic(self):
        # GH#24115 check that iadd and isub are actually in-place
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")

        expected = arr + pd.Timedelta(days=1)
        arr += pd.Timedelta(days=1)
        tm.assert_equal(arr, expected)

        expected = arr - pd.Timedelta(days=1)
        arr -= pd.Timedelta(days=1)
        tm.assert_equal(arr, expected)

    def test_shift_fill_int_deprecated(self):
        # GH#31971
        data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9
        arr = self.array_cls(data, freq="D")

        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = arr.shift(1, fill_value=1)

        expected = arr.copy()
        if self.array_cls is PeriodArray:
            fill_val = PeriodArray._scalar_type._from_ordinal(1, freq=arr.freq)
        else:
            fill_val = arr._scalar_type(1)
        expected[0] = fill_val
        expected[1:] = arr[:-1]
        tm.assert_equal(result, expected)

    def test_median(self, arr1d):
        arr = arr1d
        if len(arr) % 2 == 0:
            # make it easier to define `expected`
            arr = arr[:-1]

        expected = arr[len(arr) // 2]

        result = arr.median()
        assert type(result) is type(expected)
        assert result == expected

        arr[len(arr) // 2] = NaT
        if not isinstance(expected, Period):
            expected = arr[len(arr) // 2 - 1:len(arr) // 2 + 2].mean()

        assert arr.median(skipna=False) is NaT

        result = arr.median()
        assert type(result) is type(expected)
        assert result == expected

        assert arr[:0].median() is NaT
        assert arr[:0].median(skipna=False) is NaT

        # 2d Case
        arr2 = arr.reshape(-1, 1)

        result = arr2.median(axis=None)
        assert type(result) is type(expected)
        assert result == expected

        assert arr2.median(axis=None, skipna=False) is NaT

        result = arr2.median(axis=0)
        expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype)
        tm.assert_equal(result, expected2)

        result = arr2.median(axis=0, skipna=False)
        expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype)
        tm.assert_equal(result, expected2)

        result = arr2.median(axis=1)
        tm.assert_equal(result, arr)

        result = arr2.median(axis=1, skipna=False)
        tm.assert_equal(result, arr)

    def test_from_integer_array(self):
        arr = np.array([1, 2, 3], dtype=np.int64)
        expected = self.array_cls(arr, dtype=self.example_dtype)

        data = pd.array(arr, dtype="Int64")
        result = self.array_cls(data, dtype=self.example_dtype)

        tm.assert_extension_array_equal(result, expected)
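
A small public-API counterpart to the searchsorted tests above (my own sketch): castable strings are parsed before searching, and where NaT sorts depends on the numpy version (GH#29884).

import pandas as pd

dti = pd.date_range("2020-01-01", periods=3)
print(dti.searchsorted("2020-01-02"))  # 1
# NaT goes at the end for numpy >= 1.18, at the beginning for older numpy
print(dti.searchsorted(pd.NaT))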
Example #14
    def get_slice_bound(self, label, side: str, kind=lib.no_default) -> int:
        # GH#42855 handle date here instead of _maybe_cast_slice_bound
        if isinstance(label, date) and not isinstance(label, datetime):
            label = Timestamp(label).to_pydatetime()
        return super().get_slice_bound(label, side=side, kind=kind)
Example #15
    expected = np.array([0, 1], dtype=result.dtype)

    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize(
    "values",
    [
        pd.to_datetime(["2020-01-01", "2020-02-01"]),
        TimedeltaIndex([1, 2], unit="D"),
        PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
    ],
)
@pytest.mark.parametrize(
    "arg",
    [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2])
def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg):
    # https://github.com/pandas-dev/pandas/issues/32762
    msg = "[Unexpected type|Cannot compare]"
    with pytest.raises(TypeError, match=msg):
        values.searchsorted(arg)


@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_period_index_construction_from_strings(klass):
    # https://github.com/pandas-dev/pandas/issues/26109
    strings = ["2020Q1", "2020Q2"] * 2
    data = klass(strings)
    result = PeriodIndex(data, freq="Q")
    expected = PeriodIndex([Period(s) for s in strings])
    tm.assert_index_equal(result, expected)
Example #16
    def _box_func(self):
        return lambda x: Timestamp(x, tz=self.tz)
Example #17
    def _parsed_string_to_bounds(self, reso, parsed):
        """
        Calculate datetime bounds for parsed time string and its resolution.

        Parameters
        ----------
        reso : Resolution
            Resolution provided by parsed string.
        parsed : datetime
            Datetime from parsed string.

        Returns
        -------
        lower, upper: pd.Timestamp

        """
        valid_resos = {
            "year",
            "month",
            "quarter",
            "day",
            "hour",
            "minute",
            "second",
            "microsecond",
        }
        if reso not in valid_resos:
            raise KeyError
        if reso == "year":
            start = Timestamp(parsed.year, 1, 1)
            end = Timestamp(parsed.year, 12, 31, 23, 59, 59, 999999)
        elif reso == "month":
            d = ccalendar.get_days_in_month(parsed.year, parsed.month)
            start = Timestamp(parsed.year, parsed.month, 1)
            end = Timestamp(parsed.year, parsed.month, d, 23, 59, 59, 999999)
        elif reso == "quarter":
            qe = (((parsed.month - 1) + 2) % 12) + 1  # two months ahead
            d = ccalendar.get_days_in_month(parsed.year, qe)  # at end of month
            start = Timestamp(parsed.year, parsed.month, 1)
            end = Timestamp(parsed.year, qe, d, 23, 59, 59, 999999)
        elif reso == "day":
            start = Timestamp(parsed.year, parsed.month, parsed.day)
            end = start + timedelta(days=1) - Nano(1)
        elif reso == "hour":
            start = Timestamp(parsed.year, parsed.month, parsed.day,
                              parsed.hour)
            end = start + timedelta(hours=1) - Nano(1)
        elif reso == "minute":
            start = Timestamp(parsed.year, parsed.month, parsed.day,
                              parsed.hour, parsed.minute)
            end = start + timedelta(minutes=1) - Nano(1)
        elif reso == "second":
            start = Timestamp(
                parsed.year,
                parsed.month,
                parsed.day,
                parsed.hour,
                parsed.minute,
                parsed.second,
            )
            end = start + timedelta(seconds=1) - Nano(1)
        elif reso == "microsecond":
            start = Timestamp(
                parsed.year,
                parsed.month,
                parsed.day,
                parsed.hour,
                parsed.minute,
                parsed.second,
                parsed.microsecond,
            )
            end = start + timedelta(microseconds=1) - Nano(1)
        # GH 24076
        # If an incoming date string contained a UTC offset, need to localize
        # the parsed date to this offset first before aligning with the index's
        # timezone
        if parsed.tzinfo is not None:
            if self.tz is None:
                raise ValueError(
                    "The index must be timezone aware when indexing "
                    "with a date string with a UTC offset")
            start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
            end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
        elif self.tz is not None:
            start = start.tz_localize(self.tz)
            end = end.tz_localize(self.tz)
        return start, end
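
These bounds are what partial string indexing builds on; a small sketch assuming standard pandas behavior: a month-resolution string selects the whole month.

import pandas as pd

ser = pd.Series(range(60), index=pd.date_range("2020-01-01", periods=60, freq="D"))
jan = ser.loc["2020-01"]
print(jan.index.min(), jan.index.max())  # 2020-01-01 ... 2020-01-31, per the bounds above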