def test_astype_nansafe():
    # https://github.com/pandas-dev/pandas/pull/22343
    # Casting a masked integer array that holds missing values to a plain
    # (non-nullable) unsigned dtype must raise instead of producing garbage.
    arr = IntegerArray([np.nan, 1, 2], dtype="Int8")
    msg = 'cannot convert float NaN to integer'
    with tm.assert_raises_regex(ValueError, msg):
        arr.astype('uint32')
def test_to_integer_array_error(values):
    # error in converting existing arrays to IntegerArrays
    msg = "|".join([
        r"(:?.* cannot be converted to an IntegerDtype)",
        r"(:?values must be a 1D list-like)",
        r"(Cannot pass scalar)",
    ])
    with pytest.raises((ValueError, TypeError), match=msg):
        pd.array(values, dtype="Int64")
    with pytest.raises(TypeError, match=msg):
        IntegerArray._from_sequence(values)
def test_get_numeric_data_extension_dtype(self):
    # GH 22290
    # only the numeric extension columns (A, C) survive _get_numeric_data
    columns = {
        'A': IntegerArray([-10, np.nan, 0, 10, 20, 30], dtype='Int64'),
        'B': Categorical(list('abcabc')),
        'C': IntegerArray([0, 1, 2, 3, np.nan, 5], dtype='UInt8'),
        'D': IntervalArray.from_breaks(range(7)),
    }
    df = DataFrame(columns)
    expected = df.loc[:, ['A', 'C']]
    result = df._get_numeric_data()
    assert_frame_equal(result, expected)
def test_to_integer_array_float():
    # exact floats convert losslessly
    expected = pd.array([1, 2], dtype="Int64")
    result = IntegerArray._from_sequence([1.0, 2.0])
    tm.assert_extension_array_equal(result, expected)

    # fractional values cannot be cast safely
    with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
        IntegerArray._from_sequence([1.5, 2.0])

    # for float dtypes, the itemsize is not preserved
    float32_values = np.array([1.0, 2.0], dtype="float32")
    assert IntegerArray._from_sequence(float32_values).dtype == Int64Dtype()
def test_integer_array_constructor_copy():
    values = np.array([1, 2, 3, 4], dtype='int64')
    mask = np.array([False, False, False, True], dtype='bool')

    # default: constructor takes ownership of the buffers without copying
    shared = IntegerArray(values, mask)
    assert shared._data is values
    assert shared._mask is mask

    # copy=True: both the data and the mask are duplicated
    owned = IntegerArray(values, mask, copy=True)
    assert owned._data is not values
    assert owned._mask is not mask
def test_to_integer_array_str():
    # numeric strings and None convert cleanly
    expected = pd.array([1, 2, np.nan], dtype="Int64")
    result = IntegerArray._from_sequence(["1", "2", None])
    tm.assert_extension_array_equal(result, expected)

    # non-integer strings are rejected with the int() parse error
    bad_literal = r"invalid literal for int\(\) with base 10: .*"
    with pytest.raises(ValueError, match=bad_literal):
        IntegerArray._from_sequence(["1", "2", ""])
    with pytest.raises(ValueError, match=bad_literal):
        IntegerArray._from_sequence(["1.5", "2.0"])
def test_to_integer_array_error(values):
    # error in converting existing arrays to IntegerArrays
    patterns = [
        r"cannot be converted to an IntegerDtype",
        r"invalid literal for int\(\) with base 10:",
        r"values must be a 1D list-like",
        r"Cannot pass scalar",
    ]
    msg = "|".join(patterns)
    # both entry points should reject the input the same way
    for convert in (lambda v: pd.array(v, dtype="Int64"),
                    IntegerArray._from_sequence):
        with pytest.raises((ValueError, TypeError), match=msg):
            convert(values)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
    """
    Cast to a NumPy array or ExtensionArray with 'dtype'.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    copy : bool, default True
        Whether to copy the data, even if not necessary. If False,
        a copy is made only if the old dtype does not match the
        new dtype.

    Returns
    -------
    ndarray or ExtensionArray
        NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.

    Raises
    ------
    TypeError
        if incompatible type with an BooleanDtype, equivalent of
        same_kind casting
    """
    from pandas.core.arrays.string_ import StringDtype

    dtype = pandas_dtype(dtype)

    if isinstance(dtype, BooleanDtype):
        values, mask = coerce_to_array(self, copy=copy)
        # NOTE(review): when copy=False the freshly coerced values/mask are
        # discarded and ``self`` is returned unchanged; only the copy=True
        # path uses them -- confirm this is intentional.
        if not copy:
            return self
        else:
            return BooleanArray(values, mask, copy=False)
    elif isinstance(dtype, StringDtype):
        return dtype.construct_array_type()._from_sequence(self, copy=False)

    if is_bool_dtype(dtype):
        # astype_nansafe converts np.nan to True
        if self._hasna:
            raise ValueError("cannot convert float NaN to bool")
        else:
            return self._data.astype(dtype, copy=copy)

    if is_extension_array_dtype(dtype) and is_integer_dtype(dtype):
        from pandas.core.arrays import IntegerArray

        # reuse the cast data buffer; copy the mask so the result owns it
        return IntegerArray(self._data.astype(dtype.numpy_dtype),
                            self._mask.copy(), copy=False)

    # for integer, error if there are missing values
    if is_integer_dtype(dtype) and self._hasna:
        raise ValueError("cannot convert NA to integer")

    # for float dtype, ensure we use np.nan before casting (numpy cannot
    # deal with pd.NA)
    na_value = self._na_value
    if is_float_dtype(dtype):
        na_value = np.nan

    # coerce
    return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
def _maybe_mask_result(self, result, mask, other, op_name: str): """ Parameters ---------- result : array-like mask : array-like bool other : scalar or array-like op_name : str """ # if we have a float operand we are by-definition # a float result # or our op is a divide if (is_float_dtype(other) or is_float(other)) or ( op_name in ["rtruediv", "truediv"] ): from pandas.core.arrays import FloatingArray return FloatingArray(result, mask, copy=False) elif is_bool_dtype(result): return BooleanArray(result, mask, copy=False) elif is_integer_dtype(result): from pandas.core.arrays import IntegerArray return IntegerArray(result, mask, copy=False) else: result[mask] = np.nan return result
def astype(self, dtype, copy=True):
    # Cast this string-backed array to ``dtype``, with special handling
    # for the nullable Integer/Floating extension dtypes.
    dtype = pandas_dtype(dtype)
    if isinstance(dtype, StringDtype):
        # same dtype: honour ``copy`` only
        if copy:
            return self.copy()
        return self
    elif isinstance(dtype, _IntegerDtype):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = 0  # placeholder; hidden by the mask in the result
        values = arr.astype(dtype.numpy_dtype)
        return IntegerArray(values, mask, copy=False)
    elif isinstance(dtype, FloatingDtype):
        arr = self.copy()
        mask = self.isna()
        # fill with the *string* "0" so the str -> float cast succeeds
        arr[mask] = "0"
        values = arr.astype(dtype.numpy_dtype)
        return FloatingArray(values, mask, copy=False)
    elif np.issubdtype(dtype, np.floating):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = 0
        values = arr.astype(dtype)
        values[mask] = np.nan  # restore missing entries as NaN
        return values

    return super().astype(dtype, copy)
def reconstruct(x):
    # Re-wrap a raw ufunc result ndarray ``x`` in the matching masked array
    # type; ``mask`` is read from the enclosing scope.
    # we don't worry about scalar `x` here, since we
    # raise for reduce up above.
    from pandas.core.arrays import (
        BooleanArray,
        FloatingArray,
        IntegerArray,
    )

    if is_bool_dtype(x.dtype):
        m = mask.copy()
        return BooleanArray(x, m)
    elif is_integer_dtype(x.dtype):
        m = mask.copy()
        return IntegerArray(x, m)
    elif is_float_dtype(x.dtype):
        m = mask.copy()
        if x.dtype == np.float16:
            # reached in e.g. np.sqrt on BooleanArray
            # we don't support float16
            x = x.astype(np.float32)
        return FloatingArray(x, m)
    else:
        # unrecognized result dtype: fall back to a plain ndarray with NaN
        x[mask] = np.nan
        return x
def test_construct_cast_invalid(self, dtype):
    # non-integral floats (with or without NaN) cannot be cast safely
    msg = "cannot safely"
    for data in ([1.2, 2.3, 3.7], [1.2, 2.3, 3.7, np.nan]):
        with tm.assert_raises_regex(TypeError, msg):
            IntegerArray(data, dtype=dtype)
        with tm.assert_raises_regex(TypeError, msg):
            pd.Series(data).astype(dtype)
def astype(self, dtype, copy=True):
    # Cast this string-backed array to ``dtype``, dispatching on the
    # target dtype kind.
    dtype = pandas_dtype(dtype)

    if is_dtype_equal(dtype, self.dtype):
        # same dtype: honour ``copy`` only
        if copy:
            return self.copy()
        return self

    elif isinstance(dtype, _IntegerDtype):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = 0  # placeholder; hidden by the mask in the result
        values = arr.astype(dtype.numpy_dtype)
        return IntegerArray(values, mask, copy=False)
    elif isinstance(dtype, FloatingDtype):
        # error: Incompatible types in assignment (expression has type
        # "StringArray", variable has type "ndarray")
        arr = self.copy()  # type: ignore[assignment]
        mask = self.isna()
        # fill with the *string* "0" so the str -> float cast succeeds
        arr[mask] = "0"
        values = arr.astype(dtype.numpy_dtype)
        return FloatingArray(values, mask, copy=False)
    elif isinstance(dtype, ExtensionDtype):
        # generic extension target: delegate to its own constructor
        cls = dtype.construct_array_type()
        return cls._from_sequence(self, dtype=dtype, copy=copy)
    elif np.issubdtype(dtype, np.floating):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = 0
        values = arr.astype(dtype)
        values[mask] = np.nan  # restore missing entries as NaN
        return values

    return super().astype(dtype, copy)
def astype(self, dtype, copy=True):
    # Cast this string-backed array to ``dtype``, dispatching on the
    # target dtype kind.
    dtype = pandas_dtype(dtype)

    if is_dtype_equal(dtype, self.dtype):
        # same dtype: honour ``copy`` only
        if copy:
            return self.copy()
        return self

    elif isinstance(dtype, _IntegerDtype):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = 0  # placeholder; hidden by the mask in the result
        values = arr.astype(dtype.numpy_dtype)
        return IntegerArray(values, mask, copy=False)
    elif isinstance(dtype, FloatingDtype):
        arr = self.copy()
        mask = self.isna()
        # fill with the *string* "0" so the str -> float cast succeeds
        arr[mask] = "0"
        values = arr.astype(dtype.numpy_dtype)
        return FloatingArray(values, mask, copy=False)
    elif isinstance(dtype, ExtensionDtype):
        # generic extension target: delegate to its own constructor
        cls = dtype.construct_array_type()
        return cls._from_sequence(self, dtype=dtype, copy=copy)
    elif np.issubdtype(dtype, np.floating):
        arr = self._ndarray.copy()
        mask = self.isna()
        arr[mask] = 0
        values = arr.astype(dtype)
        values[mask] = np.nan  # restore missing entries as NaN
        return values

    return super().astype(dtype, copy)
def test_integer_array_constructor():
    values = np.array([1, 2, 3, 4], dtype='int64')
    mask = np.array([False, False, False, True], dtype='bool')

    expected = integer_array([1, 2, 3, np.nan], dtype='int64')
    tm.assert_extension_array_equal(IntegerArray(values, mask), expected)

    # anything other than int-ndarray data + bool-ndarray mask is rejected,
    # as is omitting the mask entirely
    for bad_args in ((values.tolist(), mask),
                     (values, mask.tolist()),
                     (values.astype(float), mask),
                     (values,)):
        with pytest.raises(TypeError):
            IntegerArray(*bad_args)
def value_counts(self, dropna: bool = True) -> Series:
    """
    Returns a Series containing counts of each unique value.

    Parameters
    ----------
    dropna : bool, default True
        Don't include counts of missing values.

    Returns
    -------
    counts : Series

    See Also
    --------
    Series.value_counts
    """
    from pandas import (
        Index,
        Series,
    )
    from pandas.arrays import IntegerArray

    if dropna:
        # fast path: the algos layer skips masked entries directly
        keys, counts = algos.value_counts_arraylike(self._data, dropna=True,
                                                    mask=self._mask)
        res = Series(counts, index=keys)
        res.index = res.index.astype(self.dtype)
        res = res.astype("Int64")
        return res

    # compute counts on the data with no nans
    data = self._data[~self._mask]
    value_counts = Index(data).value_counts()
    index = value_counts.index

    # if we want nans, count the mask
    if dropna:
        # NOTE(review): this branch looks unreachable -- dropna=True already
        # returned above, so only the else branch below can execute.
        counts = value_counts._values
    else:
        counts = np.empty(len(value_counts) + 1, dtype="int64")
        counts[:-1] = value_counts
        counts[-1] = self._mask.sum()  # number of missing entries
        index = index.insert(len(index), self.dtype.na_value)

    index = index.astype(self.dtype)

    # the counts themselves are fully valid, hence an all-False mask
    mask = np.zeros(len(counts), dtype="bool")
    counts = IntegerArray(counts, mask)

    return Series(counts, index=index)
def test_integer_array_constructor():
    values = np.array([1, 2, 3, 4], dtype="int64")
    mask = np.array([False, False, False, True], dtype="bool")

    expected = pd.array([1, 2, 3, np.nan], dtype="Int64")
    tm.assert_extension_array_equal(IntegerArray(values, mask), expected)

    # wrong container / wrong dtype inputs point users at pd.array
    msg = r".* should be .* numpy array. Use the 'pd.array' function instead"
    for bad_args in ((values.tolist(), mask),
                     (values, mask.tolist()),
                     (values.astype(float), mask)):
        with pytest.raises(TypeError, match=msg):
            IntegerArray(*bad_args)

    # the mask argument is mandatory
    msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
    with pytest.raises(TypeError, match=msg):
        IntegerArray(values)
def value_counts(self, dropna: bool = True) -> Series:
    """
    Returns a Series containing counts of each unique value.

    Parameters
    ----------
    dropna : bool, default True
        Don't include counts of missing values.

    Returns
    -------
    counts : Series

    See Also
    --------
    Series.value_counts
    """
    from pandas import (
        Index,
        Series,
    )
    from pandas.arrays import IntegerArray

    # compute counts on the data with no nans
    data = self._data[~self._mask]
    value_counts = Index(data).value_counts()

    # TODO(ExtensionIndex)
    # if we have allow Index to hold an ExtensionArray
    # this is easier
    index = value_counts.index._values.astype(object)

    # if we want nans, count the mask
    if dropna:
        counts = value_counts._values
    else:
        # append one extra slot holding the count of missing entries
        counts = np.empty(len(value_counts) + 1, dtype="int64")
        counts[:-1] = value_counts
        counts[-1] = self._mask.sum()
        index = Index(
            np.concatenate(
                [index, np.array([self.dtype.na_value], dtype=object)]),
            dtype=object,
        )

    # the counts themselves are fully valid, hence an all-False mask
    mask = np.zeros(len(counts), dtype="bool")
    counts = IntegerArray(counts, mask)

    return Series(counts, index=index)
def _maybe_mask_result(self, result, mask, other, op_name: str):
    """
    Wrap a raw op result in the appropriate masked array type.

    Parameters
    ----------
    result : array-like
    mask : array-like bool
    other : scalar or array-like
    op_name : str
    """
    if op_name == "divmod":
        # divmod returns a tuple
        div, mod = result
        return (
            self._maybe_mask_result(div, mask, other, "floordiv"),
            self._maybe_mask_result(mod, mask, other, "mod"),
        )

    # if we have a float operand we are by-definition
    # a float result
    # or our op is a divide
    if ((is_float_dtype(other) or is_float(other))
            or (op_name in ["rtruediv", "truediv"])
            or (is_float_dtype(self.dtype)
                and is_numeric_dtype(result.dtype))):
        from pandas.core.arrays import FloatingArray

        return FloatingArray(result, mask, copy=False)

    elif is_bool_dtype(result):
        from pandas.core.arrays import BooleanArray

        return BooleanArray(result, mask, copy=False)

    elif result.dtype == "timedelta64[ns]":
        # e.g. test_numeric_arr_mul_tdscalar_numexpr_path
        from pandas.core.arrays import TimedeltaArray

        if not isinstance(result, TimedeltaArray):
            result = TimedeltaArray._simple_new(result)

        result[mask] = result.dtype.type("NaT")
        return result

    elif is_integer_dtype(result):
        from pandas.core.arrays import IntegerArray

        return IntegerArray(result, mask, copy=False)

    else:
        # plain ndarray fallback: write NaN into the masked positions
        result[mask] = np.nan
        return result
def test_construct_index(self, all_data, dropna):
    # ensure that we do not coerce to Float64Index, rather
    # keep as Index
    all_data = all_data[:10]
    other = np.array(all_data[~all_data.isna()]) if dropna else all_data

    result = pd.Index(IntegerArray(other, dtype=all_data.dtype))
    expected = pd.Index(other, dtype=object)

    self.assert_index_equal(result, expected)
def astype(self, dtype, copy=True):
    """Cast to ``dtype``, handling the string -> nullable-integer case."""
    dtype = pandas_dtype(dtype)

    if isinstance(dtype, StringDtype):
        # no-op cast: only honour the ``copy`` flag
        return self.copy() if copy else self

    if isinstance(dtype, _IntegerDtype):
        mask = self.isna()
        data = self._ndarray.copy()
        data[mask] = 0  # placeholder; hidden by the mask in the result
        return IntegerArray(data.astype(dtype.numpy_dtype), mask, copy=False)

    return super().astype(dtype, copy)
def test_groupby_mean_included():
    df = pd.DataFrame({
        "A": ['a', 'b', 'b'],
        "B": [1, None, 3],
        "C": IntegerArray([1, None, 3], dtype='Int64'),
    })

    result = df.groupby("A").sum()

    # TODO(#22346): preserve Int64 dtype
    expected = pd.DataFrame(
        {
            "B": np.array([1.0, 3.0]),
            "C": np.array([1, 3], dtype="int64"),
        },
        index=pd.Index(['a', 'b'], name='A'),
    )
    tm.assert_frame_equal(result, expected)
def reconstruct(x):
    # Re-wrap a raw ufunc result ndarray ``x`` in the matching masked array
    # type; ``mask`` is read from the enclosing scope.
    # we don't worry about scalar `x` here, since we
    # raise for reduce up above.
    if is_integer_dtype(x.dtype):
        from pandas.core.arrays import IntegerArray

        m = mask.copy()
        return IntegerArray(x, m)
    elif is_float_dtype(x.dtype):
        from pandas.core.arrays import FloatingArray

        m = mask.copy()
        return FloatingArray(x, m)
    else:
        # unrecognized result dtype: fall back to a plain ndarray with NaN
        x[mask] = np.nan
        return x
def astype(self, dtype, copy=True):
    """
    Cast to a NumPy array or ExtensionArray with 'dtype'.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    copy : bool, default True
        Whether to copy the data, even if not necessary. If False,
        a copy is made only if the old dtype does not match the
        new dtype.

    Returns
    -------
    array : ndarray or ExtensionArray
        NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its
        dtype.

    Raises
    ------
    TypeError
        if incompatible type with an BooleanDtype, equivalent of
        same_kind casting
    """
    dtype = pandas_dtype(dtype)

    if isinstance(dtype, BooleanDtype):
        values, mask = coerce_to_array(self, copy=copy)
        return BooleanArray(values, mask, copy=False)

    if is_bool_dtype(dtype):
        # astype_nansafe converts np.nan to True
        if self.isna().any():
            raise ValueError("cannot convert float NaN to bool")
        else:
            return self._data.astype(dtype, copy=copy)

    if is_extension_array_dtype(dtype) and is_integer_dtype(dtype):
        from pandas.core.arrays import IntegerArray

        # reuse the cast data buffer; copy the mask so the result owns it
        return IntegerArray(self._data.astype(dtype.numpy_dtype),
                            self._mask.copy(), copy=False)

    # coerce
    data = self._coerce_to_ndarray()
    # NOTE(review): ``copy=None`` passed where a bool is expected -- looks
    # suspicious; confirm against the astype_nansafe signature.
    return astype_nansafe(data, dtype, copy=None)
def _maybe_mask_result(self, result, mask):
    """
    Wrap a raw op result in the appropriate masked array type.

    Parameters
    ----------
    result : array-like or tuple[array-like]
    mask : array-like bool
    """
    if isinstance(result, tuple):
        # i.e. divmod
        div, mod = result
        return (
            self._maybe_mask_result(div, mask),
            self._maybe_mask_result(mod, mask),
        )

    if is_float_dtype(result.dtype):
        from pandas.core.arrays import FloatingArray

        return FloatingArray(result, mask, copy=False)

    elif is_bool_dtype(result.dtype):
        from pandas.core.arrays import BooleanArray

        return BooleanArray(result, mask, copy=False)

    elif result.dtype == "timedelta64[ns]":
        # e.g. test_numeric_arr_mul_tdscalar_numexpr_path
        from pandas.core.arrays import TimedeltaArray

        if not isinstance(result, TimedeltaArray):
            result = TimedeltaArray._simple_new(result)

        result[mask] = result.dtype.type("NaT")
        return result

    elif is_integer_dtype(result.dtype):
        from pandas.core.arrays import IntegerArray

        return IntegerArray(result, mask, copy=False)

    else:
        # plain ndarray fallback: write NaN into the masked positions
        result[mask] = np.nan
        return result
def test_to_integer_array(values, to_dtype, result_dtype):
    # convert existing arrays to IntegerArrays
    expected = IntegerArray(values, dtype=result_dtype())
    result = to_integer_array(values, dtype=to_dtype)
    tm.assert_extension_array_equal(result, expected)
def array(
    data: Union[Sequence[object], AnyArrayLike],
    dtype: Optional[Dtype] = None,
    copy: bool = True,
) -> "ExtensionArray":
    """
    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        :class:`int`                   :class:`pandas.arrays.IntegerArray`
        :class:`str`                   :class:`pandas.arrays.StringArray`
        :class:`bool`                  :class:`pandas.arrays.BooleanArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

        .. versionchanged:: 1.0.0

           Pandas infers nullable-integer dtype for integer data,
           string dtype for string data, and nullable-boolean dtype
           for boolean data.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete
    object rather than a string alias or allowing it to be inferred. For
    example, a future version of pandas or a 3rd-party library may include
    a dedicated ExtensionArray for string data. In this event, the
    following would no longer return a :class:`arrays.PandasArray` backed
    by a NumPy array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or
    ``TimedeltaArray`` rather than a ``PandasArray``. This is for symmetry
    with the case of timezone-aware data, which NumPy does not natively
    support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['0 days 01:00:00', '0 days 02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, pandas will infer the best dtype from
    the values. See the description of `dtype` for the types pandas
    infers for.

    >>> pd.array([1, 2])
    <IntegerArray>
    [1, 2]
    Length: 2, dtype: Int64

    >>> pd.array([1, 2, np.nan])
    <IntegerArray>
    [1, 2, <NA>]
    Length: 3, dtype: Int64

    >>> pd.array(["a", None, "c"])
    <StringArray>
    ['a', <NA>, 'c']
    Length: 3, dtype: string

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    If pandas does not infer a dedicated extension type a
    :class:`arrays.PandasArray` is returned.

    >>> pd.array([1.1, 2.2])
    <PandasArray>
    [1.1, 2.2]
    Length: 2, dtype: float64

    As mentioned in the "Notes" section, new extension types may be added
    in the future (by pandas or 3rd party libraries), causing the return
    value to no longer be a :class:`arrays.PandasArray`. Specify the
    `dtype` as a NumPy dtype if you need to ensure there's no future
    change in behavior.

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
    """
    from pandas.core.arrays import (
        period_array,
        BooleanArray,
        IntegerArray,
        IntervalArray,
        PandasArray,
        DatetimeArray,
        TimedeltaArray,
        StringArray,
    )

    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)

    # Series/Index carry their own dtype; use it when none was requested.
    if dtype is None and isinstance(
            data, (ABCSeries, ABCIndexClass, ABCExtensionArray)):
        dtype = data.dtype

    data = extract_array(data, extract_numpy=True)

    # this returns None for not-found dtypes.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype

    if is_extension_array_dtype(dtype):
        # explicit extension dtype: delegate directly to its array type
        cls = cast(ExtensionDtype, dtype).construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)

    if dtype is None:
        # no dtype requested: infer an extension type from the scalars
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            try:
                return period_array(data, copy=copy)
            except IncompatibleFrequency:
                # We may have a mixture of frequencies.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype == "interval":
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # We may have a mixture of `closed` here.
                # We choose to return an ndarray, rather than raising.
                pass
        elif inferred_dtype.startswith("datetime"):
            # datetime, datetime64
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixture of timezones, fall back to PandasArray
                pass
        elif inferred_dtype.startswith("timedelta"):
            # timedelta, timedelta64
            return TimedeltaArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "string":
            return StringArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, copy=copy)

    # Pandas overrides NumPy for
    # 1. datetime64[ns]
    # 2. timedelta64[ns]
    # so that a DatetimeArray is returned.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

    # everything else: plain NumPy-backed wrapper
    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
def data_for_grouping(dtype):
    # Fixture data: unsorted group keys b < c with a pair of missing
    # values in the middle, exercising grouping on a masked array.
    a, b, c, na = 0, 1, 2, np.nan
    return IntegerArray([b, b, na, na, a, a, b, c], dtype=dtype)
tm.assert_numpy_array_equal(out, exp) arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object) exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]") out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1) tm.assert_numpy_array_equal(out, exp) arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object) exp = arr.copy() out = lib.maybe_convert_objects(arr, convert_datetime=1, convert_timedelta=1) tm.assert_numpy_array_equal(out, exp) @pytest.mark.parametrize( "exp", [ IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])), IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])), ], ) def test_maybe_convert_objects_nullable_integer(self, exp): # GH27335 arr = np.array([2, np.NaN], dtype=object) result = lib.maybe_convert_objects(arr, convert_to_nullable_integer=1) tm.assert_extension_array_equal(result, exp) def test_mixed_dtypes_remain_object_array(self): # GH14956 array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object) result = lib.maybe_convert_objects(array, convert_datetime=1) tm.assert_numpy_array_equal(result, array)
def test_to_integer_array(values, to_dtype, result_dtype):
    # convert existing arrays to IntegerArrays
    result = IntegerArray._from_sequence(values, dtype=to_dtype)
    expected = pd.array(values, dtype=result_dtype())

    assert result.dtype == result_dtype()
    tm.assert_extension_array_equal(result, expected)