def test_isscalar_numpy_zerodim_arrays(self):
    for zerodim in [np.array(1), np.array('foobar'),
                    np.array(np.datetime64('2014-01-01')),
                    np.array(np.timedelta64(1, 'h')),
                    np.array(np.datetime64('NaT'))]:
        self.assertFalse(lib.isscalar(zerodim))
        self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
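A minimal numpy-only sketch of the zero-dim behavior the test above exercises; the names are illustrative and `.item()` stands in for the unwrapping that `lib.item_from_zerodim` performs.

import numpy as np

zerodim = np.array(1)                # 0-d ndarray wrapping the scalar 1
assert zerodim.ndim == 0             # zero-dimensional, but still an ndarray
assert not np.isscalar(zerodim)      # numpy also refuses to call it a scalar
assert np.isscalar(zerodim.item())   # .item() unwraps the underlying Python scalar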
def wrapper(self, other, axis=None):
    # Validate the axis parameter
    if axis is not None:
        self._get_axis_number(axis)

    if isinstance(other, ABCSeries):
        name = _maybe_match_name(self, other)
        if len(self) != len(other):
            raise ValueError('Series lengths must match to compare')
        return self._constructor(na_op(self.values, other.values),
                                 index=self.index, name=name)
    elif isinstance(other, pd.DataFrame):  # pragma: no cover
        return NotImplemented
    elif isinstance(other, (np.ndarray, pd.Index)):
        # do not check length of zerodim array
        # as it will broadcast
        if (not lib.isscalar(lib.item_from_zerodim(other)) and
                len(self) != len(other)):
            raise ValueError('Lengths must match to compare')

        if isinstance(other, ABCPeriodIndex):
            # temp workaround until fixing GH 13637
            # tested in test_nat_comparisons
            # (pandas.tests.series.test_operators.TestSeriesOperators)
            return self._constructor(na_op(self.values,
                                           other.asobject.values),
                                     index=self.index)

        return self._constructor(na_op(self.values, np.asarray(other)),
                                 index=self.index).__finalize__(self)
    elif isinstance(other, pd.Categorical):
        if not is_categorical_dtype(self):
            msg = ("Cannot compare a Categorical for op {op} with Series "
                   "of dtype {typ}.\nIf you want to compare values, use "
                   "'series <op> np.asarray(other)'.")
            raise TypeError(msg.format(op=op, typ=self.dtype))

    if is_categorical_dtype(self):
        # cats are a special case as get_values() would return an ndarray,
        # which would then not take categories ordering into account
        # we can go directly to op, as the na_op would just test again and
        # dispatch to it.
        res = op(self.values, other)
    else:
        values = self.get_values()
        if isinstance(other, (list, np.ndarray)):
            other = np.asarray(other)

        res = na_op(values, other)
        if isscalar(res):
            raise TypeError('Could not compare %s type with Series' %
                            type(other))

        # always return a full value series here
        res = _values_from_object(res)

    res = pd.Series(res, index=self.index, name=self.name, dtype='bool')
    return res
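A short usage sketch (public pandas/numpy API only; exact error messages vary across pandas versions) of the two behaviors the wrapper above enforces: a zero-dim ndarray on the right-hand side broadcasts like a scalar, while mismatched Series lengths are rejected.

import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])

# zero-dim ndarray broadcasts like a scalar
print(s == np.array(2))          # False, True, False

# mismatched lengths are rejected
try:
    s == pd.Series([1, 2])
except ValueError as exc:
    print(exc)                   # length/label mismatch error (wording version-dependent)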
def test_isscalar_numpy_zerodim_arrays(self):
    for zerodim in [
        np.array(1),
        np.array("foobar"),
        np.array(np.datetime64("2014-01-01")),
        np.array(np.timedelta64(1, "h")),
    ]:
        self.assertFalse(isscalar(zerodim))
        self.assertTrue(isscalar(item_from_zerodim(zerodim)))
def na_op(x, y):
    # dispatch to the categorical if we have a categorical
    # in either operand
    if is_categorical_dtype(x):
        return op(x, y)
    elif is_categorical_dtype(y) and not isscalar(y):
        return op(y, x)

    if is_object_dtype(x.dtype):
        result = _comp_method_OBJECT_ARRAY(op, x, y)
    else:

        # we want to compare like types
        # we only want to convert to integer like if
        # we are not NotImplemented, otherwise
        # we would allow datetime64 (but viewed as i8) against
        # integer comparisons
        if is_datetimelike_v_numeric(x, y):
            raise TypeError("invalid type comparison")

        # numpy does not like comparisons vs None
        if isscalar(y) and isnull(y):
            if name == '__ne__':
                return np.ones(len(x), dtype=bool)
            else:
                return np.zeros(len(x), dtype=bool)

        # we have a datetime/timedelta and may need to convert
        mask = None
        if (needs_i8_conversion(x) or
                (not isscalar(y) and needs_i8_conversion(y))):

            if isscalar(y):
                mask = isnull(x)
                y = _index.convert_scalar(x, _values_from_object(y))
            else:
                mask = isnull(x) | isnull(y)
                y = y.view('i8')
            x = x.view('i8')

        try:
            result = getattr(x, name)(y)
            if result is NotImplemented:
                raise TypeError("invalid type comparison")
        except AttributeError:
            result = op(x, y)

        if mask is not None and mask.any():
            result[mask] = masker

    return result
def test_isscalar_numpy_array_scalars(self):
    self.assertTrue(lib.isscalar(np.int64(1)))
    self.assertTrue(lib.isscalar(np.float64(1.0)))
    self.assertTrue(lib.isscalar(np.int32(1)))
    self.assertTrue(lib.isscalar(np.object_("foobar")))
    self.assertTrue(lib.isscalar(np.str_("foobar")))
    self.assertTrue(lib.isscalar(np.unicode_(u("foobar"))))
    self.assertTrue(lib.isscalar(np.bytes_(b"foobar")))
    self.assertTrue(lib.isscalar(np.datetime64("2014-01-01")))
    self.assertTrue(lib.isscalar(np.timedelta64(1, "h")))
def test_isscalar_numpy_array_scalars(self):
    self.assertTrue(lib.isscalar(np.int64(1)))
    self.assertTrue(lib.isscalar(np.float64(1.)))
    self.assertTrue(lib.isscalar(np.int32(1)))
    self.assertTrue(lib.isscalar(np.object_('foobar')))
    self.assertTrue(lib.isscalar(np.str_('foobar')))
    self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
    self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
    self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
    self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def _gotitem(self, key, ndim, subset=None):
    """
    sub-classes to define
    return a sliced object

    Parameters
    ----------
    key : string / list of selections
    ndim : 1,2
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    # create a new object to prevent aliasing
    if subset is None:
        subset = self.obj

    # we need to make a shallow copy of ourselves
    # with the same groupby
    kwargs = dict([(attr, getattr(self, attr))
                   for attr in self._attributes])
    self = self.__class__(subset,
                          groupby=self._groupby[key],
                          parent=self,
                          **kwargs)
    self._reset_cache()
    if subset.ndim == 2:
        if lib.isscalar(key) and key in subset or com.is_list_like(key):
            self._selection = key
    return self
def isnull(obj):
    '''
    Replacement for numpy.isnan / -numpy.isfinite which is suitable
    for use on object arrays.

    Parameters
    ----------
    arr: ndarray or object value

    Returns
    -------
    boolean ndarray or boolean
    '''
    if lib.isscalar(obj):
        return lib.checknull(obj)

    from pandas.core.generic import PandasObject
    if isinstance(obj, np.ndarray):
        return _isnull_ndarraylike(obj)
    elif isinstance(obj, PandasObject):
        # TODO: optimize for DataFrame, etc.
        return obj.apply(isnull)
    elif hasattr(obj, '__array__'):
        return _isnull_ndarraylike(obj)
    else:
        return obj is None
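For context, a small illustration of the scalar/ndarray split through the public pd.isnull entry point; the internal dispatch above follows the same distinction.

import numpy as np
import pandas as pd

print(pd.isnull(np.nan))                    # True  (scalar path)
print(pd.isnull(None))                      # True  (scalar path)
print(pd.isnull(np.array([1.0, np.nan])))   # [False  True]  (ndarray path)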
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
            fill_value=None, dtype=np.float64, copy=False):

    if index is not None:
        if data is None:
            data = np.nan
        if not lib.isscalar(data):
            raise Exception("must only pass scalars with an index ")
        values = np.empty(len(index), dtype='float64')
        values.fill(data)
        data = values

    if dtype is not None:
        dtype = np.dtype(dtype)
    is_sparse_array = isinstance(data, SparseArray)
    if fill_value is None:
        if is_sparse_array:
            fill_value = data.fill_value
        else:
            fill_value = nan

    if is_sparse_array:
        sparse_index = data.sp_index
        values = np.asarray(data)
    else:
        # array-like
        if sparse_index is None:
            values, sparse_index = make_sparse(data, kind=kind,
                                               fill_value=fill_value)
        else:
            values = _sanitize_values(data)
            if len(values) != sparse_index.npoints:
                raise AssertionError("Non array-like type {0} must have"
                                     " the same length as the"
                                     " index".format(type(values)))

    # Create array, do *not* copy data by default
    if copy:
        try:
            # ToDo: Can remove this error handling when we actually
            # support other dtypes
            subarr = np.array(values, dtype=dtype, copy=True)
        except ValueError:
            subarr = np.array(values, copy=True)
    else:
        try:
            subarr = np.asarray(values, dtype=dtype)
        except ValueError:
            subarr = np.asarray(values)

    # if we have a bool type, make sure that we have a bool fill_value
    if ((dtype is not None and issubclass(dtype.type, np.bool_)) or
            (data is not None and lib.is_bool_array(subarr))):
        if np.isnan(fill_value) or not fill_value:
            fill_value = False
        else:
            fill_value = bool(fill_value)

    # Change the class of the array to be the subclass type.
    return cls._simple_new(subarr, sparse_index, fill_value)
def _isnull_old(obj):
    '''
    Detect missing values. Treat None, NaN, INF, -INF as null.

    Parameters
    ----------
    arr: ndarray or object value

    Returns
    -------
    boolean ndarray or boolean
    '''
    if lib.isscalar(obj):
        return lib.checknull_old(obj)

    from pandas.core.generic import PandasObject
    if isinstance(obj, np.ndarray):
        return _isnull_ndarraylike_old(obj)
    elif isinstance(obj, PandasObject):
        # TODO: optimize for DataFrame, etc.
        return obj.apply(_isnull_old)
    elif isinstance(obj, list) or hasattr(obj, '__array__'):
        return _isnull_ndarraylike_old(obj)
    else:
        return obj is None
def na_op(x, y):
    # dispatch to the categorical if we have a categorical
    # in either operand
    if com.is_categorical_dtype(x):
        return op(x, y)
    elif com.is_categorical_dtype(y) and not lib.isscalar(y):
        return op(y, x)

    if x.dtype == np.object_:
        if isinstance(y, list):
            y = lib.list_to_object_array(y)

        if isinstance(y, (np.ndarray, pd.Series)):
            if y.dtype != np.object_:
                result = lib.vec_compare(x, y.astype(np.object_), op)
            else:
                result = lib.vec_compare(x, y, op)
        else:
            result = lib.scalar_compare(x, y, op)
    else:
        try:
            result = getattr(x, name)(y)
            if result is NotImplemented:
                raise TypeError("invalid type comparison")
        except AttributeError:
            result = op(x, y)

    return result
def f(self, other):
    if not isscalar(other):
        raise ValueError('Simple arithmetic with %s can only be '
                         'done with scalar values' %
                         self._constructor.__name__)

    return self._combine(other, op)
def __getitem__(self, key):
    getitem = self._data.__getitem__
    if lib.isscalar(key):
        val = getitem(key)
        return self._box_func(val)
    else:
        if com.is_bool_indexer(key):
            key = np.asarray(key)
            if key.all():
                key = slice(0, None, None)
            else:
                key = lib.maybe_booleans_to_slice(key.view(np.uint8))

        attribs = self._get_attributes_dict()

        freq = None
        if isinstance(key, slice):
            if self.freq is not None and key.step is not None:
                freq = key.step * self.freq
            else:
                freq = self.freq
        attribs['freq'] = freq

        result = getitem(key)
        if result.ndim > 1:
            return result

        return self._simple_new(result, **attribs)
def _evaluate_compare(self, other, op):
    """
    We have been called because a comparison between
    i8 aware arrays. numpy >= 1.11 will
    now warn about NaT comparisons
    """

    # coerce to a similar object
    if not isinstance(other, type(self)):
        if not com.is_list_like(other):
            # scalar
            other = [other]
        elif lib.isscalar(lib.item_from_zerodim(other)):
            # ndarray scalar
            other = [other.item()]
        other = type(self)(other)

    # compare
    result = getattr(self.asi8, op)(other.asi8)

    # technically we could support bool dtyped Index
    # for now just return the indexing array directly
    mask = (self._isnan) | (other._isnan)
    if is_bool_dtype(result):
        result[mask] = False
        return result
    try:
        result[mask] = tslib.iNaT
        return Index(result)
    except TypeError:
        return result
def _sanitize_values(arr):
    """
    return an ndarray for our input,
    in a platform independent manner
    """

    if hasattr(arr, 'values'):
        arr = arr.values
    else:

        # scalar
        if lib.isscalar(arr):
            arr = [arr]

        # ndarray
        if isinstance(arr, np.ndarray):
            pass

        elif com.is_list_like(arr) and len(arr) > 0:
            arr = com._possibly_convert_platform(arr)

        else:
            arr = np.asarray(arr)

    return arr
def _convert_scalar_indexer(self, key, kind=None):
    """
    we don't allow integer or float indexing on datetime-like when using
    loc

    Parameters
    ----------
    key : label of the slice bound
    kind : {'ix', 'loc', 'getitem', 'iloc'} or None
    """

    assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

    # we don't allow integer/float indexing for loc
    # we don't allow float indexing for ix/getitem
    if lib.isscalar(key):
        is_int = is_integer(key)
        is_flt = is_float(key)
        if kind in ['loc'] and (is_int or is_flt):
            self._invalid_indexer('index', key)
        elif kind in ['ix', 'getitem'] and is_flt:
            self._invalid_indexer('index', key)

    return (super(DatetimeIndexOpsMixin, self)
            ._convert_scalar_indexer(key, kind=kind))
def isnull(obj):
    '''
    Replacement for numpy.isnan / -numpy.isfinite which is suitable
    for use on object arrays.

    Parameters
    ----------
    arr: ndarray or object value

    Returns
    -------
    boolean ndarray or boolean
    '''
    if lib.isscalar(obj):
        return lib.checknull(obj)

    from pandas.core.generic import PandasObject
    if isinstance(obj, np.ndarray):
        return _isnull_ndarraylike(obj)
    elif isinstance(obj, PandasObject):
        # TODO: optimize for DataFrame, etc.
        return obj.apply(isnull)
    elif isinstance(obj, list) or hasattr(obj, '__array__'):
        return _isnull_ndarraylike(obj)
    else:
        return obj is None
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
            copy=False, name=None, fastpath=False, **kwargs):

    if fastpath:
        return cls._simple_new(data, name=name)

    if isinstance(data, com.ABCCategorical):
        data = cls._create_categorical(cls, data, categories, ordered)
    elif isinstance(data, CategoricalIndex):
        data = data._data
        data = cls._create_categorical(cls, data, categories, ordered)
    else:

        # don't allow scalars
        # if data is None, then categories must be provided
        if lib.isscalar(data):
            if data is not None or categories is None:
                cls._scalar_data_error(data)
            data = []
        data = cls._create_categorical(cls, data, categories, ordered)

    if copy:
        data = data.copy()

    return cls._simple_new(data, name=name)
def _convert_scalar_indexer(self, key, kind=None):
    """
    we don't allow integer or float indexing on datetime-like when using
    loc

    Parameters
    ----------
    key : label of the slice bound
    kind : {'ix', 'loc', 'getitem', 'iloc'} or None
    """

    assert kind in ['ix', 'loc', 'getitem', 'iloc', None]

    # we don't allow integer/float indexing for loc
    # we don't allow float indexing for ix/getitem
    if lib.isscalar(key):
        is_int = is_integer(key)
        is_flt = is_float(key)
        if kind in ['loc'] and (is_int or is_flt):
            self._invalid_indexer('index', key)
        elif kind in ['ix', 'getitem'] and is_flt:
            self._invalid_indexer('index', key)

    return (super(DatetimeIndexOpsMixin, self)
            ._convert_scalar_indexer(key, kind=kind))
def na_op(x, y):
    # dispatch to the categorical if we have a categorical
    # in either operand
    if com.is_categorical_dtype(x):
        return op(x, y)
    elif com.is_categorical_dtype(y) and not lib.isscalar(y):
        return op(y, x)

    if x.dtype == np.object_:
        if isinstance(y, list):
            y = lib.list_to_object_array(y)

        if isinstance(y, (np.ndarray, pd.Series)):
            if y.dtype != np.object_:
                result = lib.vec_compare(x, y.astype(np.object_), op)
            else:
                result = lib.vec_compare(x, y, op)
        else:
            result = lib.scalar_compare(x, y, op)
    else:
        try:
            result = getattr(x, name)(y)
            if result is NotImplemented:
                raise TypeError("invalid type comparison")
        except AttributeError:
            result = op(x, y)

    return result
def test_isscalar_pandas_containers(self):
    self.assertFalse(lib.isscalar(pd.Series()))
    self.assertFalse(lib.isscalar(pd.Series([1])))
    self.assertFalse(lib.isscalar(pd.DataFrame()))
    self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
    self.assertFalse(lib.isscalar(pd.Panel()))
    self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
    self.assertFalse(lib.isscalar(pd.Index([])))
    self.assertFalse(lib.isscalar(pd.Index([1])))
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
            fill_value=None, dtype=np.float64, copy=False):

    if index is not None:
        if data is None:
            data = np.nan
        if not lib.isscalar(data):
            raise Exception("must only pass scalars with an index ")
        values = np.empty(len(index), dtype='float64')
        values.fill(data)
        data = values

    if dtype is not None:
        dtype = np.dtype(dtype)
    is_sparse_array = isinstance(data, SparseArray)
    if fill_value is None:
        if is_sparse_array:
            fill_value = data.fill_value
        else:
            fill_value = nan

    if is_sparse_array:
        sparse_index = data.sp_index
        values = np.asarray(data)
    else:
        # array-like
        if sparse_index is None:
            values, sparse_index = make_sparse(data, kind=kind,
                                               fill_value=fill_value)
        else:
            values = data
            if len(values) != sparse_index.npoints:
                raise AssertionError("Non array-like type {0} must have"
                                     " the same length as the"
                                     " index".format(type(values)))

    # Create array, do *not* copy data by default
    if copy:
        subarr = np.array(values, dtype=dtype, copy=True)
    else:
        subarr = np.asarray(values, dtype=dtype)

    # if we have a bool type, make sure that we have a bool fill_value
    if ((dtype is not None and issubclass(dtype.type, np.bool_)) or
            (data is not None and lib.is_bool_array(subarr))):
        if np.isnan(fill_value) or not fill_value:
            fill_value = False
        else:
            fill_value = bool(fill_value)

    # Change the class of the array to be the subclass type.
    return cls._simple_new(subarr, sparse_index, fill_value)
def _combine(self, other, func, axis=0):
    if isinstance(other, DataFrame):
        return self._combineFrame(other, func, axis=axis)
    elif isinstance(other, Panel):
        return self._combinePanel(other, func)
    elif lib.isscalar(other):
        new_frames = dict((k, func(v, other))
                          for k, v in self.iteritems())
        return self._new_like(new_frames)
def _from_arraylike(cls, data, freq, tz):

    if not isinstance(data, (np.ndarray, PeriodIndex,
                             DatetimeIndex, Int64Index)):
        if lib.isscalar(data) or isinstance(data, Period):
            raise ValueError('PeriodIndex() must be called with a '
                             'collection of some kind, %s was passed'
                             % repr(data))

        # other iterable of some kind
        if not isinstance(data, (list, tuple)):
            data = list(data)

        try:
            data = com._ensure_int64(data)
            if freq is None:
                raise ValueError('freq not specified')
            data = np.array([Period(x, freq=freq).ordinal for x in data],
                            dtype=np.int64)
        except (TypeError, ValueError):
            data = com._ensure_object(data)

            if freq is None and len(data) > 0:
                freq = getattr(data[0], 'freq', None)

            if freq is None:
                raise ValueError('freq not specified and cannot be '
                                 'inferred from first element')

            data = _get_ordinals(data, freq)
    else:
        if isinstance(data, PeriodIndex):
            if freq is None or freq == data.freq:
                freq = data.freq
                data = data.values
            else:
                base1, _ = _gfc(data.freq)
                base2, _ = _gfc(freq)
                data = period.period_asfreq_arr(data.values,
                                                base1, base2, 1)
        else:
            if freq is None and len(data) > 0:
                freq = getattr(data[0], 'freq', None)

            if freq is None:
                raise ValueError('freq not specified and cannot be '
                                 'inferred from first element')

            if data.dtype != np.int64:
                if np.issubdtype(data.dtype, np.datetime64):
                    data = dt64arr_to_periodarr(data, freq, tz)
                else:
                    try:
                        data = com._ensure_int64(data)
                    except (TypeError, ValueError):
                        data = com._ensure_object(data)
                        data = _get_ordinals(data, freq)

    return data, freq
def _from_arraylike(cls, data, freq, tz):

    if not isinstance(data, (np.ndarray, PeriodIndex,
                             DatetimeIndex, Int64Index)):
        if lib.isscalar(data) or isinstance(data, Period):
            raise ValueError('PeriodIndex() must be called with a '
                             'collection of some kind, %s was passed'
                             % repr(data))

        # other iterable of some kind
        if not isinstance(data, (list, tuple)):
            data = list(data)

        try:
            data = com._ensure_int64(data)
            if freq is None:
                raise ValueError('freq not specified')
            data = np.array([Period(x, freq=freq).ordinal for x in data],
                            dtype=np.int64)
        except (TypeError, ValueError):
            data = com._ensure_object(data)

            if freq is None and len(data) > 0:
                freq = getattr(data[0], 'freq', None)

            if freq is None:
                raise ValueError('freq not specified and cannot be '
                                 'inferred from first element')

            data = _get_ordinals(data, freq)
    else:
        if isinstance(data, PeriodIndex):
            if freq is None or freq == data.freq:
                freq = data.freq
                data = data.values
            else:
                base1, _ = _gfc(data.freq)
                base2, _ = _gfc(freq)
                data = period.period_asfreq_arr(data.values,
                                                base1, base2, 1)
        else:
            if freq is None and len(data) > 0:
                freq = getattr(data[0], 'freq', None)

            if freq is None:
                raise ValueError('freq not specified and cannot be '
                                 'inferred from first element')

            if data.dtype != np.int64:
                if np.issubdtype(data.dtype, np.datetime64):
                    data = dt64arr_to_periodarr(data, freq, tz)
                else:
                    try:
                        data = com._ensure_int64(data)
                    except (TypeError, ValueError):
                        data = com._ensure_object(data)
                        data = _get_ordinals(data, freq)

    return data, freq
def get_value(self, series, key):
    """ we always want to get an index value, never a value """
    if not lib.isscalar(key):
        raise InvalidIndexError

    from pandas.core.indexing import maybe_droplevels
    from pandas.core.series import Series

    k = com._values_from_object(key)
    loc = self.get_loc(k)
    new_values = com._values_from_object(series)[loc]

    if lib.isscalar(new_values) or new_values is None:
        return new_values

    new_index = self[loc]
    new_index = maybe_droplevels(new_index, k)
    return Series(new_values, index=new_index, name=series.name)
def _delegate_method(self, name, *args, **kwargs):
    """ method delegation to the ._values """
    method = getattr(self._values, name)
    if 'inplace' in kwargs:
        raise ValueError("cannot use inplace with CategoricalIndex")
    res = method(*args, **kwargs)
    if lib.isscalar(res):
        return res
    return CategoricalIndex(res, name=self.name)
def test_isscalar_builtin_nonscalars(self):
    self.assertFalse(lib.isscalar({}))
    self.assertFalse(lib.isscalar([]))
    self.assertFalse(lib.isscalar([1]))
    self.assertFalse(lib.isscalar(()))
    self.assertFalse(lib.isscalar((1, )))
    self.assertFalse(lib.isscalar(slice(None)))
    self.assertFalse(lib.isscalar(Ellipsis))
def _convert_by(by):
    if by is None:
        by = []
    elif (lib.isscalar(by) or
          isinstance(by, (np.ndarray, Index, Series, Grouper)) or
          hasattr(by, '__call__')):
        by = [by]
    else:
        by = list(by)
    return by
def _checked_add_with_arr(arr, b):
    """
    Performs the addition of an int64 array and an
    int64 integer (or array) but checks that they do not result in
    overflow first.

    Parameters
    ----------
    arr : array addend.
    b : array or scalar addend.

    Returns
    -------
    sum : An array for elements x + b for each element x in arr if b is
          a scalar or an array for elements x + y for each element pair
          (x, y) in (arr, b).

    Raises
    ------
    OverflowError if any x + y exceeds the maximum or minimum int64 value.
    """
    # For performance reasons, we broadcast 'b' to the new array 'b2'
    # so that it has the same size as 'arr'.
    if _np_version_under1p10:
        if lib.isscalar(b):
            b2 = np.empty(arr.shape)
            b2.fill(b)
        else:
            b2 = b
    else:
        b2 = np.broadcast_to(b, arr.shape)

    # gh-14324: For each element in 'arr' and its corresponding element
    # in 'b2', we check the sign of the element in 'b2'. If it is
    # positive, we then check whether its sum with the element in 'arr'
    # exceeds np.iinfo(np.int64).max. If so, we have an overflow error.
    # If it is negative, we then check whether its sum with the element
    # in 'arr' exceeds np.iinfo(np.int64).min. If so, we have an
    # overflow error as well.
    mask1 = b2 > 0
    mask2 = b2 < 0

    if not mask1.any():
        to_raise = (np.iinfo(np.int64).min - b2 > arr).any()
    elif not mask2.any():
        to_raise = (np.iinfo(np.int64).max - b2 < arr).any()
    else:
        to_raise = ((np.iinfo(np.int64).max -
                     b2[mask1] < arr[mask1]).any() or
                    (np.iinfo(np.int64).min -
                     b2[mask2] > arr[mask2]).any())

    if to_raise:
        raise OverflowError("Overflow in int64 addition")

    return arr + b
def _get_counts(mask, axis, dtype=float):
    dtype = _get_dtype(dtype)
    if axis is None:
        return dtype.type(mask.size - mask.sum())

    count = mask.shape[axis] - mask.sum(axis)
    if lib.isscalar(count):
        return dtype.type(count)
    try:
        return count.astype(dtype)
    except AttributeError:
        return np.array(count, dtype=dtype)
def _checked_add_with_arr(arr, b):
    """
    Performs the addition of an int64 array and an
    int64 integer (or array) but checks that they do not result in
    overflow first.

    Parameters
    ----------
    arr : array addend.
    b : array or scalar addend.

    Returns
    -------
    sum : An array for elements x + b for each element x in arr if b is
          a scalar or an array for elements x + y for each element pair
          (x, y) in (arr, b).

    Raises
    ------
    OverflowError if any x + y exceeds the maximum or minimum int64 value.
    """
    # For performance reasons, we broadcast 'b' to the new array 'b2'
    # so that it has the same size as 'arr'.
    if _np_version_under1p10:
        if lib.isscalar(b):
            b2 = np.empty(arr.shape)
            b2.fill(b)
        else:
            b2 = b
    else:
        b2 = np.broadcast_to(b, arr.shape)

    # gh-14324: For each element in 'arr' and its corresponding element
    # in 'b2', we check the sign of the element in 'b2'. If it is
    # positive, we then check whether its sum with the element in 'arr'
    # exceeds np.iinfo(np.int64).max. If so, we have an overflow error.
    # If it is negative, we then check whether its sum with the element
    # in 'arr' exceeds np.iinfo(np.int64).min. If so, we have an
    # overflow error as well.
    mask1 = b2 > 0
    mask2 = b2 < 0

    if not mask1.any():
        to_raise = (np.iinfo(np.int64).min - b2 > arr).any()
    elif not mask2.any():
        to_raise = (np.iinfo(np.int64).max - b2 < arr).any()
    else:
        to_raise = ((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]).any() or
                    (np.iinfo(np.int64).min - b2[mask2] > arr[mask2]).any())

    if to_raise:
        raise OverflowError("Overflow in int64 addition")

    return arr + b
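A standalone, numpy-only sketch of the overflow test used above, with illustrative values: subtracting the addend from the int64 maximum gives the largest value that can still be added safely, so comparing that bound against arr flags overflow before the addition happens.

import numpy as np

arr = np.array([np.iinfo(np.int64).max - 1, 5], dtype=np.int64)
b = 10  # positive scalar addend

# overflow iff any element satisfies arr > int64_max - b
overflow = (np.iinfo(np.int64).max - b < arr).any()
print(overflow)  # True: the first element would overflow if added naively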
def fill_zeros(result, x, y, name, fill):
    """
    if this is a reversed op, then flip x,y

    if we have an integer value (or array in y)
    and we have 0's, fill them with the fill,
    return the result

    mask the nan's from x
    """
    if fill is None or com.is_float_dtype(result):
        return result

    if name.startswith(('r', '__r')):
        x, y = y, x

    is_typed_variable = (hasattr(y, 'dtype') or hasattr(y, 'type'))
    is_scalar = lib.isscalar(y)

    if not is_typed_variable and not is_scalar:
        return result

    if is_scalar:
        y = np.array(y)

    if com.is_integer_dtype(y):

        if (y == 0).any():

            # GH 7325, mask and nans must be broadcastable (also: PR 9308)
            # Raveling and then reshaping makes np.putmask faster
            mask = ((y == 0) & ~np.isnan(result)).ravel()

            shape = result.shape
            result = result.astype('float64', copy=False).ravel()

            np.putmask(result, mask, fill)

            # if we have a fill of inf, then sign it correctly
            # (GH 6178 and PR 9308)
            if np.isinf(fill):
                signs = np.sign(y if name.startswith(('r', '__r')) else x)
                negative_inf_mask = (signs.ravel() < 0) & mask
                np.putmask(result, negative_inf_mask, -fill)

            if "floordiv" in name:  # (PR 9308)
                nan_mask = ((y == 0) & (x == 0)).ravel()
                np.putmask(result, nan_mask, np.nan)

            result = result.reshape(shape)

    return result
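A compact numpy-only sketch of the masking idea behind fill_zeros, with hypothetical inputs; it illustrates the technique rather than reproducing the pandas implementation: integer division by zero is patched to a signed infinity, and 0 // 0 to NaN.

import numpy as np

x = np.array([1, -1, 0, 4])
y = np.array([0, 0, 0, 2])

with np.errstate(divide='ignore', invalid='ignore'):
    result = (x // y).astype('float64')

zero_mask = (y == 0)
np.putmask(result, zero_mask, np.sign(x) * np.inf)   # signed inf for n // 0
np.putmask(result, zero_mask & (x == 0), np.nan)     # 0 // 0 -> NaN
print(result)  # [ inf -inf  nan   2.]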
def _broadcast(arr_or_scalar, shape):
    """
    Helper function to broadcast arrays / scalars to the desired shape.
    """
    if _np_version_under1p10:
        if lib.isscalar(arr_or_scalar):
            out = np.empty(shape)
            out.fill(arr_or_scalar)
        else:
            out = arr_or_scalar
    else:
        out = np.broadcast_to(arr_or_scalar, shape)
    return out
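For reference, a small demonstration of np.broadcast_to, the fast path used above on numpy >= 1.10; it returns a read-only view rather than a filled copy.

import numpy as np

view = np.broadcast_to(5, (3,))
print(view)                   # [5 5 5]
print(view.flags.writeable)   # False: a read-only view, not a new filled array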
def _isnull_new(obj):
    if lib.isscalar(obj):
        return lib.checknull(obj)

    from pandas.core.generic import PandasObject
    if isinstance(obj, np.ndarray):
        return _isnull_ndarraylike(obj)
    elif isinstance(obj, PandasObject):
        # TODO: optimize for DataFrame, etc.
        return obj.apply(isnull)
    elif isinstance(obj, list) or hasattr(obj, '__array__'):
        return _isnull_ndarraylike(obj)
    else:
        return obj is None
def __sizeof__(self):
    """
    Generates the total memory usage for an object that returns
    either a value or Series of values
    """
    if hasattr(self, 'memory_usage'):
        mem = self.memory_usage(deep=True)
        if not lib.isscalar(mem):
            mem = mem.sum()
        return int(mem)

    # no memory_usage attribute, so fall back to
    # object's 'sizeof'
    return super(PandasObject, self).__sizeof__()
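The scalar-vs-Series handling above mirrors the public memory_usage API: DataFrame.memory_usage(deep=True) returns a per-column Series that must be summed, while Series.memory_usage already returns a scalar. A quick, illustrative example:

import pandas as pd

df = pd.DataFrame({'a': range(3), 'b': list('xyz')})
per_column = df.memory_usage(deep=True)   # Series: one entry per column (plus index)
print(int(per_column.sum()))              # total bytes

s = pd.Series(range(3))
print(s.memory_usage(deep=True))          # already a scalar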
def _convert_scalar_indexer(self, key, kind=None):
    """
    we don't allow integer or float indexing on datetime-like when using
    loc

    Parameters
    ----------
    key : label of the slice bound
    kind : optional, type of the indexing operation (loc/ix/iloc/None)
    """

    if (kind in ['loc'] and lib.isscalar(key) and
            (is_integer(key) or is_float(key))):
        self._invalid_indexer('index', key)

    return (super(DatetimeIndexOpsMixin, self)
            ._convert_scalar_indexer(key, kind=kind))
def append(self, value):
    """
    Append element or array-like chunk of data to the SparseList

    Parameters
    ----------
    value: scalar or array-like
    """
    if lib.isscalar(value):
        value = [value]

    sparr = SparseArray(value, fill_value=self.fill_value)
    self._chunks.append(sparr)
    self._consolidated = False
def _ensure_datetimelike_to_i8(other):
    """ helper for coercing an input scalar or array to i8 """
    if lib.isscalar(other) and com.isnull(other):
        other = tslib.iNaT
    elif isinstance(other, com.ABCIndexClass):

        # convert tz if needed
        if getattr(other, 'tz', None) is not None:
            other = other.tz_localize(None).asi8
        else:
            other = other.asi8
    else:
        other = np.array(other, copy=False).view('i8')
    return other
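A brief illustration of the view('i8') reinterpretation used above: datetime64[ns] values view cleanly as int64 nanoseconds since the epoch.

import numpy as np

stamps = np.array(['1970-01-01', '1970-01-02'], dtype='datetime64[ns]')
print(stamps.view('i8'))   # [             0 86400000000000] nanoseconds since the epoch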
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
    dtype = _get_dtype(dtype)
    count = _get_counts(mask, axis, dtype=dtype)
    d = count - dtype.type(ddof)

    # always return NaN, never inf
    if lib.isscalar(count):
        if count <= ddof:
            count = np.nan
            d = np.nan
    else:
        mask2 = count <= ddof
        if mask2.any():
            np.putmask(d, mask2, np.nan)
            np.putmask(count, mask2, np.nan)
    return count, d
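A tiny numeric sketch of the denominator guard above, with illustrative values: with ddof=1, a column containing a single valid observation yields NaN rather than a zero or negative divisor.

import numpy as np

counts = np.array([5., 1.])   # valid observations per column
ddof = 1
d = counts - ddof             # variance denominators: [4., 0.]

bad = counts <= ddof          # columns with too few observations
np.putmask(d, bad, np.nan)
np.putmask(counts, bad, np.nan)
print(counts, d)              # [ 5. nan] [ 4. nan]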