def test_infer_dtype_from_scalar(self):
    """Check com._infer_dtype_from_scalar over the scalar type zoo.

    Covers numpy integer/float scalars, Python int/float/bool, complex,
    datetime-likes, timedelta-likes, and the object-dtype fallbacks.
    """
    # datetime is only needed by this test, keep the import local.
    import datetime

    # Test that _infer_dtype_from_scalar is returning correct dtype for int
    # and float.
    for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
                   np.int32, np.uint64, np.int64]:
        data = dtypec(12)
        dtype, val = com._infer_dtype_from_scalar(data)
        # type(data) is dtypec for numpy scalar types
        self.assertEqual(dtype, type(data))

    data = 12
    dtype, val = com._infer_dtype_from_scalar(data)
    self.assertEqual(dtype, np.int64)

    for dtypec in [np.float16, np.float32, np.float64]:
        data = dtypec(12)
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, dtypec)

    # np.float was removed in NumPy 1.20+; the builtin float infers the
    # same np.float64 dtype.
    data = float(12)
    dtype, val = com._infer_dtype_from_scalar(data)
    self.assertEqual(dtype, np.float64)

    for data in [True, False]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, np.bool_)

    # np.complex_ was removed in NumPy 2.0; it was an alias of
    # np.complex128.
    for data in [np.complex64(1), np.complex128(1)]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, np.complex128)

    for data in [np.datetime64(1, 'ns'), pd.Timestamp(1),
                 datetime.datetime(2000, 1, 1, 0, 0)]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, 'M8[ns]')

    for data in [np.timedelta64(1, 'ns'), pd.Timedelta(1),
                 datetime.timedelta(1)]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, 'm8[ns]')

    # dates, tz-aware timestamps and strings all fall back to object
    for data in [datetime.date(2000, 1, 1),
                 pd.Timestamp(1, tz='US/Eastern'), 'foo']:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, np.object_)
def test_infer_dtype_from_scalar(self):
    """Verify com._infer_dtype_from_scalar returns the expected dtype
    for int, float, bool, complex, datetime-like, timedelta-like and
    object-dtype scalars.
    """
    # datetime is used only within this test.
    import datetime

    # Test that _infer_dtype_from_scalar is returning correct dtype for int
    # and float.
    for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
                   np.int32, np.uint64, np.int64]:
        data = dtypec(12)
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, dtypec)

    data = 12
    dtype, val = com._infer_dtype_from_scalar(data)
    self.assertEqual(dtype, np.int64)

    for dtypec in [np.float16, np.float32, np.float64]:
        data = dtypec(12)
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, dtypec)

    # builtin float instead of np.float (removed in NumPy 1.20+);
    # both infer np.float64.
    data = float(12)
    dtype, val = com._infer_dtype_from_scalar(data)
    self.assertEqual(dtype, np.float64)

    for data in [True, False]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, np.bool_)

    # np.complex128 instead of np.complex_ (alias removed in NumPy 2.0).
    for data in [np.complex64(1), np.complex128(1)]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, np.complex128)

    for data in [np.datetime64(1, 'ns'), pd.Timestamp(1),
                 datetime.datetime(2000, 1, 1, 0, 0)]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, 'M8[ns]')

    for data in [np.timedelta64(1, 'ns'), pd.Timedelta(1),
                 datetime.timedelta(1)]:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, 'm8[ns]')

    # anything without a dedicated dtype falls back to object
    for data in [datetime.date(2000, 1, 1),
                 pd.Timestamp(1, tz='US/Eastern'), 'foo']:
        dtype, val = com._infer_dtype_from_scalar(data)
        self.assertEqual(dtype, np.object_)
def set_value(self, *args, **kwargs):
    """
    Quickly set single value at (item, major, minor) location

    Parameters
    ----------
    item : item label (panel item)
    major : major axis label (panel item row)
    minor : minor axis label (panel item column)
    value : scalar
    takeable : interpret the passed labels as indexers, default False

    Returns
    -------
    panel : Panel
        If label combo is contained, will be reference to calling Panel,
        otherwise a new object
    """
    # one positional argument per axis, plus the value itself
    n_given = len(args)
    n_needed = self._AXIS_LEN + 1
    if n_given != n_needed:
        raise TypeError(
            "There must be an argument for each axis plus the "
            "value provided, you gave {0} args, but {1} are "
            "required".format(n_given, n_needed)
        )

    takeable = kwargs.get("takeable")

    try:
        # positional lookup when takeable, label lookup otherwise
        if takeable is True:
            item = self._iget_item_cache(args[0])
        else:
            item = self._get_item_cache(args[0])
        item.set_value(*args[1:], takeable=takeable)
        return self
    except KeyError:
        # label combo not present: expand the axes so it fits, then
        # retry on the reindexed copy
        new_axes = self._expand_axes(args)
        axes_dict = self._construct_axes_dict_from(self, new_axes,
                                                   copy=False)
        result = self.reindex(**axes_dict)

        args = list(args)
        likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
        grew = not np.array_equal(new_axes[0], self._info_axis)
        # how to make this logic simpler?
        if grew:
            com._possibly_cast_item(result, args[0], likely_dtype)

        return result.set_value(*args)
def set_value(self, *args, **kwargs):
    """
    Quickly set single value at (item, major, minor) location

    Parameters
    ----------
    item : item label (panel item)
    major : major axis label (panel item row)
    minor : minor axis label (panel item column)
    value : scalar
    takeable : interpret the passed labels as indexers, default False

    Returns
    -------
    panel : Panel
        If label combo is contained, will be reference to calling Panel,
        otherwise a new object
    """
    # require an arg for each axis and the value
    nargs, nreq = len(args), self._AXIS_LEN + 1
    if nargs != nreq:
        raise TypeError('There must be an argument for each axis plus the '
                        'value provided, you gave {0} args, but {1} are '
                        'required'.format(nargs, nreq))

    takeable = kwargs.get('takeable')
    try:
        # choose positional vs. label-based item lookup
        lower = (self._iget_item_cache(args[0]) if takeable is True
                 else self._get_item_cache(args[0]))
        lower.set_value(*args[1:], takeable=takeable)
        return self
    except KeyError:
        # the label combo is new: reindex onto expanded axes and retry
        axes = self._expand_axes(args)
        result = self.reindex(
            **self._construct_axes_dict_from(self, axes, copy=False))

        args = list(args)
        likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
        # how to make this logic simpler?
        made_bigger = not np.array_equal(axes[0], self._info_axis)
        if made_bigger:
            com._possibly_cast_item(result, args[0], likely_dtype)
        return result.set_value(*args)
def __setitem__(self, key, value):
    """Set item ``key`` to ``value``, conforming it to the sliced shape.

    Parameters
    ----------
    key : item label
    value : lower-dimensional pandas object, ndarray matching the sliced
        shape, or scalar (broadcast with an inferred dtype)

    Raises
    ------
    ValueError
        If an ndarray value's shape does not match the sliced shape.
    TypeError
        If ``value`` is of an unsupported type.
    """
    shape = tuple(self.shape)
    if isinstance(value, self._constructor_sliced):
        # align the lower-dimensional object to this object's axes
        value = value.reindex(
            **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
        mat = value.values
    elif isinstance(value, np.ndarray):
        # informative ValueError (consistent with the other __setitem__
        # variants) instead of a bare AssertionError
        if value.shape != shape[1:]:
            raise ValueError("shape of value must be {0}, shape of given "
                             "object was {1}".format(shape[1:], value.shape))
        mat = np.asarray(value)
    elif np.isscalar(value):
        # broadcast the scalar across the sliced shape
        dtype, value = _infer_dtype_from_scalar(value)
        mat = np.empty(shape[1:], dtype=dtype)
        mat.fill(value)
    else:
        raise TypeError('Cannot set item of type: %s' % str(type(value)))

    # prepend a length-1 leading axis so the data matches this ndim
    mat = mat.reshape(tuple([1]) + shape[1:])
    NDFrame._set_item(self, key, mat)
def __setitem__(self, key, value):
    """Assign ``value`` under ``key``, coercing it to the sliced shape."""
    shape = tuple(self.shape)

    if isinstance(value, self._constructor_sliced):
        # lower-dimensional pandas object: align it to our axes first
        slice_axes = self._construct_axes_dict_for_slice(
            self._AXIS_ORDERS[1:])
        value = value.reindex(**slice_axes)
        mat = value.values
    elif isinstance(value, np.ndarray):
        expected = shape[1:]
        if value.shape != expected:
            raise ValueError(
                "shape of value must be {0}, shape of given "
                "object was {1}".format(expected, value.shape)
            )
        mat = np.asarray(value)
    elif np.isscalar(value):
        # scalar: broadcast across the sliced shape with an inferred dtype
        dtype, value = _infer_dtype_from_scalar(value)
        mat = np.empty(shape[1:], dtype=dtype)
        mat.fill(value)
    else:
        raise TypeError("Cannot set item of type: %s" % str(type(value)))

    # add a length-1 leading axis so the array matches this ndim
    mat = mat.reshape(tuple([1]) + shape[1:])
    NDFrame._set_item(self, key, mat)
def __setitem__(self, key, value):
    """Store ``value`` under ``key`` after conforming it to the sliced shape."""
    shape = tuple(self.shape)

    if isinstance(value, self._constructor_sliced):
        # reindex a lower-dimensional pandas object onto our slice axes
        value = value.reindex(
            **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
        mat = value.values
    elif isinstance(value, np.ndarray):
        if value.shape != shape[1:]:
            # report the shapes as plain int tuples
            given = tuple(map(int, value.shape))
            raise ValueError('shape of value must be {0}, shape of given '
                             'object was {1}'.format(shape[1:], given))
        mat = np.asarray(value)
    elif np.isscalar(value):
        # broadcast a scalar using its inferred dtype
        dtype, value = _infer_dtype_from_scalar(value)
        mat = np.empty(shape[1:], dtype=dtype)
        mat.fill(value)
    else:
        raise TypeError('Cannot set item of type: %s' % str(type(value)))

    # insert the length-1 leading axis expected by the block manager
    mat = mat.reshape(tuple([1]) + shape[1:])
    NDFrame._set_item(self, key, mat)
def set_value(self, *args):
    """
    Quickly set single value at (item, major, minor) location

    Parameters
    ----------
    item : item label (panel item)
    major : major axis label (panel item row)
    minor : minor axis label (panel item column)
    value : scalar

    Returns
    -------
    panel : Panel
        If label combo is contained, will be reference to calling Panel,
        otherwise a new object
    """
    # require an arg for each axis and the value; raise an informative
    # TypeError (consistent with the kwargs-aware set_value variant)
    # instead of a bare AssertionError
    nargs = len(args)
    nreq = self._AXIS_LEN + 1
    if nargs != nreq:
        raise TypeError('There must be an argument for each axis plus the '
                        'value provided, you gave {0} args, but {1} are '
                        'required'.format(nargs, nreq))

    try:
        frame = self._get_item_cache(args[0])
        frame.set_value(*args[1:])
        return self
    except KeyError:
        # label combo missing: grow the panel to fit, then retry
        axes = self._expand_axes(args)
        d = self._construct_axes_dict_from(self, axes, copy=False)
        result = self.reindex(**d)
        args = list(args)
        likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
        made_bigger = not np.array_equal(axes[0], self._info_axis)

        # how to make this logic simpler?
        if made_bigger:
            com._possibly_cast_item(result, args[0], likely_dtype)

        return result.set_value(*args)
def set_value(self, *args):
    """
    Quickly set single value at (item, major, minor) location

    Parameters
    ----------
    item : item label (panel item)
    major : major axis label (panel item row)
    minor : minor axis label (panel item column)
    value : scalar

    Returns
    -------
    panel : Panel
        If label combo is contained, will be reference to calling Panel,
        otherwise a new object
    """
    # validate arity with an informative TypeError rather than the
    # original bare AssertionError, matching the sibling set_value
    # implementation that accepts kwargs
    nargs = len(args)
    nreq = self._AXIS_LEN + 1
    if nargs != nreq:
        raise TypeError('There must be an argument for each axis plus the '
                        'value provided, you gave {0} args, but {1} are '
                        'required'.format(nargs, nreq))

    try:
        frame = self._get_item_cache(args[0])
        frame.set_value(*args[1:])
        return self
    except KeyError:
        # the requested label combo does not exist; reindex onto the
        # expanded axes and set the value on the enlarged result
        axes = self._expand_axes(args)
        d = self._construct_axes_dict_from(self, axes, copy=False)
        result = self.reindex(**d)
        args = list(args)
        likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
        made_bigger = not np.array_equal(axes[0], self._info_axis)
        # how to make this logic simpler?
        if made_bigger:
            com._possibly_cast_item(result, args[0], likely_dtype)
        return result.set_value(*args)
def setitem(self, indexer, value): """ set the value inplace; return a new block (of a possibly different dtype) indexer is a direct slice/positional indexer; value must be a compatible shape """ # coerce None values, if appropriate if value is None: if self.is_numeric: value = np.nan # coerce args values, value = self._try_coerce_args(self.values, value) arr_value = np.array(value) # cast the values to a type that can hold nan (if necessary) if not self._can_hold_element(value): dtype, _ = com._maybe_promote(arr_value.dtype) values = values.astype(dtype) transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) l = len(values) # length checking # boolean with truth values == len of the value is ok too if isinstance(indexer, (np.ndarray, list)): if is_list_like(value) and len(indexer) != len(value): if not (isinstance(indexer, np.ndarray) and indexer.dtype == np.bool_ and len(indexer[indexer]) == len(value)): raise ValueError("cannot set using a list-like indexer " "with a different length than the value") # slice elif isinstance(indexer, slice): if is_list_like(value) and l: if len(value) != length_of_indexer(indexer, values): raise ValueError("cannot set using a slice indexer with a " "different length than the value") try: def _is_scalar_indexer(indexer): # return True if we are all scalar indexers if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return all([ np.isscalar(idx) for idx in indexer ]) return False def _is_empty_indexer(indexer): # return a boolean if we have an empty indexer if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False # empty indexers # 8669 (empty) if _is_empty_indexer(indexer): pass # setting a single element for each dim and with a rhs that could be say a list # GH 6043 elif _is_scalar_indexer(indexer): values[indexer] = value # if we are an exact match 
(ex-broadcasting), # then use the resultant dtype elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape): values[indexer] = value values = values.astype(arr_value.dtype) # set else: values[indexer] = value # coerce and try to infer the dtypes of the result if np.isscalar(value): dtype, _ = _infer_dtype_from_scalar(value) else: dtype = 'infer' values = self._try_coerce_and_cast_result(values, dtype) block = make_block(transf(values), ndim=self.ndim, placement=self.mgr_locs, fastpath=True) # may have to soft convert_objects here if block.is_object and not self.is_object: block = block.convert(numeric=False) return block except (ValueError, TypeError) as detail: raise except Exception as detail: pass return [self]