def astype(self, space, dtype, order, copy=True):
    # copy the general pattern of the strides
    # but make the array storage contiguous in memory
    shape = self.get_shape()
    strides = self.get_strides()
    if order not in (NPY.KEEPORDER, NPY.FORTRANORDER, NPY.CORDER):
        raise oefmt(space.w_ValueError, "Unknown order %d in astype", order)
    if len(strides) == 0:
        t_strides = []
        backstrides = []
    elif order in (NPY.FORTRANORDER, NPY.CORDER):
        t_strides, backstrides = calc_strides(shape, dtype, order)
    else:
        indx_array = range(len(strides))
        list_sorter = StrideSort(indx_array, strides, self.order)
        list_sorter.sort()
        t_strides = strides[:]
        base = dtype.elsize
        for i in indx_array:
            t_strides[i] = base
            base *= shape[i]
        backstrides = calc_backstrides(t_strides, shape)
    order = support.get_order_as_CF(self.order, order)
    impl = ConcreteArray(shape, dtype, order, t_strides, backstrides)
    if copy:
        loop.setslice(space, impl.get_shape(), impl, self)
    return impl
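
# --- Illustration (not part of the original source) ---
# A minimal pure-Python sketch of the NPY.KEEPORDER branch above: indices are
# ordered from smallest to largest stride, then contiguous strides are
# assigned in that order, so the copy reproduces the source array's memory
# layout. Plain `sorted` stands in for StrideSort here; that substitution is
# an assumption made for illustration, not the interp-level implementation.
def keeporder_strides(shape, strides, elsize):
    indx_array = sorted(range(len(strides)), key=lambda i: abs(strides[i]))
    t_strides = list(strides)
    base = elsize
    for i in indx_array:
        t_strides[i] = base      # innermost (smallest-stride) axis first
        base *= shape[i]
    return t_strides

# A transposed 2x3 float64 array has shape (3, 2) and strides (8, 24);
# the recomputed strides keep that Fortran-like layout:
assert keeporder_strides([3, 2], [8, 24], 8) == [8, 24]
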
def reshape(self, orig_array, new_shape, order=NPY.ANYORDER):
    # Since we got to here, prod(new_shape) == self.size
    order = support.get_order_as_CF(self.order, order)
    new_strides = None
    if self.size == 0:
        new_strides, _ = calc_strides(new_shape, self.dtype, order)
    else:
        if len(self.get_shape()) == 0:
            new_strides = [self.dtype.elsize] * len(new_shape)
        else:
            new_strides = calc_new_strides(new_shape, self.get_shape(),
                                           self.get_strides(), order)
            if new_strides is None or len(new_strides) != len(new_shape):
                return None
    if new_strides is not None:
        # We can create a view, strides somehow match up.
        new_backstrides = calc_backstrides(new_strides, new_shape)
        assert isinstance(orig_array, W_NDimArray) or orig_array is None
        return SliceArray(self.start, new_strides, new_backstrides,
                          new_shape, self, orig_array)
    return None
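
# --- Illustration (not part of the original source) ---
# App-level sketch of what the None return above means in practice: reshape
# yields a view when compatible strides exist and silently falls back to a
# copy otherwise. Uses CPython NumPy only to demonstrate the semantics.
import numpy as np

a = np.arange(12).reshape(3, 4)
v = a.reshape(4, 3)                # strides match up -> a view
assert np.shares_memory(a, v)

s = a[:, ::2]                      # non-contiguous slice
c = s.reshape(6)                   # no compatible strides -> reshape copies
assert not np.shares_memory(a, c)
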
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0,
             order=NPY.KEEPORDER, allow_backward=True):
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ''
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    self.allow_backward = allow_backward
    if not space.is_w(w_casting, space.w_None):
        self.casting = space.str_w(w_casting)
    else:
        self.casting = 'safe'
    # convert w_seq operands to a list of W_NDimArray
    if space.isinstance_w(w_seq, space.w_tuple) or \
            space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    if order == NPY.ANYORDER:
        # 'A' means "'F' order if all the arrays are Fortran contiguous,
        # 'C' order otherwise"
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_flags() & NPY.ARRAY_F_CONTIGUOUS):
                break
        else:
            order = NPY.FORTRANORDER
    elif order == NPY.KEEPORDER:
        # 'K' means "as close to the order the array elements appear in
        # memory as possible", so match self.order to seq.order
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_order() == NPY.FORTRANORDER):
                break
        else:
            order = NPY.FORTRANORDER
    self.order = order
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)

    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []

    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == 'w']
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(
            space, [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
            space.isinstance_w(w_itershape, space.w_list):
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == 'w':
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(space, self.shape,
                                                     out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    try:
                        shape_agreement(space, self.shape, self.seq[i],
                                        False)
                    except OperationError as e:
                        raise oefmt(space.w_ValueError,
                            "non-broadcastable output operand with shape %s"
                            " doesn't match the broadcast shape %s",
                            str(self.seq[i].get_shape()), str(self.shape))

    if self.tracked_index != "":
        order = self.order
        if order == NPY.KEEPORDER:
            order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            backward = ((order == NPY.CORDER and self.tracked_index != 'C')
                        or (order == NPY.FORTRANORDER and
                            self.tracked_index != 'F'))
        self.index_iter = IndexIterator(self.shape, backward=backward)

    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            self_d = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not self_d:
                self.dtypes[i] = seq_d
            elif self_d != seq_d:
                impl = self.seq[i].implementation
                if self.buffered or 'r' in self.op_flags[i].tmp_copy:
                    if not can_cast_array(space, self.seq[i], self_d,
                                          self.casting):
                        raise oefmt(space.w_TypeError,
                            "Iterator operand %d dtype could not be cast"
                            " from %s to %s according to the rule '%s'",
                            i, space.str_w(seq_d.descr_repr(space)),
                            space.str_w(self_d.descr_repr(space)),
                            self.casting)
                    order = support.get_order_as_CF(impl.order, self.order)
                    new_impl = impl.astype(space, self_d, order).copy(space)
                    self.seq[i] = W_NDimArray(new_impl)
                else:
                    raise oefmt(space.w_TypeError,
                        "Iterator operand required copying or buffering, "
                        "but neither copying nor buffering was enabled")
                if 'w' in self.op_flags[i].rw:
                    if not can_cast_type(space, self_d, seq_d,
                                         self.casting):
                        raise oefmt(space.w_TypeError,
                            "Iterator requested dtype could not be cast"
                            " from %s to %s, the operand %d dtype,"
                            " according to the rule '%s'",
                            space.str_w(self_d.descr_repr(space)),
                            space.str_w(seq_d.descr_repr(space)),
                            i, self.casting)
    elif self.buffered and not (self.external_loop and len(self.seq) < 2):
        for i in range(len(self.seq)):
            if i not in outargs:
                self.seq[i] = self.seq[i].descr_copy(
                    space, w_order=space.wrap(self.order))
        self.dtypes = [s.get_dtype() for s in self.seq]
    else:
        # copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]

    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))

    if self.external_loop:
        coalesce_axes(self, space)
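
# --- Illustration (not part of the original source) ---
# App-level sketch of the dtype-handling path in part 2 above, using CPython
# NumPy's nditer (the interface this interp-level class implements):
# requesting a different op_dtype without enabling copying or buffering hits
# the "required copying or buffering" TypeError raised above.
import numpy as np

a = np.arange(6, dtype=np.int32)
try:
    np.nditer(a, op_dtypes=[np.float64])
except TypeError as e:
    print(e)   # ... operand required copying or buffering ...

# Allowing a temporary copy (or passing flags=['buffered']) succeeds:
it = np.nditer(a, op_flags=[['readonly', 'copy']], op_dtypes=[np.float64])
assert it[0].dtype == np.float64
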
def _array(space, w_object, w_dtype=None, copy=True, w_order=None,
           subok=False):
    from pypy.module.micronumpy.boxes import W_GenericBox
    # numpy testing calls array(type(array([]))) and expects a ValueError
    if space.isinstance_w(w_object, space.w_type):
        raise oefmt(space.w_ValueError,
                    "cannot create ndarray from type instance")
    # for anything that isn't already an array, try __array__ method first
    dtype = descriptor.decode_w_dtype(space, w_dtype)
    if not isinstance(w_object, W_NDimArray):
        w_array = try_array_method(space, w_object, w_dtype)
        if w_array is None:
            if (not space.isinstance_w(w_object, space.w_bytes) and
                    not space.isinstance_w(w_object, space.w_unicode) and
                    not isinstance(w_object, W_GenericBox)):
                # use buffer interface
                w_object = _array_from_buffer_3118(space, w_object, dtype)
        else:
            # continue with w_array, but do further operations in place
            w_object = w_array
            copy = False
            dtype = w_object.get_dtype()
    if not isinstance(w_object, W_NDimArray):
        w_array, _copy = try_interface_method(space, w_object, copy)
        if w_array is not None:
            w_object = w_array
            copy = _copy
            dtype = w_object.get_dtype()
    if isinstance(w_object, W_NDimArray):
        npy_order = order_converter(space, w_order, NPY.ANYORDER)
        if (dtype is None or w_object.get_dtype() is dtype) and \
                (subok or type(w_object) is W_NDimArray):
            flags = w_object.get_flags()
            must_copy = copy
            must_copy |= (npy_order == NPY.CORDER and
                          not flags & NPY.ARRAY_C_CONTIGUOUS)
            must_copy |= (npy_order == NPY.FORTRANORDER and
                          not flags & NPY.ARRAY_F_CONTIGUOUS)
            if must_copy:
                return w_object.descr_copy(space, space.newint(npy_order))
            else:
                return w_object
        if subok and not type(w_object) is W_NDimArray:
            raise oefmt(space.w_NotImplementedError,
                        "array(..., subok=True) only partially implemented")
        # we have a ndarray, but need to copy or change dtype
        if dtype is None:
            dtype = w_object.get_dtype()
        if dtype != w_object.get_dtype():
            # silently reject the copy value
            copy = True
        if copy:
            shape = w_object.get_shape()
            order = support.get_order_as_CF(w_object.get_order(), npy_order)
            w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order)
            if support.product(shape) == 1:
                w_arr.set_scalar_value(dtype.coerce(
                    space, w_object.implementation.getitem(0)))
            else:
                loop.setslice(space, shape, w_arr.implementation,
                              w_object.implementation)
            return w_arr
        else:
            imp = w_object.implementation
            w_base = w_object
            sz = w_base.get_size() * dtype.elsize
            if imp.base() is not None:
                w_base = imp.base()
                if type(w_base) is W_NDimArray:
                    sz = w_base.get_size() * dtype.elsize
                else:
                    # this must succeed (mmap, buffer, ...)
                    sz = space.int_w(space.call_method(w_base, 'size'))
            with imp as storage:
                return W_NDimArray.from_shape_and_storage(
                    space, w_object.get_shape(), storage, dtype,
                    storage_bytes=sz, w_base=w_base, strides=imp.strides,
                    start=imp.start)
    else:
        # not an array
        npy_order = order_converter(space, w_order, NPY.CORDER)
        shape, elems_w = find_shape_and_elems(space, w_object, dtype)
        if dtype is None and space.isinstance_w(w_object, space.w_buffer):
            dtype = descriptor.get_dtype_cache(space).w_uint8dtype
        if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1):
            dtype = find_dtype_for_seq(space, elems_w, dtype)
        w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order)
        # safe from overflow since from_shape checks
        if support.product(shape) == 1:
            w_arr.set_scalar_value(dtype.coerce(space, elems_w[0]))
        else:
            loop.assign(space, w_arr, elems_w)
        return w_arr
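
# --- Illustration (not part of the original source) ---
# App-level sketch of the must_copy logic above: when the input is already an
# ndarray and no dtype/order conversion is required, np.array(..., copy=False)
# hands back the very same object, while a requested order the array does not
# satisfy forces a copy.
import numpy as np

a = np.arange(6).reshape(2, 3)           # C-contiguous
assert np.array(a, copy=False) is a      # no copy needed, same object
b = np.array(a, order='F')               # not F-contiguous -> must_copy
assert b.flags['F_CONTIGUOUS'] and b is not a
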