def test_type_resolver(self, space):
    """Exercise W_UfuncGeneric.type_resolver signature selection.

    Builds a 1-in/1-out generic ufunc whose dtype table pairs
    (float32 -> complex64), (float64 -> complex128), (complex128 -> complex128)
    and checks that an explicit signature string picks the matching row,
    an empty signature matches the input dtype exactly, and unknown
    signatures raise OperationError.
    """
    cache = get_dtype_cache(space)
    c128 = cache.w_complex128dtype
    c64 = cache.w_complex64dtype
    f64 = cache.w_float64dtype
    f32 = cache.w_float32dtype
    u32 = cache.w_uint32dtype
    w_bool = cache.w_booldtype
    ufunc = W_UfuncGeneric(space, [None, None, None], 'eigenvals', None, 1, 1,
                           [f32, c64,
                            f64, c128,
                            c128, c128],
                           '')
    f32_array = W_NDimArray(VoidBoxStorage(0, f32))
    # 'd->D' must cast the float32 input up and create a complex128 output,
    # selecting the second dtype pair (index 1).
    index, dtypes = ufunc.type_resolver(space, [f32_array], [None],
                                        'd->D', ufunc.dtypes)
    assert index == 1
    assert dtypes == [f64, c128]
    # No signature: the float32 input matches the first pair directly.
    index, dtypes = ufunc.type_resolver(space, [f32_array], [None],
                                        '', ufunc.dtypes)
    assert index == 0
    assert dtypes == [f32, c64]
    # Signatures absent from the dtype table must raise.
    raises(OperationError, ufunc.type_resolver, space, [f32_array], [None],
           'u->u', ufunc.dtypes)
    exc = raises(OperationError, ufunc.type_resolver, space, [f32_array],
                 [None], 'i->i', ufunc.dtypes)
def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False,
          ndmin=0):
    """App-level ``np.array`` entry point: build/convert via ``_array`` and
    then left-pad the shape with 1s until it has at least ``ndmin`` dims.

    When ``_array`` handed back ``w_object`` itself (no conversion happened),
    a fresh wrapper is returned so the caller's object is not mutated;
    otherwise the reshaped implementation is stored back on the result.
    """
    w_res = _array(space, w_object, w_dtype, copy, w_order, subok)
    new_shape = w_res.get_shape()
    missing = ndmin - len(new_shape)
    if missing > 0:
        new_shape = [1] * missing + new_shape
    impl = w_res.implementation.set_shape(space, w_res, new_shape)
    if w_res is w_object:
        # do not mutate the caller-supplied array: wrap the implementation
        return W_NDimArray(impl)
    w_res.implementation = impl
    return w_res
def getitem(self, it, st):
    """Return the operand at iterator state ``st`` wrapped in a W_NDimArray."""
    return W_NDimArray(it.getoperand(st))
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0,
             order=NPY.KEEPORDER, allow_backward=True):
    """Set up an nditer-style multi-operand iterator.

    space       -- the PyPy object space
    w_seq       -- operand or tuple/list of operands (None entries mean
                   "allocate an output for me")
    w_flags     -- global iterator flags (parsed by parse_func_flags)
    w_op_flags  -- per-operand flags ('r', 'w', broadcast, ...)
    w_op_dtypes -- requested per-operand dtypes, or None
    w_casting   -- casting rule name ('safe' when not given)
    w_op_axes   -- optional per-operand axis remapping
    w_itershape -- optional explicit iteration shape
    order       -- NPY.{C,FORTRAN,ANY,KEEP}ORDER memory-order request

    NOTE(review): mutates self.seq / self.dtypes / self.op_flags in place
    while building; the statement order below is load-bearing.
    """
    # --- default state flags; parse_func_flags below may flip some of them
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ''
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    self.allow_backward = allow_backward
    if not space.is_w(w_casting, space.w_None):
        self.casting = space.str_w(w_casting)
    else:
        # default casting rule when the caller passed None
        self.casting = 'safe'
    # convert w_seq operands to a list of W_NDimArray (None stays None:
    # it marks an output operand to be allocated later)
    if space.isinstance_w(w_seq, space.w_tuple) or \
            space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    if order == NPY.ANYORDER:
        # 'A' means "'F' order if all the arrays are Fortran contiguous,
        # 'C' order otherwise"
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_flags() & NPY.ARRAY_F_CONTIGUOUS):
                break
        else:
            # for/else: only reached when no operand broke out,
            # i.e. every non-None operand is F-contiguous
            order = NPY.FORTRANORDER
    elif order == NPY.KEEPORDER:
        # 'K' means "as close to the order the array elements appear in
        # memory as possible", so match self.order to seq.order
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_order() == NPY.FORTRANORDER):
                break
        else:
            order = NPY.FORTRANORDER
    self.order = order
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)
    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []
    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == 'w']
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(
            space, [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
            space.isinstance_w(w_itershape, space.w_list):
        # caller pinned the iteration shape explicitly
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            # no explicit dtypes: promote across all readable operands
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == 'w':
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(
                    space, self.shape, out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    try:
                        shape_agreement(space, self.shape, self.seq[i], False)
                    except OperationError as e:
                        raise oefmt(space.w_ValueError, "non-broadcastable"
                            " output operand with shape %s doesn't match "
                            "the broadcast shape %s",
                            str(self.seq[i].get_shape()),
                            str(self.shape))
    if self.tracked_index != "":
        # c_index / f_index / multi_index tracking was requested in flags
        order = self.order
        if order == NPY.KEEPORDER:
            order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            # iterate backward when the tracked index disagrees with the
            # memory order of the iteration
            backward = ((order == NPY.CORDER and self.tracked_index != 'C')
                        or (order == NPY.FORTRANORDER
                            and self.tracked_index != 'F'))
        self.index_iter = IndexIterator(self.shape, backward=backward)
    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            self_d = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not self_d:
                # no dtype requested for this operand: adopt its own
                self.dtypes[i] = seq_d
            elif self_d != seq_d:
                impl = self.seq[i].implementation
                if self.buffered or 'r' in self.op_flags[i].tmp_copy:
                    # cast by copying into a new array of the requested dtype
                    if not can_cast_array(
                            space, self.seq[i], self_d, self.casting):
                        raise oefmt(space.w_TypeError, "Iterator operand %d"
                            " dtype could not be cast from %s to %s"
                            " according to the rule '%s'",
                            i, space.str_w(seq_d.descr_repr(space)),
                            space.str_w(self_d.descr_repr(space)),
                            self.casting)
                    order = support.get_order_as_CF(impl.order, self.order)
                    new_impl = impl.astype(space, self_d, order).copy(space)
                    self.seq[i] = W_NDimArray(new_impl)
                else:
                    raise oefmt(space.w_TypeError, "Iterator "
                        "operand required copying or buffering, "
                        "but neither copying nor buffering was "
                        "enabled")
                if 'w' in self.op_flags[i].rw:
                    # writable operand: result must also cast back
                    if not can_cast_type(
                            space, self_d, seq_d, self.casting):
                        raise oefmt(space.w_TypeError, "Iterator"
                            " requested dtype could not be cast from "
                            " %s to %s, the operand %d dtype, accord"
                            "ing to the rule '%s'",
                            space.str_w(self_d.descr_repr(space)),
                            space.str_w(seq_d.descr_repr(space)), i,
                            self.casting)
    elif self.buffered and not (self.external_loop and len(self.seq) < 2):
        # buffering without explicit dtypes: copy inputs so buffers own data
        for i in range(len(self.seq)):
            if i not in outargs:
                self.seq[i] = self.seq[i].descr_copy(
                    space, w_order=space.wrap(self.order))
        self.dtypes = [s.get_dtype() for s in self.seq]
    else:
        # copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]
    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    if self.external_loop:
        # collapse compatible dimensions into one inner loop
        coalesce_axes(self, space)
def _get_item(self, it, st):
    """Wrap the operand selected by iterator state ``st`` as a W_NDimArray."""
    operand = it.getoperand(st)
    return W_NDimArray(operand)
def getitem(self, it, st):
    """Return a W_NDimArray view of the operand at iterator state ``st``."""
    return W_NDimArray(it.getoperand(st))
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0, order='K'):
    """Set up a multi-operand iterator (older variant: string order codes).

    space       -- the PyPy object space
    w_seq       -- operand or tuple/list of operands (None entries mean
                   "allocate an output for me")
    w_flags     -- global iterator flags (parsed by parse_func_flags)
    w_op_flags  -- per-operand flags ('r', 'w', broadcast, ...)
    w_op_dtypes -- requested per-operand dtypes, or None
    w_casting   -- casting rule; NOTE(review): accepted but unused here
    w_op_axes   -- optional per-operand axis remapping
    w_itershape -- optional explicit iteration shape
    order       -- memory-order letter; 'K' defers to the first operand

    NOTE(review): mutates self.seq / self.dtypes / self.op_flags in place;
    the statement order below is load-bearing.
    """
    self.order = order
    # --- default state flags; parse_func_flags below may flip some of them
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ''
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    # convert w_seq operands to a list of W_NDimArray (None stays None:
    # it marks an output operand to be allocated later)
    if space.isinstance_w(w_seq, space.w_tuple) or \
            space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)
    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []
    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == 'w']
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(
            space, [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
            space.isinstance_w(w_itershape, space.w_list):
        # caller pinned the iteration shape explicitly
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            # no explicit dtypes: promote across all readable operands
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == 'w':
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(
                    space, self.shape, out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    shape_agreement(space, self.shape, self.seq[i], False)
    if self.tracked_index != "":
        # c_index / f_index / multi_index tracking was requested in flags
        if self.order == "K":
            self.order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            # iterate backward when the tracked index disagrees with order
            backward = self.order != self.tracked_index
        self.index_iter = IndexIterator(self.shape, backward=backward)
    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            selfd = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not selfd:
                # no dtype requested for this operand: adopt its own
                self.dtypes[i] = seq_d
            elif selfd != seq_d:
                if not 'r' in self.op_flags[i].tmp_copy:
                    raise oefmt(space.w_TypeError,
                                "Iterator operand required copying or "
                                "buffering for operand %d", i)
                # cast by copying into a new array of the requested dtype
                impl = self.seq[i].implementation
                new_impl = impl.astype(space, selfd)
                self.seq[i] = W_NDimArray(new_impl)
    else:
        # copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]
    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = get_iter(space, self.order, self.seq[i], self.shape,
                      self.dtypes[i], self.op_flags[i], self)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    if self.external_loop:
        # collapse compatible dimensions into one inner loop
        coalesce_axes(self, space)
def try_interface_method(space, w_object, copy):
    """Try to build a W_NDimArray from w_object's ``__array_interface__``.

    Returns (array, read_only) on success, or (None, False) when the object
    has no ``__array_interface__`` attribute. Only interface version >= 3 is
    accepted; 'descr' and (in the buffer path) 'strides' are unsupported and
    raise NotImplementedError. ValueError is raised for malformed interfaces.

    Two paths:
      * 'data' is a (pointer, read_only) pair -> zero-copy view over the raw
        storage address;
      * otherwise w_object (or 'data') is treated as a buffer and copied via
        frombuffer + reshape.
    """
    try:
        w_interface = space.getattr(w_object,
                                    space.newtext("__array_interface__"))
        if w_interface is None:
            # NOTE(review): getattr normally raises AttributeError (caught
            # below) rather than returning None; defensive check
            return None, False
        version_w = space.finditem(w_interface, space.newtext("version"))
        if version_w is None:
            raise oefmt(space.w_ValueError, "__array_interface__ found without"
                        " 'version' key")
        if not space.isinstance_w(version_w, space.w_int):
            raise oefmt(space.w_ValueError, "__array_interface__ found with"
                        " non-int 'version' key")
        version = space.int_w(version_w)
        if version < 3:
            raise oefmt(space.w_ValueError,
                    "__array_interface__ version %d not supported", version)
        # make a view into the data
        w_shape = space.finditem(w_interface, space.newtext('shape'))
        w_dtype = space.finditem(w_interface, space.newtext('typestr'))
        w_descr = space.finditem(w_interface, space.newtext('descr'))
        w_data = space.finditem(w_interface, space.newtext('data'))
        w_strides = space.finditem(w_interface, space.newtext('strides'))
        if w_shape is None or w_dtype is None:
            raise oefmt(space.w_ValueError,
                    "__array_interface__ missing one or more required keys: shape, typestr")
        if w_descr is not None:
            raise oefmt(space.w_NotImplementedError,
                    "__array_interface__ descr not supported yet")
        if w_strides is None or space.is_w(w_strides, space.w_None):
            strides = None
        else:
            strides = [space.int_w(i) for i in space.listview(w_strides)]
        shape = [space.int_w(i) for i in space.listview(w_shape)]
        dtype = descriptor.decode_w_dtype(space, w_dtype)
        if dtype is None:
            raise oefmt(space.w_ValueError,
                    "__array_interface__ could not decode dtype %R", w_dtype)
        if w_data is not None and (space.isinstance_w(w_data, space.w_tuple)
                                   or space.isinstance_w(w_data,
                                                         space.w_list)):
            # raw-pointer path: 'data' is (int address, read_only flag)
            data_w = space.listview(w_data)
            w_data = rffi.cast(RAW_STORAGE_PTR, space.int_w(data_w[0]))
            read_only = space.is_true(data_w[1]) or copy
            offset = 0
            w_base = w_object
            if read_only:
                # read-only views do not keep the source object alive
                w_base = None
            return W_NDimArray.from_shape_and_storage(space, shape, w_data,
                                dtype, w_base=w_base, strides=strides,
                                start=offset), read_only
        if w_data is None:
            # no 'data' key: the object itself is the buffer
            w_data = w_object
        w_offset = space.finditem(w_interface, space.newtext('offset'))
        if w_offset is None:
            offset = 0
        else:
            offset = space.int_w(w_offset)
        if strides is not None:
            raise oefmt(space.w_NotImplementedError,
                   "__array_interface__ strides not fully supported yet")
        # buffer path: copy the data out and reshape to the declared shape
        arr = frombuffer(space, w_data, dtype, support.product(shape), offset)
        new_impl = arr.implementation.reshape(arr, shape)
        return W_NDimArray(new_impl), False
    except OperationError as e:
        if e.match(space, space.w_AttributeError):
            # object simply has no __array_interface__: not an error
            return None, False
        raise