def execute(self, interp):
    """Evaluate a parsed numpy function call against the interpreter.

    Dispatches on self.name to the matching BaseArray method, then wraps
    any non-array result in a scalar of an appropriate dtype.

    Raises ArgumentNotAnArray / ArgumentMismatch / WrongFunctionName on
    malformed calls.
    """
    arr = self.args[0].execute(interp)
    if not isinstance(arr, BaseArray):
        raise ArgumentNotAnArray
    if self.name in SINGLE_ARG_FUNCTIONS:
        # 'sum' alone may take an optional second (axis) argument.
        if len(self.args) != 1 and self.name != 'sum':
            raise ArgumentMismatch
        if self.name == "sum":
            if len(self.args) > 1:
                w_res = arr.descr_sum(interp.space,
                                      self.args[1].execute(interp))
            else:
                w_res = arr.descr_sum(interp.space)
        elif self.name == "prod":
            w_res = arr.descr_prod(interp.space)
        elif self.name == "max":
            w_res = arr.descr_max(interp.space)
        elif self.name == "min":
            w_res = arr.descr_min(interp.space)
        elif self.name == "any":
            w_res = arr.descr_any(interp.space)
        elif self.name == "all":
            w_res = arr.descr_all(interp.space)
        elif self.name == "unegative":
            neg = interp_ufuncs.get(interp.space).negative
            w_res = neg.call(interp.space, [arr])
        elif self.name == "flat":
            w_res = arr.descr_get_flatiter(interp.space)
        elif self.name == "tostring":
            # tostring is executed for its side effect only.
            arr.descr_tostring(interp.space)
            w_res = None
        else:
            assert False # unreachable code
    elif self.name in TWO_ARG_FUNCTIONS:
        if len(self.args) != 2:
            raise ArgumentMismatch
        arg = self.args[1].execute(interp)
        # BUG FIX: this isinstance check was duplicated verbatim; the
        # redundant second copy has been removed.
        if not isinstance(arg, BaseArray):
            raise ArgumentNotAnArray
        if self.name == "dot":
            w_res = arr.descr_dot(interp.space, arg)
        elif self.name == 'take':
            w_res = arr.descr_take(interp.space, arg)
        else:
            assert False # unreachable code
    else:
        raise WrongFunctionName
    if isinstance(w_res, BaseArray):
        return w_res
    # Non-array results are wrapped as numpy scalars of a matching dtype.
    if isinstance(w_res, FloatObject):
        dtype = get_dtype_cache(interp.space).w_float64dtype
    elif isinstance(w_res, BoolObject):
        dtype = get_dtype_cache(interp.space).w_booldtype
    elif isinstance(w_res, interp_boxes.W_GenericBox):
        dtype = w_res.get_dtype(interp.space)
    else:
        dtype = None
    return scalar_w(interp.space, dtype, w_res)
def execute(self, interp):
    """Evaluate a binary operator node ('+', '*', '-', '->').

    '->' is the DSL's indexing operator; a float index is truncated to an
    int before the lookup.  Non-array results are re-wrapped as float64
    scalars so callers always receive a W_NDimArray or a generic box.
    """
    w_lhs = self.lhs.execute(interp)
    if isinstance(self.rhs, SliceConstant):
        # Slices are wrapped directly rather than executed.
        w_rhs = self.rhs.wrap(interp.space)
    else:
        w_rhs = self.rhs.execute(interp)
    if not isinstance(w_lhs, W_NDimArray):
        # scalar: promote the left operand to a float64 scalar array
        dtype = get_dtype_cache(interp.space).w_float64dtype
        w_lhs = W_NDimArray.new_scalar(interp.space, dtype, w_lhs)
    assert isinstance(w_lhs, W_NDimArray)
    if self.name == '+':
        w_res = w_lhs.descr_add(interp.space, w_rhs)
    elif self.name == '*':
        w_res = w_lhs.descr_mul(interp.space, w_rhs)
    elif self.name == '-':
        w_res = w_lhs.descr_sub(interp.space, w_rhs)
    elif self.name == '->':
        if isinstance(w_rhs, FloatObject):
            # indexes must be ints; truncate float indexes
            w_rhs = IntObject(int(w_rhs.floatval))
        assert isinstance(w_lhs, W_NDimArray)
        w_res = w_lhs.descr_getitem(interp.space, w_rhs)
    else:
        raise NotImplementedError
    if (not isinstance(w_res, W_NDimArray) and
            not isinstance(w_res, interp_boxes.W_GenericBox)):
        dtype = get_dtype_cache(interp.space).w_float64dtype
        w_res = W_NDimArray.new_scalar(interp.space, dtype, w_res)
    return w_res
def find_dtype_for_scalar(space, w_obj, current_guess=None):
    """Infer the dtype for a wrapped scalar, widening `current_guess`.

    Used while scanning a heterogeneous sequence: each element refines the
    running guess (bool -> long -> int64).  Anything that is not a generic
    box, bool, int or long falls through to float64.
    """
    bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype
    int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype
    if isinstance(w_obj, interp_boxes.W_GenericBox):
        # Already-boxed values carry their own dtype; combine with the guess.
        dtype = w_obj.get_dtype(space)
        if current_guess is None:
            return dtype
        return find_binop_result_dtype(space, dtype, current_guess)
    if space.isinstance_w(w_obj, space.w_bool):
        if current_guess is None or current_guess is bool_dtype:
            return bool_dtype
        return current_guess
    elif space.isinstance_w(w_obj, space.w_int):
        if (current_guess is None or current_guess is bool_dtype or
                current_guess is long_dtype):
            return long_dtype
        return current_guess
    elif space.isinstance_w(w_obj, space.w_long):
        if (current_guess is None or current_guess is bool_dtype or
                current_guess is long_dtype or
                current_guess is int64_dtype):
            return int64_dtype
        return current_guess
    # Everything else (floats included) is treated as float64.
    return interp_dtype.get_dtype_cache(space).w_float64dtype
def test_binops(self, space):
    """Spot-check find_binop_result_dtype's promotion table."""
    bool_dtype = get_dtype_cache(space).w_booldtype
    int8_dtype = get_dtype_cache(space).w_int8dtype
    int32_dtype = get_dtype_cache(space).w_int32dtype
    float64_dtype = get_dtype_cache(space).w_float64dtype
    # Basic pairing
    assert find_binop_result_dtype(space, bool_dtype, bool_dtype) is bool_dtype
    assert find_binop_result_dtype(space, bool_dtype, float64_dtype) is float64_dtype
    assert find_binop_result_dtype(space, float64_dtype, bool_dtype) is float64_dtype
    assert find_binop_result_dtype(space, int32_dtype, int8_dtype) is int32_dtype
    assert find_binop_result_dtype(space, int32_dtype, bool_dtype) is int32_dtype
    # With promote bool (happens on div), the result is that the op should
    # promote bools to int8
    assert find_binop_result_dtype(
        space, bool_dtype, bool_dtype, promote_bools=True) is int8_dtype
    assert find_binop_result_dtype(
        space, bool_dtype, float64_dtype, promote_bools=True) is float64_dtype
    # Coerce to floats
    assert find_binop_result_dtype(
        space, bool_dtype, float64_dtype, promote_to_float=True) is float64_dtype
def execute(self, interp):
    """Map this node's type token ('int' or 'float') to its dtype."""
    cache = get_dtype_cache(interp.space)
    if self.v == 'int':
        return cache.w_int64dtype
    if self.v == 'float':
        return cache.w_float64dtype
    raise BadToken('unknown v to dtype "%s"' % self.v)
def call(self, space, args_w):
    """Apply this unary ufunc to args_w[0]; args_w[1] (optional) is `out`.

    Rejects flexible dtypes and dtypes disallowed by the ufunc's
    int_only/allow_bool/allow_complex flags.  Scalars are computed
    eagerly; arrays are dispatched to loop.call1.
    """
    w_obj = args_w[0]
    out = None
    if len(args_w) > 1:
        out = args_w[1]
        if space.is_w(out, space.w_None):
            out = None
    w_obj = convert_to_array(space, w_obj)
    dtype = w_obj.get_dtype()
    if dtype.is_flexible_type():
        raise OperationError(space.w_TypeError,
                             space.wrap('Not implemented for this type'))
    if (self.int_only and not dtype.is_int_type() or
            not self.allow_bool and dtype.is_bool_type() or
            not self.allow_complex and dtype.is_complex_type()):
        raise OperationError(space.w_TypeError, space.wrap(
            "ufunc %s not supported for the input type" % self.name))
    calc_dtype = find_unaryop_result_dtype(space,
                                           w_obj.get_dtype(),
                                           promote_to_float=self.promote_to_float,
                                           promote_bools=self.promote_bools)
    if out is not None:
        if not isinstance(out, W_NDimArray):
            raise OperationError(space.w_TypeError, space.wrap(
                'output must be an array'))
        # An explicit out array dictates the result dtype.
        res_dtype = out.get_dtype()
        #if not w_obj.get_dtype().can_cast_to(res_dtype):
        #    raise operationerrfmt(space.w_TypeError,
        #        "Cannot cast ufunc %s output from dtype('%s') to dtype('%s') with casting rule 'same_kind'", self.name, w_obj.get_dtype().name, res_dtype.name)
    elif self.bool_result:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
        if self.complex_to_float and calc_dtype.is_complex_type():
            # e.g. abs(): compute in complex, return the matching float.
            if calc_dtype.name == 'complex64':
                res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype
            else:
                res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype
    if w_obj.is_scalar():
        w_val = self.func(calc_dtype,
                          w_obj.get_scalar_value().convert_to(calc_dtype))
        if out is None:
            return w_val
        if out.is_scalar():
            out.set_scalar_value(w_val)
        else:
            # Broadcasting a scalar result into an array out.
            out.fill(res_dtype.coerce(space, w_val))
        return out
    shape = shape_agreement(space, w_obj.get_shape(), out,
                            broadcast_down=False)
    return loop.call1(space, shape, self.func, calc_dtype, res_dtype,
                      w_obj, out)
def find_dtype_for_scalar(space, w_obj, current_guess=None):
    """Infer the dtype for a wrapped scalar, widening `current_guess`.

    Extended variant covering bool/int/long/complex/str/float/slice;
    strings produce variable-size 'S%d' dtypes that grow with the longest
    string seen.  Raises NotImplementedError for unsupported objects.
    """
    bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype
    int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype
    complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype
    float_type = interp_dtype.get_dtype_cache(space).w_float64dtype
    if isinstance(w_obj, interp_boxes.W_GenericBox):
        # Already-boxed values carry their own dtype; combine with the guess.
        dtype = w_obj.get_dtype(space)
        if current_guess is None:
            return dtype
        return find_binop_result_dtype(space, dtype, current_guess)
    if space.isinstance_w(w_obj, space.w_bool):
        if current_guess is None or current_guess is bool_dtype:
            return bool_dtype
        return current_guess
    elif space.isinstance_w(w_obj, space.w_int):
        if (current_guess is None or current_guess is bool_dtype or
                current_guess is long_dtype):
            return long_dtype
        return current_guess
    elif space.isinstance_w(w_obj, space.w_long):
        if (current_guess is None or current_guess is bool_dtype or
                current_guess is long_dtype or
                current_guess is int64_dtype):
            return int64_dtype
        return current_guess
    elif space.isinstance_w(w_obj, space.w_complex):
        if (current_guess is None or current_guess is bool_dtype or
                current_guess is long_dtype or
                current_guess is int64_dtype or
                current_guess is complex_type or
                current_guess is float_type):
            return complex_type
        return current_guess
    elif space.isinstance_w(w_obj, space.w_str):
        if (current_guess is None):
            return interp_dtype.variable_dtype(space,
                                               'S%d' % space.len_w(w_obj))
        elif current_guess.num == NPY_STRING:
            # Grow the string dtype to fit the longest element.
            if current_guess.itemtype.get_size() < space.len_w(w_obj):
                return interp_dtype.variable_dtype(space,
                                                   'S%d' % space.len_w(w_obj))
        return current_guess
    # A complex guess absorbs any remaining numeric scalar.
    if current_guess is complex_type:
        return complex_type
    if space.isinstance_w(w_obj, space.w_float):
        return float_type
    elif space.isinstance_w(w_obj, space.w_slice):
        return long_dtype
    raise operationerrfmt(space.w_NotImplementedError,
                          'unable to create dtype from objects, '
                          '"%T" instance not supported', w_obj)
def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False,
                            promote_bools=False, int_only=False):
    """Pick the result dtype for a binary op between `dt1` and `dt2`.

    The magic constants compare against dtype numbers (the newer sibling
    of this function names them: 11 == NPY_FLOAT i.e. float32,
    10 == NPY_ULONGLONG i.e. uint64) -- TODO confirm against the dtype
    numbering table.
    """
    # dt1.num should be <= dt2.num
    if dt1.num > dt2.num:
        dt1, dt2 = dt2, dt1
    if int_only and (not dt1.is_int_type() or not dt2.is_int_type()):
        raise OperationError(space.w_TypeError,
                             space.wrap("Unsupported types"))
    # Some operations promote op(bool, bool) to return int8, rather than bool
    if promote_bools and (dt1.kind == dt2.kind == interp_dtype.BOOLLTR):
        return interp_dtype.get_dtype_cache(space).w_int8dtype
    if promote_to_float:
        return find_unaryop_result_dtype(space, dt2, promote_to_float=True)
    # If they're the same kind, choose the greater one.
    if dt1.kind == dt2.kind:
        return dt2
    # Everything promotes to float, and bool promotes to everything.
    if dt2.kind == interp_dtype.FLOATINGLTR or dt1.kind == interp_dtype.BOOLLTR:
        # Float32 + 8-bit int = Float64
        if dt2.num == 11 and dt1.itemtype.get_element_size() >= 4:
            return interp_dtype.get_dtype_cache(space).w_float64dtype
        return dt2
    # for now this means mixing signed and unsigned
    if dt2.kind == interp_dtype.SIGNEDLTR:
        # if dt2 has a greater number of bytes, then just go with it
        if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size():
            return dt2
        # we need to promote both dtypes
        dtypenum = dt2.num + 2
    else:
        # increase to the next signed type (or to float)
        dtypenum = dt2.num + 1
        # UInt64 + signed = Float64
        if dt2.num == 10:
            dtypenum += 1
    newdtype = interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum]
    if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or
            newdtype.kind == interp_dtype.FLOATINGLTR):
        return newdtype
    else:
        # we only promoted to long on 32-bit or to longlong on 64-bit
        # this is really for dealing with the Long and Ulong dtypes
        if LONG_BIT == 32:
            dtypenum += 2
        else:
            dtypenum += 3
        return interp_dtype.get_dtype_cache(space).builtin_dtypes[dtypenum]
def setup_class(cls):
    """Per-class fixture: build a FakeSpace and cache the float64 dtype."""
    py.test.skip("old")
    # Unreachable while the skip above is active.
    from pypy.module.micronumpy.compile import FakeSpace
    from pypy.module.micronumpy.interp_dtype import get_dtype_cache
    space = FakeSpace()
    cls.space = space
    cls.float64_dtype = get_dtype_cache(space).w_float64dtype
def call(self, space, args_w):
    """Apply this binary ufunc to two operands, building a lazy Call2.

    Scalar/scalar pairs are computed eagerly; otherwise a lazy Call2 node
    is returned and registered for invalidation on both operands.
    """
    from pypy.module.micronumpy.interp_numarray import (Call2,
        convert_to_array, Scalar, shape_agreement)
    [w_lhs, w_rhs] = args_w
    w_lhs = convert_to_array(space, w_lhs)
    w_rhs = convert_to_array(space, w_rhs)
    calc_dtype = find_binop_result_dtype(space,
        w_lhs.find_dtype(), w_rhs.find_dtype(),
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools,
    )
    if self.comparison_func:
        # Comparisons always yield bools regardless of operand dtypes.
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar):
        return self.func(calc_dtype,
            w_lhs.value.convert_to(calc_dtype),
            w_rhs.value.convert_to(calc_dtype)
        )
    new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape)
    w_res = Call2(self.func, self.name, new_shape, calc_dtype,
                  res_dtype, w_lhs, w_rhs)
    # Writing to either operand later must invalidate the lazy result.
    w_lhs.add_invalidates(w_res)
    w_rhs.add_invalidates(w_res)
    return w_res
def reduce(self, space, w_obj, multidim, promote_to_largest, dim,
           keepdims=False):
    """Reduce `w_obj` with this binary ufunc, along `dim` or fully.

    Raises ValueError for non-binary ufuncs, out-of-bounds axes, and
    zero-size input without an identity; TypeError on scalars.
    """
    from pypy.module.micronumpy.interp_numarray import convert_to_array, \
        Scalar, ReduceArray
    if self.argcount != 2:
        raise OperationError(space.w_ValueError, space.wrap("reduce only "
            "supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if dim >= len(obj.shape):
        raise OperationError(space.w_ValueError,
                             space.wrap("axis(=%d) out of bounds" % dim))
    if isinstance(obj, Scalar):
        raise OperationError(space.w_TypeError, space.wrap("cannot reduce "
            "on a scalar"))
    size = obj.size
    if self.comparison_func:
        dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        # promote_bools=True: reducing bools accumulates in an int dtype.
        dtype = find_unaryop_result_dtype(
            space, obj.find_dtype(),
            promote_to_float=self.promote_to_float,
            promote_to_largest=promote_to_largest,
            promote_bools=True
        )
    shapelen = len(obj.shape)
    if self.identity is None and size == 0:
        raise operationerrfmt(space.w_ValueError, "zero-size array to "
            "%s.reduce without identity", self.name)
    if shapelen > 1 and dim >= 0:
        # Multi-dimensional reduction along one axis.
        return self.do_axis_reduce(obj, dtype, dim, keepdims)
    # Full (flat) reduction.
    arr = ReduceArray(self.func, self.name, self.identity, obj, dtype)
    return loop.compute(arr)
def __init__(self, index_stride_size, stride_size, size):
    # Allocate an index buffer (long dtype) and a raw value buffer for
    # `size` elements, then initialize the base Repr with both starting
    # at offset 0.
    # NOTE(review): `space` is not a parameter and is not visibly defined
    # in this scope -- as written this looks like it would raise
    # NameError at runtime; confirm where `space` is meant to come from.
    start = 0
    dtype = interp_dtype.get_dtype_cache(space).w_longdtype
    indexes = dtype.itemtype.malloc(size * dtype.get_size())
    values = alloc_raw_storage(size * stride_size,
                               track_allocation=False)
    Repr.__init__(self, dtype.get_size(), stride_size, size,
                  values, indexes, start, start)
def __init__(self, index_stride_size, stride_size, size):
    # Duplicate of the sibling __init__ above: allocates an index buffer
    # (long dtype) and a raw value buffer for `size` elements, then
    # initializes the base Repr with both starting at offset 0.
    # NOTE(review): `space` is not a parameter and is not visibly defined
    # in this scope -- confirm where it is meant to come from.
    start = 0
    dtype = interp_dtype.get_dtype_cache(space).w_longdtype
    indexes = dtype.itemtype.malloc(size * dtype.get_size())
    values = alloc_raw_storage(size * stride_size,
                               track_allocation=False)
    Repr.__init__(self, dtype.get_size(), stride_size, size,
                  values, indexes, start, start)
def call(self, space, args_w):
    """Apply this binary ufunc; an optional third argument is `out`.

    An explicit out array forces the computation dtype and triggers
    immediate (non-lazy) evaluation of the result.
    """
    from pypy.module.micronumpy.interp_numarray import (Call2,
        convert_to_array, Scalar, shape_agreement, BaseArray)
    if len(args_w) > 2:
        [w_lhs, w_rhs, w_out] = args_w
    else:
        [w_lhs, w_rhs] = args_w
        w_out = None
    w_lhs = convert_to_array(space, w_lhs)
    w_rhs = convert_to_array(space, w_rhs)
    if space.is_w(w_out, space.w_None) or w_out is None:
        out = None
        calc_dtype = find_binop_result_dtype(space,
            w_lhs.find_dtype(), w_rhs.find_dtype(),
            int_only=self.int_only,
            promote_to_float=self.promote_to_float,
            promote_bools=self.promote_bools,
        )
    elif not isinstance(w_out, BaseArray):
        raise OperationError(space.w_TypeError, space.wrap(
            'output must be an array'))
    else:
        out = w_out
        # The out array dictates the computation dtype.
        calc_dtype = out.find_dtype()
    if self.comparison_func:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar):
        # Scalar op scalar is computed eagerly; store into out if given.
        arr = self.func(calc_dtype,
            w_lhs.value.convert_to(calc_dtype),
            w_rhs.value.convert_to(calc_dtype)
        )
        if isinstance(out, Scalar):
            out.value = arr
        elif isinstance(out, BaseArray):
            out.fill(space, arr)
        else:
            out = arr
        return space.wrap(out)
    new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape)
    # Test correctness of out.shape
    if out and out.shape != shape_agreement(space, new_shape, out.shape):
        raise operationerrfmt(space.w_ValueError,
            'output parameter shape mismatch, could not broadcast [%s]' +
            ' to [%s]',
            ",".join([str(x) for x in new_shape]),
            ",".join([str(x) for x in out.shape]),
            )
    w_res = Call2(self.func, self.name, new_shape, calc_dtype,
                  res_dtype, w_lhs, w_rhs, out)
    # Writing to either operand later must invalidate the lazy result.
    w_lhs.add_invalidates(w_res)
    w_rhs.add_invalidates(w_res)
    if out:
        # Force evaluation now so out is actually populated.
        w_res.get_concrete()
    return w_res
def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func):
    """Build the low-level callable for a ufunc.

    The returned function looks up `op_name` on the dtype's itemtype and
    applies it; for binary comparison ufuncs the raw result is boxed as a
    numpy bool.
    """
    if argcount == 1:
        def caller(res_dtype, value):
            op = getattr(res_dtype.itemtype, op_name)
            return op(value)
    elif argcount == 2:
        dtype_cache = interp_dtype.get_dtype_cache(space)
        def caller(res_dtype, lvalue, rvalue):
            op = getattr(res_dtype.itemtype, op_name)
            result = op(lvalue, rvalue)
            if not comparison_func:
                return result
            return dtype_cache.w_booldtype.box(result)
    return func_with_new_name(caller, ufunc_name)
def call(self, space, args_w):
    """Apply this unary ufunc; an optional second argument is `out`.

    An explicit out array dictates the result dtype and forces immediate
    evaluation; otherwise a lazy Call1 node is returned.
    """
    from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray,
        convert_to_array, Scalar, shape_agreement)
    if len(args_w) < 2:
        [w_obj] = args_w
        out = None
    else:
        [w_obj, out] = args_w
        if space.is_w(out, space.w_None):
            out = None
    w_obj = convert_to_array(space, w_obj)
    calc_dtype = find_unaryop_result_dtype(space,
                                           w_obj.find_dtype(),
                                           promote_to_float=self.promote_to_float,
                                           promote_bools=self.promote_bools)
    if out:
        if not isinstance(out, BaseArray):
            raise OperationError(space.w_TypeError, space.wrap(
                'output must be an array'))
        res_dtype = out.find_dtype()
    elif self.bool_result:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if isinstance(w_obj, Scalar):
        # Scalars are computed eagerly; store into out if given.
        arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))
        if isinstance(out, Scalar):
            out.value = arr
        elif isinstance(out, BaseArray):
            out.fill(space, arr)
        else:
            out = arr
        return space.wrap(out)
    if out:
        assert isinstance(out, BaseArray) # For translation
        broadcast_shape = shape_agreement(space, w_obj.shape, out.shape)
        if not broadcast_shape or broadcast_shape != out.shape:
            raise operationerrfmt(space.w_ValueError,
                'output parameter shape mismatch, could not broadcast [%s]' +
                ' to [%s]',
                ",".join([str(x) for x in w_obj.shape]),
                ",".join([str(x) for x in out.shape]),
                )
        w_res = Call1(self.func, self.name, out.shape, calc_dtype,
                      res_dtype, w_obj, out)
        #Force it immediately
        w_res.get_concrete()
    else:
        w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype,
                      res_dtype, w_obj)
    # Writing to the operand later must invalidate the lazy result.
    w_obj.add_invalidates(w_res)
    return w_res
def test_slice_signature(self, space):
    """Equivalent slice expressions over the same array share a find_sig()."""
    float64_dtype = get_dtype_cache(space).w_float64dtype
    ar = W_NDimArray(10, [10], dtype=float64_dtype)
    v1 = ar.descr_getitem(space, space.wrap(slice(1, 3, 1)))
    v2 = ar.descr_getitem(space, space.wrap(slice(4, 6, 1)))
    assert v1.find_sig() is v2.find_sig()
    v3 = v2.descr_add(space, v1)
    v4 = v1.descr_add(space, v2)
    assert v3.find_sig() is v4.find_sig()
    v5 = ar.descr_add(space, ar).descr_getitem(space,
                                               space.wrap(slice(1, 3, 1)))
    v6 = ar.descr_add(space, ar).descr_getitem(space,
                                               space.wrap(slice(1, 4, 1)))
    assert v5.find_sig() is v6.find_sig()
def test_binop_signature(self, space):
    """Signature sharing/distinction rules for binary-op result nodes."""
    float64_dtype = get_dtype_cache(space).w_float64dtype
    bool_dtype = get_dtype_cache(space).w_booldtype
    ar = W_NDimArray(10, [10], dtype=float64_dtype)
    ar2 = W_NDimArray(10, [10], dtype=float64_dtype)
    v1 = ar.descr_add(space, ar)
    v2 = ar.descr_add(space, Scalar(float64_dtype, W_Float64Box(2.0)))
    sig1 = v1.find_sig()
    sig2 = v2.find_sig()
    assert v1 is not v2
    # array+array iterates both operands together; array+scalar does not.
    assert sig1.left.iter_no == sig1.right.iter_no
    assert sig2.left.iter_no != sig2.right.iter_no
    assert sig1.left.array_no == sig1.right.array_no
    sig1b = ar2.descr_add(space, ar).find_sig()
    assert sig1b.left.array_no != sig1b.right.array_no
    assert sig1b is not sig1
    # Same structure (array + float scalar) shares the signature.
    v3 = ar.descr_add(space, Scalar(float64_dtype, W_Float64Box(1.0)))
    sig3 = v3.find_sig()
    assert sig2 is sig3
    v4 = ar.descr_add(space, ar)
    assert v1.find_sig() is v4.find_sig()
    # A different operand dtype yields a different signature.
    bool_ar = W_NDimArray(10, [10], dtype=bool_dtype)
    v5 = ar.descr_add(space, bool_ar)
    assert v5.find_sig() is not v1.find_sig()
    assert v5.find_sig() is not v2.find_sig()
    v6 = ar.descr_add(space, bool_ar)
    assert v5.find_sig() is v6.find_sig()
    v7 = v6.descr_add(space, v6)
    sig7 = v7.find_sig()
    assert sig7.left.left.iter_no == sig7.right.left.iter_no
    assert sig7.left.left.iter_no != sig7.right.right.iter_no
    assert sig7.left.right.iter_no == sig7.right.right.iter_no
    # Forcing a result changes its signature.
    v1.forced_result = ar
    assert v1.find_sig() is not sig1
def argsort(arr, space, w_axis, itemsize): if w_axis is space.w_None: # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) arr = arr.reshape(space, None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 else: axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): raw_storage_setitem(storage, i * INT_SIZE, i) r = Repr(INT_SIZE, itemsize, arr.get_size(), arr.get_storage(), storage, 0, arr.start) ArgSort(r).sort() else: shape = arr.get_shape() if axis < 0: axis = len(shape) + axis if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap("Wrong axis %d" % axis)) iterable_shape = shape[:axis] + [0] + shape[axis + 1:] iter = AxisIterator(arr, iterable_shape, axis, False) index_impl = index_arr.implementation index_iter = AxisIterator(index_impl, iterable_shape, axis, False) stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] while not iter.done(): for i in range(axis_size): raw_storage_setitem( storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, arr.get_storage(), storage, index_iter.offset, iter.offset) ArgSort(r).sort() iter.next() index_iter.next() return index_arr
def add_ufunc(self, space, ufunc_name, op_name, argcount, extra_kwargs=None):
    """Create a W_Ufunc1/W_Ufunc2 for `op_name` and bind it as an
    attribute named `ufunc_name` on this registry.

    extra_kwargs may carry "identity" (boxed here as a long) and
    "comparison_func"; both are forwarded to the ufunc constructor.
    """
    if extra_kwargs is None:
        extra_kwargs = {}
    else:
        # BUG FIX: work on a copy so the caller's dict is not mutated
        # when "identity" is replaced with its boxed form below.
        extra_kwargs = extra_kwargs.copy()
    identity = extra_kwargs.get("identity")
    if identity is not None:
        identity = \
            interp_dtype.get_dtype_cache(space).w_longdtype.box(identity)
    extra_kwargs["identity"] = identity
    func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount,
        comparison_func=extra_kwargs.get("comparison_func", False)
    )
    if argcount == 1:
        ufunc = W_Ufunc1(func, ufunc_name, **extra_kwargs)
    elif argcount == 2:
        ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs)
    setattr(self, ufunc_name, ufunc)
def argsort(arr, space, w_axis, itemsize): if w_axis is space.w_None: # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) arr = arr.reshape(space, None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 else: axis = space.int_w(w_axis) # create array of indexes dtype = interp_dtype.get_dtype_cache(space).w_longdtype index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: for i in range(arr.get_size()): raw_storage_setitem(storage, i * INT_SIZE, i) r = Repr(INT_SIZE, itemsize, arr.get_size(), arr.get_storage(), storage, 0, arr.start) ArgSort(r).sort() else: shape = arr.get_shape() if axis < 0: axis = len(shape) + axis - 1 if axis < 0 or axis > len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) iterable_shape = shape[:axis] + [0] + shape[axis + 1:] iter = AxisIterator(arr, iterable_shape, axis, False) index_impl = index_arr.implementation index_iter = AxisIterator(index_impl, iterable_shape, axis, False) stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] while not iter.done(): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, arr.get_storage(), storage, index_iter.offset, iter.offset) ArgSort(r).sort() iter.next() index_iter.next() return index_arr
def array(space, w_item_or_iterable, w_dtype=None, w_order=NoneNotWrapped):
    """numpy.array(...): build a W_NDimArray (or scalar) from a wrapped
    object, inferring the dtype element-by-element when none is given.
    Only C order is supported.
    """
    # find scalar
    if not space.issequence_w(w_item_or_iterable):
        if space.is_w(w_dtype, space.w_None):
            w_dtype = interp_ufuncs.find_dtype_for_scalar(space,
                                                          w_item_or_iterable)
        dtype = space.interp_w(interp_dtype.W_Dtype,
            space.call_function(space.gettypefor(interp_dtype.W_Dtype),
                                w_dtype)
        )
        return scalar_w(space, dtype, w_item_or_iterable)
    if w_order is None:
        order = 'C'
    else:
        order = space.str_w(w_order)
        if order != 'C': # or order != 'F':
            raise operationerrfmt(space.w_ValueError, "Unknown order: %s",
                                  order)
    shape, elems_w = _find_shape_and_elems(space, w_item_or_iterable)
    # they come back in C order
    size = len(elems_w)
    if space.is_w(w_dtype, space.w_None):
        w_dtype = None
        for w_elem in elems_w:
            w_dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem,
                                                          w_dtype)
            # float64 is the widest guess here, so stop scanning early.
            if w_dtype is interp_dtype.get_dtype_cache(space).w_float64dtype:
                break
    if w_dtype is None:
        w_dtype = space.w_None
    dtype = space.interp_w(interp_dtype.W_Dtype,
        space.call_function(space.gettypefor(interp_dtype.W_Dtype),
                            w_dtype)
    )
    arr = W_NDimArray(size, shape[:], dtype=dtype, order=order)
    shapelen = len(shape)
    arr_iter = ArrayIterator(arr.size)
    # Coerce and store each element in C order.
    for i in range(len(elems_w)):
        w_elem = elems_w[i]
        dtype.setitem(arr.storage, arr_iter.offset,
                      dtype.coerce(space, w_elem))
        arr_iter = arr_iter.next(shapelen)
    return arr
def call(self, space, args_w):
    """Apply this unary ufunc to its single operand.

    Scalars are computed eagerly; arrays produce a lazy Call1 node that
    is invalidated if the operand is later written to.
    """
    from pypy.module.micronumpy.interp_numarray import (Call1,
        convert_to_array, Scalar)
    [w_obj] = args_w
    w_arr = convert_to_array(space, w_obj)
    calc_dtype = find_unaryop_result_dtype(
        space, w_arr.find_dtype(),
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    if self.bool_result:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if isinstance(w_arr, Scalar):
        # Eager path for scalar input.
        converted = w_arr.value.convert_to(calc_dtype)
        return space.wrap(self.func(calc_dtype, converted))
    w_res = Call1(self.func, self.name, w_arr.shape, calc_dtype,
                  res_dtype, w_arr)
    w_arr.add_invalidates(w_res)
    return w_res
def descr_repr(self, space):
    """repr() of an array: 'array(<data>[, shape=...][, dtype=...])'.

    The dtype suffix is emitted for non-default dtypes (anything other
    than float64 / native-width signed ints) and for empty arrays.
    """
    res = StringBuilder()
    res.append("array(")
    concrete = self.get_concrete_or_scalar()
    dtype = concrete.find_dtype()
    if not concrete.size:
        res.append('[]')
        if len(self.shape) > 1:
            # An empty slice reports its shape
            res.append(", shape=(")
            self_shape = str(self.shape)
            # strip the '[' and ']' of the list repr to get a tuple-ish look
            res.append_slice(str(self_shape), 1, len(self_shape) - 1)
            res.append(')')
    else:
        concrete.to_str(space, 1, res, indent=' ')
    if (dtype is not interp_dtype.get_dtype_cache(space).w_float64dtype and
            not (dtype.kind == interp_dtype.SIGNEDLTR and
                 dtype.itemtype.get_element_size() ==
                     rffi.sizeof(lltype.Signed)) or
            not self.size):
        res.append(", dtype=" + dtype.name)
    res.append(")")
    return space.wrap(res.build())
def find_unaryop_result_dtype(space, dt, promote_to_float=False,
                              promote_bools=False, promote_to_largest=False):
    """Pick the result dtype for a one-argument operation on dtype `dt`."""
    if promote_bools and (dt.kind == interp_dtype.BOOLLTR):
        return interp_dtype.get_dtype_cache(space).w_int8dtype
    if promote_to_float:
        if dt.kind == interp_dtype.FLOATINGLTR:
            return dt
        if dt.num >= 5:
            # dtype numbers >= 5 -- presumably int32 and wider; they go
            # straight to float64 (TODO confirm against the numbering).
            return interp_dtype.get_dtype_cache(space).w_float64dtype
        for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes:
            # smallest float strictly wider than dt
            if (dtype.kind == interp_dtype.FLOATINGLTR and
                    dtype.itemtype.get_element_size() >
                        dt.itemtype.get_element_size()):
                return dtype
    if promote_to_largest:
        if dt.kind == interp_dtype.BOOLLTR or dt.kind == interp_dtype.SIGNEDLTR:
            # NOTE(review): returning float64 for signed ints looks
            # suspicious next to the UNSIGNEDLTR branch returning uint64;
            # confirm this is not meant to be int64.
            return interp_dtype.get_dtype_cache(space).w_float64dtype
        elif dt.kind == interp_dtype.FLOATINGLTR:
            return interp_dtype.get_dtype_cache(space).w_float64dtype
        elif dt.kind == interp_dtype.UNSIGNEDLTR:
            return interp_dtype.get_dtype_cache(space).w_uint64dtype
        else:
            assert False
    return dt
def execute(self, interp):
    """Build a float64 array holding [0.0, 1.0, ..., self.v - 1]."""
    space = interp.space
    items = []
    for i in range(self.v):
        items.append(space.wrap(float(i)))
    w_list = space.newlist(items)
    dtype = get_dtype_cache(space).w_float64dtype
    return array(space, w_list, w_dtype=dtype, w_order=None)
def descr_all(self, space):
    """Scalar all(): box this object's truth value as a numpy bool."""
    from pypy.module.micronumpy.interp_dtype import get_dtype_cache
    bool_dtype = get_dtype_cache(space).w_booldtype
    return bool_dtype.box(space.is_true(self))
def _get_dtype(space):
    """Resolve the dtype registered under the enclosing `name`."""
    from pypy.module.micronumpy.interp_dtype import get_dtype_cache
    cache = get_dtype_cache(space)
    return cache.dtypes_by_name[name]
def reduce(self, space, w_obj, multidim, promote_to_largest, axis,
           keepdims=False, out=None):
    """Reduce `w_obj` with this binary ufunc along `axis` (or fully),
    optionally writing into `out` and optionally keeping the reduced
    dimension (keepdims).
    """
    from pypy.module.micronumpy.interp_numarray import convert_to_array, \
        Scalar, ReduceArray, W_NDimArray
    if self.argcount != 2:
        raise OperationError(space.w_ValueError, space.wrap("reduce only "
            "supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if axis >= len(obj.shape):
        raise OperationError(space.w_ValueError,
                             space.wrap("axis(=%d) out of bounds" % axis))
    if isinstance(obj, Scalar):
        raise OperationError(space.w_TypeError, space.wrap("cannot reduce "
            "on a scalar"))
    size = obj.size
    if self.comparison_func:
        dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        # promote_bools=True: reducing bools accumulates in an int dtype.
        dtype = find_unaryop_result_dtype(
            space, obj.find_dtype(),
            promote_to_float=self.promote_to_float,
            promote_to_largest=promote_to_largest,
            promote_bools=True
        )
    shapelen = len(obj.shape)
    if self.identity is None and size == 0:
        raise operationerrfmt(space.w_ValueError, "zero-size array to "
            "%s.reduce without identity", self.name)
    if shapelen > 1 and axis >= 0:
        # Axis reduction: compute the expected result shape first.
        if keepdims:
            shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:]
        else:
            shape = obj.shape[:axis] + obj.shape[axis + 1:]
        if out:
            #Test for shape agreement
            if len(out.shape) > len(shape):
                raise operationerrfmt(space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' has too many dimensions', self.name)
            elif len(out.shape) < len(shape):
                raise operationerrfmt(space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' does not have enough dimensions', self.name)
            elif out.shape != shape:
                raise operationerrfmt(space.w_ValueError,
                    'output parameter shape mismatch, expecting [%s]' +
                    ' , got [%s]',
                    ",".join([str(x) for x in shape]),
                    ",".join([str(x) for x in out.shape]),
                    )
            #Test for dtype agreement, perhaps create an itermediate
            #if out.dtype != dtype:
            #    raise OperationError(space.w_TypeError, space.wrap(
            #        "mismatched dtypes"))
            return self.do_axis_reduce(obj, out.find_dtype(), axis, out)
        else:
            result = W_NDimArray(shape, dtype)
            return self.do_axis_reduce(obj, dtype, axis, result)
    # Full (flat) reduction from here on.
    if out:
        if len(out.shape) > 0:
            raise operationerrfmt(space.w_ValueError, "output parameter "
                "for reduction operation %s has too many"
                " dimensions", self.name)
        arr = ReduceArray(self.func, self.name, self.identity, obj,
                          out.find_dtype())
        val = loop.compute(arr)
        assert isinstance(out, Scalar)
        out.value = val
        # NOTE(review): this branch falls off the function and returns
        # None rather than `out`/`val` -- confirm callers only rely on
        # the in-place mutation of `out` here.
    else:
        arr = ReduceArray(self.func, self.name, self.identity, obj, dtype)
        val = loop.compute(arr)
        return val
def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False,
                            promote_bools=False):
    """Pick the result dtype for a binary op between `dt1` and `dt2`.

    Handles bool promotion, complex contagion, float promotion, flexible
    (string/record) dtypes, and mixed signed/unsigned widening via the
    NPY_* dtype numbers.
    """
    # dt1.num should be <= dt2.num
    if dt1.num > dt2.num:
        dt1, dt2 = dt2, dt1
    # Some operations promote op(bool, bool) to return int8, rather than bool
    if promote_bools and (dt1.kind == dt2.kind == NPY_GENBOOLLTR):
        return interp_dtype.get_dtype_cache(space).w_int8dtype
    # Everything numeric promotes to complex
    if dt2.is_complex_type() or dt1.is_complex_type():
        if dt2.num == NPY_CFLOAT:
            return interp_dtype.get_dtype_cache(space).w_complex64dtype
        elif dt2.num == NPY_CDOUBLE:
            return interp_dtype.get_dtype_cache(space).w_complex128dtype
        elif dt2.num == NPY_CLONGDOUBLE:
            return interp_dtype.get_dtype_cache(space).w_complexlongdtype
        else:
            raise OperationError(space.w_TypeError,
                                 space.wrap("Unsupported types"))
    if promote_to_float:
        return find_unaryop_result_dtype(space, dt2, promote_to_float=True)
    # If they're the same kind, choose the greater one.
    if dt1.kind == dt2.kind and not dt2.is_flexible_type():
        return dt2
    # Everything promotes to float, and bool promotes to everything.
    if dt2.kind == NPY_FLOATINGLTR or dt1.kind == NPY_GENBOOLLTR:
        # Float32 + 8-bit int = Float64
        if dt2.num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4:
            return interp_dtype.get_dtype_cache(space).w_float64dtype
        return dt2
    # for now this means mixing signed and unsigned
    if dt2.kind == NPY_SIGNEDLTR:
        # if dt2 has a greater number of bytes, then just go with it
        if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size():
            return dt2
        # we need to promote both dtypes
        dtypenum = dt2.num + 2
    elif dt2.num == NPY_ULONGLONG or (LONG_BIT == 64 and
                                      dt2.num == NPY_ULONG):
        # UInt64 + signed = Float64
        dtypenum = NPY_DOUBLE
    elif dt2.is_flexible_type():
        # For those operations that get here (concatenate, stack),
        # flexible types take precedence over numeric type
        if dt2.is_record_type():
            return dt2
        if dt1.is_str_or_unicode():
            # Prefer whichever string dtype is wide enough for both.
            if dt2.itemtype.get_element_size() >= \
                    dt1.itemtype.get_element_size():
                return dt2
            return dt1
        return dt2
    else:
        # increase to the next signed type
        dtypenum = dt2.num + 1
    newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum]
    if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or
            newdtype.kind == NPY_FLOATINGLTR):
        return newdtype
    else:
        # we only promoted to long on 32-bit or to longlong on 64-bit
        # this is really for dealing with the Long and Ulong dtypes
        dtypenum += 2
        return interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum]
def call(self, space, args_w):
    """Apply this binary ufunc to app-level arguments.

    args_w is [lhs, rhs] or [lhs, rhs, out].  Operands are converted
    to arrays, a calculation dtype is chosen by the promotion rules
    (or taken from `out` when one is given), scalars are computed
    eagerly, and array operands are dispatched to loop.call2.

    Raises OperationError (TypeError) for flexible/unsupported dtype
    combinations or a non-array `out`.
    """
    if len(args_w) > 2:
        [w_lhs, w_rhs, w_out] = args_w
    else:
        [w_lhs, w_rhs] = args_w
        w_out = None
    w_lhs = convert_to_array(space, w_lhs)
    w_rhs = convert_to_array(space, w_rhs)
    w_ldtype = w_lhs.get_dtype()
    w_rdtype = w_rhs.get_dtype()
    if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \
            self.comparison_func:
        # string-vs-string comparisons are supported; fall through.
        pass
    elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \
            self.comparison_func and w_out is None:
        # Comparing a string with a non-string is always unequal.
        return space.wrap(False)
    elif (w_ldtype.is_flexible_type() or
          w_rdtype.is_flexible_type()):
        # BUG FIX: the message said "dtypes %s and %s" but interpolated
        # the rhs dtype first; report lhs then rhs to match the operands.
        raise OperationError(space.w_TypeError, space.wrap(
            'unsupported operand dtypes %s and %s for "%s"' %
            (w_ldtype.get_name(), w_rdtype.get_name(),
             self.name)))

    if self.are_common_types(w_ldtype, w_rdtype):
        # A scalar operand adopts the array operand's dtype so that
        # e.g. int8_array + python_int stays int8.
        if not w_lhs.is_scalar() and w_rhs.is_scalar():
            w_rdtype = w_ldtype
        elif w_lhs.is_scalar() and not w_rhs.is_scalar():
            w_ldtype = w_rdtype
    if (self.int_only and (not w_ldtype.is_int_type() or
                           not w_rdtype.is_int_type()) or
            not self.allow_bool and (w_ldtype.is_bool_type() or
                                     w_rdtype.is_bool_type()) or
            not self.allow_complex and (w_ldtype.is_complex_type() or
                                        w_rdtype.is_complex_type())):
        raise OperationError(space.w_TypeError,
                             space.wrap("Unsupported types"))
    calc_dtype = find_binop_result_dtype(space,
        w_ldtype, w_rdtype,
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    if space.is_none(w_out):
        out = None
    elif not isinstance(w_out, W_NDimArray):
        raise OperationError(space.w_TypeError, space.wrap(
            'output must be an array'))
    else:
        out = w_out
        # An explicit out array dictates the calculation dtype.
        calc_dtype = out.get_dtype()
    if self.comparison_func:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if w_lhs.is_scalar() and w_rhs.is_scalar():
        # scalar op scalar: compute directly, no loop needed.
        arr = self.func(calc_dtype,
            w_lhs.get_scalar_value().convert_to(calc_dtype),
            w_rhs.get_scalar_value().convert_to(calc_dtype)
        )
        if isinstance(out, W_NDimArray):
            if out.is_scalar():
                out.set_scalar_value(arr)
            else:
                out.fill(arr)
        else:
            out = arr
        return out
    # Broadcast operand shapes against each other, then (without
    # broadcasting down) against the out array if one was supplied.
    new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs)
    new_shape = shape_agreement(space, new_shape, out, broadcast_down=False)
    return loop.call2(space, new_shape, self.func, calc_dtype,
                      res_dtype, w_lhs, w_rhs, out)
def reduce(self, space, w_obj, promote_to_largest, w_axis,
           keepdims=False, out=None, dtype=None, cumulative=False):
    """Perform a (possibly cumulative) reduction of w_obj with this
    binary ufunc.

    Dispatches to one of three paths: an axis reduce for
    multidimensional input, a cumulative reduce producing a flat
    result of size get_size(), or a full reduce to a single scalar.
    `out`, when given, supplies both the destination and the
    calculation dtype.  Raises ValueError for non-binary ufuncs,
    shape-mismatched `out`, or empty input without an identity.
    """
    if self.argcount != 2:
        raise OperationError(space.w_ValueError, space.wrap("reduce only "
            "supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if obj.get_dtype().is_flexible_type():
        raise OperationError(space.w_TypeError,
            space.wrap('cannot perform reduce for flexible type'))
    obj_shape = obj.get_shape()
    if obj.is_scalar():
        # Reducing a scalar is the identity operation.
        return obj.get_scalar_value()
    shapelen = len(obj_shape)
    axis = unwrap_axis_arg(space, shapelen, w_axis)
    assert axis >= 0
    dtype = interp_dtype.decode_w_dtype(space, dtype)
    if dtype is None:
        if self.comparison_func:
            # comparison reductions (e.g. logical ops) yield bool
            dtype = interp_dtype.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, obj.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=promote_to_largest,
                promote_bools=True
            )
    if self.identity is None:
        # Without an identity element an empty reduced dimension
        # has no defined result.
        for i in range(shapelen):
            if space.is_none(w_axis) or i == axis:
                if obj_shape[i] == 0:
                    raise operationerrfmt(space.w_ValueError,
                        "zero-size array to " "%s.reduce without identity",
                        self.name)
    if shapelen > 1 and axis < shapelen:
        # --- axis reduce path (multidimensional input) ---
        temp = None
        if cumulative:
            # cumulative keeps the full shape; `temp` accumulates the
            # running values along the reduced axis.
            shape = obj_shape[:]
            temp_shape = obj_shape[:axis] + obj_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            temp = W_NDimArray.from_shape(space, temp_shape, dtype,
                                          w_instance=obj)
        elif keepdims:
            # reduced axis kept as length-1 dimension
            shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:]
        else:
            shape = obj_shape[:axis] + obj_shape[axis + 1:]
        if out:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            if len(out.get_shape()) > len(shape):
                raise operationerrfmt(space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' has too many dimensions', self.name)
            elif len(out.get_shape()) < len(shape):
                raise operationerrfmt(space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' does not have enough dimensions', self.name)
            elif out.get_shape() != shape:
                raise operationerrfmt(space.w_ValueError,
                    'output parameter shape mismatch, expecting [%s]' +
                    ' , got [%s]',
                    ",".join([str(x) for x in shape]),
                    ",".join([str(x) for x in out.get_shape()]),
                    )
            dtype = out.get_dtype()
        else:
            out = W_NDimArray.from_shape(space, shape, dtype,
                                         w_instance=obj)
        return loop.do_axis_reduce(shape, self.func, obj, dtype, axis,
                                   out, self.identity, cumulative, temp)
    if cumulative:
        # --- flat cumulative path: result has one element per input ---
        if out:
            if out.get_shape() != [obj.get_size()]:
                raise OperationError(space.w_ValueError, space.wrap(
                    "out of incompatible size"))
        else:
            out = W_NDimArray.from_shape(space, [obj.get_size()], dtype,
                                         w_instance=obj)
        loop.compute_reduce_cumulative(obj, out, dtype, self.func,
                                       self.identity)
        return out
    # --- full reduce to a scalar ---
    if out:
        if len(out.get_shape()) > 0:
            raise operationerrfmt(space.w_ValueError, "output parameter "
                "for reduction operation %s has too many"
                " dimensions", self.name)
        dtype = out.get_dtype()
    res = loop.compute_reduce(obj, dtype, self.func, self.done_func,
                              self.identity)
    if out:
        out.set_scalar_value(res)
        return out
    return res
def test_unaryops(self, space):
    """Unary result-dtype rules: every dtype maps to itself by default,
    promote_to_float coerces integers to the smallest adequate float,
    and promote_bools turns bool into int8 (as used by the sign ufunc).
    """
    cache = get_dtype_cache(space)
    bool_dtype = cache.w_booldtype
    int8_dtype = cache.w_int8dtype
    uint8_dtype = cache.w_uint8dtype
    int16_dtype = cache.w_int16dtype
    uint16_dtype = cache.w_uint16dtype
    int32_dtype = cache.w_int32dtype
    uint32_dtype = cache.w_uint32dtype
    long_dtype = cache.w_longdtype
    ulong_dtype = cache.w_ulongdtype
    int64_dtype = cache.w_int64dtype
    uint64_dtype = cache.w_uint64dtype
    float32_dtype = cache.w_float32dtype
    float64_dtype = cache.w_float64dtype

    # Normal rules, everything returns itself
    all_dtypes = [bool_dtype, int8_dtype, uint8_dtype, int16_dtype,
                  uint16_dtype, int32_dtype, uint32_dtype, long_dtype,
                  ulong_dtype, int64_dtype, uint64_dtype, float32_dtype,
                  float64_dtype]
    for dt in all_dtypes:
        assert find_unaryop_result_dtype(space, dt) is dt

    # Coerce to floats, some of these will eventually be float16, or
    # whatever our smallest float type is.
    float_promotions = [
        (bool_dtype, float32_dtype),    # will be float16 if we ever put that in
        (int8_dtype, float32_dtype),    # will be float16 if we ever put that in
        (uint8_dtype, float32_dtype),   # will be float16 if we ever put that in
        (int16_dtype, float32_dtype),
        (uint16_dtype, float32_dtype),
        (int32_dtype, float64_dtype),
        (uint32_dtype, float64_dtype),
        (int64_dtype, float64_dtype),
        (uint64_dtype, float64_dtype),
        (float32_dtype, float32_dtype),
        (float64_dtype, float64_dtype),
    ]
    for dt, expected in float_promotions:
        assert find_unaryop_result_dtype(
            space, dt, promote_to_float=True) is expected

    # promote bools, happens with sign ufunc
    assert find_unaryop_result_dtype(
        space, bool_dtype, promote_bools=True) is int8_dtype
def execute(self, interp): w_list = self.wrap(interp.space) dtype = get_dtype_cache(interp.space).w_float64dtype return array(interp.space, w_list, w_dtype=dtype, w_order=None)