def reduce(self, space, w_obj, w_axis, keepdims=False, out=None,
           dtype=None, cumulative=False):
    """Apply this binary ufunc as a reduction over ``w_obj``.

    Reduces along ``w_axis`` (or the whole flattened array when the axis
    is None), optionally keeping the reduced dimension (``keepdims``),
    producing a running reduction (``cumulative``), and/or writing the
    result into a caller-supplied ``out`` array.

    Raises app-level ValueError for non-binary ufuncs, bad axes,
    identity-less reductions over empty axes, and mismatched ``out``
    shapes; raises app-level TypeError for flexible dtypes.
    """
    # A reduction folds pairs of elements, so only binary ufuncs qualify.
    if self.argcount != 2:
        raise oefmt(space.w_ValueError,
                    "reduce only supported for binary functions")
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if obj.get_dtype().is_flexible():
        raise oefmt(space.w_TypeError,
                    "cannot perform reduce with flexible type")
    obj_shape = obj.get_shape()
    if obj.is_scalar():
        # A scalar reduces to itself.
        return obj.get_scalar_value()
    shapelen = len(obj_shape)
    # Normalize the axis argument: None means "all axes" (maxint sentinel);
    # a 1-tuple is unwrapped; negative axes are made non-negative.
    if space.is_none(w_axis):
        axis = maxint
    else:
        if space.isinstance_w(w_axis, space.w_tuple) and \
                space.len_w(w_axis) == 1:
            w_axis = space.getitem(w_axis, space.wrap(0))
        axis = space.int_w(w_axis)
        if axis < -shapelen or axis >= shapelen:
            raise oefmt(space.w_ValueError, "'axis' entry is out of bounds")
        if axis < 0:
            axis += shapelen
    assert axis >= 0
    dtype = descriptor.decode_w_dtype(space, dtype)
    if dtype is None:
        # Comparison ufuncs reduce to booleans; otherwise promote the
        # input dtype according to this ufunc's promotion flags.
        if self.comparison_func:
            dtype = descriptor.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, obj.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=self.promote_to_largest,
                promote_bools=self.promote_bools,
            )
    if self.identity is None:
        # Without an identity element, reducing an empty axis is an error.
        for i in range(shapelen):
            if space.is_none(w_axis) or i == axis:
                if obj_shape[i] == 0:
                    raise oefmt(space.w_ValueError,
                                "zero-size array to reduction operation %s "
                                "which has no identity", self.name)
    if shapelen > 1 and axis < shapelen:
        # Single-axis reduction of a multi-dimensional array.
        temp = None
        if cumulative:
            # Cumulative results keep the full input shape; `temp` holds
            # one running value per position of the non-reduced axes.
            shape = obj_shape[:]
            temp_shape = obj_shape[:axis] + obj_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            temp = W_NDimArray.from_shape(space, temp_shape, dtype,
                                          w_instance=obj)
        elif keepdims:
            shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:]
        else:
            shape = obj_shape[:axis] + obj_shape[axis + 1:]
        if out:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            if len(out.get_shape()) > len(shape):
                raise oefmt(space.w_ValueError,
                            "output parameter for reduction operation %s "
                            "has too many dimensions", self.name)
            elif len(out.get_shape()) < len(shape):
                raise oefmt(space.w_ValueError,
                            "output parameter for reduction operation %s "
                            "does not have enough dimensions", self.name)
            elif out.get_shape() != shape:
                raise oefmt(space.w_ValueError,
                            "output parameter shape mismatch, expecting "
                            "[%s], got [%s]",
                            ",".join([str(x) for x in shape]),
                            ",".join([str(x) for x in out.get_shape()]),
                            )
            dtype = out.get_dtype()
        else:
            out = W_NDimArray.from_shape(space, shape, dtype,
                                         w_instance=obj)
        if obj.get_size() == 0:
            # Empty input: the result is the identity everywhere (we only
            # reach here when an identity exists or no axis is empty).
            if self.identity is not None:
                out.fill(space, self.identity.convert_to(space, dtype))
            return out
        return loop.do_axis_reduce(space, shape, self.func, obj, dtype,
                                   axis, out, self.identity, cumulative,
                                   temp)
    if cumulative:
        # Flattened cumulative reduction: one output element per input
        # element.
        if out:
            if out.get_shape() != [obj.get_size()]:
                raise OperationError(space.w_ValueError, space.wrap(
                    "out of incompatible size"))
        else:
            out = W_NDimArray.from_shape(space, [obj.get_size()], dtype,
                                         w_instance=obj)
        loop.compute_reduce_cumulative(space, obj, out, dtype, self.func,
                                       self.identity)
        return out
    if out:
        # A full reduction produces a scalar; `out` must be 0-dimensional.
        if len(out.get_shape()) > 0:
            raise oefmt(space.w_ValueError,
                        "output parameter for reduction operation %s has "
                        "too many dimensions", self.name)
        dtype = out.get_dtype()
    res = loop.compute_reduce(space, obj, dtype, self.func, self.done_func,
                              self.identity)
    if out:
        out.set_scalar_value(res)
        return out
    if keepdims:
        # BUGFIX: the original computed [1] * len(obj_shape) twice, once
        # into a never-read local; reuse the `shape` local instead.
        shape = [1] * len(obj_shape)
        out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj)
        out.implementation.setitem(0, res)
        return out
    return res
def reduce(self, space, w_obj, promote_to_largest, w_axis, keepdims=False,
           out=None, dtype=None, cumulative=False):
    """Apply this binary ufunc as a reduction over ``w_obj`` along
    ``w_axis``.

    Supports an optional result dtype, a caller-supplied ``out`` array,
    ``keepdims`` (retain the reduced axis with length 1), and
    ``cumulative`` (running reduction, one result per input element).
    Raises app-level ValueError/TypeError on misuse.
    """
    # A reduction folds pairs of elements, so only binary ufuncs qualify.
    if self.argcount != 2:
        raise OperationError(space.w_ValueError, space.wrap("reduce only "
            "supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if obj.get_dtype().is_flexible_type():
        raise OperationError(space.w_TypeError,
                             space.wrap('cannot perform reduce for flexible type'))
    obj_shape = obj.get_shape()
    if obj.is_scalar():
        # A scalar reduces to itself.
        return obj.get_scalar_value()
    shapelen = len(obj_shape)
    # Normalizes None/negative/wrapped axis values to a non-negative int.
    axis = unwrap_axis_arg(space, shapelen, w_axis)
    assert axis >= 0
    dtype = interp_dtype.decode_w_dtype(space, dtype)
    if dtype is None:
        # Comparison ufuncs reduce to booleans; otherwise promote the
        # input dtype according to the ufunc's promotion rules.
        if self.comparison_func:
            dtype = interp_dtype.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, obj.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=promote_to_largest,
                promote_bools=True
            )
    if self.identity is None:
        # Without an identity element, reducing an empty axis is an error.
        for i in range(shapelen):
            if space.is_none(w_axis) or i == axis:
                if obj_shape[i] == 0:
                    raise operationerrfmt(space.w_ValueError,
                        "zero-size array to "
                        "%s.reduce without identity", self.name)
    if shapelen > 1 and axis < shapelen:
        # Single-axis reduction of a multi-dimensional array.
        temp = None
        if cumulative:
            # Cumulative results keep the full input shape; `temp` holds
            # one running value per position of the non-reduced axes.
            shape = obj_shape[:]
            temp_shape = obj_shape[:axis] + obj_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            temp = W_NDimArray.from_shape(space, temp_shape, dtype,
                                          w_instance=obj)
        elif keepdims:
            shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:]
        else:
            shape = obj_shape[:axis] + obj_shape[axis + 1:]
        if out:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            if len(out.get_shape()) > len(shape):
                raise operationerrfmt(space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' has too many dimensions', self.name)
            elif len(out.get_shape()) < len(shape):
                raise operationerrfmt(space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' does not have enough dimensions', self.name)
            elif out.get_shape() != shape:
                raise operationerrfmt(space.w_ValueError,
                    'output parameter shape mismatch, expecting [%s]' +
                    ' , got [%s]',
                    ",".join([str(x) for x in shape]),
                    ",".join([str(x) for x in out.get_shape()]),
                    )
            dtype = out.get_dtype()
        else:
            out = W_NDimArray.from_shape(space, shape, dtype,
                                         w_instance=obj)
        return loop.do_axis_reduce(shape, self.func, obj, dtype, axis, out,
                                   self.identity, cumulative, temp)
    if cumulative:
        # Flattened cumulative reduction: one output element per input
        # element.
        if out:
            if out.get_shape() != [obj.get_size()]:
                raise OperationError(space.w_ValueError, space.wrap(
                    "out of incompatible size"))
        else:
            out = W_NDimArray.from_shape(space, [obj.get_size()], dtype,
                                         w_instance=obj)
        loop.compute_reduce_cumulative(obj, out, dtype, self.func,
                                       self.identity)
        return out
    if out:
        # A full reduction produces a scalar; `out` must be 0-dimensional.
        if len(out.get_shape()) > 0:
            raise operationerrfmt(space.w_ValueError, "output parameter "
                "for reduction operation %s has too many"
                " dimensions", self.name)
        dtype = out.get_dtype()
    res = loop.compute_reduce(obj, dtype, self.func, self.done_func,
                              self.identity)
    if out:
        out.set_scalar_value(res)
        return out
    return res
def reduce(self, space, w_obj, promote_to_largest, w_axis, keepdims=False,
           out=None, dtype=None, cumulative=False):
    """Reduce ``w_obj`` with this binary ufunc along ``w_axis``.

    Handles an explicit result dtype, a user-provided ``out`` array,
    ``keepdims`` (keep the reduced axis as length 1) and ``cumulative``
    (running reduction over the flattened or per-axis elements).
    """
    # Only binary ufuncs can be folded pairwise over an array.
    if self.argcount != 2:
        raise OperationError(
            space.w_ValueError,
            space.wrap("reduce only supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    arr = convert_to_array(space, w_obj)
    if arr.get_dtype().is_flexible_type():
        raise OperationError(
            space.w_TypeError,
            space.wrap('cannot perform reduce for flexible type'))
    in_shape = arr.get_shape()
    if arr.is_scalar():
        # Scalars reduce to themselves.
        return arr.get_scalar_value()
    ndim = len(in_shape)
    axis = unwrap_axis_arg(space, ndim, w_axis)
    assert axis >= 0
    dtype = interp_dtype.decode_w_dtype(space, dtype)
    if dtype is None:
        # No dtype requested: comparisons yield booleans, everything else
        # follows the unary promotion rules for this ufunc.
        if self.comparison_func:
            dtype = interp_dtype.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, arr.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=promote_to_largest,
                promote_bools=True)
    if self.identity is None:
        # An identity-less ufunc cannot reduce an empty axis.
        for dim in range(ndim):
            reduced = space.is_none(w_axis) or dim == axis
            if reduced and in_shape[dim] == 0:
                raise operationerrfmt(
                    space.w_ValueError,
                    "zero-size array to %s.reduce without identity",
                    self.name)
    if ndim > 1 and axis < ndim:
        # Multi-dimensional input reduced along one axis.
        scratch = None
        if cumulative:
            # Cumulative output keeps the whole input shape; `scratch`
            # carries one running value per non-reduced position.
            out_shape = in_shape[:]
            scratch_shape = in_shape[:axis] + in_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            scratch = W_NDimArray.from_shape(space, scratch_shape, dtype,
                                             w_instance=arr)
        elif keepdims:
            out_shape = in_shape[:axis] + [1] + in_shape[axis + 1:]
        else:
            out_shape = in_shape[:axis] + in_shape[axis + 1:]
        if not out:
            out = W_NDimArray.from_shape(space, out_shape, dtype,
                                         w_instance=arr)
        else:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            given = out.get_shape()
            if len(given) > len(out_shape):
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter for reduction operation %s'
                    ' has too many dimensions', self.name)
            if len(given) < len(out_shape):
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter for reduction operation %s'
                    ' does not have enough dimensions', self.name)
            if given != out_shape:
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter shape mismatch, expecting [%s]'
                    ' , got [%s]',
                    ",".join([str(x) for x in out_shape]),
                    ",".join([str(x) for x in given]))
            dtype = out.get_dtype()
        return loop.do_axis_reduce(out_shape, self.func, arr, dtype, axis,
                                   out, self.identity, cumulative, scratch)
    if cumulative:
        # Flattened running reduction: one result per input element.
        if not out:
            out = W_NDimArray.from_shape(space, [arr.get_size()], dtype,
                                         w_instance=arr)
        elif out.get_shape() != [arr.get_size()]:
            raise OperationError(
                space.w_ValueError,
                space.wrap("out of incompatible size"))
        loop.compute_reduce_cumulative(arr, out, dtype, self.func,
                                       self.identity)
        return out
    if out:
        # Full reduction is a scalar, so `out` must be 0-dimensional.
        if len(out.get_shape()) > 0:
            raise operationerrfmt(
                space.w_ValueError,
                "output parameter for reduction operation %s has too many"
                " dimensions", self.name)
        dtype = out.get_dtype()
    scalar_result = loop.compute_reduce(arr, dtype, self.func,
                                        self.done_func, self.identity)
    if not out:
        return scalar_result
    out.set_scalar_value(scalar_result)
    return out