def choose(space, w_arr, w_choices, w_out, w_mode):
    """Construct an array by picking elements from ``w_choices`` as
    indexed by ``w_arr`` (numpy.choose semantics).

    ``w_out`` may be None or an existing W_NDimArray to write into;
    ``w_mode`` selects the out-of-range index policy (raise/wrap/clip).
    """
    arr = convert_to_array(space, w_arr)
    choices = [convert_to_array(space, w_item) for w_item
               in space.listview(w_choices)]
    if not choices:
        raise OperationError(space.w_ValueError,
                             space.wrap("choices list cannot be empty"))
    if space.is_none(w_out):
        w_out = None
    elif not isinstance(w_out, W_NDimArray):
        raise OperationError(space.w_TypeError,
                             space.wrap("return arrays must be of ArrayType"))
    # Broadcast all choices (and out, if given) to a common shape, then
    # pick/create an output array with an agreeing dtype.
    shape = shape_agreement_multiple(space, choices + [w_out])
    out = interp_dtype.dtype_agreement(space, choices, shape, w_out)
    dtype = out.get_dtype()
    mode = clipmode_converter(space, w_mode)
    loop.choose(space, arr, choices, shape, dtype, out, mode)
    return out
def __init__(self, space, args):
    """Set up a broadcast over ``args`` (2..NPY.MAXARGS array-likes).

    Converts every operand to an array, broadcasts them to a common
    shape, and prepares one (iterator, state) pair per operand.

    Raises ValueError for a bad operand count or an overflowing
    broadcast size.
    """
    num_args = len(args)
    if not (2 <= num_args <= NPY.MAXARGS):
        raise oefmt(
            space.w_ValueError,
            "Need at least two and fewer than (%d) array objects.",
            NPY.MAXARGS)
    self.seq = [convert_to_array(space, w_elem) for w_elem in args]
    self.op_flags = parse_op_arg(space, 'op_flags', space.w_None,
                                 len(self.seq), parse_op_flag)
    self.shape = shape_agreement_multiple(space, self.seq, shape=None)
    self.order = NPY.CORDER
    self.iters = []
    self.index = 0
    # Fix: drop the unused "as e" binding on the caught exception.
    try:
        self.size = support.product_check(self.shape)
    except OverflowError:
        raise oefmt(space.w_ValueError, "broadcast dimensions too large.")
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    self.done = False
    # (dead trailing "pass" removed)
def repeat(space, w_arr, repeats, w_axis):
    """Repeat elements of ``w_arr`` ``repeats`` times (numpy.repeat).

    With ``w_axis`` None the input is flattened first; otherwise
    repetition happens along the given axis.  The result is filled by
    writing the source array ``repeats`` times through strided views.
    """
    arr = convert_to_array(space, w_arr)
    if space.is_none(w_axis):
        arr = arr.descr_flatten(space)
        orig_size = arr.get_shape()[0]
        shape = [arr.get_shape()[0] * repeats]
        w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(),
                                       w_instance=arr)
        for i in range(repeats):
            # The i-th copy occupies every repeats-th slot starting at i.
            Chunks([Chunk(i, shape[0] - repeats + i, repeats, orig_size)
                    ]).apply(space, w_res).implementation.setslice(space, arr)
    else:
        axis = space.int_w(w_axis)
        shape = arr.get_shape()[:]
        chunks = [Chunk(0, i, 1, i) for i in shape]
        orig_size = shape[axis]
        shape[axis] *= repeats
        w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(),
                                       w_instance=arr)
        for i in range(repeats):
            # Only the repeated axis gets a strided chunk; other axes
            # keep their full extent.
            chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats,
                                 orig_size)
            Chunks(chunks).apply(space, w_res).implementation.setslice(space,
                                                                       arr)
    return w_res
def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True):
    """Return an uninitialized array with the same shape as ``w_a``.

    Honours the requested dtype and memory order; for KEEPORDER or
    ANYORDER the stride pattern of the prototype is copied.  ``subok``
    controls whether the prototype's subtype is preserved.
    """
    w_a = convert_to_array(space, w_a)
    npy_order = order_converter(space, w_order, w_a.get_order())
    if space.is_none(w_dtype):
        dtype = w_a.get_dtype()
    else:
        dtype = space.interp_w(
            descriptor.W_Dtype,
            space.call_function(space.gettypefor(descriptor.W_Dtype),
                                w_dtype))
        if dtype.is_str_or_unicode() and dtype.elsize < 1:
            # Zero-width flexible dtype: give it a minimal 1-byte width.
            dtype = descriptor.variable_dtype(space, dtype.char + '1')
    if npy_order in (NPY.KEEPORDER, NPY.ANYORDER):
        # Try to copy the stride pattern
        impl = w_a.implementation.astype(space, dtype, NPY.KEEPORDER)
        if subok:
            w_type = space.type(w_a)
        else:
            w_type = None
        return wrap_impl(space, w_type, w_a, impl)
    return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype,
                                  order=npy_order,
                                  w_instance=w_a if subok else None,
                                  zero=False)
def __init__(self, space, args):
    """Set up a broadcast over ``args`` (2..NPY.MAXARGS array-likes).

    Converts every operand to an array, broadcasts them to a common
    shape, and prepares one (iterator, state) pair per operand.

    Raises ValueError for a bad operand count or an overflowing
    broadcast size.
    """
    num_args = len(args)
    if not (2 <= num_args <= NPY.MAXARGS):
        raise oefmt(space.w_ValueError,
                    "Need at least two and fewer than (%d) array objects.",
                    NPY.MAXARGS)
    self.seq = [convert_to_array(space, w_elem) for w_elem in args]
    self.op_flags = parse_op_arg(space, 'op_flags', space.w_None,
                                 len(self.seq), parse_op_flag)
    self.shape = shape_agreement_multiple(space, self.seq, shape=None)
    self.order = NPY.CORDER
    self.iters = []
    self.index = 0
    # Fix: drop the unused "as e" binding on the caught exception.
    try:
        self.size = support.product_check(self.shape)
    except OverflowError:
        raise oefmt(space.w_ValueError, "broadcast dimensions too large.")
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    self.done = False
    # (dead trailing "pass" removed)
def call(self, space, args_w):
    """Apply this unary ufunc to ``args_w[0]`` (optional out at index 1).

    Validates the input dtype against the ufunc's type restrictions,
    determines the calculation and result dtypes, handles the scalar
    fast path, and otherwise delegates to ``loop.call1``.
    """
    w_obj = args_w[0]
    out = None
    if len(args_w) > 1:
        out = args_w[1]
        if space.is_w(out, space.w_None):
            out = None
    w_obj = convert_to_array(space, w_obj)
    dtype = w_obj.get_dtype()
    if dtype.is_flexible_type():
        raise OperationError(space.w_TypeError,
                             space.wrap('Not implemented for this type'))
    if (self.int_only and not dtype.is_int_type() or
            not self.allow_bool and dtype.is_bool_type() or
            not self.allow_complex and dtype.is_complex_type()):
        raise OperationError(
            space.w_TypeError,
            space.wrap("ufunc %s not supported for the input type" %
                       self.name))
    calc_dtype = find_unaryop_result_dtype(
        space, w_obj.get_dtype(),
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    if out is not None:
        if not isinstance(out, W_NDimArray):
            raise OperationError(space.w_TypeError,
                                 space.wrap('output must be an array'))
        res_dtype = out.get_dtype()
        #if not w_obj.get_dtype().can_cast_to(res_dtype):
        #    raise operationerrfmt(space.w_TypeError,
        #        "Cannot cast ufunc %s output from dtype('%s') to dtype('%s') with casting rule 'same_kind'", self.name, w_obj.get_dtype().name, res_dtype.name)
    elif self.bool_result:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
        if self.complex_to_float and calc_dtype.is_complex_type():
            # e.g. abs(): a complex input yields the matching float width.
            if calc_dtype.name == 'complex64':
                res_dtype = interp_dtype.get_dtype_cache(
                    space).w_float32dtype
            else:
                res_dtype = interp_dtype.get_dtype_cache(
                    space).w_float64dtype
    if w_obj.is_scalar():
        # Scalar fast path: call the type-level function once.
        w_val = self.func(calc_dtype,
                          w_obj.get_scalar_value().convert_to(calc_dtype))
        if out is None:
            return w_val
        if out.is_scalar():
            out.set_scalar_value(w_val)
        else:
            out.fill(res_dtype.coerce(space, w_val))
        return out
    shape = shape_agreement(space, w_obj.get_shape(), out,
                            broadcast_down=False)
    return loop.call1(space, shape, self.func, calc_dtype, res_dtype,
                      w_obj, out)
def concatenate(space, w_args, w_axis=None):
    """Join a sequence of arrays along an existing axis
    (numpy.concatenate).

    ``w_axis`` None flattens every input first.  All inputs must agree
    on every dimension except the concatenation axis.
    """
    args_w = space.listview(w_args)
    if len(args_w) == 0:
        raise oefmt(space.w_ValueError,
                    "need at least one array to concatenate")
    args_w = [convert_to_array(space, w_arg) for w_arg in args_w]
    if w_axis is None:
        w_axis = space.wrap(0)
    if space.is_none(w_axis):
        # axis=None: concatenate the flattened inputs along axis 0.
        args_w = [
            w_arg.reshape(space,
                          space.newlist([w_arg.descr_get_size(space)]))
            for w_arg in args_w
        ]
        w_axis = space.wrap(0)
    dtype = args_w[0].get_dtype()
    shape = args_w[0].get_shape()[:]
    ndim = len(shape)
    if ndim == 0:
        raise oefmt(space.w_ValueError,
                    "zero-dimensional arrays cannot be concatenated")
    axis = space.int_w(w_axis)
    orig_axis = axis
    if axis < 0:
        axis = ndim + axis
    if ndim == 1 and axis != 0:
        # numpy compatibility: any axis is accepted for 1-d inputs.
        axis = 0
    if axis < 0 or axis >= ndim:
        raise oefmt(space.w_IndexError, "axis %d out of bounds [0, %d)",
                    orig_axis, ndim)
    for arr in args_w[1:]:
        if len(arr.get_shape()) != ndim:
            raise OperationError(
                space.w_ValueError,
                space.wrap(
                    "all the input arrays must have same number of dimensions")
            )
        for i, axis_size in enumerate(arr.get_shape()):
            if i == axis:
                shape[i] += axis_size
            elif axis_size != shape[i]:
                raise OperationError(
                    space.w_ValueError,
                    space.wrap("all the input array dimensions except for the "
                               "concatenation axis must match exactly"))
    dtype = find_result_type(space, args_w, [])
    # concatenate does not handle ndarray subtypes, it always returns a ndarray
    res = W_NDimArray.from_shape(space, shape, dtype, 'C')
    chunks = [Chunk(0, i, 1, i) for i in shape]
    axis_start = 0
    for arr in args_w:
        if arr.get_shape()[axis] == 0:
            continue
        # Copy each input into its slice of the result along `axis`.
        chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis],
                             1, arr.get_shape()[axis])
        view = new_view(space, res, chunks)
        view.implementation.setslice(space, arr)
        axis_start += arr.get_shape()[axis]
    return res
def min_scalar_type(space, w_a):
    """Return the smallest dtype able to hold ``w_a``'s value.

    Only numeric scalars are narrowed (via the value's ``min_dtype``);
    arrays and non-numeric scalars keep their own dtype.
    """
    w_array = convert_to_array(space, w_a)
    dtype = w_array.get_dtype()
    if w_array.is_scalar() and dtype.is_number():
        # alt_num (the unsigned/alternative candidate) is unused here.
        num, alt_num = w_array.get_scalar_value().min_dtype()
        return num2dtype(space, num)
    else:
        return dtype
def min_scalar_type(space, w_a):
    """Return the smallest dtype able to hold ``w_a``'s value.

    Only numeric scalars are narrowed (via the value's ``min_dtype``);
    arrays and non-numeric scalars keep their own dtype.
    """
    w_array = convert_to_array(space, w_a)
    dtype = w_array.get_dtype()
    if w_array.is_scalar() and dtype.is_number():
        # alt_num (the unsigned/alternative candidate) is unused here.
        num, alt_num = w_array.get_scalar_value().min_dtype()
        return num2dtype(space, num)
    else:
        return dtype
def descr_getitem(self, space, w_item):
    """Index a zero-dimensional scalar: only ``...`` or ``()`` are valid,
    and both wrap the scalar into a 0-d array."""
    from pypy.module.micronumpy.base import convert_to_array
    if space.is_w(w_item, space.w_Ellipsis) or \
            (space.isinstance_w(w_item, space.w_tuple) and
                space.len_w(w_item) == 0):
        return convert_to_array(space, self)
    raise OperationError(space.w_IndexError, space.wrap(
        "invalid index to scalar variable"))
def descr_setitem(self, space, w_idx, w_value):
    """Assign through the flat iterator at an int or slice index."""
    if not (space.isinstance_w(w_idx, space.w_int) or
            space.isinstance_w(w_idx, space.w_slice)):
        raise oefmt(space.w_IndexError, 'unsupported iterator index')
    base = self.base
    # stop is unused: the flat write only needs start/step/length.
    start, stop, step, length = space.decode_index4(w_idx, base.get_size())
    arr = convert_to_array(space, w_value)
    loop.flatiter_setitem(space, self.base, arr, start, step, length)
def choose(space, w_arr, w_choices, w_out, w_mode):
    """Construct an array by picking elements from ``w_choices`` as
    indexed by ``w_arr`` (numpy.choose semantics).

    ``w_out`` may be None or an existing W_NDimArray to write into;
    ``w_mode`` selects the out-of-range index policy (raise/wrap/clip).
    """
    arr = convert_to_array(space, w_arr)
    choices = [convert_to_array(space, w_item) for w_item
               in space.listview(w_choices)]
    if not choices:
        raise oefmt(space.w_ValueError, "choices list cannot be empty")
    if space.is_none(w_out):
        w_out = None
    elif not isinstance(w_out, W_NDimArray):
        raise OperationError(space.w_TypeError, space.wrap(
            "return arrays must be of ArrayType"))
    # Broadcast all choices (and out, if given) to a common shape, then
    # pick/create an output array with an agreeing dtype.
    shape = shape_agreement_multiple(space, choices + [w_out])
    out = descriptor.dtype_agreement(space, choices, shape, w_out)
    dtype = out.get_dtype()
    mode = clipmode_converter(space, w_mode)
    loop.choose(space, arr, choices, shape, dtype, out, mode)
    return out
def descr_getitem(self, space, w_item):
    """Index a zero-dimensional scalar.

    ``...`` wraps the scalar in a 0-d array; ``()`` returns the scalar
    itself; anything else raises IndexError.
    """
    from pypy.module.micronumpy.base import convert_to_array
    if space.is_w(w_item, space.w_Ellipsis):
        return convert_to_array(space, self)
    elif (space.isinstance_w(w_item, space.w_tuple) and
            space.len_w(w_item) == 0):
        return self
    raise oefmt(space.w_IndexError, "invalid index to scalar variable")
def descr_setitem(self, space, orig_arr, w_index, w_value):
    """Assign a single item (fast path) or a slice of the array."""
    try:
        item = self._single_item_index(space, w_index)
        self.setitem(item, self.dtype.coerce(space, w_value))
    except IndexError:
        # Not a single-item index: treat it as a (multi-dim) slice and
        # broadcast w_value into the resulting view.
        w_value = convert_to_array(space, w_value)
        chunks = self._prepare_slice_args(space, w_index)
        view = chunks.apply(space, orig_arr)
        view.implementation.setslice(space, w_value)
def descr_setitem(self, space, w_idx, w_value):
    """Assign through the flat iterator at an int or slice index."""
    if not (space.isinstance_w(w_idx, space.w_int) or
            space.isinstance_w(w_idx, space.w_slice)):
        raise OperationError(space.w_IndexError,
                             space.wrap('unsupported iterator index'))
    base = self.base
    # stop is unused: the flat write only needs start/step/length.
    start, stop, step, length = space.decode_index4(w_idx, base.get_size())
    arr = convert_to_array(space, w_value)
    loop.flatiter_setitem(space, self.base, arr, start, step, length)
def descr_setitem(self, space, orig_arr, w_index, w_value):
    """Assign a single item (fast path) or a slice of the array."""
    try:
        item = self._single_item_index(space, w_index)
        self.setitem(item, self.dtype.coerce(space, w_value))
    except IndexError:
        # Not a single-item index: treat it as a (multi-dim) slice and
        # broadcast w_value into the resulting view.
        w_value = convert_to_array(space, w_value)
        chunks = self._prepare_slice_args(space, w_index)
        view = chunks.apply(space, orig_arr)
        view.implementation.setslice(space, w_value)
def concatenate(space, w_args, w_axis=None):
    """Join a sequence of arrays along an existing axis
    (numpy.concatenate).

    ``w_axis`` None flattens every input first (preserving each input's
    order).  All inputs must agree on every dimension except the
    concatenation axis.
    """
    args_w = space.listview(w_args)
    if len(args_w) == 0:
        raise oefmt(space.w_ValueError,
                    "need at least one array to concatenate")
    args_w = [convert_to_array(space, w_arg) for w_arg in args_w]
    if w_axis is None:
        w_axis = space.wrap(0)
    if space.is_none(w_axis):
        # axis=None: concatenate the flattened inputs along axis 0.
        args_w = [w_arg.reshape(space,
                                space.newlist([w_arg.descr_get_size(space)]),
                                w_arg.get_order())
                  for w_arg in args_w]
        w_axis = space.wrap(0)
    dtype = args_w[0].get_dtype()
    shape = args_w[0].get_shape()[:]
    ndim = len(shape)
    if ndim == 0:
        raise oefmt(space.w_ValueError,
                    "zero-dimensional arrays cannot be concatenated")
    axis = space.int_w(w_axis)
    orig_axis = axis
    if axis < 0:
        axis = ndim + axis
    if ndim == 1 and axis != 0:
        # numpy compatibility: any axis is accepted for 1-d inputs.
        axis = 0
    if axis < 0 or axis >= ndim:
        raise oefmt(space.w_IndexError, "axis %d out of bounds [0, %d)",
                    orig_axis, ndim)
    for arr in args_w[1:]:
        if len(arr.get_shape()) != ndim:
            raise oefmt(space.w_ValueError,
                        "all the input arrays must have same number of "
                        "dimensions")
        for i, axis_size in enumerate(arr.get_shape()):
            if i == axis:
                shape[i] += axis_size
            elif axis_size != shape[i]:
                raise oefmt(space.w_ValueError,
                            "all the input array dimensions except for the "
                            "concatenation axis must match exactly")
    dtype = find_result_type(space, args_w, [])
    # concatenate does not handle ndarray subtypes, it always returns a ndarray
    res = W_NDimArray.from_shape(space, shape, dtype, NPY.CORDER)
    chunks = [Chunk(0, i, 1, i) for i in shape]
    axis_start = 0
    for arr in args_w:
        if arr.get_shape()[axis] == 0:
            continue
        # Copy each input into its slice of the result along `axis`.
        chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis],
                             1, arr.get_shape()[axis])
        view = new_view(space, res, chunks)
        view.implementation.setslice(space, arr)
        axis_start += arr.get_shape()[axis]
    return res
def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True):
    """Return a new array with the same shape as ``w_a``.

    The dtype defaults to the prototype's; ``subok`` controls whether
    the result preserves the prototype's subtype.
    NOTE(review): w_order is accepted but ignored in this variant, and
    the result is zero-filled (no ``zero=False``) — confirm intended.
    """
    w_a = convert_to_array(space, w_a)
    if space.is_none(w_dtype):
        dtype = w_a.get_dtype()
    else:
        dtype = space.interp_w(
            descriptor.W_Dtype,
            space.call_function(space.gettypefor(descriptor.W_Dtype),
                                w_dtype))
        if dtype.is_str_or_unicode() and dtype.elsize < 1:
            # Zero-width flexible dtype: give it a minimal 1-byte width.
            dtype = descriptor.variable_dtype(space, dtype.char + '1')
    return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype,
                                  w_instance=w_a if subok else None)
def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True):
    """Return an uninitialized array with the same shape as ``w_a``.

    The dtype defaults to the prototype's; ``subok`` controls whether
    the result preserves the prototype's subtype.
    NOTE(review): w_order is accepted but ignored in this variant.
    """
    w_a = convert_to_array(space, w_a)
    if space.is_none(w_dtype):
        dtype = w_a.get_dtype()
    else:
        dtype = space.interp_w(
            descriptor.W_Dtype,
            space.call_function(space.gettypefor(descriptor.W_Dtype),
                                w_dtype))
        if dtype.is_str_or_unicode() and dtype.elsize < 1:
            # Zero-width flexible dtype: give it a minimal 1-byte width.
            dtype = descriptor.variable_dtype(space, dtype.char + '1')
    return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype,
                                  w_instance=w_a if subok else None,
                                  zero=False)
def concatenate(space, w_args, axis=0):
    """Join arrays along ``axis`` (numpy.concatenate).

    Shape checking, record-dtype compatibility and dtype promotion are
    interleaved in a single pass over the inputs.
    """
    args_w = space.listview(w_args)
    if len(args_w) == 0:
        raise OperationError(
            space.w_ValueError,
            space.wrap("need at least one array to concatenate"))
    args_w = [convert_to_array(space, w_arg) for w_arg in args_w]
    dtype = args_w[0].get_dtype()
    shape = args_w[0].get_shape()[:]
    _axis = axis
    if axis < 0:
        _axis = len(shape) + axis
    for arr in args_w[1:]:
        for i, axis_size in enumerate(arr.get_shape()):
            # Dimensions must match everywhere except on the
            # concatenation axis, which accumulates the total size.
            if len(arr.get_shape()) != len(shape) or \
                    (i != _axis and axis_size != shape[i]):
                raise OperationError(
                    space.w_ValueError,
                    space.wrap(
                        "all the input arrays must have same number of dimensions"
                    ))
            elif i == _axis:
                shape[i] += axis_size
        a_dt = arr.get_dtype()
        if dtype.is_record_type() and a_dt.is_record_type():
            # Record types must match
            for f in dtype.fields:
                if f not in a_dt.fields or \
                        dtype.fields[f] != a_dt.fields[f]:
                    raise OperationError(
                        space.w_TypeError,
                        space.wrap("invalid type promotion"))
        elif dtype.is_record_type() or a_dt.is_record_type():
            raise OperationError(space.w_TypeError,
                                 space.wrap("invalid type promotion"))
        dtype = interp_ufuncs.find_binop_result_dtype(space, dtype,
                                                      arr.get_dtype())
        if _axis < 0 or len(arr.get_shape()) <= _axis:
            raise operationerrfmt(space.w_IndexError,
                                  "axis %d out of bounds [0, %d)",
                                  axis, len(shape))
    # concatenate does not handle ndarray subtypes, it always returns a ndarray
    res = W_NDimArray.from_shape(space, shape, dtype, 'C')
    chunks = [Chunk(0, i, 1, i) for i in shape]
    axis_start = 0
    for arr in args_w:
        if arr.get_shape()[_axis] == 0:
            continue
        # Copy each input into its slice of the result along `_axis`.
        chunks[_axis] = Chunk(axis_start,
                              axis_start + arr.get_shape()[_axis], 1,
                              arr.get_shape()[_axis])
        Chunks(chunks).apply(space, res).implementation.setslice(space, arr)
        axis_start += arr.get_shape()[_axis]
    return res
def put(space, w_arr, w_indices, w_values, w_mode):
    """Replace flat elements of ``w_arr`` at ``w_indices`` with
    ``w_values`` (numpy.put).

    Out-of-bounds indices are handled per ``w_mode`` (raise/wrap/clip).
    If there are fewer values than indices the last value is recycled.
    """
    from pypy.module.micronumpy.support import index_w
    arr = convert_to_array(space, w_arr)
    mode = clipmode_converter(space, w_mode)
    if not w_indices:
        # Fix: error-message typo "indice" -> "indices" (matches the
        # spelling used by the other put() variants in this file).
        raise OperationError(space.w_ValueError,
                             space.wrap("indices list cannot be empty"))
    if not w_values:
        raise OperationError(space.w_ValueError,
                             space.wrap("value list cannot be empty"))
    dtype = arr.get_dtype()
    if space.isinstance_w(w_indices, space.w_list):
        indices = space.listview(w_indices)
    else:
        indices = [w_indices]
    if space.isinstance_w(w_values, space.w_list):
        values = space.listview(w_values)
    else:
        values = [w_values]
    v_idx = 0
    for idx in indices:
        index = index_w(space, idx)
        if index < 0 or index >= arr.get_size():
            if mode == NPY_RAISE:
                raise OperationError(
                    space.w_IndexError, space.wrap(
                        "index %d is out of bounds for axis 0 with size %d" %
                        (index, arr.get_size())))
            elif mode == NPY_WRAP:
                index = index % arr.get_size()
            elif mode == NPY_CLIP:
                if index < 0:
                    index = 0
                else:
                    index = arr.get_size() - 1
            else:
                assert False
        value = values[v_idx]
        if v_idx + 1 < len(values):
            # Stop advancing once values are exhausted (recycle the last).
            v_idx += 1
        arr.setitem(space, [index], dtype.coerce(space, value))
def call(self, space, args_w):
    """Apply this unary ufunc to ``args_w[0]`` (optional out at index 1).

    Validates the input dtype against the ufunc's type restrictions,
    determines the calculation and result dtypes, handles the scalar
    fast path, and otherwise delegates to ``loop.call1``.
    """
    w_obj = args_w[0]
    out = None
    if len(args_w) > 1:
        out = args_w[1]
        if space.is_w(out, space.w_None):
            out = None
    w_obj = convert_to_array(space, w_obj)
    dtype = w_obj.get_dtype()
    if dtype.is_flexible_type():
        raise OperationError(space.w_TypeError,
                             space.wrap('Not implemented for this type'))
    if (self.int_only and not dtype.is_int_type() or
            not self.allow_bool and dtype.is_bool_type() or
            not self.allow_complex and dtype.is_complex_type()):
        raise OperationError(space.w_TypeError, space.wrap(
            "ufunc %s not supported for the input type" % self.name))
    calc_dtype = find_unaryop_result_dtype(
        space, w_obj.get_dtype(),
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    if out is not None:
        if not isinstance(out, W_NDimArray):
            raise OperationError(space.w_TypeError, space.wrap(
                'output must be an array'))
        res_dtype = out.get_dtype()
        #if not w_obj.get_dtype().can_cast_to(res_dtype):
        #    raise operationerrfmt(space.w_TypeError,
        #        "Cannot cast ufunc %s output from dtype('%s') to dtype('%s') with casting rule 'same_kind'", self.name, w_obj.get_dtype().name, res_dtype.name)
    elif self.bool_result:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
        if self.complex_to_float and calc_dtype.is_complex_type():
            # e.g. abs(): a complex input yields the matching float width.
            if calc_dtype.name == 'complex64':
                res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype
            else:
                res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype
    if w_obj.is_scalar():
        # Scalar fast path: call the type-level function once.
        w_val = self.func(calc_dtype,
                          w_obj.get_scalar_value().convert_to(calc_dtype))
        if out is None:
            return w_val
        if out.is_scalar():
            out.set_scalar_value(w_val)
        else:
            out.fill(res_dtype.coerce(space, w_val))
        return out
    shape = shape_agreement(space, w_obj.get_shape(), out,
                            broadcast_down=False)
    return loop.call1(space, shape, self.func, calc_dtype, res_dtype,
                      w_obj, out)
def put(space, w_arr, w_indices, w_values, mode='raise'):
    """Replace flat elements of ``w_arr`` at ``w_indices`` with
    ``w_values`` (numpy.put).

    ``mode`` is a string key into ``constants.MODES`` (raise/wrap/clip).
    If there are fewer values than indices the last value is recycled.
    """
    from pypy.module.micronumpy import constants
    from pypy.module.micronumpy.support import int_w
    arr = convert_to_array(space, w_arr)
    if mode not in constants.MODES:
        raise OperationError(space.w_ValueError,
                             space.wrap("mode %s not known" % (mode,)))
    if not w_indices:
        # Fix: error-message typo "indice" -> "indices".
        raise OperationError(space.w_ValueError,
                             space.wrap("indices list cannot be empty"))
    if not w_values:
        raise OperationError(space.w_ValueError,
                             space.wrap("value list cannot be empty"))
    dtype = arr.get_dtype()
    if space.isinstance_w(w_indices, space.w_list):
        indices = space.listview(w_indices)
    else:
        indices = [w_indices]
    if space.isinstance_w(w_values, space.w_list):
        values = space.listview(w_values)
    else:
        values = [w_values]
    v_idx = 0
    for idx in indices:
        index = int_w(space, idx)
        if index < 0 or index >= arr.get_size():
            if constants.MODES[mode] == constants.MODE_RAISE:
                raise OperationError(space.w_ValueError, space.wrap(
                    "invalid entry in choice array"))
            elif constants.MODES[mode] == constants.MODE_WRAP:
                index = index % arr.get_size()
            else:
                assert constants.MODES[mode] == constants.MODE_CLIP
                if index < 0:
                    index = 0
                else:
                    index = arr.get_size() - 1
        value = values[v_idx]
        if v_idx + 1 < len(values):
            # Stop advancing once values are exhausted (recycle the last).
            v_idx += 1
        arr.setitem(space, [index], dtype.coerce(space, value))
def set_real(self, space, orig_array, w_val):
    """Assign the real part of this boxed scalar from ``w_val``.

    Only a scalar (0-d) value can broadcast into a scalar; for complex
    dtypes the imaginary part is preserved.
    """
    w_arr = convert_to_array(space, w_val)
    dtype = self.dtype.float_type or self.dtype
    if len(w_arr.get_shape()) > 0:
        raise OperationError(space.w_ValueError, space.wrap(
            "could not broadcast input array from shape " +
            "(%s) into shape ()" % (
                ','.join([str(x) for x in w_arr.get_shape()],))))
    if self.dtype.is_complex_type():
        # Rebuild the complex value keeping the existing imaginary part.
        self.value = self.dtype.itemtype.composite(
            w_arr.get_scalar_value().convert_to(dtype),
            self.value.convert_imag_to(dtype))
    else:
        self.value = w_arr.get_scalar_value()
def set_real(self, space, orig_array, w_val):
    """Assign the real part of this boxed scalar from ``w_val``.

    Only a scalar (0-d) value can broadcast into a scalar; for complex
    dtypes the imaginary part is preserved.
    """
    w_arr = convert_to_array(space, w_val)
    dtype = self.dtype.float_type or self.dtype
    if len(w_arr.get_shape()) > 0:
        raise OperationError(space.w_ValueError, space.wrap(
            "could not broadcast input array from shape " +
            "(%s) into shape ()" % (
                ','.join([str(x) for x in w_arr.get_shape()],))))
    if self.dtype.is_complex_type():
        # Rebuild the complex value keeping the existing imaginary part.
        self.value = self.dtype.itemtype.composite(
            w_arr.get_scalar_value().convert_to(dtype),
            self.value.convert_imag_to(dtype))
    else:
        self.value = w_arr.get_scalar_value()
def put(space, w_arr, w_indices, w_values, w_mode):
    """Replace flat elements of ``w_arr`` at ``w_indices`` with
    ``w_values`` (numpy.put).

    Out-of-bounds indices are handled per ``w_mode`` (raise/wrap/clip).
    If there are fewer values than indices the last value is recycled.
    """
    from pypy.module.micronumpy.support import index_w
    arr = convert_to_array(space, w_arr)
    mode = clipmode_converter(space, w_mode)
    if not w_indices:
        # Fix: error-message typo "indice" -> "indices" (matches the
        # spelling used by the other put() variants in this file).
        raise OperationError(space.w_ValueError,
                             space.wrap("indices list cannot be empty"))
    if not w_values:
        raise OperationError(space.w_ValueError,
                             space.wrap("value list cannot be empty"))
    dtype = arr.get_dtype()
    if space.isinstance_w(w_indices, space.w_list):
        indices = space.listview(w_indices)
    else:
        indices = [w_indices]
    if space.isinstance_w(w_values, space.w_list):
        values = space.listview(w_values)
    else:
        values = [w_values]
    v_idx = 0
    for idx in indices:
        index = index_w(space, idx)
        if index < 0 or index >= arr.get_size():
            if mode == NPY_RAISE:
                raise OperationError(
                    space.w_IndexError,
                    space.wrap("index %d is out of bounds for axis 0 with size %d" %
                               (index, arr.get_size())),
                )
            elif mode == NPY_WRAP:
                index = index % arr.get_size()
            elif mode == NPY_CLIP:
                if index < 0:
                    index = 0
                else:
                    index = arr.get_size() - 1
            else:
                assert False
        value = values[v_idx]
        if v_idx + 1 < len(values):
            # Stop advancing once values are exhausted (recycle the last).
            v_idx += 1
        arr.setitem(space, [index], dtype.coerce(space, value))
def call_many_to_many(space, shape, func, res_dtype, in_args, out_args):
    """Drive an n-input/m-output Python-level ufunc over the broadcast
    ``shape``, writing coerced results into the prebuilt ``out_args``.

    Returns the outputs wrapped as arrays in a tuple.
    """
    # out must have been built. func needs no calc_type, is usually an
    # external ufunc
    nin = len(in_args)
    in_iters = [None] * nin
    in_states = [None] * nin
    nout = len(out_args)
    out_iters = [None] * nout
    out_states = [None] * nout
    for i in range(nin):
        in_i = in_args[i]
        assert isinstance(in_i, W_NDimArray)
        in_iter, in_state = in_i.create_iter(shape)
        in_iters[i] = in_iter
        in_states[i] = in_state
    for i in range(nout):
        out_i = out_args[i]
        assert isinstance(out_i, W_NDimArray)
        out_iter, out_state = out_i.create_iter(shape)
        out_iters[i] = out_iter
        out_states[i] = out_state
    shapelen = len(shape)
    vals = [None] * nin
    # The first output iterator paces the whole loop.
    while not out_iters[0].done(out_states[0]):
        call_many_to_many_driver.jit_merge_point(shapelen=shapelen,
                                                 func=func,
                                                 res_dtype=res_dtype,
                                                 nin=nin, nout=nout)
        for i in range(nin):
            vals[i] = in_iters[i].getitem(in_states[i])
        w_arglist = space.newlist(vals)
        w_outvals = space.call_args(func,
                                    Arguments.frompacked(space, w_arglist))
        # w_outvals should be a tuple, but func can return a single value as well
        if space.isinstance_w(w_outvals, space.w_tuple):
            batch = space.listview(w_outvals)
            for i in range(len(batch)):
                out_iters[i].setitem(out_states[i],
                                     res_dtype.coerce(space, batch[i]))
                out_states[i] = out_iters[i].next(out_states[i])
        else:
            out_iters[0].setitem(out_states[0],
                                 res_dtype.coerce(space, w_outvals))
            out_states[0] = out_iters[0].next(out_states[0])
        for i in range(nin):
            in_states[i] = in_iters[i].next(in_states[i])
    return space.newtuple([convert_to_array(space, o) for o in out_args])
def set_imag(self, space, orig_array, w_val):
    """Assign the imaginary part of this boxed complex scalar from
    ``w_val``; the real part is preserved."""
    # Only called on complex dtype
    assert self.dtype.is_complex_type()
    w_arr = convert_to_array(space, w_val)
    dtype = self.dtype.float_type
    if len(w_arr.get_shape()) > 0:
        # Only a scalar (0-d) value can broadcast into a scalar.
        raise OperationError(
            space.w_ValueError, space.wrap(
                "could not broadcast input array from shape " +
                "(%s) into shape ()" %
                (",".join([str(x) for x in w_arr.get_shape()]))
            ),
        )
    self.value = self.dtype.itemtype.composite(
        self.value.convert_real_to(dtype),
        w_arr.get_scalar_value().convert_to(dtype)
    )
def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args,
                      out_args):
    """Drive an n-input/m-output Python-level ufunc over the broadcast
    ``shape``, coercing per-argument via ``in_dtypes``/``out_dtypes``.

    Returns the outputs wrapped as arrays in a tuple.
    """
    # out must have been built. func needs no calc_type, is usually an
    # external ufunc
    nin = len(in_args)
    in_iters = [None] * nin
    in_states = [None] * nin
    nout = len(out_args)
    out_iters = [None] * nout
    out_states = [None] * nout
    for i in range(nin):
        in_i = in_args[i]
        assert isinstance(in_i, W_NDimArray)
        in_iter, in_state = in_i.create_iter(shape)
        in_iters[i] = in_iter
        in_states[i] = in_state
    for i in range(nout):
        out_i = out_args[i]
        assert isinstance(out_i, W_NDimArray)
        out_iter, out_state = out_i.create_iter(shape)
        out_iters[i] = out_iter
        out_states[i] = out_state
    shapelen = len(shape)
    vals = [None] * nin
    # Pace the loop with an output iterator when there is one, else with
    # the last input iterator.
    test_iter, test_state = in_iters[-1], in_states[-1]
    if nout > 0:
        test_iter, test_state = out_iters[0], out_states[0]
    while not test_iter.done(test_state):
        call_many_to_many_driver.jit_merge_point(shapelen=shapelen,
                                                 func=func,
                                                 in_dtypes=in_dtypes,
                                                 out_dtypes=out_dtypes,
                                                 nin=nin, nout=nout)
        for i in range(nin):
            vals[i] = in_dtypes[i].coerce(space,
                                          in_iters[i].getitem(in_states[i]))
        w_arglist = space.newlist(vals)
        w_outvals = space.call_args(func,
                                    Arguments.frompacked(space, w_arglist))
        # w_outvals should be a tuple, but func can return a single value as well
        if space.isinstance_w(w_outvals, space.w_tuple):
            batch = space.listview(w_outvals)
            for i in range(len(batch)):
                out_iters[i].setitem(out_states[i],
                                     out_dtypes[i].coerce(space, batch[i]))
                out_states[i] = out_iters[i].next(out_states[i])
        elif nout > 0:
            out_iters[0].setitem(out_states[0],
                                 out_dtypes[0].coerce(space, w_outvals))
            out_states[0] = out_iters[0].next(out_states[0])
        for i in range(nin):
            in_states[i] = in_iters[i].next(in_states[i])
        test_state = test_iter.next(test_state)
    return space.newtuple([convert_to_array(space, o) for o in out_args])
def put(space, w_arr, w_indices, w_values, w_mode):
    """Replace flat elements of ``w_arr`` at ``w_indices`` with
    ``w_values`` (numpy.put).

    Out-of-bounds indices are handled per ``w_mode`` (raise/wrap/clip).
    If there are fewer values than indices the last value is recycled.
    """
    arr = convert_to_array(space, w_arr)
    mode = clipmode_converter(space, w_mode)
    if not w_indices:
        raise oefmt(space.w_ValueError, "indices list cannot be empty")
    if not w_values:
        raise oefmt(space.w_ValueError, "value list cannot be empty")
    dtype = arr.get_dtype()
    if space.isinstance_w(w_indices, space.w_list):
        indices = space.listview(w_indices)
    else:
        indices = [w_indices]
    if space.isinstance_w(w_values, space.w_list):
        values = space.listview(w_values)
    else:
        values = [w_values]
    v_idx = 0
    for idx in indices:
        index = support.index_w(space, idx)
        if index < 0 or index >= arr.get_size():
            if mode == NPY.RAISE:
                raise oefmt(
                    space.w_IndexError,
                    "index %d is out of bounds for axis 0 with size %d",
                    index, arr.get_size())
            elif mode == NPY.WRAP:
                index = index % arr.get_size()
            elif mode == NPY.CLIP:
                if index < 0:
                    index = 0
                else:
                    index = arr.get_size() - 1
            else:
                assert False
        value = values[v_idx]
        if v_idx + 1 < len(values):
            # Stop advancing once values are exhausted (recycle the last).
            v_idx += 1
        arr.setitem(space, [index], dtype.coerce(space, value))
def put(space, w_arr, w_indices, w_values, w_mode):
    """Replace flat elements of ``w_arr`` at ``w_indices`` with
    ``w_values`` (numpy.put).

    Out-of-bounds indices are handled per ``w_mode`` (raise/wrap/clip).
    If there are fewer values than indices the last value is recycled.
    """
    arr = convert_to_array(space, w_arr)
    mode = clipmode_converter(space, w_mode)
    if not w_indices:
        raise oefmt(space.w_ValueError, "indices list cannot be empty")
    if not w_values:
        raise oefmt(space.w_ValueError, "value list cannot be empty")
    dtype = arr.get_dtype()
    if space.isinstance_w(w_indices, space.w_list):
        indices = space.listview(w_indices)
    else:
        indices = [w_indices]
    if space.isinstance_w(w_values, space.w_list):
        values = space.listview(w_values)
    else:
        values = [w_values]
    v_idx = 0
    for idx in indices:
        index = support.index_w(space, idx)
        if index < 0 or index >= arr.get_size():
            if mode == NPY.RAISE:
                raise oefmt(space.w_IndexError,
                            "index %d is out of bounds for axis 0 with size %d",
                            index, arr.get_size())
            elif mode == NPY.WRAP:
                index = index % arr.get_size()
            elif mode == NPY.CLIP:
                if index < 0:
                    index = 0
                else:
                    index = arr.get_size() - 1
            else:
                assert False
        value = values[v_idx]
        if v_idx + 1 < len(values):
            # Stop advancing once values are exhausted (recycle the last).
            v_idx += 1
        arr.setitem(space, [index], dtype.coerce(space, value))
def descr_setitem(self, space, w_idx, w_value):
    """Assign through the flat iterator at an int or slice index.

    A single-item index is coerced and stored directly; slices are
    delegated to ``loop.flatiter_setitem``.  The iterator position is
    restored afterwards in all cases.
    """
    if not (space.isinstance_w(w_idx, space.w_int) or
            space.isinstance_w(w_idx, space.w_slice)):
        raise oefmt(space.w_IndexError, "unsupported iterator index")
    start, stop, step, length = space.decode_index4(w_idx, self.iter.size)
    try:
        state = self.iter.goto(start)
        dtype = self.base.get_dtype()
        if length == 1:
            # Single-item fast path: coerce and store in place.
            try:
                val = dtype.coerce(space, w_value)
            except OperationError:
                raise oefmt(space.w_ValueError,
                            "Error setting single item of array.")
            self.iter.setitem(state, val)
            return
        arr = convert_to_array(space, w_value)
        loop.flatiter_setitem(space, dtype, arr, self.iter, state, step,
                              length)
    finally:
        # Restore the iterator's public position.
        self.iter.reset(self.state, mutate=True)
def concatenate(space, w_args, axis=0):
    """Join arrays along ``axis`` (numpy.concatenate).

    Shape checking, record-dtype compatibility and dtype promotion are
    interleaved in a single pass over the inputs.
    """
    args_w = space.listview(w_args)
    if len(args_w) == 0:
        raise OperationError(space.w_ValueError,
                             space.wrap("need at least one array to concatenate"))
    args_w = [convert_to_array(space, w_arg) for w_arg in args_w]
    dtype = args_w[0].get_dtype()
    shape = args_w[0].get_shape()[:]
    _axis = axis
    if axis < 0:
        _axis = len(shape) + axis
    for arr in args_w[1:]:
        for i, axis_size in enumerate(arr.get_shape()):
            # Dimensions must match everywhere except on the
            # concatenation axis, which accumulates the total size.
            if len(arr.get_shape()) != len(shape) or \
                    (i != _axis and axis_size != shape[i]):
                raise OperationError(space.w_ValueError, space.wrap(
                    "all the input arrays must have same number of dimensions"))
            elif i == _axis:
                shape[i] += axis_size
        a_dt = arr.get_dtype()
        if dtype.is_record_type() and a_dt.is_record_type():
            #Record types must match
            for f in dtype.fields:
                if f not in a_dt.fields or \
                        dtype.fields[f] != a_dt.fields[f]:
                    raise OperationError(space.w_TypeError,
                                         space.wrap("record type mismatch"))
        elif dtype.is_record_type() or a_dt.is_record_type():
            raise OperationError(space.w_TypeError,
                                 space.wrap("invalid type promotion"))
        dtype = interp_ufuncs.find_binop_result_dtype(space, dtype,
                                                      arr.get_dtype())
        if _axis < 0 or len(arr.get_shape()) <= _axis:
            raise operationerrfmt(space.w_IndexError,
                                  "axis %d out of bounds [0, %d)",
                                  axis, len(shape))
    # concatenate does not handle ndarray subtypes, it always returns a ndarray
    res = W_NDimArray.from_shape(space, shape, dtype, 'C')
    chunks = [Chunk(0, i, 1, i) for i in shape]
    axis_start = 0
    for arr in args_w:
        if arr.get_shape()[_axis] == 0:
            continue
        # Copy each input into its slice of the result along `_axis`.
        chunks[_axis] = Chunk(axis_start,
                              axis_start + arr.get_shape()[_axis], 1,
                              arr.get_shape()[_axis])
        Chunks(chunks).apply(space, res).implementation.setslice(space, arr)
        axis_start += arr.get_shape()[_axis]
    return res
def descr_setitem(self, space, w_idx, w_value):
    """Assign through the flat iterator at an int or slice index.

    A single-item index is coerced and stored directly; slices are
    delegated to ``loop.flatiter_setitem``.  The iterator position is
    restored afterwards in all cases.
    """
    if not (space.isinstance_w(w_idx, space.w_int) or
            space.isinstance_w(w_idx, space.w_slice)):
        raise oefmt(space.w_IndexError, 'unsupported iterator index')
    start, stop, step, length = space.decode_index4(w_idx, self.iter.size)
    try:
        state = self.iter.goto(start)
        dtype = self.base.get_dtype()
        if length == 1:
            # Single-item fast path: coerce and store in place.
            try:
                val = dtype.coerce(space, w_value)
            except OperationError:
                raise oefmt(space.w_ValueError,
                            "Error setting single item of array.")
            self.iter.setitem(state, val)
            return
        arr = convert_to_array(space, w_value)
        loop.flatiter_setitem(space, dtype, arr, self.iter, state, step,
                              length)
    finally:
        # Restore the iterator's public position.
        self.iter.reset(self.state, mutate=True)
def repeat(space, w_arr, repeats, w_axis):
    """Repeat elements of ``w_arr`` ``repeats`` times (numpy.repeat).

    With ``w_axis`` None the input is flattened first; otherwise
    repetition happens along the given axis.  The result is filled by
    writing the source array ``repeats`` times through strided views.
    """
    arr = convert_to_array(space, w_arr)
    if space.is_none(w_axis):
        arr = arr.descr_flatten(space)
        orig_size = arr.get_shape()[0]
        shape = [arr.get_shape()[0] * repeats]
        w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(),
                                       w_instance=arr)
        for i in range(repeats):
            # The i-th copy occupies every repeats-th slot starting at i.
            Chunks([Chunk(i, shape[0] - repeats + i, repeats,
                          orig_size)]).apply(space, w_res).implementation.setslice(
                space, arr
            )
    else:
        axis = space.int_w(w_axis)
        shape = arr.get_shape()[:]
        chunks = [Chunk(0, i, 1, i) for i in shape]
        orig_size = shape[axis]
        shape[axis] *= repeats
        w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(),
                                       w_instance=arr)
        for i in range(repeats):
            # Only the repeated axis gets a strided chunk; other axes
            # keep their full extent.
            chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats,
                                 orig_size)
            Chunks(chunks).apply(space, w_res).implementation.setslice(space,
                                                                       arr)
    return w_res
def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True):
    """Return an uninitialized array with the same shape as ``w_a``.

    Honours the requested dtype and memory order; for KEEPORDER or
    ANYORDER the stride pattern of the prototype is copied.  ``subok``
    controls whether the prototype's subtype is preserved.
    """
    w_a = convert_to_array(space, w_a)
    npy_order = order_converter(space, w_order, w_a.get_order())
    if space.is_none(w_dtype):
        dtype = w_a.get_dtype()
    else:
        dtype = space.interp_w(
            descriptor.W_Dtype,
            space.call_function(space.gettypefor(descriptor.W_Dtype),
                                w_dtype))
        if dtype.is_str_or_unicode() and dtype.elsize < 1:
            # Zero-width flexible dtype: give it a minimal 1-byte width.
            dtype = descriptor.variable_dtype(space, dtype.char + '1')
    if npy_order in (NPY.KEEPORDER, NPY.ANYORDER):
        # Try to copy the stride pattern
        impl = w_a.implementation.astype(space, dtype, NPY.KEEPORDER)
        if subok:
            w_type = space.type(w_a)
        else:
            w_type = None
        return wrap_impl(space, w_type, w_a, impl)
    return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype,
                                  order=npy_order,
                                  w_instance=w_a if subok else None,
                                  zero=False)
def where(space, w_arr, w_x=None, w_y=None):
    """where(condition, [x, y])

    Return elements, either from `x` or `y`, depending on `condition`.

    If only `condition` is given, return ``condition.nonzero()``.

    Parameters
    ----------
    condition : array_like, bool
        When True, yield `x`, otherwise yield `y`.
    x, y : array_like, optional
        Values from which to choose. `x` and `y` need to have the same
        shape as `condition`.

    Returns
    -------
    out : ndarray or tuple of ndarrays
        If both `x` and `y` are specified, the output array contains
        elements of `x` where `condition` is True, and elements from
        `y` elsewhere.

        If only `condition` is given, return the tuple
        ``condition.nonzero()``, the indices where `condition` is True.

    See Also
    --------
    nonzero, choose

    Notes
    -----
    If `x` and `y` are given and input arrays are 1-D, `where` is
    equivalent to::

        [xv if c else yv for (c,xv,yv) in zip(condition,x,y)]

    Examples
    --------
    >>> np.where([[True, False], [True, True]],
    ...          [[1, 2], [3, 4]],
    ...          [[9, 8], [7, 6]])
    array([[1, 8],
           [3, 4]])

    >>> np.where([[0, 1], [1, 0]])
    (array([0, 1]), array([1, 0]))

    >>> x = np.arange(9.).reshape(3, 3)
    >>> np.where( x > 5 )
    (array([2, 2, 2]), array([0, 1, 2]))
    >>> x[np.where( x > 3.0 )]               # Note: result is 1D.
    array([ 4.,  5.,  6.,  7.,  8.])
    >>> np.where(x < 5, x, -1)               # Note: broadcasting.
    array([[ 0.,  1.,  2.],
           [ 3.,  4., -1.],
           [-1., -1., -1.]])

    NOTE: support for not passing x and y is unsupported
    """
    # Only the 3-argument form is implemented; reject 1- and 2-arg calls.
    if space.is_none(w_y):
        if space.is_none(w_x):
            raise OperationError(space.w_NotImplementedError,
                space.wrap("1-arg where unsupported right now"))
        raise OperationError(space.w_ValueError,
            space.wrap("Where should be called with either 1 or 3 arguments"))
    if space.is_none(w_x):
        raise OperationError(space.w_ValueError,
            space.wrap("Where should be called with either 1 or 3 arguments"))
    arr = convert_to_array(space, w_arr)
    x = convert_to_array(space, w_x)
    y = convert_to_array(space, w_y)
    # All-scalar fast path: pick x or y by the truth of the condition.
    if x.is_scalar() and y.is_scalar() and arr.is_scalar():
        if arr.get_dtype().itemtype.bool(arr.get_scalar_value()):
            return x
        return y
    # Result dtype follows binary-op promotion of x and y.
    dtype = interp_ufuncs.find_binop_result_dtype(space, x.get_dtype(),
                                                  y.get_dtype())
    # Broadcast condition, x and y to a common shape.
    shape = shape_agreement(space, arr.get_shape(), x)
    shape = shape_agreement(space, shape, y)
    out = W_NDimArray.from_shape(space, shape, dtype)
    return loop.where(out, shape, arr, x, y, dtype)
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0, order='K'):
    """Build a multi-operand iterator over *w_seq*.

    Parses flags, op_flags, op_axes and op_dtypes, broadcasts all
    operands to a common shape, allocates output (None) operands, and
    creates one inner iterator per operand.
    """
    self.order = order
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ''
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    # convert w_seq operands to a list of W_NDimArray
    # (None entries are kept as placeholders for outputs to allocate)
    if space.isinstance_w(w_seq, space.w_tuple) or \
       space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)
    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []
    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == 'w']
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(space,
                                             [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
       space.isinstance_w(w_itershape, space.w_list):
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            # No explicit dtypes: promote across all readable inputs.
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == 'w':
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(space, self.shape,
                                                     out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    shape_agreement(space, self.shape, self.seq[i], False)
    if self.tracked_index != "":
        if self.order == "K":
            self.order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            backward = self.order != self.tracked_index
        self.index_iter = IndexIterator(self.shape, backward=backward)
    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            selfd = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not selfd:
                self.dtypes[i] = seq_d
            elif selfd != seq_d:
                # dtype mismatch: only allowed when copying is permitted.
                if not 'r' in self.op_flags[i].tmp_copy:
                    raise oefmt(space.w_TypeError,
                        "Iterator operand required copying or "
                        "buffering for operand %d", i)
                impl = self.seq[i].implementation
                new_impl = impl.astype(space, selfd)
                self.seq[i] = W_NDimArray(new_impl)
    else:
        #copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]
    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = get_iter(space, self.order, self.seq[i], self.shape,
                      self.dtypes[i], self.op_flags[i], self)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    if self.external_loop:
        coalesce_axes(self, space)
def count_nonzero(space, w_obj):
    """Return the number of non-zero (truthy) elements as an app-level int."""
    w_arr = convert_to_array(space, w_obj)
    n = loop.count_all_true(w_arr)
    return space.wrap(n)
def where(space, w_arr, w_x=None, w_y=None):
    """where(condition, [x, y])

    Return elements, either from `x` or `y`, depending on `condition`.

    If only `condition` is given, return ``condition.nonzero()``.

    Parameters
    ----------
    condition : array_like, bool
        When True, yield `x`, otherwise yield `y`.
    x, y : array_like, optional
        Values from which to choose. `x` and `y` need to have the same
        shape as `condition`.

    Returns
    -------
    out : ndarray or tuple of ndarrays
        If both `x` and `y` are specified, the output array contains
        elements of `x` where `condition` is True, and elements from
        `y` elsewhere.

        If only `condition` is given, return the tuple
        ``condition.nonzero()``, the indices where `condition` is True.

    See Also
    --------
    nonzero, choose

    Notes
    -----
    If `x` and `y` are given and input arrays are 1-D, `where` is
    equivalent to::

        [xv if c else yv for (c,xv,yv) in zip(condition,x,y)]

    Examples
    --------
    >>> np.where([[True, False], [True, True]],
    ...          [[1, 2], [3, 4]],
    ...          [[9, 8], [7, 6]])
    array([[1, 8],
           [3, 4]])

    >>> np.where([[0, 1], [1, 0]])
    (array([0, 1]), array([1, 0]))

    >>> x = np.arange(9.).reshape(3, 3)
    >>> np.where( x > 5 )
    (array([2, 2, 2]), array([0, 1, 2]))
    >>> x[np.where( x > 3.0 )]               # Note: result is 1D.
    array([ 4.,  5.,  6.,  7.,  8.])
    >>> np.where(x < 5, x, -1)               # Note: broadcasting.
    array([[ 0.,  1.,  2.],
           [ 3.,  4., -1.],
           [-1., -1., -1.]])

    NOTE: support for not passing x and y is unsupported
    """
    # Only the 3-argument form is implemented; reject 1- and 2-arg calls.
    if space.is_none(w_y):
        if space.is_none(w_x):
            raise OperationError(
                space.w_NotImplementedError,
                space.wrap("1-arg where unsupported right now"))
        raise OperationError(
            space.w_ValueError,
            space.wrap("Where should be called with either 1 or 3 arguments"))
    if space.is_none(w_x):
        raise OperationError(
            space.w_ValueError,
            space.wrap("Where should be called with either 1 or 3 arguments"))
    arr = convert_to_array(space, w_arr)
    x = convert_to_array(space, w_x)
    y = convert_to_array(space, w_y)
    # All-scalar fast path: pick x or y by the truth of the condition.
    if x.is_scalar() and y.is_scalar() and arr.is_scalar():
        if arr.get_dtype().itemtype.bool(arr.get_scalar_value()):
            return x
        return y
    # Result dtype follows binary-op promotion of x and y.
    dtype = ufuncs.find_binop_result_dtype(space, x.get_dtype(),
                                           y.get_dtype())
    # Broadcast condition, x and y to a common shape.
    shape = shape_agreement(space, arr.get_shape(), x)
    shape = shape_agreement(space, shape, y)
    out = W_NDimArray.from_shape(space, shape, dtype)
    return loop.where(space, out, shape, arr, x, y, dtype)
def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, dtype=None,
           cumulative=False):
    """Perform a (possibly cumulative) reduction of *w_obj* with this ufunc.

    Handles axis normalization, result-dtype selection, identity checks
    for empty reductions, out-parameter validation, and dispatches to
    the appropriate loop helper.

    Fix: the keepdims tail computed ``shape = [1] * len(obj_shape)`` and
    then ignored it, rebuilding the same list inline — a dead store. The
    variable is now used in the from_shape call (behavior unchanged).
    """
    if self.argcount != 2:
        raise oefmt(space.w_ValueError,
                    "reduce only supported for binary functions")
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if obj.get_dtype().is_flexible():
        raise oefmt(space.w_TypeError,
                    "cannot perform reduce with flexible type")
    obj_shape = obj.get_shape()
    if obj.is_scalar():
        # Reducing a scalar is the identity operation.
        return obj.get_scalar_value()
    shapelen = len(obj_shape)
    # Normalize the axis argument: None means "reduce over everything".
    if space.is_none(w_axis):
        axis = maxint
    else:
        if space.isinstance_w(w_axis, space.w_tuple) and \
                space.len_w(w_axis) == 1:
            w_axis = space.getitem(w_axis, space.wrap(0))
        axis = space.int_w(w_axis)
        if axis < -shapelen or axis >= shapelen:
            raise oefmt(space.w_ValueError, "'axis' entry is out of bounds")
        if axis < 0:
            axis += shapelen
    assert axis >= 0
    dtype = descriptor.decode_w_dtype(space, dtype)
    if dtype is None:
        # No explicit dtype: comparisons reduce to bool, otherwise promote.
        if self.comparison_func:
            dtype = descriptor.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, obj.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=self.promote_to_largest,
                promote_bools=self.promote_bools,
            )
    if self.identity is None:
        # Without an identity, reducing an empty extent is an error.
        for i in range(shapelen):
            if space.is_none(w_axis) or i == axis:
                if obj_shape[i] == 0:
                    raise oefmt(space.w_ValueError,
                                "zero-size array to reduction operation %s "
                                "which has no identity", self.name)
    if shapelen > 1 and axis < shapelen:
        # Axis reduction on a multi-dimensional array.
        temp = None
        if cumulative:
            shape = obj_shape[:]
            temp_shape = obj_shape[:axis] + obj_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            temp = W_NDimArray.from_shape(space, temp_shape, dtype,
                                          w_instance=obj)
        elif keepdims:
            shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:]
        else:
            shape = obj_shape[:axis] + obj_shape[axis + 1:]
        if out:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            if len(out.get_shape()) > len(shape):
                raise oefmt(space.w_ValueError,
                            "output parameter for reduction operation %s "
                            "has too many dimensions", self.name)
            elif len(out.get_shape()) < len(shape):
                raise oefmt(space.w_ValueError,
                            "output parameter for reduction operation %s "
                            "does not have enough dimensions", self.name)
            elif out.get_shape() != shape:
                raise oefmt(space.w_ValueError,
                            "output parameter shape mismatch, expecting "
                            "[%s], got [%s]",
                            ",".join([str(x) for x in shape]),
                            ",".join([str(x) for x in out.get_shape()]),
                            )
            dtype = out.get_dtype()
        else:
            out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj)
        if obj.get_size() == 0:
            # Empty input with an identity: fill the output with it.
            if self.identity is not None:
                out.fill(space, self.identity.convert_to(space, dtype))
            return out
        return loop.do_axis_reduce(space, shape, self.func, obj, dtype, axis,
                                   out, self.identity, cumulative, temp)
    if cumulative:
        # Full cumulative reduction: result is 1-D with one slot per element.
        if out:
            if out.get_shape() != [obj.get_size()]:
                raise OperationError(space.w_ValueError, space.wrap(
                    "out of incompatible size"))
        else:
            out = W_NDimArray.from_shape(space, [obj.get_size()], dtype,
                                         w_instance=obj)
        loop.compute_reduce_cumulative(space, obj, out, dtype, self.func,
                                       self.identity)
        return out
    if out:
        # Scalar-result reduction: out must be 0-dimensional.
        if len(out.get_shape()) > 0:
            raise oefmt(space.w_ValueError,
                        "output parameter for reduction operation %s has "
                        "too many dimensions", self.name)
        dtype = out.get_dtype()
    res = loop.compute_reduce(space, obj, dtype, self.func, self.done_func,
                              self.identity)
    if out:
        out.set_scalar_value(res)
        return out
    if keepdims:
        # Preserve dimensionality: wrap the scalar in an all-ones shape.
        shape = [1] * len(obj_shape)
        out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj)
        out.implementation.setitem(0, res)
        return out
    return res
def call(self, space, args_w):
    """Apply this binary ufunc to (lhs, rhs[, out]) from *args_w*.

    Handles string/flexible dtype special cases, common-type scalar
    adjustment, dtype promotion, type restrictions (int_only,
    allow_bool, allow_complex), the out parameter, and the all-scalar
    fast path before delegating element-wise work to loop.call2.
    """
    if len(args_w) > 2:
        [w_lhs, w_rhs, w_out] = args_w
    else:
        [w_lhs, w_rhs] = args_w
        w_out = None
    w_lhs = convert_to_array(space, w_lhs)
    w_rhs = convert_to_array(space, w_rhs)
    w_ldtype = w_lhs.get_dtype()
    w_rdtype = w_rhs.get_dtype()
    # String/flexible dtype special-casing.
    if w_ldtype.is_str() and w_rdtype.is_str() and \
            self.comparison_func:
        pass
    elif (w_ldtype.is_str() or w_rdtype.is_str()) and \
            self.comparison_func and w_out is None:
        # Mixed string / non-string comparison is always False.
        return space.wrap(False)
    elif w_ldtype.is_flexible() or w_rdtype.is_flexible():
        if self.comparison_func:
            if self.name == 'equal' or self.name == 'not_equal':
                res = w_ldtype.eq(space, w_rdtype)
                if not res:
                    return space.wrap(self.name == 'not_equal')
            else:
                return space.w_NotImplemented
        else:
            raise oefmt(space.w_TypeError,
                        'unsupported operand dtypes %s and %s for "%s"',
                        w_rdtype.get_name(), w_ldtype.get_name(),
                        self.name)
    # Scalar operands of a common kind adopt the array operand's dtype.
    if self.are_common_types(w_ldtype, w_rdtype):
        if not w_lhs.is_scalar() and w_rhs.is_scalar():
            w_rdtype = w_ldtype
        elif w_lhs.is_scalar() and not w_rhs.is_scalar():
            w_ldtype = w_rdtype
    calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype,
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    # Enforce per-ufunc type restrictions.
    if (self.int_only and (not w_ldtype.is_int() or
                           not w_rdtype.is_int() or
                           not calc_dtype.is_int()) or
            not self.allow_bool and (w_ldtype.is_bool() or
                                     w_rdtype.is_bool()) or
            not self.allow_complex and (w_ldtype.is_complex() or
                                        w_rdtype.is_complex())):
        raise oefmt(space.w_TypeError,
                    "ufunc '%s' not supported for the input types",
                    self.name)
    if space.is_none(w_out):
        out = None
    elif not isinstance(w_out, W_NDimArray):
        raise oefmt(space.w_TypeError, 'output must be an array')
    else:
        out = w_out
        # An explicit out array dictates the computation dtype.
        calc_dtype = out.get_dtype()
    if self.comparison_func:
        res_dtype = descriptor.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if w_lhs.is_scalar() and w_rhs.is_scalar():
        # Scalar fast path: one call of the underlying function.
        arr = self.func(calc_dtype,
            w_lhs.get_scalar_value().convert_to(space, calc_dtype),
            w_rhs.get_scalar_value().convert_to(space, calc_dtype)
        )
        if isinstance(out, W_NDimArray):
            if out.is_scalar():
                out.set_scalar_value(arr)
            else:
                out.fill(space, arr)
        else:
            out = arr
        return out
    new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs)
    new_shape = shape_agreement(space, new_shape, out, broadcast_down=False)
    return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype,
                      w_lhs, w_rhs, out)
def set_imag(self, space, orig_array, w_value):
    """Assign *w_value* into the imaginary component of the array."""
    w_src = convert_to_array(space, w_value)
    self.get_imag(space, orig_array).setslice(space, w_src)
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0,
             order=NPY.KEEPORDER, allow_backward=True):
    """Build an nditer-style multi-operand iterator.

    Resolves the iteration order ('A'/'K' collapse to C or Fortran),
    parses flags/op_flags/op_axes/op_dtypes, broadcasts operands,
    allocates None outputs, applies casting rules, and creates one
    inner iterator per operand.
    """
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ""
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    self.allow_backward = allow_backward
    if not space.is_w(w_casting, space.w_None):
        self.casting = space.str_w(w_casting)
    else:
        self.casting = "safe"
    # convert w_seq operands to a list of W_NDimArray
    # (None entries are placeholders for outputs to allocate)
    if space.isinstance_w(w_seq, space.w_tuple) or \
       space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    if order == NPY.ANYORDER:
        # 'A' means "'F' order if all the arrays are Fortran contiguous,
        # 'C' order otherwise"
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_flags() & NPY.ARRAY_F_CONTIGUOUS):
                break
        else:
            order = NPY.FORTRANORDER
    elif order == NPY.KEEPORDER:
        # 'K' means "as close to the order the array elements appear in
        # memory as possible", so match self.order to seq.order
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_order() == NPY.FORTRANORDER):
                break
        else:
            order = NPY.FORTRANORDER
    self.order = order
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, "op_flags", w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)
    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []
    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == "w"]
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(space,
                                             [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
       space.isinstance_w(w_itershape, space.w_list):
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            # No explicit dtypes: promote across all readable inputs.
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == "w":
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(space, self.shape,
                                                     out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    try:
                        shape_agreement(space, self.shape, self.seq[i],
                                        False)
                    except OperationError as e:
                        raise oefmt(space.w_ValueError, "non-broadcastable"
                            " output operand with shape %s doesn't match "
                            "the broadcast shape %s",
                            str(self.seq[i].get_shape()),
                            str(self.shape))
    if self.tracked_index != "":
        order = self.order
        if order == NPY.KEEPORDER:
            order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            # Iterate backward when the tracked index disagrees with
            # the memory order.
            backward = ((order == NPY.CORDER and
                         self.tracked_index != "C") or
                        (order == NPY.FORTRANORDER and
                         self.tracked_index != "F"))
        self.index_iter = IndexIterator(self.shape, backward=backward)
    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            self_d = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not self_d:
                self.dtypes[i] = seq_d
            elif self_d != seq_d:
                impl = self.seq[i].implementation
                if self.buffered or "r" in self.op_flags[i].tmp_copy:
                    # Copying allowed: check the cast, then copy-convert.
                    if not can_cast_array(space, self.seq[i], self_d,
                                          self.casting):
                        raise oefmt(space.w_TypeError, "Iterator operand %d"
                            " dtype could not be cast from %s to %s"
                            " according to the rule '%s'",
                            i,
                            space.str_w(seq_d.descr_repr(space)),
                            space.str_w(self_d.descr_repr(space)),
                            self.casting)
                    order = support.get_order_as_CF(impl.order, self.order)
                    new_impl = impl.astype(space, self_d, order).copy(space)
                    self.seq[i] = W_NDimArray(new_impl)
                else:
                    raise oefmt(space.w_TypeError, "Iterator "
                        "operand required copying or buffering, "
                        "but neither copying nor buffering was "
                        "enabled")
                if "w" in self.op_flags[i].rw:
                    # Writable operand: the reverse cast must be legal too.
                    if not can_cast_type(space, self_d, seq_d, self.casting):
                        raise oefmt(space.w_TypeError, "Iterator"
                            " requested dtype could not be cast from "
                            " %s to %s, the operand %d dtype, accord"
                            "ing to the rule '%s'",
                            space.str_w(self_d.descr_repr(space)),
                            space.str_w(seq_d.descr_repr(space)),
                            i,
                            self.casting)
    elif self.buffered and not (self.external_loop and len(self.seq) < 2):
        # Buffered mode: copy non-output operands into iteration order.
        for i in range(len(self.seq)):
            if i not in outargs:
                self.seq[i] = self.seq[i].descr_copy(
                    space, w_order=space.wrap(self.order))
        self.dtypes = [s.get_dtype() for s in self.seq]
    else:
        # copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]
    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    if self.external_loop:
        coalesce_axes(self, space)
def descr_ravel(self, space):
    """Return self wrapped in a 1-tuple and converted to an array."""
    from pypy.module.micronumpy.base import convert_to_array
    w_wrapped = space.newtuple([self])
    return convert_to_array(space, w_wrapped)
def count_nonzero(space, w_obj):
    """Count the truthy elements of *w_obj*, wrapped as an app-level int."""
    arr = convert_to_array(space, w_obj)
    count = loop.count_all_true(arr)
    return space.wrap(count)
def call(self, space, args_w):
    """Apply this binary ufunc to (lhs, rhs[, out]) from *args_w*.

    Older variant: rejects flexible dtypes outright, checks type
    restrictions before dtype promotion, and calls convert_to without
    the space argument.
    """
    if len(args_w) > 2:
        [w_lhs, w_rhs, w_out] = args_w
    else:
        [w_lhs, w_rhs] = args_w
        w_out = None
    w_lhs = convert_to_array(space, w_lhs)
    w_rhs = convert_to_array(space, w_rhs)
    w_ldtype = w_lhs.get_dtype()
    w_rdtype = w_rhs.get_dtype()
    # String/flexible dtype special-casing.
    if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \
            self.comparison_func:
        pass
    elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \
            self.comparison_func and w_out is None:
        # Mixed string / non-string comparison is always False.
        return space.wrap(False)
    elif (w_ldtype.is_flexible_type() or \
            w_rdtype.is_flexible_type()):
        raise OperationError(space.w_TypeError, space.wrap(
            'unsupported operand dtypes %s and %s for "%s"' % \
            (w_rdtype.get_name(), w_ldtype.get_name(),
             self.name)))
    # Scalar operands of a common kind adopt the array operand's dtype.
    if self.are_common_types(w_ldtype, w_rdtype):
        if not w_lhs.is_scalar() and w_rhs.is_scalar():
            w_rdtype = w_ldtype
        elif w_lhs.is_scalar() and not w_rhs.is_scalar():
            w_ldtype = w_rdtype
    # Enforce per-ufunc type restrictions before promotion.
    if (self.int_only and (not w_ldtype.is_int_type() or
                           not w_rdtype.is_int_type()) or
            not self.allow_bool and (w_ldtype.is_bool_type() or
                                     w_rdtype.is_bool_type()) or
            not self.allow_complex and (w_ldtype.is_complex_type() or
                                        w_rdtype.is_complex_type())):
        raise OperationError(space.w_TypeError,
                             space.wrap("Unsupported types"))
    calc_dtype = find_binop_result_dtype(
        space, w_ldtype, w_rdtype,
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    if space.is_none(w_out):
        out = None
    elif not isinstance(w_out, W_NDimArray):
        raise OperationError(space.w_TypeError,
                             space.wrap('output must be an array'))
    else:
        out = w_out
        # An explicit out array dictates the computation dtype.
        calc_dtype = out.get_dtype()
    if self.comparison_func:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if w_lhs.is_scalar() and w_rhs.is_scalar():
        # Scalar fast path: one call of the underlying function.
        arr = self.func(calc_dtype,
            w_lhs.get_scalar_value().convert_to(calc_dtype),
            w_rhs.get_scalar_value().convert_to(calc_dtype))
        if isinstance(out, W_NDimArray):
            if out.is_scalar():
                out.set_scalar_value(arr)
            else:
                out.fill(arr)
        else:
            out = arr
        return out
    new_shape = shape_agreement(space,
                                w_lhs.get_shape(), w_rhs)
    new_shape = shape_agreement(space, new_shape, out,
                                broadcast_down=False)
    return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype,
                      w_lhs, w_rhs, out)
def dot(space, w_obj1, w_obj2, w_out=None):
    """Dot product; a scalar left operand delegates to the right operand."""
    w_lhs = convert_to_array(space, w_obj1)
    if not w_lhs.is_scalar():
        return w_lhs.descr_dot(space, w_obj2, w_out)
    # Scalar lhs: let the (array) rhs drive the operation.
    return convert_to_array(space, w_obj2).descr_dot(space, w_lhs, w_out)
def dot(space, w_obj1, w_obj2, w_out=None):
    """Dot product of two array-likes, optionally into *w_out*."""
    w_arr = convert_to_array(space, w_obj1)
    if w_arr.is_scalar():
        # Scalar lhs: swap roles so the array side performs the dot.
        w_other = convert_to_array(space, w_obj2)
        return w_other.descr_dot(space, w_arr, w_out)
    return w_arr.descr_dot(space, w_obj2, w_out)
def set_imag(self, space, orig_array, w_value):
    """Write *w_value* into the imaginary part of this array."""
    imag_view = self.get_imag(space, orig_array)
    imag_view.setslice(space, convert_to_array(space, w_value))
def reduce(self, space, w_obj, promote_to_largest, w_axis, keepdims=False,
           out=None, dtype=None, cumulative=False):
    """Perform a (possibly cumulative) reduction of *w_obj*.

    Older variant: axis comes via unwrap_axis_arg, bools always promote,
    and the loop helpers are called without the space argument.
    """
    if self.argcount != 2:
        raise OperationError(space.w_ValueError, space.wrap("reduce only "
            "supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if obj.get_dtype().is_flexible_type():
        raise OperationError(
            space.w_TypeError,
            space.wrap('cannot perform reduce for flexible type'))
    obj_shape = obj.get_shape()
    if obj.is_scalar():
        # Reducing a scalar is the identity operation.
        return obj.get_scalar_value()
    shapelen = len(obj_shape)
    axis = unwrap_axis_arg(space, shapelen, w_axis)
    assert axis >= 0
    dtype = interp_dtype.decode_w_dtype(space, dtype)
    if dtype is None:
        # No explicit dtype: comparisons reduce to bool, otherwise promote.
        if self.comparison_func:
            dtype = interp_dtype.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, obj.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=promote_to_largest,
                promote_bools=True)
    if self.identity is None:
        # Without an identity, reducing an empty extent is an error.
        for i in range(shapelen):
            if space.is_none(w_axis) or i == axis:
                if obj_shape[i] == 0:
                    raise operationerrfmt(
                        space.w_ValueError, "zero-size array to "
                        "%s.reduce without identity", self.name)
    if shapelen > 1 and axis < shapelen:
        # Axis reduction on a multi-dimensional array.
        temp = None
        if cumulative:
            shape = obj_shape[:]
            temp_shape = obj_shape[:axis] + obj_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            temp = W_NDimArray.from_shape(space, temp_shape, dtype,
                                          w_instance=obj)
        elif keepdims:
            shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:]
        else:
            shape = obj_shape[:axis] + obj_shape[axis + 1:]
        if out:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            if len(out.get_shape()) > len(shape):
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' has too many dimensions', self.name)
            elif len(out.get_shape()) < len(shape):
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' does not have enough dimensions',
                    self.name)
            elif out.get_shape() != shape:
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter shape mismatch, expecting [%s]' +
                    ' , got [%s]',
                    ",".join([str(x) for x in shape]),
                    ",".join([str(x) for x in out.get_shape()]),
                    )
            dtype = out.get_dtype()
        else:
            out = W_NDimArray.from_shape(space, shape, dtype,
                                         w_instance=obj)
        return loop.do_axis_reduce(shape, self.func, obj, dtype, axis,
                                   out, self.identity, cumulative, temp)
    if cumulative:
        # Full cumulative reduction: 1-D result, one slot per element.
        if out:
            if out.get_shape() != [obj.get_size()]:
                raise OperationError(
                    space.w_ValueError,
                    space.wrap("out of incompatible size"))
        else:
            out = W_NDimArray.from_shape(space, [obj.get_size()], dtype,
                                         w_instance=obj)
        loop.compute_reduce_cumulative(obj, out, dtype, self.func,
                                       self.identity)
        return out
    if out:
        # Scalar-result reduction: out must be 0-dimensional.
        if len(out.get_shape()) > 0:
            raise operationerrfmt(
                space.w_ValueError, "output parameter "
                "for reduction operation %s has too many"
                " dimensions", self.name)
        dtype = out.get_dtype()
    res = loop.compute_reduce(obj, dtype, self.func, self.done_func,
                              self.identity)
    if out:
        out.set_scalar_value(res)
        return out
    return res
def call(self, space, args_w):
    """Apply this binary ufunc to (lhs, rhs[, out]) from *args_w*.

    Older variant: rejects flexible dtypes outright, checks type
    restrictions before dtype promotion, and calls convert_to without
    the space argument.
    """
    if len(args_w) > 2:
        [w_lhs, w_rhs, w_out] = args_w
    else:
        [w_lhs, w_rhs] = args_w
        w_out = None
    w_lhs = convert_to_array(space, w_lhs)
    w_rhs = convert_to_array(space, w_rhs)
    w_ldtype = w_lhs.get_dtype()
    w_rdtype = w_rhs.get_dtype()
    # String/flexible dtype special-casing.
    if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \
            self.comparison_func:
        pass
    elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \
            self.comparison_func and w_out is None:
        # Mixed string / non-string comparison is always False.
        return space.wrap(False)
    elif (w_ldtype.is_flexible_type() or \
            w_rdtype.is_flexible_type()):
        raise OperationError(space.w_TypeError, space.wrap(
            'unsupported operand dtypes %s and %s for "%s"' % \
            (w_rdtype.get_name(), w_ldtype.get_name(),
             self.name)))
    # Scalar operands of a common kind adopt the array operand's dtype.
    if self.are_common_types(w_ldtype, w_rdtype):
        if not w_lhs.is_scalar() and w_rhs.is_scalar():
            w_rdtype = w_ldtype
        elif w_lhs.is_scalar() and not w_rhs.is_scalar():
            w_ldtype = w_rdtype
    # Enforce per-ufunc type restrictions before promotion.
    if (self.int_only and (not w_ldtype.is_int_type() or
                           not w_rdtype.is_int_type()) or
            not self.allow_bool and (w_ldtype.is_bool_type() or
                                     w_rdtype.is_bool_type()) or
            not self.allow_complex and (w_ldtype.is_complex_type() or
                                        w_rdtype.is_complex_type())):
        raise OperationError(space.w_TypeError,
                             space.wrap("Unsupported types"))
    calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype,
        promote_to_float=self.promote_to_float,
        promote_bools=self.promote_bools)
    if space.is_none(w_out):
        out = None
    elif not isinstance(w_out, W_NDimArray):
        raise OperationError(space.w_TypeError, space.wrap(
            'output must be an array'))
    else:
        out = w_out
        # An explicit out array dictates the computation dtype.
        calc_dtype = out.get_dtype()
    if self.comparison_func:
        res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
    else:
        res_dtype = calc_dtype
    if w_lhs.is_scalar() and w_rhs.is_scalar():
        # Scalar fast path: one call of the underlying function.
        arr = self.func(calc_dtype,
            w_lhs.get_scalar_value().convert_to(calc_dtype),
            w_rhs.get_scalar_value().convert_to(calc_dtype)
        )
        if isinstance(out, W_NDimArray):
            if out.is_scalar():
                out.set_scalar_value(arr)
            else:
                out.fill(arr)
        else:
            out = arr
        return out
    new_shape = shape_agreement(space,
                                w_lhs.get_shape(), w_rhs)
    new_shape = shape_agreement(space, new_shape, out,
                                broadcast_down=False)
    return loop.call2(space, new_shape, self.func, calc_dtype, res_dtype,
                      w_lhs, w_rhs, out)
def reduce(self, space, w_obj, promote_to_largest, w_axis, keepdims=False,
           out=None, dtype=None, cumulative=False):
    """Perform a (possibly cumulative) reduction of *w_obj*.

    Older variant: axis comes via unwrap_axis_arg, bools always promote,
    and the loop helpers are called without the space argument.
    """
    if self.argcount != 2:
        raise OperationError(space.w_ValueError, space.wrap("reduce only "
            "supported for binary functions"))
    assert isinstance(self, W_Ufunc2)
    obj = convert_to_array(space, w_obj)
    if obj.get_dtype().is_flexible_type():
        raise OperationError(
            space.w_TypeError,
            space.wrap('cannot perform reduce for flexible type'))
    obj_shape = obj.get_shape()
    if obj.is_scalar():
        # Reducing a scalar is the identity operation.
        return obj.get_scalar_value()
    shapelen = len(obj_shape)
    axis = unwrap_axis_arg(space, shapelen, w_axis)
    assert axis >= 0
    dtype = interp_dtype.decode_w_dtype(space, dtype)
    if dtype is None:
        # No explicit dtype: comparisons reduce to bool, otherwise promote.
        if self.comparison_func:
            dtype = interp_dtype.get_dtype_cache(space).w_booldtype
        else:
            dtype = find_unaryop_result_dtype(
                space, obj.get_dtype(),
                promote_to_float=self.promote_to_float,
                promote_to_largest=promote_to_largest,
                promote_bools=True
            )
    if self.identity is None:
        # Without an identity, reducing an empty extent is an error.
        for i in range(shapelen):
            if space.is_none(w_axis) or i == axis:
                if obj_shape[i] == 0:
                    raise operationerrfmt(
                        space.w_ValueError, "zero-size array to "
                        "%s.reduce without identity", self.name)
    if shapelen > 1 and axis < shapelen:
        # Axis reduction on a multi-dimensional array.
        temp = None
        if cumulative:
            shape = obj_shape[:]
            temp_shape = obj_shape[:axis] + obj_shape[axis + 1:]
            if out:
                dtype = out.get_dtype()
            temp = W_NDimArray.from_shape(space, temp_shape, dtype,
                                          w_instance=obj)
        elif keepdims:
            shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:]
        else:
            shape = obj_shape[:axis] + obj_shape[axis + 1:]
        if out:
            # Test for shape agreement
            # XXX maybe we need to do broadcasting here, although I must
            # say I don't understand the details for axis reduce
            if len(out.get_shape()) > len(shape):
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' has too many dimensions', self.name)
            elif len(out.get_shape()) < len(shape):
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter for reduction operation %s' +
                    ' does not have enough dimensions',
                    self.name)
            elif out.get_shape() != shape:
                raise operationerrfmt(
                    space.w_ValueError,
                    'output parameter shape mismatch, expecting [%s]' +
                    ' , got [%s]',
                    ",".join([str(x) for x in shape]),
                    ",".join([str(x) for x in out.get_shape()]),
                    )
            dtype = out.get_dtype()
        else:
            out = W_NDimArray.from_shape(space, shape, dtype,
                                         w_instance=obj)
        return loop.do_axis_reduce(shape, self.func, obj, dtype, axis,
                                   out, self.identity, cumulative, temp)
    if cumulative:
        # Full cumulative reduction: 1-D result, one slot per element.
        if out:
            if out.get_shape() != [obj.get_size()]:
                raise OperationError(space.w_ValueError, space.wrap(
                    "out of incompatible size"))
        else:
            out = W_NDimArray.from_shape(space, [obj.get_size()], dtype,
                                         w_instance=obj)
        loop.compute_reduce_cumulative(obj, out, dtype, self.func,
                                       self.identity)
        return out
    if out:
        # Scalar-result reduction: out must be 0-dimensional.
        if len(out.get_shape())>0:
            raise operationerrfmt(space.w_ValueError, "output parameter "
                "for reduction operation %s has too many"
                " dimensions",self.name)
        dtype = out.get_dtype()
    res = loop.compute_reduce(obj, dtype, self.func, self.done_func,
                              self.identity)
    if out:
        out.set_scalar_value(res)
        return out
    return res
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0,
             order=NPY.KEEPORDER, allow_backward=True):
    """Build an nditer over the operand(s) in w_seq.

    Mirrors numpy.nditer construction: decodes the app-level flags,
    per-operand flags, dtypes, op_axes and itershape, broadcasts all
    operands to a common shape, allocates missing output operands, copies
    or rejects operands whose dtype needs casting, and finally creates one
    inner iterator per operand.

    :param w_seq: a single array-like or a tuple/list of array-likes;
        None entries mark outputs to be allocated
    :param w_flags: global iterator flags, decoded by parse_func_flags
    :param w_op_flags: per-operand flags, decoded by parse_op_arg
    :param w_op_dtypes: optional requested dtypes, one per operand
    :param w_casting: casting rule name (defaults to 'safe' when None)
    :param w_op_axes: optional per-operand axis remapping
    :param w_itershape: optional explicit iteration shape
    :param order: NPY.{C,FORTRAN,ANY,KEEP}ORDER memory-order request
    :param allow_backward: stored on self; presumably permits reversed
        index iteration — TODO confirm against IndexIterator usage
    """
    # default all feature flags off; parse_func_flags may flip them
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ''
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    self.allow_backward = allow_backward
    if not space.is_w(w_casting, space.w_None):
        self.casting = space.str_w(w_casting)
    else:
        self.casting = 'safe'
    # convert w_seq operands to a list of W_NDimArray
    if space.isinstance_w(w_seq, space.w_tuple) or \
       space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        # None entries are kept as None: they are outputs to allocate later
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    if order == NPY.ANYORDER:
        # 'A' means "'F' order if all the arrays are Fortran contiguous,
        # 'C' order otherwise"
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_flags() & NPY.ARRAY_F_CONTIGUOUS):
                break
        else:
            order = NPY.FORTRANORDER
    elif order == NPY.KEEPORDER:
        # 'K' means "as close to the order the array elements appear in
        # memory as possible", so match self.order to seq.order
        order = NPY.CORDER
        for s in self.seq:
            if s and not (s.get_order() == NPY.FORTRANORDER):
                break
        else:
            order = NPY.FORTRANORDER
    self.order = order
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)
    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []
    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == 'w']
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(
            space, [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
       space.isinstance_w(w_itershape, space.w_list):
        # explicit itershape overrides broadcasting
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            # no explicit dtypes: derive the output dtype by folding
            # find_binop_result_dtype over the read-only input operands
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == 'w':
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(
                    space, self.shape, out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    try:
                        shape_agreement(space, self.shape, self.seq[i],
                                        False)
                    except OperationError as e:
                        raise oefmt(space.w_ValueError, "non-broadcastable"
                            " output operand with shape %s doesn't match "
                            "the broadcast shape %s",
                            str(self.seq[i].get_shape()),
                            str(self.shape))
    if self.tracked_index != "":
        # set up the c_index/f_index/multi_index tracking iterator
        order = self.order
        if order == NPY.KEEPORDER:
            order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            # iterate backwards when the tracked index disagrees with the
            # memory order
            backward = ((
                order == NPY.CORDER and self.tracked_index != 'C') or (
                order == NPY.FORTRANORDER and self.tracked_index != 'F'))
        self.index_iter = IndexIterator(self.shape, backward=backward)
    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            self_d = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not self_d:
                # no requested dtype for this operand: keep its own
                self.dtypes[i] = seq_d
            elif self_d != seq_d:
                impl = self.seq[i].implementation
                if self.buffered or 'r' in self.op_flags[i].tmp_copy:
                    # casting is only legal when the operand may be
                    # buffered or temp-copied, and the cast is allowed
                    # under the active casting rule
                    if not can_cast_array(
                            space, self.seq[i], self_d, self.casting):
                        raise oefmt(space.w_TypeError, "Iterator operand %d"
                            " dtype could not be cast from %s to %s"
                            " according to the rule '%s'",
                            i, space.str_w(seq_d.descr_repr(space)),
                            space.str_w(self_d.descr_repr(space)),
                            self.casting)
                    order = support.get_order_as_CF(impl.order, self.order)
                    new_impl = impl.astype(space, self_d, order).copy(space)
                    self.seq[i] = W_NDimArray(new_impl)
                else:
                    raise oefmt(space.w_TypeError, "Iterator "
                        "operand required copying or buffering, "
                        "but neither copying nor buffering was "
                        "enabled")
                if 'w' in self.op_flags[i].rw:
                    # writeback also needs the reverse cast to be legal
                    if not can_cast_type(
                            space, self_d, seq_d, self.casting):
                        raise oefmt(space.w_TypeError, "Iterator"
                            " requested dtype could not be cast from "
                            " %s to %s, the operand %d dtype, accord"
                            "ing to the rule '%s'",
                            space.str_w(self_d.descr_repr(space)),
                            space.str_w(seq_d.descr_repr(space)),
                            i, self.casting)
    elif self.buffered and not (self.external_loop and len(self.seq) < 2):
        # buffered mode: copy non-output operands in the requested order
        for i in range(len(self.seq)):
            if i not in outargs:
                self.seq[i] = self.seq[i].descr_copy(
                    space, w_order=space.wrap(self.order))
        self.dtypes = [s.get_dtype() for s in self.seq]
    else:
        #copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]
    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = self.get_iter(space, i)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    if self.external_loop:
        # combine adjacent compatible axes into one bigger inner loop
        coalesce_axes(self, space)
def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes,
             w_casting, w_op_axes, w_itershape, buffersize=0, order='K'):
    """Build an nditer over the operand(s) in w_seq (string-order variant).

    Decodes the app-level flags, per-operand flags, dtypes, op_axes and
    itershape, broadcasts all operands to a common shape, allocates missing
    output operands, copies operands whose dtype must be cast (rejecting
    them unless 'r' is in their tmp_copy flags), and finally creates one
    inner iterator per operand via the module-level get_iter helper.

    :param w_seq: a single array-like or a tuple/list of array-likes;
        None entries mark outputs to be allocated
    :param w_flags: global iterator flags, decoded by parse_func_flags
    :param w_op_flags: per-operand flags, decoded by parse_op_arg
    :param w_op_dtypes: optional requested dtypes, one per operand
    :param w_casting: casting rule argument (stored implicitly via flags;
        not read directly in this variant)
    :param w_op_axes: optional per-operand axis remapping
    :param w_itershape: optional explicit iteration shape
    :param order: memory-order request as a character ('K' = keep)
    """
    from pypy.module.micronumpy.ufuncs import find_binop_result_dtype
    self.order = order
    # default all feature flags off; parse_func_flags may flip them
    self.external_loop = False
    self.buffered = False
    self.tracked_index = ''
    self.common_dtype = False
    self.delay_bufalloc = False
    self.grow_inner = False
    self.ranged = False
    self.refs_ok = False
    self.reduce_ok = False
    self.zerosize_ok = False
    self.index_iter = None
    self.done = False
    self.first_next = True
    self.op_axes = []
    # convert w_seq operands to a list of W_NDimArray
    if space.isinstance_w(w_seq, space.w_tuple) or \
       space.isinstance_w(w_seq, space.w_list):
        w_seq_as_list = space.listview(w_seq)
        # None entries are kept as None: they are outputs to allocate later
        self.seq = [convert_to_array(space, w_elem)
                    if not space.is_none(w_elem) else None
                    for w_elem in w_seq_as_list]
    else:
        self.seq = [convert_to_array(space, w_seq)]
    parse_func_flags(space, self, w_flags)
    self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags,
                                 len(self.seq), parse_op_flag)
    # handle w_op_axes
    oa_ndim = -1
    if not space.is_none(w_op_axes):
        oa_ndim = self.set_op_axes(space, w_op_axes)
    self.ndim = calculate_ndim(self.seq, oa_ndim)
    # handle w_op_dtypes part 1: creating self.dtypes list from input
    if not space.is_none(w_op_dtypes):
        w_seq_as_list = space.listview(w_op_dtypes)
        self.dtypes = [decode_w_dtype(space, w_elem)
                       for w_elem in w_seq_as_list]
        if len(self.dtypes) != len(self.seq):
            raise oefmt(space.w_ValueError,
                "op_dtypes must be a tuple/list matching the number of ops")
    else:
        self.dtypes = []
    # handle None or writable operands, calculate my shape
    outargs = [i for i in range(len(self.seq))
               if self.seq[i] is None or self.op_flags[i].rw == 'w']
    if len(outargs) > 0:
        out_shape = shape_agreement_multiple(
            space, [self.seq[i] for i in outargs])
    else:
        out_shape = None
    if space.isinstance_w(w_itershape, space.w_tuple) or \
       space.isinstance_w(w_itershape, space.w_list):
        # explicit itershape overrides broadcasting
        self.shape = [space.int_w(i) for i in space.listview(w_itershape)]
    else:
        self.shape = shape_agreement_multiple(space, self.seq,
                                              shape=out_shape)
    if len(outargs) > 0:
        # Make None operands writeonly and flagged for allocation
        if len(self.dtypes) > 0:
            out_dtype = self.dtypes[outargs[0]]
        else:
            # no explicit dtypes: derive the output dtype by folding
            # find_binop_result_dtype over the read-only input operands
            out_dtype = None
            for i in range(len(self.seq)):
                if self.seq[i] is None:
                    self.op_flags[i].allocate = True
                    continue
                if self.op_flags[i].rw == 'w':
                    continue
                out_dtype = find_binop_result_dtype(
                    space, self.seq[i].get_dtype(), out_dtype)
        for i in outargs:
            if self.seq[i] is None:
                # XXX can we postpone allocation to later?
                self.seq[i] = W_NDimArray.from_shape(
                    space, self.shape, out_dtype)
            else:
                if not self.op_flags[i].broadcast:
                    # Raises if output cannot be broadcast
                    shape_agreement(space, self.shape, self.seq[i], False)
    if self.tracked_index != "":
        # set up the c_index/f_index/multi_index tracking iterator
        if self.order == "K":
            self.order = self.seq[0].implementation.order
        if self.tracked_index == "multi":
            backward = False
        else:
            # iterate backwards when the tracked index disagrees with the
            # memory order
            backward = self.order != self.tracked_index
        self.index_iter = IndexIterator(self.shape, backward=backward)
    # handle w_op_dtypes part 2: copy where needed if possible
    if len(self.dtypes) > 0:
        for i in range(len(self.seq)):
            selfd = self.dtypes[i]
            seq_d = self.seq[i].get_dtype()
            if not selfd:
                # no requested dtype for this operand: keep its own
                self.dtypes[i] = seq_d
            elif selfd != seq_d:
                # casting is only allowed when the operand permits a
                # temporary copy on read ('r' in tmp_copy)
                if not 'r' in self.op_flags[i].tmp_copy:
                    raise oefmt(space.w_TypeError,
                        "Iterator operand required copying or "
                        "buffering for operand %d", i)
                impl = self.seq[i].implementation
                new_impl = impl.astype(space, selfd)
                self.seq[i] = W_NDimArray(new_impl)
    else:
        #copy them from seq
        self.dtypes = [s.get_dtype() for s in self.seq]
    # create an iterator for each operand
    self.iters = []
    for i in range(len(self.seq)):
        it = get_iter(space, self.order, self.seq[i], self.shape,
                      self.dtypes[i], self.op_flags[i], self)
        it.contiguous = False
        self.iters.append((it, it.reset()))
    if self.external_loop:
        # combine adjacent compatible axes into one bigger inner loop
        coalesce_axes(self, space)
def descr_ravel(self, space):
    """Wrap self in a length-1 tuple and convert that to an ndarray."""
    from pypy.module.micronumpy.base import convert_to_array
    w_wrapped = space.newtuple([self])
    w_result = convert_to_array(space, w_wrapped)
    return w_result