def test_use_numpy_strict_false(self):
    # here the value is perfect, and we're not strict about it,
    # so creation should work
    u = SharedVariable(
        name="u",
        type=Tensor(broadcastable=[False], dtype="float64"),
        value=np.asarray([1.0, 2.0]),
        strict=False,
    )

    # check that assignments to value are cast properly
    u.set_value([3, 4])
    assert type(u.get_value()) is np.ndarray
    assert str(u.get_value(borrow=True).dtype) == "float64"
    assert np.all(u.get_value() == [3, 4])

    # check that assignments of nonsense fail
    try:
        u.set_value("adsf")
        assert 0
    except ValueError:
        pass

    # check that an assignment of a perfect value results in no copying
    uval = aesara._asarray([5, 6, 7, 8], dtype="float64")
    u.set_value(uval, borrow=True)
    assert u.get_value(borrow=True) is uval
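# A minimal sketch (plain NumPy, independent of the shared-variable machinery
# above) of why the borrow=True identity assertion can hold: np.asarray returns
# its argument unchanged when the dtype already matches, so no copy is made.
import numpy as np

uval = np.asarray([5.0, 6.0, 7.0, 8.0], dtype="float64")
assert np.asarray(uval, dtype="float64") is uval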
def perform(self, node, inp, out):
    multi_index, dims = inp[:-1], inp[-1]
    res = np.ravel_multi_index(multi_index, dims, mode=self.mode, order=self.order)
    out[0][0] = aesara._asarray(res, node.outputs[0].dtype)
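# A minimal standalone sketch (plain NumPy, values are illustrative) of the
# call this perform() delegates to: np.ravel_multi_index maps coordinate
# tuples to flat indices under the given dims, mode, and order.
import numpy as np

# In C order, (0, 2) -> 0 * 5 + 2 = 2 and (1, 3) -> 1 * 5 + 3 = 8.
flat = np.ravel_multi_index(([0, 1], [2, 3]), dims=(4, 5), mode="raise", order="C")
assert flat.tolist() == [2, 8]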
def perform(self, node, inp, out_, params):
    (x,) = inp
    (out,) = out_
    if out[0] is None:
        out[0] = aesara._asarray(x.shape[self.i], dtype="int64")
    else:
        out[0][...] = x.shape[self.i]
def perform(self, node, inputs, out_):
    rout, out = out_
    # Use self.exec_fn to draw shape worth of random numbers.
    # Numbers are drawn from r if self.inplace is True, and from a
    # copy of r if self.inplace is False.
    r, shape, args = inputs[0], inputs[1], inputs[2:]
    assert type(r) == np.random.RandomState, (type(r), r)

    # If shape == [], that means no shape is enforced, and numpy is
    # trusted to draw the appropriate number of samples; numpy uses
    # shape "None" to represent that. Else, numpy expects a tuple.
    # TODO: compute the appropriate shape, and pass it to numpy.
    if len(shape) == 0:
        shape = None
    else:
        shape = tuple(shape)

    if shape is not None and self.outtype.ndim != len(shape) + self.ndim_added:
        raise ValueError(
            "Shape mismatch: self.outtype.ndim (%i) !="
            " len(shape) (%i) + self.ndim_added (%i)"
            % (self.outtype.ndim, len(shape), self.ndim_added)
        )
    if not self.inplace:
        r = copy(r)
    rout[0] = r
    rval = self.exec_fn(r, *(args + [shape]))
    if not isinstance(rval, np.ndarray) or str(rval.dtype) != node.outputs[1].type.dtype:
        rval = aesara._asarray(rval, dtype=node.outputs[1].type.dtype)

    # When shape is None, numpy has a tendency to unexpectedly
    # return a scalar instead of a higher-dimension array containing
    # only one element. This value should be reshaped.
    if shape is None and rval.ndim == 0 and self.outtype.ndim > 0:
        rval = rval.reshape([1] * self.outtype.ndim)

    if len(rval.shape) != self.outtype.ndim:
        raise ValueError(
            'Shape mismatch: "out" should have dimension %i,'
            ' but the value produced by "perform" has'
            " dimension %i" % (self.outtype.ndim, len(rval.shape))
        )

    # Check the output has the right shape
    if shape is not None:
        if self.ndim_added == 0 and shape != rval.shape:
            raise ValueError(
                'Shape mismatch: "out" should have shape %s, but the'
                ' value produced by "perform" has shape %s'
                % (shape, rval.shape)
            )
        elif self.ndim_added > 0 and shape != rval.shape[:-self.ndim_added]:
            raise ValueError(
                'Shape mismatch: "out" should have shape starting with'
                " %s (plus %i extra dimensions), but the value produced"
                ' by "perform" has shape %s'
                % (shape, self.ndim_added, rval.shape)
            )
    out[0] = rval
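# A standalone illustration (plain NumPy, values are illustrative) of the
# 0-d corner case the reshape branch above guards against: with size=None,
# NumPy draws a scalar rather than an array.
import numpy as np

rng = np.random.RandomState(42)
sample = np.asarray(rng.uniform(0.0, 1.0, size=None))
assert sample.ndim == 0
# Reshaping it to the declared output rank, as perform() does for ndim == 2:
assert sample.reshape([1, 1]).shape == (1, 1)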
def test_XOR_inplace():
    dtypes = [
        "int8",
        "int16",
        "int32",
        "int64",
    ]

    for dtype in dtypes:
        x, y = vector(dtype=dtype), vector(dtype=dtype)
        l = _asarray([0, 0, 1, 1], dtype=dtype)
        r = _asarray([0, 1, 0, 1], dtype=dtype)
        ix = x
        ix = xor_inplace(ix, y)
        gn = inplace_func([x, y], ix)
        _ = gn(l, r)
        # test the in-place stuff: l was overwritten with the XOR result
        assert np.all(l == np.asarray([0, 1, 1, 0])), l
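# A quick standalone check (plain NumPy) of the XOR truth table the
# assertion above relies on: equal bits give 0, differing bits give 1.
import numpy as np

l = np.asarray([0, 0, 1, 1], dtype="int8")
r = np.asarray([0, 1, 0, 1], dtype="int8")
assert np.bitwise_xor(l, r).tolist() == [0, 1, 1, 0]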
def _numpy_true_div(x, y):
    # Performs true division and casts the result to the dtype we expect.
    #
    # We define this function so we can use it in TrueDivTester.expected,
    # because simply calling np.true_divide could cause a dtype mismatch.
    out = np.true_divide(x, y)
    # Use floatX as the result of int / int
    if x.dtype in tensor.discrete_dtypes and y.dtype in tensor.discrete_dtypes:
        out = aesara._asarray(out, dtype=config.floatX)
    return out
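# A standalone sketch (plain NumPy) of the dtype mismatch being worked
# around: NumPy promotes int / int to float64, which need not equal floatX.
import numpy as np

x = np.asarray([1, 2], dtype="int32")
y = np.asarray([2, 2], dtype="int32")
assert np.true_divide(x, y).dtype == np.float64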
def perform(self, node, inp, out):
    indices, dims = inp
    res = np.unravel_index(indices, dims, order=self.order)
    assert len(res) == len(out)
    for i in range(len(out)):
        ret = aesara._asarray(res[i], node.outputs[0].dtype)
        if ret.base is not None:
            # NumPy will return a view when it can.
            # But we don't want that.
            ret = ret.copy()
        out[i][0] = ret
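# A minimal standalone sketch (plain NumPy, values are illustrative):
# np.unravel_index returns one index array per dimension, inverting
# ravel_multi_index; here 2 -> (0, 2) and 8 -> (1, 3) for dims (4, 5).
import numpy as np

res = np.unravel_index([2, 8], (4, 5), order="C")
assert [r.tolist() for r in res] == [[0, 1], [2, 3]]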
def perform(self, node, inputs, output_storage):
    a = inputs[0]
    axis = inputs[1]
    if axis is not None:
        if axis != int(axis):
            raise ValueError("sort axis must be an integer or None")
        axis = int(axis)
    z = output_storage[0]
    z[0] = aesara._asarray(
        np.argsort(a, axis, self.kind, self.order),
        dtype=node.outputs[0].dtype,
    )
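# A standalone sketch (plain NumPy, values are illustrative) of the call
# above: np.argsort returns the indices that would sort the input.
import numpy as np

a = np.asarray([3, 1, 2])
assert np.argsort(a, axis=-1, kind="stable").tolist() == [1, 2, 0]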
def test_0(self):
    for op_fn in [_convert_to_int32, _convert_to_float32, _convert_to_float64]:
        for type_fn in bvector, ivector, fvector, dvector:
            x = type_fn()
            f = function([x], op_fn(x))

            xval = aesara._asarray(np.random.rand(10) * 10, dtype=type_fn.dtype)
            yval = f(xval)
            assert (
                str(yval.dtype)
                == op_fn.scalar_op.output_types_preference.spec[0].dtype
            )
def test_gemv_dimensions(self, dtype="float32"):
    alpha = aesara.shared(aesara._asarray(1.0, dtype=dtype), name="alpha")
    beta = aesara.shared(aesara._asarray(1.0, dtype=dtype), name="beta")

    z = beta * self.y + alpha * tensor.dot(self.A, self.x)
    f = aesara.function([self.A, self.x, self.y], z, mode=self.mode)

    # Matrix value
    A_val = np.ones((5, 3), dtype=dtype)
    # Different vector lengths
    ones_3 = np.ones(3, dtype=dtype)
    ones_4 = np.ones(4, dtype=dtype)
    ones_5 = np.ones(5, dtype=dtype)
    ones_6 = np.ones(6, dtype=dtype)

    f(A_val, ones_3, ones_5)
    f(A_val[::-1, ::-1], ones_3, ones_5)
    with pytest.raises(ValueError):
        f(A_val, ones_4, ones_5)
    with pytest.raises(ValueError):
        f(A_val, ones_3, ones_6)
    with pytest.raises(ValueError):
        f(A_val, ones_4, ones_6)
def test_givens(self):
    x = shared(0)
    assign = pfunc([], x, givens={x: 3})
    assert assign() == 3
    assert x.get_value(borrow=True) == 0

    y = tensor.ivector()
    f = pfunc([y], (y * x), givens={x: 6})
    assert np.all(f([1, 1, 1]) == [6, 6, 6])
    assert x.get_value() == 0

    z = tensor.ivector()
    c = z * y
    f = pfunc([y], (c + 7), givens={z: aesara._asarray([4, 4, 4], dtype="int32")})
    assert np.all(f([1, 1, 1]) == [11, 11, 11])
    assert x.get_value() == 0
def as_ar(a):
    return aesara._asarray(a, dtype="int32")
def filter_inplace(self, data, old_data, strict=False, allow_downcast=None):
    if isinstance(data, gpuarray.GpuArray) and data.typecode == self.typecode:
        # This is just to make this condition not enter the
        # following branches
        pass
    elif strict:
        if not isinstance(data, gpuarray.GpuArray):
            raise TypeError("%s expected a GpuArray object." % self, data, type(data))
        if self.typecode != data.typecode:
            raise TypeError(
                "%s expected typecode %d (dtype %s), "
                "got %d (dtype %s)."
                % (self, self.typecode, self.dtype, data.typecode, str(data.dtype))
            )
        if self.context != data.context:
            raise TypeError("data context does not match type context")
        # fallthrough to ndim check
    elif allow_downcast or (
        allow_downcast is None and type(data) == float and self.dtype == config.floatX
    ):
        if not isinstance(data, gpuarray.GpuArray):
            data = np.array(
                data, dtype=self.dtype, copy=False, ndmin=len(self.broadcastable)
            )
        else:
            data = gpuarray.array(
                data,
                dtype=self.typecode,
                copy=False,
                ndmin=len(self.broadcastable),
                context=self.context,
            )
    else:
        if not hasattr(data, "dtype"):
            converted_data = aesara._asarray(data, self.dtype)
            # We use the `values_eq` static function from TensorType
            # to handle NaN values.
            if TensorType.values_eq(
                np.asarray(data), converted_data, force_same_dtype=False
            ):
                data = converted_data

        up_dtype = scalar.upcast(self.dtype, data.dtype)
        if up_dtype == self.dtype:
            if not isinstance(data, gpuarray.GpuArray):
                data = np.array(data, dtype=self.dtype, copy=False)
            else:
                data = gpuarray.array(data, dtype=self.dtype, copy=False)
        else:
            raise TypeError(
                "%s cannot store a value of dtype %s "
                "without risking loss of precision." % (self, data.dtype)
            )

    if self.ndim != data.ndim:
        raise TypeError(
            "Wrong number of dimensions: expected %s, "
            "got %s with shape %s." % (self.ndim, data.ndim, data.shape),
            data,
        )

    shp = data.shape
    for i, b in enumerate(self.broadcastable):
        if b and shp[i] != 1:
            raise TypeError(
                "Non-unit value on shape on a broadcastable dimension.",
                shp,
                self.broadcastable,
            )

    if not isinstance(data, gpuarray.GpuArray):
        if old_data is not None and old_data.shape == data.shape and (
            # write() only works if the destination is contiguous.
            old_data.flags["C_CONTIGUOUS"]
            or old_data.flags["F_CONTIGUOUS"]
        ):
            old_data.write(data)
            data = old_data
        else:
            data = pygpu.array(data, context=self.context)

    return data
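# A standalone sketch (plain NumPy; np.promote_types is assumed to mirror the
# scalar.upcast rule used above) of the precision check: a float32 container
# can hold float32 data losslessly, but float64 data would be downcast, which
# is the case the TypeError guards against.
import numpy as np

assert np.promote_types("float32", "float32") == np.dtype("float32")
assert np.promote_types("float32", "float64") == np.dtype("float64")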
def perform(self, node, inp, out_):
    (x,) = inp
    (out,) = out_
    out[0] = aesara._asarray(x.shape, dtype="int64")