Example #1
def perform(self, node, inp, out_, params):
    (x,) = inp
    (out,) = out_
    if out[0] is None:
        out[0] = _asarray(np.shape(x)[self.i], dtype="int64")
    else:
        out[0][...] = np.shape(x)[self.i]
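
As a side note, the storage-reuse pattern above can be sketched with plain NumPy (np.asarray standing in for _asarray, a bare list cell standing in for the Op's output storage):

import numpy as np

def shape_i(x, i, out_cell):
    # Allocate a 0-d int64 array on the first call; afterwards overwrite
    # the cached array in place instead of reallocating.
    if out_cell[0] is None:
        out_cell[0] = np.asarray(np.shape(x)[i], dtype="int64")
    else:
        out_cell[0][...] = np.shape(x)[i]

storage = [None]
shape_i(np.zeros((4, 5)), 1, storage)
first = storage[0]                               # array(5)
shape_i(np.zeros((7, 2)), 1, storage)
assert storage[0] is first and storage[0] == 2   # same buffer, new value
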
Example #2
    def perform(self, node, inputs, outputs):
        rng_var_out, smpl_out = outputs

        rng, size, dtype, *args = inputs

        out_var = node.outputs[1]

        # If `size == []`, no size is enforced and NumPy is trusted to draw
        # the appropriate number of samples; NumPy uses `size=None` to
        # represent that.  Otherwise, NumPy expects a tuple.
        if np.size(size) == 0:
            size = None
        else:
            size = tuple(size)

        # Draw from `rng` if `self.inplace` is `True`, and from a copy of `rng`
        # otherwise.
        if not self.inplace:
            rng = copy(rng)

        rng_var_out[0] = rng

        smpl_val = self.rng_fn(rng, *(args + [size]))

        if (not isinstance(smpl_val, np.ndarray)
                or str(smpl_val.dtype) != out_var.type.dtype):
            smpl_val = _asarray(smpl_val, dtype=out_var.type.dtype)

        smpl_out[0] = smpl_val
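
The size normalization above can be checked directly against NumPy; a minimal sketch, with np.random.default_rng standing in for the Op's rng input:

import numpy as np

rng = np.random.default_rng(42)
size = np.asarray([])        # an empty size vector means "no size enforced"
size = None if np.size(size) == 0 else tuple(int(s) for s in size)
print(rng.normal(0.0, 1.0, size=size))     # size=None -> one scalar draw
print(rng.normal(0.0, 1.0, size=(2, 3)))   # an explicit tuple -> a (2, 3) array
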
Example #3
def perform(self, node, inp, out):
    multi_index, dims = inp[:-1], inp[-1]
    res = np.ravel_multi_index(
        multi_index, dims, mode=self.mode, order=self.order
    )
    out[0][0] = _asarray(res, node.outputs[0].dtype)
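
For reference, np.ravel_multi_index maps coordinate tuples to flat indices; a small standalone check of the call being wrapped above:

import numpy as np

# Coordinates (0, 2) and (1, 0) in a 2x3 array sit at flat positions
# 0*3 + 2 = 2 and 1*3 + 0 = 3 in C ("row-major") order.
flat = np.ravel_multi_index(([0, 1], [2, 0]), dims=(2, 3), mode="raise", order="C")
print(flat)   # [2 3]
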
Example #4
    def test_use_numpy_strict_false(self):

        # here the value is perfect, and we're not strict about it,
        # so creation should work
        u = SharedVariable(
            name="u",
            type=TensorType(shape=[False], dtype="float64"),
            value=np.asarray([1.0, 2.0]),
            strict=False,
        )

        # check that assignments to value are cast properly
        u.set_value([3, 4])
        assert type(u.get_value()) is np.ndarray
        assert str(u.get_value(borrow=True).dtype) == "float64"
        assert np.all(u.get_value() == [3, 4])

        # check that assignments of nonsense fail
        try:
            u.set_value("adsf")
            assert 0
        except ValueError:
            pass

        # check that an assignment of a perfect value results in no copying
        uval = _asarray([5, 6, 7, 8], dtype="float64")
        u.set_value(uval, borrow=True)
        assert u.get_value(borrow=True) is uval
Example #5
    def test_0(self, op_fn, type_fn):
        x = type_fn()
        f = function([x], op_fn(x))

        xval = _asarray(np.random.random(10) * 10, dtype=type_fn.dtype)
        yval = f(xval)
        assert (str(yval.dtype) ==
                op_fn.scalar_op.output_types_preference.spec[0].dtype)
Example #6
def test_XOR_inplace():
    dtypes = [
        "int8",
        "int16",
        "int32",
        "int64",
    ]

    for dtype in dtypes:
        x, y = vector(dtype=dtype), vector(dtype=dtype)
        l = _asarray([0, 0, 1, 1], dtype=dtype)
        r = _asarray([0, 1, 0, 1], dtype=dtype)
        ix = x
        ix = xor_inplace(ix, y)
        gn = inplace_func([x, y], ix)
        _ = gn(l, r)
        # check that the XOR result was written in place into `l`
        assert np.all(l == np.asarray([0, 1, 1, 0])), l
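
The expected values are just the XOR truth table, which plain NumPy reproduces:

import numpy as np

l = np.asarray([0, 0, 1, 1], dtype="int8")
r = np.asarray([0, 1, 0, 1], dtype="int8")
print(np.bitwise_xor(l, r))   # [0 1 1 0]
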
Example #7
def _numpy_true_div(x, y):
    # Performs true division and casts the result to the type we expect.
    #
    # We define this function so we can use it in TrueDivTester.expected,
    # because simply calling np.true_divide could cause a dtype mismatch.
    out = np.true_divide(x, y)
    # Use floatX as the result of int / int
    if x.dtype in discrete_dtypes and y.dtype in discrete_dtypes:
        out = _asarray(out, dtype=config.floatX)
    return out
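
The dtype mismatch the function guards against is easy to reproduce: NumPy always promotes int/int true division to float64, regardless of floatX:

import numpy as np

x = np.asarray([1, 2, 3], dtype="int64")
y = np.asarray([2, 2, 2], dtype="int64")
print(np.true_divide(x, y).dtype)   # float64, even when floatX == "float32"
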
Example #8
def perform(self, node, inputs, output_storage):
    a = inputs[0]
    axis = inputs[1]
    if axis is not None:
        if axis != int(axis):
            raise ValueError("sort axis must be an integer or None")
        axis = int(axis)
    z = output_storage[0]
    z[0] = _asarray(np.argsort(a, axis, self.kind, self.order),
                    dtype=node.outputs[0].dtype)
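
A quick standalone look at the np.argsort call being wrapped (axis=None sorts the flattened array, following NumPy's convention):

import numpy as np

a = np.asarray([[3, 1], [2, 0]])
print(np.argsort(a, axis=1))      # per-row order: [[1 0] [1 0]]
print(np.argsort(a, axis=None))   # flattened order: [3 1 2 0]
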
Example #9
def perform(self, node, inp, out):
    indices, dims = inp
    res = np.unravel_index(indices, dims, order=self.order)
    assert len(res) == len(out)
    for i in range(len(out)):
        ret = _asarray(res[i], node.outputs[0].dtype)
        if ret.base is not None:
            # NumPy will return a view when it can.
            # But we don't want that.
            ret = ret.copy()
        out[i][0] = ret
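
The view-vs-copy defensive pattern can be exercised with plain NumPy (np.asarray standing in for _asarray):

import numpy as np

res = np.unravel_index([5, 7], (2, 4))   # -> (array([1, 1]), array([1, 3]))
for r in res:
    ret = np.asarray(r, dtype="int64")
    if ret.base is not None:
        # asarray may hand back a view into someone else's buffer;
        # copy it so later in-place writes cannot alias.
        ret = ret.copy()
    assert ret.base is None
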
Example #10
    def test_0(self):
        for op_fn in [
                _convert_to_int32, _convert_to_float32, _convert_to_float64
        ]:
            for type_fn in bvector, ivector, fvector, dvector:
                x = type_fn()
                f = function([x], op_fn(x))

                xval = _asarray(np.random.rand(10) * 10, dtype=type_fn.dtype)
                yval = f(xval)
                assert (str(yval.dtype) ==
                        op_fn.scalar_op.output_types_preference.spec[0].dtype)
Example #11
    def test_gemv_dimensions(self, dtype="float32"):
        alpha = aesara.shared(_asarray(1.0, dtype=dtype), name="alpha")
        beta = aesara.shared(_asarray(1.0, dtype=dtype), name="beta")

        z = beta * self.y + alpha * aet.dot(self.A, self.x)
        f = aesara.function([self.A, self.x, self.y], z, mode=self.mode)

        # Matrix value
        A_val = np.ones((5, 3), dtype=dtype)
        # Vectors of different lengths
        ones_3 = np.ones(3, dtype=dtype)
        ones_4 = np.ones(4, dtype=dtype)
        ones_5 = np.ones(5, dtype=dtype)
        ones_6 = np.ones(6, dtype=dtype)

        f(A_val, ones_3, ones_5)
        f(A_val[::-1, ::-1], ones_3, ones_5)
        with pytest.raises(ValueError):
            f(A_val, ones_4, ones_5)
        with pytest.raises(ValueError):
            f(A_val, ones_3, ones_6)
        with pytest.raises(ValueError):
            f(A_val, ones_4, ones_6)
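
The shape errors being tested mirror what plain NumPy raises for a mismatched matrix-vector product; a minimal sketch:

import numpy as np

A = np.ones((5, 3))
# beta * y + alpha * (A @ x) needs x of length 3 and y of length 5.
print((np.ones(5) + A @ np.ones(3)).shape)   # (5,)
try:
    A @ np.ones(4)                           # wrong inner dimension
except ValueError as e:
    print("mismatch rejected:", e)
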
Example #12
    def test_givens(self):
        x = shared(0)
        assign = pfunc([], x, givens={x: 3})
        assert assign() == 3
        assert x.get_value(borrow=True) == 0

        y = ivector()
        f = pfunc([y], (y * x), givens={x: 6})
        assert np.all(f([1, 1, 1]) == [6, 6, 6])
        assert x.get_value() == 0

        z = ivector()
        c = z * y
        f = pfunc([y], (c + 7), givens={z: _asarray([4, 4, 4], dtype="int32")})
        assert np.all(f([1, 1, 1]) == [11, 11, 11])
        assert x.get_value() == 0
Example #13
def scalar_constructor(value,
                       name=None,
                       strict=False,
                       allow_downcast=None,
                       borrow=False,
                       target="cpu"):
    """
    SharedVariable constructor for scalar values. Default: int64 or float64.

    Notes
    -----
    We implement this using 0-d tensors for now.

    We ignore the borrow parameter, since we convert ``value`` to an
    ndarray (a new object). This still respects the semantics of
    borrow, which is only a hint to Aesara that the value may be reused.

    """
    if target != "cpu":
        raise TypeError("not for cpu")

    if not isinstance(value, (np.number, float, int, complex)):
        raise TypeError()
    try:
        dtype = value.dtype
    except AttributeError:
        dtype = np.asarray(value).dtype

    dtype = str(dtype)
    value = _asarray(value, dtype=dtype)
    tensor_type = TensorType(dtype=str(value.dtype), broadcastable=[])

    try:
        # Do not pass the dtype to asarray because we want this to fail if
        # strict is True and the types do not match.
        rval = ScalarSharedVariable(
            type=tensor_type,
            value=np.array(value, copy=True),
            name=name,
            strict=strict,
            allow_downcast=allow_downcast,
        )
        return rval
    except Exception:
        traceback.print_exc()
        raise
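
The dtype-inference step of the constructor can be isolated as a small sketch (infer_scalar_dtype is a hypothetical helper, not part of the library):

import numpy as np

def infer_scalar_dtype(value):
    # Prefer value.dtype when the value carries one (NumPy scalars);
    # otherwise let NumPy infer a default (float -> float64, ...).
    try:
        return str(value.dtype)
    except AttributeError:
        return str(np.asarray(value).dtype)

print(infer_scalar_dtype(1.0))             # float64
print(infer_scalar_dtype(np.float32(1)))   # float32
print(infer_scalar_dtype(3))               # int64 on most platforms
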
Example #14
def just_vals(v):
    return Reshape(2)(v, _asarray([2, 3], dtype="int32"))
Example #15
    def filter(self, data, strict=False, allow_downcast=None):
        """
        Convert `data` to something which can be associated to a
        `TensorVariable`.

        This function is not meant to be called in user code. It is for
        `Linker` instances to use when running a compiled graph.

        """
        # Explicit error message when one accidentally uses a Variable as
        # input (typical mistake, especially with shared variables).
        if isinstance(data, Variable):
            raise TypeError(
                "Expected an array-like object, but found a Variable: "
                "maybe you are trying to call a function on a (possibly "
                "shared) variable instead of a numeric array?")

        if isinstance(data, np.memmap) and (data.dtype == self.numpy_dtype):
            # numpy.memmap is a "safe" subclass of ndarray,
            # so we can use it wherever we expect a base ndarray.
            # However, casting it would defeat the purpose of not
            # loading the whole data into memory.
            pass
        elif isinstance(data, np.ndarray) and (data.dtype == self.numpy_dtype):
            if data.dtype.num != self.numpy_dtype.num:
                data = _asarray(data, dtype=self.dtype)
            # -- now fall through to ndim check
        elif strict:
            # If any of the two conditions above was not met,
            # we raise a meaningful TypeError.
            if not isinstance(data, np.ndarray):
                raise TypeError(
                    f"{self} expected an ndarray object (got {type(data)}).")
            if data.dtype != self.numpy_dtype:
                raise TypeError(
                    f"{self} expected an ndarray with dtype={self.numpy_dtype} (got {data.dtype})."
                )
        else:
            if allow_downcast:
                # Convert to self.dtype, regardless of the type of data
                data = _asarray(data, dtype=self.dtype)
                # TODO: consider padding the shape with ones to make it
                # consistent with self.broadcastable (e.g. vector -> row)
            else:
                if isinstance(data, np.ndarray):
                    # Check if self.dtype can accurately represent data
                    # (do not try to convert the data)
                    up_dtype = aes.upcast(self.dtype, data.dtype)
                    if up_dtype == self.dtype:
                        # Bug in the following line when data is a
                        # scalar array, see
                        # http://projects.scipy.org/numpy/ticket/1611
                        # data = data.astype(self.dtype)
                        data = _asarray(data, dtype=self.dtype)
                    else:
                        err_msg = (
                            f"{self} cannot store a value of dtype {data.dtype} without "
                            "risking loss of precision. If you do not mind "
                            "this loss, you can: "
                            f"1) explicitly cast your data to {self.dtype}, or "
                            '2) set "allow_input_downcast=True" when calling '
                            f'"function". Value: "{repr(data)}"')
                        raise TypeError(err_msg)
                elif (allow_downcast is None
                      and isinstance(data, (float, np.floating))
                      and self.dtype == config.floatX):
                    # Special case where we allow downcasting of Python float
                    # literals to floatX, even when floatX=='float32'
                    data = _asarray(data, self.dtype)
                else:
                    # data has to be converted.
                    # Check that this conversion is lossless
                    converted_data = _asarray(data, self.dtype)
                    # We use the `values_eq` static function from TensorType
                    # to handle NaN values.
                    if TensorType.values_eq(np.asarray(data),
                                            converted_data,
                                            force_same_dtype=False):
                        data = converted_data
                    else:
                        # Do not print too long a description of data
                        # (ndarray reprs are truncated automatically, but
                        # that is not guaranteed for arbitrary objects)
                        str_data = str(data)
                        if len(str_data) > 80:
                            str_data = str_data[:75] + "(...)"

                        err_msg = (
                            f"{self} cannot store accurately value {data}, "
                            f"it would be represented as {converted_data}. "
                            "If you do not mind this precision loss, you can: "
                            "1) explicitly convert your data to a numpy array "
                            f"of dtype {self.dtype}, or "
                            '2) set "allow_input_downcast=True" when calling '
                            '"function".')
                        raise TypeError(err_msg)

        if self.ndim != data.ndim:
            raise TypeError(
                f"Wrong number of dimensions: expected {self.ndim},"
                f" got {data.ndim} with shape {data.shape}.")
        if not data.flags.aligned:
            try:
                msg = "object buffer" + str(data.data)
            except AttributeError:
                msg = ""
            raise TypeError(
                "The numpy.ndarray object is not aligned."
                " Aesara C code does not support that.",
                msg,
                "object shape",
                data.shape,
                "object strides",
                data.strides,
                "object dtype",
                data.dtype,
            )

        for i, b in enumerate(self.broadcastable):
            if b and data.shape[i] != 1:
                raise TypeError(
                    "Non-unit value on shape on a broadcastable"
                    " dimension.",
                    data.shape,
                    self.broadcastable,
                )
        if self.filter_checks_isfinite and not np.all(np.isfinite(data)):
            raise ValueError("non-finite elements not allowed")
        return data
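
The upcast test at the heart of the non-strict branch has a close NumPy analogue in np.promote_types (standing in here for aes.upcast): storing is considered lossless only when promoting self.dtype with the data's dtype gives self.dtype back unchanged:

import numpy as np

print(np.promote_types("float64", "float32"))   # float64: float32 data fits losslessly
print(np.promote_types("float32", "float64"))   # float64 != float32: would lose precision
print(np.promote_types("int32", "int16"))       # int32: int16 data fits losslessly
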
Example #16
def perform(self, node, inp, out_):
    (x,) = inp
    (out,) = out_
    out[0] = _asarray(np.shape(x), dtype="int64")
Example #17
def as_ar(a):
    return _asarray(a, dtype="int32")
Example #18
    def filter_inplace(self,
                       data,
                       old_data,
                       strict=False,
                       allow_downcast=None):
        if isinstance(data,
                      gpuarray.GpuArray) and data.typecode == self.typecode:
            # Nothing to do: `data` is already a GpuArray with the right
            # typecode; this branch only keeps it out of the ones below.
            pass
        elif strict:
            if not isinstance(data, gpuarray.GpuArray):
                raise TypeError(f"{self} expected a GpuArray object.", data,
                                type(data))
            if self.typecode != data.typecode:
                raise TypeError(
                    f"{self} expected typecode {int(self.typecode)} (dtype {self.dtype}), "
                    f"got {int(data.typecode)} (dtype {data.dtype}).")
            if self.context != data.context:
                raise TypeError("data context does not match type context")
            # fallthrough to ndim check
        elif allow_downcast or (allow_downcast is None and isinstance(
                data, float) and self.dtype == config.floatX):
            if not isinstance(data, gpuarray.GpuArray):
                data = np.array(data,
                                dtype=self.dtype,
                                copy=False,
                                ndmin=len(self.broadcastable))
            else:
                data = gpuarray.array(
                    data,
                    dtype=self.typecode,
                    copy=False,
                    ndmin=len(self.broadcastable),
                    context=self.context,
                )
        else:
            if not hasattr(data, "dtype"):
                converted_data = _asarray(data, self.dtype)
                # We use the `values_eq` static function from TensorType
                # to handle NaN values.
                if TensorType.values_eq(np.asarray(data),
                                        converted_data,
                                        force_same_dtype=False):
                    data = converted_data

            up_dtype = aes.upcast(self.dtype, data.dtype)
            if up_dtype == self.dtype:
                if not isinstance(data, gpuarray.GpuArray):
                    data = np.array(data, dtype=self.dtype, copy=False)
                else:
                    data = gpuarray.array(data, dtype=self.dtype, copy=False)
            else:
                raise TypeError(
                    f"{self} cannot store a value of dtype {data.dtype} "
                    "without risking loss of precision.")

        if self.ndim != data.ndim:
            raise TypeError(
                f"Wrong number of dimensions: expected {self.ndim}, "
                f"got {data.ndim} with shape {data.shape}.",
                data,
            )
        shp = data.shape
        for i, b in enumerate(self.broadcastable):
            if b and shp[i] != 1:
                raise TypeError(
                    "Non-unit value on shape on a broadcastable"
                    " dimension.",
                    shp,
                    self.broadcastable,
                )
        if not isinstance(data, gpuarray.GpuArray):
            if (old_data is not None and old_data.shape == data.shape and (
                    # write() only works if the destination is contiguous.
                    old_data.flags["C_CONTIGUOUS"]
                    or old_data.flags["F_CONTIGUOUS"])):
                old_data.write(data)
                data = old_data
            else:
                data = pygpu.array(data, context=self.context)
        return data
Example #19
    def test_basics(self):
        a = dvector()
        b = dmatrix()
        d = dmatrix()

        # basic reshape to 1 dim (without a list)
        c = reshape(b, as_tensor_variable(6), ndim=1)
        f = self.function([b], c)

        b_val1 = np.asarray([[0, 1, 2], [3, 4, 5]])
        c_val1 = np.asarray([0, 1, 2, 3, 4, 5])
        b_val2 = b_val1.T
        c_val2 = np.asarray([0, 3, 1, 4, 2, 5])

        f_out1 = f(b_val1)
        f_out2 = f(b_val2)
        assert np.array_equal(f_out1, c_val1), (f_out1, c_val1)
        assert np.array_equal(f_out2, c_val2), (f_out2, c_val2)

        # basic reshape to 1 dim (with a list)
        c = reshape(b, (as_tensor_variable(6),), ndim=1)
        f = self.function([b], c)
        assert np.array_equal(
            f(np.asarray([[0, 1, 2], [3, 4, 5]])), np.asarray([0, 1, 2, 3, 4, 5])
        )

        # basic reshape to a shape object of the same ndim
        c = reshape(b, d.shape)
        f = self.function([b, d], c)
        assert np.array_equal(
            f(np.asarray([[0, 1, 2], [3, 4, 5]]), [[0, 1], [2, 3], [4, 5]]),
            np.asarray([[0, 1], [2, 3], [4, 5]]),
        )

        # basic reshape to 2 dims
        c = reshape(a, [2, 3])
        f = self.function([a], c)
        assert np.array_equal(
            f(np.asarray([0, 1, 2, 3, 4, 5])), np.asarray([[0, 1, 2], [3, 4, 5]])
        )

        # test that it works without inplace operations
        a_val = np.asarray([0, 1, 2, 3, 4, 5])
        a_val_copy = np.asarray([0, 1, 2, 3, 4, 5])
        b_val = np.asarray([[0, 1, 2], [3, 4, 5]])

        f_sub = self.function([a, b], c - b)
        assert np.array_equal(f_sub(a_val, b_val), np.zeros_like(b_val))
        assert np.array_equal(a_val, a_val_copy)

        # test that it works with inplace operations
        a_val = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
        a_val_copy = _asarray([0, 1, 2, 3, 4, 5], dtype="float64")
        b_val = _asarray([[0, 1, 2], [3, 4, 5]], dtype="float64")

        f_sub = self.function([a, b], c - b)
        assert np.array_equal(f_sub(a_val, b_val), np.zeros_like(b_val))
        assert np.array_equal(a_val, a_val_copy)

        # verify gradient
        def just_vals(v):
            return Reshape(2)(v, _asarray([2, 3], dtype="int32"))

        utt.verify_grad(just_vals, [a_val], mode=self.mode)

        # test infer_shape
        self._compile_and_check([a], [c], (a_val,), self.op)

        # test broadcast flag for constant value of 1
        c = reshape(b, (b.shape[0], b.shape[1], 1))
        # That reshape may get replaced with a dimshuffle, which is ignored,
        # so we pass "ignore_empty=True"
        f = self.function([b], c, ignore_empty=True)
        assert np.array_equal(
            f(np.asarray([[0, 1, 2], [3, 4, 5]])),
            np.asarray([[[0], [1], [2]], [[3], [4], [5]]]),
        )
        assert f.maker.fgraph.toposort()[-1].outputs[0].type.broadcastable == (
            False,
            False,
            True,
        )

        # test broadcast flag for constant value of 1 if it cannot be
        # replaced with dimshuffle
        c = reshape(b, (b.shape[1], b.shape[0], 1))
        f = self.function([b], c, ignore_empty=True)
        assert np.array_equal(
            f(np.asarray([[0, 1, 2], [3, 4, 5]])),
            np.asarray([[[0], [1]], [[2], [3]], [[4], [5]]]),
        )
        assert f.maker.fgraph.toposort()[-1].outputs[0].type.broadcastable == (
            False,
            False,
            True,
        )
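
The broadcast behavior the last assertions check has a direct NumPy counterpart: a trailing length-1 axis broadcasts against any size:

import numpy as np

b = np.asarray([[0, 1, 2], [3, 4, 5]])
c = b.reshape(b.shape[0], b.shape[1], 1)   # trailing length-1 axis
print(c.shape)                 # (2, 3, 1)
print((c + np.ones(4)).shape)  # the size-1 axis broadcasts: (2, 3, 4)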