Example #1
 def test_validate(self):
     try:
         MyOp(Generic()(), MyType(1)())  # MyOp requires MyType instances
         raise Exception("Expected an exception")
     except Exception as e:
         if str(e) != "Error 1":
             raise
Example #2
 def test_invalid_modes(self):
     # Modes 'r+', 'r', and 'w+' cannot work with Aesara, because
     # the output array may be modified inplace, and that should not
     # modify the original file.
     path = Variable(Generic())
     for mmap_mode in ("r+", "r", "w+", "toto"):
         with pytest.raises(ValueError):
             load(path, "int32", (False,), mmap_mode)
Example #3
 def test_basic(self):
     path = Variable(Generic())
     # Not specifying mmap_mode defaults to None, and the data is
     # copied into main memory
     x = load(path, "int32", (False,))
     y = x * 2
     fn = function([path], y)
     assert (fn(self.filename) == (self.data * 2)).all()
Example #4
 def make_node(self):
     return Apply(
         self,
         [],
         [
             Variable(Generic()),
             tensor(self.dtype, shape=self.broadcastable),
         ],
     )
Example #5
 def test1(self):
     path = Variable(Generic())
     # 'c' means "copy-on-write", which allows the array to be overwritten
     # by an inplace Op in the graph, without modifying the underlying
     # file.
     x = load(path, "int32", (False,), "c")
     # x ** 2 has been chosen because it can be computed inplace.
     y = (x ** 2).sum()
     fn = function([path], y)
     # Call fn() twice, to check that inplace ops do not cause trouble
     assert (fn(self.filename) == (self.data ** 2).sum()).all()
     assert (fn(self.filename) == (self.data ** 2).sum()).all()
Example #6
 def test_hash_and_eq_params_type(self):
     w1 = ParamsType(
         a1=TensorType("int64", (False, False)),
         a2=TensorType("int64", (False, True, False, False, True)),
         a3=Generic(),
     )
     w2 = ParamsType(
         a1=TensorType("int64", (False, False)),
         a2=TensorType("int64", (False, True, False, False, True)),
         a3=Generic(),
     )
     assert w1 == w2
     assert not (w1 != w2)
     assert hash(w1) == hash(w2)
     assert w1.name == w2.name
     # Changing attributes names only.
     w2 = ParamsType(
         a1=TensorType("int64", (False, False)),
         other_name=TensorType(
             "int64",
             (False, True, False, False, True)),  # a2 -> other_name
         a3=Generic(),
     )
     assert w1 != w2
     # Changing attributes types only.
     w2 = ParamsType(
         a1=TensorType("int64", (False, False)),
         a2=Generic(),  # changing class
         a3=Generic(),
     )
     assert w1 != w2
     # Changing attributes types characteristics only.
     w2 = ParamsType(
         a1=TensorType("int64", (False, True)),  # changing broadcasting
         a2=TensorType("int64", (False, True, False, False, True)),
         a3=Generic(),
     )
     assert w1 != w2
Example #7
 def make_node(self, path):
     if isinstance(path, str):
         path = Constant(Generic(), path)
     return Apply(self, [path],
                  [tensor(self.dtype, shape=self.broadcastable)])
Example #8
 def make_node(self, request, data):
     return Apply(self, [request, data], [Variable(Generic())])
Example #9
 def make_node(self, data):
     return Apply(self, [data], [Variable(Generic()), data.type()])
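
A make_node like the one above only builds the graph node. As a rough, self-contained sketch (assumed, not taken from the original source), a complete Op with a Generic() output could pair such a make_node with a perform() that fills the generic slot with an arbitrary Python object:

# Hypothetical illustration op, assuming the input variable is backed by a NumPy array.
from aesara.graph.basic import Apply, Variable
from aesara.graph.op import Op
from aesara.graph.type import Generic


class DataAndShape(Op):
    __props__ = ()

    def make_node(self, data):
        # First output carries an arbitrary Python object; the second mirrors the input type.
        return Apply(self, [data], [Variable(Generic()), data.type()])

    def perform(self, node, inputs, output_storage):
        (data,) = inputs
        output_storage[0][0] = data.shape   # any Python object fits a Generic output
        output_storage[1][0] = data.copy()  # a value of the same type as the input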
Example #10
 def test_memmap(self):
     path = Variable(Generic())
     x = load(path, "int32", (False,), mmap_mode="c")
     fn = function([path], x)
     assert type(fn(self.filename)) == np.core.memmap
Example #11
class SearchsortedOp(COp):
    """Wrapper of numpy.searchsorted.

    For full documentation, see :func:`searchsorted`.

    See Also
    --------
    searchsorted : numpy-like function to use the SearchsortedOp

    """

    params_type = Generic()
    __props__ = ("side", )
    check_input = False

    def __init__(self, side="left"):
        if side == "left" or side == "right":
            self.side = side
        else:
            raise ValueError(
                f"'{side}' is an invalid value for keyword 'side'")

    def get_params(self, node):
        return self.side

    def make_node(self, x, v, sorter=None):
        x = aet.as_tensor(x, ndim=1)
        v = aet.as_tensor(v)
        out_type = v.type.clone(dtype="int64")
        if sorter is None:
            return Apply(self, [x, v], [out_type()])
        else:
            sorter = aet.as_tensor(sorter, ndim=1)
            if PYTHON_INT_BITWIDTH == 32 and sorter.dtype == "int64":
                raise TypeError(
                    "numpy.searchsorted with Python 32bit do not support a"
                    " sorter of int64.")
            if sorter.type not in int_vector_types:
                raise TypeError("sorter must be an integer vector",
                                sorter.type)
            return Apply(self, [x, v, sorter], [out_type()])

    def infer_shape(self, fgraph, node, shapes):
        return [shapes[1]]

    def perform(self, node, inputs, output_storage, params):
        x = inputs[0]
        v = inputs[1]
        if len(node.inputs) == 3:
            sorter = inputs[2]
        else:
            sorter = None
        z = output_storage[0]

        z[0] = np.searchsorted(x, v, side=params,
                               sorter=sorter).astype(node.outputs[0].dtype)

    def c_support_code_struct(self, node, name):
        return f"""
            int right_{name};
        """

    def c_init_code_struct(self, node, name, sub):
        side = sub["params"]
        fail = sub["fail"]
        return ("""
            PyObject* tmp_%(name)s = PyUnicode_FromString("right");
            if (tmp_%(name)s == NULL)
                %(fail)s;
            right_%(name)s = PyUnicode_Compare(%(side)s, tmp_%(name)s);
            Py_DECREF(tmp_%(name)s);
        """ % locals())

    def c_code(self, node, name, inames, onames, sub):
        sorter = None
        if len(node.inputs) == 3:
            x, v, sorter = inames
        else:
            x, v = inames
        if not sorter:
            sorter = "NULL"
        (z, ) = onames
        fail = sub["fail"]

        return ("""
            Py_XDECREF(%(z)s);
            %(z)s = (PyArrayObject*) PyArray_SearchSorted(%(x)s, (PyObject*) %(v)s,
                                                          right_%(name)s ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, (PyObject*) %(sorter)s);
            if (!%(z)s)
                %(fail)s;
            if (PyArray_TYPE(%(z)s) != NPY_INT64){
                PyObject * tmp = PyArray_Cast(%(z)s, NPY_INT64);
                Py_XDECREF(%(z)s);
                %(z)s = (PyArrayObject*) tmp;
            }
        """ % locals())

    def c_code_cache_version(self):
        return (2, )

    def grad(self, inputs, output_gradients):
        num_ins = len(inputs)
        if num_ins == 3:
            x, v, sorter = inputs
        else:
            x, v = inputs

        x_grad = _float_zeros_like(x)
        v_grad = _float_zeros_like(v)
        if num_ins == 3:
            return [x_grad, v_grad, disconnected_type()]
        else:
            return [x_grad, v_grad]
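
As a usage sketch (assumed, not part of the snippet above; in particular, the wrapper's location in aesara.tensor.extra_ops is an assumption), the searchsorted helper built on this Op should agree with np.searchsorted:

import numpy as np

import aesara
import aesara.tensor as at
from aesara.tensor.extra_ops import searchsorted  # assumed module path

x = at.dvector("x")  # sorted 1-d input
v = at.dvector("v")  # values to insert
fn = aesara.function([x, v], searchsorted(x, v, side="right"))

a = np.array([1.0, 2.0, 3.0, 5.0])
b = np.array([0.5, 2.0, 4.0])
assert (fn(a, b) == np.searchsorted(a, b, side="right")).all()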
Example #12
    def test_params_type_filtering(self):
        shape_tensor5 = (1, 2, 2, 3, 2)
        size_tensor5 = (shape_tensor5[0] * shape_tensor5[1] *
                        shape_tensor5[2] * shape_tensor5[3] * shape_tensor5[4])
        random_tensor = np.random.normal(
            size=size_tensor5).reshape(shape_tensor5)

        w = ParamsType(
            a1=TensorType("int32", (False, False)),
            a2=TensorType("float64", (False, False, False, False, False)),
            a3=Generic(),
        )

        # With a value that does not match the params type.
        o = Params(
            w,
            a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                12]]).astype("int64"),
            a2=random_tensor.astype("float32"),
            a3=2000,
        )
        # should fail (o.a1 is not int32, o.a2 is not float64)
        with pytest.raises(TypeError):
            w.filter(o, True)
        # should fail (o.a1 is not int32, o.a2 is not float64, and downcast is disallowed)
        with pytest.raises(TypeError):
            w.filter(o, False, False)
        # Should pass.
        w.filter(o, strict=False, allow_downcast=True)

        # With a value that matches the params type.
        o1 = Params(
            w,
            a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                12]]).astype("int32"),
            a2=random_tensor.astype("float64"),
            a3=2000,
        )
        # All should pass.
        w.filter(o1, strict=True)
        w.filter(o1, strict=False, allow_downcast=False)
        w.filter(o1, strict=False, allow_downcast=True)

        # Check values_eq and values_eq_approx.
        o2 = Params(
            w,
            a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                12]]).astype("int32"),
            a2=random_tensor.astype("float64"),
            a3=2000,
        )
        assert w.values_eq(o1, o2)
        assert w.values_eq_approx(o1, o2)

        # Check values_eq_approx.
        # NB: It is not clear exactly which differences values_eq rejects but
        # values_eq_approx accepts, so we just perturb the float values slightly.
        o3 = Params(
            w,
            a1=np.asarray([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11,
                                                12]]).astype("int32"),
            a2=(random_tensor.astype("float32") * 10 / 2.2 * 2.19999999999 /
                10).astype("float64"),
            a3=2000.0 - 0.00000000000000001,
        )
        assert w.values_eq_approx(o1, o3)
Example #13
 def test_hash_and_eq_params(self):
     wp1 = ParamsType(
         a=Generic(),
         array=TensorType("int64", (False, )),
         floatting=Scalar("float64"),
         npy_scalar=TensorType("float64", tuple()),
     )
     wp2 = ParamsType(
         a=Generic(),
         array=TensorType("int64", (False, )),
         floatting=Scalar("float64"),
         npy_scalar=TensorType("float64", tuple()),
     )
     w1 = Params(
         wp1,
         a=1,
         array=np.asarray([1, 2, 4, 5, 7]),
         floatting=-4.5,
         npy_scalar=np.asarray(12),
     )
     w2 = Params(
         wp2,
         a=1,
         array=np.asarray([1, 2, 4, 5, 7]),
         floatting=-4.5,
         npy_scalar=np.asarray(12),
     )
     assert w1 == w2
     assert not (w1 != w2)
     assert hash(w1) == hash(w2)
     # Changing attributes names only (a -> other_name).
     wp2_other = ParamsType(
         other_name=Generic(),
         array=TensorType("int64", (False, )),
         floatting=Scalar("float64"),
         npy_scalar=TensorType("float64", tuple()),
     )
     w2 = Params(
         wp2_other,
         other_name=1,
         array=np.asarray([1, 2, 4, 5, 7]),
         floatting=-4.5,
         npy_scalar=np.asarray(12),
     )
     assert w1 != w2
     # Changing attributes values only (now a=2).
     w2 = Params(
         wp2,
         a=2,
         array=np.asarray([1, 2, 4, 5, 7]),
         floatting=-4.5,
         npy_scalar=np.asarray(12),
     )
     assert w1 != w2
     # Changing NumPy array values (5 -> -5).
     w2 = Params(
         wp2,
         a=1,
         array=np.asarray([1, 2, 4, -5, 7]),
         floatting=-4.5,
         npy_scalar=np.asarray(12),
     )
     assert w1 != w2
Example #14
import numpy as np
import pytest

import aesara
from aesara import tensor as at
from aesara.graph.basic import Apply
from aesara.graph.op import COp, ExternalCOp
from aesara.graph.params_type import Params, ParamsType
from aesara.graph.type import EnumList, Generic
from aesara.scalar import Scalar
from aesara.tensor.type import TensorType, matrix
from tests import unittest_tools as utt

tensor_type_0d = TensorType("float64", tuple())
scalar_type = Scalar("float64")
generic_type = Generic()


# A test op to compute `y = a*x^2 + b*x + c` for any tensor x, with a, b, c as op params.
class QuadraticOpFunc(COp):
    __props__ = ("a", "b", "c")
    params_type = ParamsType(a=tensor_type_0d, b=scalar_type, c=generic_type)

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def make_node(self, x):
        x = at.as_tensor_variable(x)
        return Apply(self, [x], [x.type()])
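
The class is shown here without its computation methods (being a COp, the real op presumably implements them in C). As a hedged pure-Python sketch, assuming the ParamsType wiring above exposes the op properties as attributes of the params object passed to perform, the coefficients could be read like this:

# Hypothetical pure-Python variant, for illustration only.
class QuadraticOpPython(QuadraticOpFunc):
    def perform(self, node, inputs, output_storage, coefficients):
        (x,) = inputs
        (out,) = output_storage
        # `coefficients` is the Params object built from params_type,
        # exposing the op properties as .a, .b, and .c.
        out[0] = coefficients.a * (x ** 2) + coefficients.b * x + coefficients.c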
Example #15
class GpuMaxAndArgmax(COp):
    """
    GPU version of MaxAndArgmax

    """

    params_type = Generic()
    __props__ = ("axis", )
    argmax_dtype = "int64"

    def __init__(self, axis):
        assert isinstance(axis, (list, tuple))
        self.axis = tuple(axis)

    def get_params(self, node):
        return self.axis

    def make_node(self, X):
        context_name = infer_context_name(X)
        # We keep the original broadcastable flags for dimensions on which
        # we do not perform the max / argmax.
        all_axes = set(self.axis)
        broadcastable = [
            b for i, b in enumerate(X.type.broadcastable) if i not in all_axes
        ]
        inputs = [as_gpuarray_variable(X, context_name)]
        outputs = [
            GpuArrayType(X.type.dtype,
                         broadcastable,
                         context_name=context_name)(),
            GpuArrayType(self.argmax_dtype,
                         broadcastable,
                         context_name=context_name)(),
        ]
        return Apply(self, inputs, outputs)

    def c_headers(self, **kwargs):
        return ["<numpy_compat.h>", "<gpuarray_helper.h>"]

    def c_header_dirs(self, **kwargs):
        return [pygpu.get_include(), gpuarray_helper_inc_dir()]

    def c_code(self, node, name, input_names, output_names, sub):
        # Recall: X = input_names[0]
        # Recall: axes = sub['params']
        # Recall: max, argmax = output_names
        # Recall: fail = sub['fail']
        max_typecode = pygpu.gpuarray.dtype_to_typecode(node.inputs[0].dtype)
        argmax_typecode = pygpu.gpuarray.dtype_to_typecode(self.argmax_dtype)
        ret = """
        #if PY_MAJOR_VERSION >= 3
            #ifndef PyInt_AS_LONG
                #define PyInt_AS_LONG PyLong_AS_LONG
            #endif
        #endif

        int err = 0;

        unsigned  %(name)s_redux_len = PyTuple_GET_SIZE(%(axes)s);
        unsigned* %(name)s_axes_to_reduce = (unsigned*)malloc(%(name)s_redux_len * sizeof(unsigned));
        for (unsigned i = 0; i < %(name)s_redux_len; ++i) {
            PyObject* axis_object = PyTuple_GET_ITEM(%(axes)s, i);
            %(name)s_axes_to_reduce[i] = (unsigned) PyInt_AS_LONG(axis_object);
        }

        size_t  %(name)s_input_ndim = PyGpuArray_NDIM(%(X)s);
        size_t  %(name)s_output_ndim = %(name)s_input_ndim - %(name)s_redux_len;
        size_t* %(name)s_output_dims = (size_t*)malloc(%(name)s_output_ndim * sizeof(size_t));
        if (%(name)s_redux_len == 1) {
            for (unsigned i = 0; i < %(name)s_axes_to_reduce[0]; ++i) {
                %(name)s_output_dims[i] = PyGpuArray_DIM(%(X)s, i);
            }
            for (unsigned i = %(name)s_axes_to_reduce[0] + 1; i < %(name)s_input_ndim; ++i) {
                %(name)s_output_dims[i-1] = PyGpuArray_DIM(%(X)s, i);
            }
        } else {
            int64_t current_input_pos = -1;
            int64_t current_output_pos = -1;
            for (unsigned i = 0; i < %(name)s_redux_len; ++i) {
                for (++current_input_pos; current_input_pos < %(name)s_axes_to_reduce[i]; ++current_input_pos) {
                    %(name)s_output_dims[++current_output_pos] = PyGpuArray_DIM(%(X)s, current_input_pos);
                }
            }
            for (++current_input_pos; current_input_pos < %(name)s_input_ndim; ++current_input_pos) {
                %(name)s_output_dims[++current_output_pos] = PyGpuArray_DIM(%(X)s, current_input_pos);
            }
        }

        if (aesara_prep_output(&%(max)s, %(name)s_output_ndim, %(name)s_output_dims, %(max_typecode)s, GA_C_ORDER, %(X)s->context)) {
            PyErr_SetString(PyExc_RuntimeError, "GpuMaxAndArgmax: unable to prepare max output.");
            %(fail)s
        }
        if (aesara_prep_output(&%(argmax)s, %(name)s_output_ndim, %(name)s_output_dims, %(argmax_typecode)s, GA_C_ORDER, %(X)s->context)) {
            PyErr_SetString(PyExc_RuntimeError, "GpuMaxAndArgmax: unable to prepare argmax output.");
            %(fail)s
        }

        if (%(name)s_input_ndim == 0) {
            /* GpuArray_maxandargmax can't handle a 0-d array
             * because it expects that 1 <= redux_len <= input_ndim.
             * Since input_ndim == 0, the condition 1 <= redux_len <= 0 can never hold.
             * To handle this case we copy the input to max and set argmax to 0.
             */
            if (GA_NO_ERROR != GpuArray_setarray(&%(max)s->ga, &%(X)s->ga)) {
                PyErr_SetString(PyExc_RuntimeError, "GpuMaxAndArgmax: unable to copy input to max when input is a scalar.");
                %(fail)s
            }
            if (GA_NO_ERROR != GpuArray_memset(&%(argmax)s->ga, 0)) {
                PyErr_SetString(PyExc_RuntimeError, "GpuMaxAndArgmax: unable to set argmax to 0 when input is a scalar.");
                %(fail)s
            }
        } else if (GA_NO_ERROR != (err =
            GpuArray_maxandargmax(&%(max)s->ga, &%(argmax)s->ga, &%(X)s->ga, %(name)s_redux_len, %(name)s_axes_to_reduce)
        )) {
            PyErr_Format(PyExc_RuntimeError,
                "GpuMaxAndArgmax: unable to compute gpuarray maxandargmax: error %%d: %%s (%%s).",
                err, gpuarray_error_str(err), GpuArray_error(&%(X)s->ga, err));
            %(fail)s
        }
        """
        return ret % {
            "X": input_names[0],
            "axes": sub["params"],
            "max": output_names[0],
            "argmax": output_names[1],
            "max_typecode": max_typecode,
            "argmax_typecode": argmax_typecode,
            "name": name,
            "fail": sub["fail"],
        }

    def c_code_cleanup(self, node, name, inputs, outputs, sub):
        return """
        free(%(name)s_output_dims);
        free(%(name)s_axes_to_reduce);
        """ % {
            "name": name,
        }

    def c_code_cache_version(self):
        return (2, )
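
For reference, a small NumPy sketch (assumed, not taken from the snippet above) of the values this Op is expected to produce when reducing over a single axis:

import numpy as np

X = np.random.random((3, 4)).astype("float32")
axis = 1

max_out = X.max(axis=axis)                        # keeps X's dtype; reduced dim removed
argmax_out = X.argmax(axis=axis).astype("int64")  # matches argmax_dtype = "int64"

assert max_out.shape == argmax_out.shape == (3,)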