Example #1
def test_clone_new_inputs():
    """Make sure that `Apply.clone_with_new_inputs` properly handles `Type` changes."""

    x = at.tensor(np.float64, shape=(None,))
    y = at.tensor(np.float64, shape=(1,))

    z = at.add(x, y)
    assert z.type.shape == (None,)

    x_new = at.tensor(np.float64, shape=(1,))

    # The output nodes should be reconstructed, because the input types' static
    # shape information increased in specificity
    z_node_new = z.owner.clone_with_new_inputs([x_new, y])

    assert z_node_new.outputs[0].type.shape == (1,)
    assert z_node_new.inputs[0].type.shape == (1,)
    assert z_node_new.inputs[1].type.shape == (1,)

    # Now, attempt to decrease the specificity of the first input's static
    # shape information, but, because we're using strict conversion, we
    # shouldn't lose any information
    z = at.add(x_new, y)
    assert z.type.shape == (1,)

    z_node_new = z.owner.clone_with_new_inputs([x, y], strict=True)

    assert z_node_new.outputs[0].type.shape == (1,)
    assert z_node_new.inputs[0].type.shape == (1,)
    assert z_node_new.inputs[1].type.shape == (1,)
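The comments in this test hinge on how static shape information maps to broadcastability. Below is a minimal sketch of that mapping, assuming the same Aesara version as the test above (where `at.tensor` accepts a `shape` keyword); the variable names are illustrative only.

import aesara.tensor as at

# A static shape of 1 marks a dimension as broadcastable; None leaves it unspecified.
v_unknown = at.tensor("float64", shape=(None,))  # length not known statically
v_one = at.tensor("float64", shape=(1,))         # statically known to be length 1

assert v_unknown.type.shape == (None,)
assert v_one.type.shape == (1,)
assert v_unknown.broadcastable == (False,)
assert v_one.broadcastable == (True,)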
Example #2
 def manual_setup_method(self, dtype="float64"):
     # This test can run even when aesara.config.blas.ldflags is empty.
     self.dtype = dtype
     self.mode = aesara.compile.get_default_mode().including("fast_run")
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
Example #3
 def setup_method(self):
     self.mode = aesara.compile.get_default_mode()
     self.mode = self.mode.including("fast_run")
     self.mode = self.mode.excluding("c_blas")  # c_blas trumps scipy Ops
     dtype = self.dtype = "float64"  # optimization isn't dtype-dependent
     self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
     self.a = tensor.tensor(dtype=dtype, broadcastable=())
     self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
     self.Aval = np.ones((2, 3), dtype=dtype)
     self.xval = np.asarray([1, 2], dtype=dtype)
     self.yval = np.asarray([1.5, 2.7, 3.9], dtype=dtype)
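Most of the remaining examples use the older `broadcastable=` keyword rather than the `shape=` keyword seen in Example #1. Below is a minimal sketch of how the two spellings correspond, assuming an Aesara version that still accepts the legacy keyword; the names `A_legacy` and `A_new` are illustrative only.

import aesara.tensor as tensor

# broadcastable=(False, False) means "two dimensions, neither known to be length 1",
# which is the same as an unknown static shape in both dimensions: shape=(None, None).
A_legacy = tensor.tensor(dtype="float64", broadcastable=(False, False))
A_new = tensor.tensor(dtype="float64", shape=(None, None))
assert A_legacy.type.broadcastable == A_new.type.broadcastable == (False, False)

# broadcastable=() declares a 0-d (scalar) tensor, as used for `self.a` above.
a = tensor.tensor(dtype="float64", broadcastable=())
assert a.ndim == 0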
Example #4
 def test_numpy_2d(self):
     for shp0 in [(2, 3)]:
         x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
         a = np.asarray(self.rng.rand(*shp0)).astype(config.floatX)
         for shp1 in [(6, 7)]:
             if len(shp0) + len(shp1) == 2:
                 continue
             y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
             f = function([x, y], kron(x, y))
             b = self.rng.rand(*shp1).astype(config.floatX)
             out = f(a, b)
             assert np.allclose(out, np.kron(a, b))
Example #5
    def setup_method(self):
        self.mode = mode_with_gpu
        dtype = self.dtype = "float32"  # optimization isn't dtype-dependent
        self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tensor.tensor(dtype=dtype, broadcastable=())
        self.x = tensor.tensor(dtype=dtype, broadcastable=(False, ))
        self.y = tensor.tensor(dtype=dtype, broadcastable=(False, ))
        self.ger_destructive = gpuger_inplace

        # data on the gpu make the op always inplace
        self.ger = gpuger_inplace
        self.gemm = gpugemm_inplace
        super().setup_method()
Example #6
def test_advinc_subtensor1_vector_scalar():
    # Test the case where x is a vector and y a scalar
    shp = (3, )
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
        ("int8", "int8"),
        ("int16", "int16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.asarray(10, dtype=dtype2)
        x = shared(xval, name="x")
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False, ) * len(yval.shape),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)

        assert (sum([
            isinstance(
                node.op,
                (GpuAdvancedIncSubtensor1_dev20, GpuAdvancedIncSubtensor1),
            ) for node in f.maker.fgraph.toposort()
        ]) == 1)
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert np.allclose(rval, rep)
Example #7
def test_advinc_subtensor1_dtype():
    # Test the mixed dtype case
    shp = (3, 4)
    for dtype1, dtype2 in [
        ("float32", "int8"),
        ("float32", "float64"),
        ("uint64", "int8"),
        ("int64", "uint8"),
        ("float16", "int8"),
        ("float16", "float64"),
        ("float16", "float16"),
    ]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.empty((2, ) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False, ) * len(yval.shape),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        assert (sum([
            isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)
            for node in f.maker.fgraph.toposort()
        ]) == 1)
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [[0, 2]], yval)
        assert np.allclose(rval, rep)
Example #8
def test_incsub_f16():
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = np.arange(np.prod(shp), dtype="float16").reshape(shp) + 1
    yval = np.empty((2, ) + shp[1:], dtype="float16")
    yval[:] = 2
    x = shared(xval, name="x")
    y = tensor.tensor(dtype="float16",
                      broadcastable=(False, ) * len(shp),
                      name="y")
    expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    f = aesara.function([y], expr, mode=mode_with_gpu)
    assert (sum([
        isinstance(node.op, GpuAdvancedIncSubtensor1)
        for node in f.maker.fgraph.toposort()
    ]) == 1)
    rval = f(yval)
    rep = xval.copy()
    np.add.at(rep, [[0, 2]], yval)
    assert np.allclose(rval, rep)

    expr = tensor.inc_subtensor(x[1:], y)
    f = aesara.function([y], expr, mode=mode_with_gpu)
    assert (sum([
        isinstance(node.op, GpuIncSubtensor)
        for node in f.maker.fgraph.toposort()
    ]) == 1)
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert np.allclose(rval, rep)
Example #9
def test_local_dimshuffle_subtensor():

    dimshuffle_subtensor = out2in(local_dimshuffle_subtensor)

    x = tensor.dtensor4("x")
    x = tensor.patternbroadcast(x, (False, True, False, False))
    i = tensor.iscalar("i")

    out = x[:, :, 10:30, ::i].dimshuffle(0, 2, 3)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that dimshuffle removes dimensions the subtensor doesn't "see".
    x = tensor.tensor(broadcastable=(False, True, False), dtype="float64")
    out = x[i].dimshuffle(1)

    g = FunctionGraph([x, i], [out])
    dimshuffle_subtensor(g)

    topo = g.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])

    # Test that dimshuffle removes dimensions the subtensor doesn't "see",
    # even when they are interleaved with kept dimensions.
    x = tensor.tensor(broadcastable=(False, True, False, True),
                      dtype="float64")
    out = x[i].dimshuffle(1)

    f = aesara.function([x, i], out)

    topo = f.maker.fgraph.toposort()
    assert any([not isinstance(x, DimShuffle) for x in topo])
    assert f(np.random.rand(5, 1, 4, 1), 2).shape == (4, )

    # Test a corner case that used to trigger a bug in Aesara.
    x = tensor.dtensor4("x")
    x = tensor.patternbroadcast(x, (False, True, False, False))

    assert x[:, :, 0:3, ::-1].dimshuffle(0, 2, 3).eval({
        x: np.ones((5, 1, 6, 7))
    }).shape == (5, 3, 7)
Example #10
 def make_node(self):
     return gof.Apply(
         self,
         [],
         [
             aesara.Variable(Generic()),
             tensor(self.dtype, broadcastable=self.broadcastable),
         ],
     )
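This `make_node` pattern, also used in Examples #14, #17 and #18 below, builds an `Apply` whose outputs are fresh `tensor(...)` variables. Here is a self-contained sketch of the same pattern for a hypothetical Op, assuming the current Aesara graph API (`aesara.graph.op.Op`, `aesara.graph.basic.Apply`); `DoubleOp` is illustrative and not part of Aesara.

import numpy as np

import aesara
import aesara.tensor as at
from aesara.graph.basic import Apply
from aesara.graph.op import Op


class DoubleOp(Op):
    """Hypothetical Op that doubles its input, shown only to illustrate make_node."""

    __props__ = ()

    def make_node(self, x):
        x = at.as_tensor_variable(x)
        # The output is a fresh tensor variable with the input's dtype and an
        # unknown static shape, mirroring the make_node examples in this listing.
        out = at.tensor(dtype=x.dtype, shape=(None,) * x.ndim)
        return Apply(self, [x], [out])

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = np.asarray(2 * x)


x = at.dvector("x")
f = aesara.function([x], DoubleOp()(x))
assert np.allclose(f([1.0, 2.0]), [2.0, 4.0])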
Example #11
def test_debugprint_compiled_fn():

    M = at.tensor(np.float64, shape=(20000, 2, 2))
    one = at.as_tensor(1, dtype=np.int64)
    zero = at.as_tensor(0, dtype=np.int64)

    def no_shared_fn(n, x_tm1, M):
        p = M[n, x_tm1]
        return at.switch(at.lt(zero, p[0]), one, zero)

    out, updates = aesara.scan(
        no_shared_fn,
        outputs_info=[{
            "initial": zero,
            "taps": [-1]
        }],
        sequences=[at.arange(M.shape[0])],
        non_sequences=[M],
        allow_gc=False,
        mode="FAST_RUN",
    )

    # In this case, `debugprint` should print the compiled inner-graph
    # (i.e. from `Scan._fn`)
    out = aesara.function([M], out, updates=updates, mode="FAST_RUN")

    expected_output = """forall_inplace,cpu,scan_fn} [id A] 2 (outer_out_sit_sot-0)
    |TensorConstant{20000} [id B] (n_steps)
    |TensorConstant{[    0    ..998 19999]} [id C] (outer_in_seqs-0)
    |IncSubtensor{InplaceSet;:int64:} [id D] 1 (outer_in_sit_sot-0)
    | |AllocEmpty{dtype='int64'} [id E] 0
    | | |TensorConstant{20000} [id B]
    | |TensorConstant{(1,) of 0} [id F]
    | |ScalarConstant{1} [id G]
    |<TensorType(float64, (20000, 2, 2))> [id H] (outer_in_non_seqs-0)

    Inner graphs:

    forall_inplace,cpu,scan_fn} [id A] (outer_out_sit_sot-0)
    >Elemwise{Composite{Switch(LT(i0, i1), i2, i0)}} [id I] (inner_out_sit_sot-0)
    > |TensorConstant{0} [id J]
    > |Subtensor{int64, int64, int64} [id K]
    > | |*2-<TensorType(float64, (20000, 2, 2))> [id L] -> [id H] (inner_in_non_seqs-0)
    > | |ScalarFromTensor [id M]
    > | | |*0-<TensorType(int64, ())> [id N] -> [id C] (inner_in_seqs-0)
    > | |ScalarFromTensor [id O]
    > | | |*1-<TensorType(int64, ())> [id P] -> [id D] (inner_in_sit_sot-0)
    > | |ScalarConstant{0} [id Q]
    > |TensorConstant{1} [id R]
    """

    output_str = debugprint(out, file="str", print_op_info=True)
    lines = output_str.split("\n")

    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
Example #12
    def test_infer_shape(self):
        a = tt.tensor(config.floatX, [False, True, False])
        shape = list(a.shape)
        out = self.op(a, shape)

        self._compile_and_check(
            [a] + shape,
            [out],
            [np.random.rand(2, 1, 3).astype(config.floatX), 2, 1, 3],
            self.op_class,
        )

        a = tt.tensor(config.floatX, [False, True, False])
        shape = [tt.iscalar() for i in range(4)]
        self._compile_and_check(
            [a] + shape,
            [self.op(a, shape)],
            [np.random.rand(2, 1, 3).astype(config.floatX), 6, 2, 5, 3],
            self.op_class,
        )
Example #13
    def test_perform(self):
        scipy = pytest.importorskip("scipy")

        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
            a = np.asarray(self.rng.rand(*shp0)).astype(config.floatX)
            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                # Newer versions of scipy want 4 dimensions at least,
                # so we have to add a dimension to a and flatten the result.
                if len(shp0) + len(shp1) == 3:
                    scipy_val = scipy.linalg.kron(a[np.newaxis, :], b).flatten()
                else:
                    scipy_val = scipy.linalg.kron(a, b)
                utt.assert_allclose(out, scipy_val)
Example #14
 def make_node(self, pvals, unis, n=1):
     pvals = aet.as_tensor_variable(pvals)
     unis = aet.as_tensor_variable(unis)
     if pvals.ndim != 2:
         raise NotImplementedError("pvals ndim should be 2", pvals.ndim)
     if unis.ndim != 1:
         raise NotImplementedError("unis ndim should be 1", unis.ndim)
     if self.odtype == "auto":
         odtype = pvals.dtype
     else:
         odtype = self.odtype
     out = aet.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
     return Apply(self, [pvals, unis, as_scalar(n)], [out])
Example #15
def test_advinc_subtensor1():
    # Test the second case in the opt local_gpu_advanced_incsubtensor1
    for shp in [(3, 3), (3, 3, 3)]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype="float32").reshape(shp) + 1
        yval = np.empty((2, ) + shp[1:], dtype="float32")
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(dtype="float32",
                          broadcastable=(False, ) * len(shp),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        assert (sum([
            isinstance(node.op, GpuAdvancedIncSubtensor1)
            for node in f.maker.fgraph.toposort()
        ]) == 1)
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [0, 2], yval)
        assert np.allclose(rval, rep)
Example #16
 def get_host_tensor(self):
     broadcastable = (False, ) * self.tensor_size
     return aet.tensor(self.dtype, broadcastable)
Example #17
 def make_node(self, path):
     if isinstance(path, str):
         path = Constant(Generic(), path)
     return gof.Apply(
         self, [path],
         [tensor(self.dtype, broadcastable=self.broadcastable)])
Example #18
 def make_node(self, request, data):
     return gof.Apply(
         self,
         [request, data],
         [tensor(data.dtype, broadcastable=data.broadcastable)],
     )