Example #1
def test_cpu_target_with_shared_variable():
    srng = MRG_RandomStream()
    s = np.random.rand(2, 3).astype("float32")
    x = gpuarray_shared_constructor(s, name="x")
    try:
        # Register the GPU constructor so that aesara.shared places new
        # shared variables on the GPU
        aesara.compile.shared_constructor(gpuarray_shared_constructor)
        y = srng.uniform(x.shape, target="cpu")
        y.name = "y"
        z = (x * y).sum()
        z.name = "z"

        fz = aesara.function([], z, mode=mode)

        nodes = fz.maker.fgraph.toposort()
        assert not any(
            isinstance(node.op, GPUA_mrg_uniform) for node in nodes
        )
    finally:
        aesara.compile.shared_constructor(gpuarray_shared_constructor,
                                          remove=True)
Example #2
    def test_max_pool_2d_6D(self):
        rng = np.random.RandomState(utt.fetch_seed())
        maxpoolshps = [(3, 2)]
        imval = rng.rand(2, 1, 1, 1, 3, 4)
        images = TensorType("float64", [False] * 6)()

        for maxpoolshp, ignore_border, mode in product(
            maxpoolshps,
            [True, False],
            ["max", "sum", "average_inc_pad", "average_exc_pad"],
        ):
            numpy_output_val = self.numpy_max_pool_2d(imval,
                                                      maxpoolshp,
                                                      ignore_border,
                                                      mode=mode)
            output = pool_2d(images, maxpoolshp, ignore_border, mode=mode)
            output_val = function([images], output)(imval)
            utt.assert_allclose(output_val, numpy_output_val)
Example #3
    def test_perform(self):
        scipy = pytest.importorskip("scipy")

        for shp0 in [(2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)]:
            x = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp0))
            a = np.asarray(self.rng.rand(*shp0)).astype(config.floatX)
            for shp1 in [(6,), (6, 7), (6, 7, 8), (6, 7, 8, 9)]:
                if len(shp0) + len(shp1) == 2:
                    continue
                y = tensor.tensor(dtype="floatX", broadcastable=(False,) * len(shp1))
                f = function([x, y], kron(x, y))
                b = self.rng.rand(*shp1).astype(config.floatX)
                out = f(a, b)
                # Newer versions of scipy want 4 dimensions at least,
                # so we have to add a dimension to a and flatten the result.
                if len(shp0) + len(shp1) == 3:
                    scipy_val = scipy.linalg.kron(a[np.newaxis, :], b).flatten()
                else:
                    scipy_val = scipy.linalg.kron(a, b)
                utt.assert_allclose(out, scipy_val)
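
A plain-NumPy aside (not part of the test): the newaxis-and-flatten workaround above compensates for rank handling that np.kron performs automatically, by left-padding the lower-rank operand with size-1 axes.

import numpy as np

# A 1-D vector against a 2-D matrix behaves like a (1, n) row against (p, q).
a = np.arange(2.0)                  # shape (2,)
b = np.arange(6.0).reshape(2, 3)    # shape (2, 3)
assert np.kron(a, b).shape == (2, 6)
assert np.array_equal(np.kron(a, b), np.kron(a[np.newaxis, :], b))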
Example #4
        def test_shape_i(self):
            dtype = self.dtype
            if dtype is None:
                dtype = aesara.config.floatX

            rng = np.random.RandomState(utt.fetch_seed())
            x = np.asarray(rng.uniform(0, 1, [2, 4]), dtype=dtype)
            x = self.cast_value(x)

            self.ref_fct(x)
            x_shared = self.shared_constructor(x, borrow=False)
            self.aesara_fct(x_shared)

            f = aesara.function([], x_shared.shape[1])
            topo = f.maker.fgraph.toposort()

            assert np.all(f() == 4)
            if aesara.config.mode != "FAST_COMPILE":
                assert len(topo) == 1
                assert isinstance(topo[0].op, Shape_i)
Example #5
    def test_optimizations_vm(self):
        # Test vector dot matrix
        skip_if_blas_ldflags_empty()
        f = aesara.function([self.x, self.A],
                            aet.dot(self.x, self.A),
                            mode=self.mode)

        # Assert that the dot was replaced by the optimized CGemv op
        self.assertFunctionContains0(f, aet.dot)
        self.assertFunctionContains1(f, CGemv(inplace=True))

        # Assert they produce the same output
        assert np.allclose(f(self.xval, self.Aval),
                           np.dot(self.xval, self.Aval))

        # Test with negative strides on 2 dims
        assert np.allclose(
            f(self.xval, self.Aval[::-1, ::-1]),
            np.dot(self.xval, self.Aval[::-1, ::-1]),
        )
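
A plain-NumPy aside (not part of the test): the reversed inputs above are negative-stride views, which the BLAS-backed op must either handle directly or copy.

import numpy as np

# Reversing both axes yields a view with negated strides rather than a copy.
A = np.ones((2, 3))
B = A[::-1, ::-1]
assert B.strides == (-A.strides[0], -A.strides[1])
assert np.shares_memory(A, B)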
Example #6
def check(dtype, N, M_=None, k=0):
    # Aesara does not accept None as a tensor value, and DebugMode
    # currently rejects None inputs even where they would be allowed,
    # so substitute a concrete value for M.
    M = M_
    if M is None:
        M = N
    N_symb = iscalar()
    M_symb = iscalar()
    k_symb = iscalar()
    out = aet.tri(N_symb, M_symb, k_symb,
                  dtype=dtype) + np.array(1).astype(dtype)
    f = aesara.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
    result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
    # np.tri accepts M_=None and defaults to N, matching the substitution above.
    assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
    assert result.dtype == np.dtype(dtype)
    assert any(isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort())
Example #7
def test_undefined_grad_opt():
    # Make sure that undefined gradients are removed from the optimized graph.
    random = MRG_RandomStream(np.random.randint(1, 2147462579))

    pvals = aesara.shared(np.random.rand(10, 20).astype(config.floatX))
    pvals = pvals / pvals.sum(axis=1, keepdims=True)
    pvals = zero_grad(pvals)

    samples = random.multinomial(pvals=pvals, n=1)
    samples = aet.cast(samples, pvals.dtype)
    samples = zero_grad(samples)

    cost = aet_sum(samples + pvals)
    grad_res = grad(cost, samples)

    f = aesara.function([], grad_res)
    assert not any(
        isinstance(node.op, UndefinedGrad)
        for node in f.maker.fgraph.apply_nodes
    )
Example #8
def test_dirichlet_infer_shape(M, size):
    rv = dirichlet(M, size=size)
    rv_shape = list(dirichlet._infer_shape(size or (), [M], None))

    all_args = (M,) + size
    fn_inputs = [
        i
        for i in graph_inputs([a for a in all_args if isinstance(a, Variable)])
        if not isinstance(i, (Constant, SharedVariable))
    ]
    aesara_fn = function(fn_inputs,
                         [aet.as_tensor(o) for o in rv_shape + [rv]],
                         mode=py_mode)

    *rv_shape_val, rv_val = aesara_fn(*[
        i.tag.test_value for i in fn_inputs
        if not isinstance(i, (SharedVariable, Constant))
    ])

    assert tuple(rv_shape_val) == tuple(rv_val.shape)
Example #9
    def run_ctc(self, activations, labels, input_length, expected_costs,
                expected_grads):
        # Wrap the test inputs in shared variables
        t_activations = aesara.shared(activations, name="activations")
        t_activation_times = aesara.shared(input_length,
                                           name="activation_times")
        t_labels = aesara.shared(labels, name="labels")

        t_cost = ctc(t_activations, t_labels, t_activation_times)
        # Symbolic gradient of CTC cost
        t_grad = tt.grad(tt.mean(t_cost), t_activations)
        # Compile symbolic functions
        train = aesara.function([], [t_cost, t_grad])

        cost, grad = train()

        utt.assert_allclose(expected_grads / cost.shape[0], grad)
        utt.assert_allclose(expected_costs, cost)

        self.check_grads_disabled(t_activations, t_labels, t_activation_times)
Example #10
    def test_fail_select_alot(self):
        # Tests that multinomial_wo_replacement fails when asked to sample more
        # elements than the actual number of elements

        th_rng = RandomStream(12345)

        p = fmatrix()
        n = iscalar()
        with pytest.warns(DeprecationWarning):
            m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        rng = np.random.default_rng(12345)
        pvals = rng.integers(1, 100, (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        with pytest.raises(ValueError):
            f(pvals, n_selected)
Example #11
    def test_sparseblockgemv(self):
        # Compares the numpy and aesara versions of sparseblockgemv.

        b = fmatrix()
        W = ftensor4()
        h = ftensor3()
        iIdx = imatrix()
        oIdx = imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)

        f = aesara.function([W, h, iIdx, b, oIdx], o, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

        th_out = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
        ref_out = self.gemv_numpy(b_val.take(oIdx_val, axis=0), W_val, h_val,
                                  iIdx_val, oIdx_val)

        utt.assert_allclose(ref_out, th_out)
Example #12
    def test_grad_constant(self):
        # Test that the gradient handles Constants and consider_constant variables
        # consistently

        x = scalar()
        y = scalar()
        z_x = x + y
        z_one = one + y
        g_x = grad(z_x, x, consider_constant=[x])
        g_one = grad(z_one, one)

        f = aesara.function([x, y], [g_x, g_one])

        g_x, g_one = f(1, 0.5)

        if not np.allclose(g_x, g_one):
            raise AssertionError(
                f"Gradient using consider_constant is {g_x} but gradient "
                f"with respect to the same Constant is {g_one}"
            )
Example #13
    def test_sparseblockgemv_grad_shape(self):
        b = tensor.fmatrix()
        W = tensor.ftensor4()
        h = tensor.ftensor3()
        iIdx = tensor.imatrix()
        oIdx = tensor.imatrix()

        o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
        go = aesara.grad(o.sum(), [b, W, h])

        f = aesara.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

        W_val, h_val, iIdx_val, b_val, oIdx_val = self.gemv_data()

        # Just make sure that it runs correctly and all the shapes are OK.
        b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)

        assert b_g.shape == b_val.shape
        assert h_g.shape == h_val.shape
        assert W_g.shape == W_val.shape
Example #14
    def test_fail_select_alot(self):
        # Tests that multinomial_wo_replacement fails when asked to sample more
        # elements than the actual number of elements

        th_rng = RandomStreams(12345)

        p = tensor.fmatrix()
        n = tensor.iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        np.random.seed(12345)
        pvals = np.random.randint(1, 100,
                                  (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        with pytest.raises(ValueError):
            f(pvals, n_selected)
Example #15
    def test_fail_select_alot(self):
        # Tests that ChoiceFromUniform fails when asked to sample more
        # elements than the actual number of elements

        p = tensor.fmatrix()
        u = tensor.fvector()
        n = tensor.iscalar()
        m = multinomial.ChoiceFromUniform(odtype="auto")(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 100
        n_selected = 200
        np.random.seed(12345)
        uni = np.random.rand(n_selected).astype(config.floatX)
        pvals = np.random.randint(1, 100,
                                  (1, n_elements)).astype(config.floatX)
        pvals /= pvals.sum(1)
        with pytest.raises(ValueError):
            f(pvals, uni, n_selected)
Example #16
    def test_combined_1d(self, size, k, dtype, sorted, idx_dtype):
        if isinstance(k, str):
            k = eval(k.replace("n", str(size)))

        x = vector(name="x", dtype=dtype)
        yv, yi = topk_and_argtopk(x, k, sorted=sorted, idx_dtype=idx_dtype)
        fn = aesara.function([x], [yv, yi], mode=self.mode)
        assert any(
            isinstance(n.op, self.op_class)
            for n in fn.maker.fgraph.apply_nodes)
        # generate an all-unique array
        xval = gen_unique_vector(size, dtype)
        yvval, yival = fn(xval)
        idx = slice(-k, None) if k > 0 else slice(-k)
        goali = np.argsort(xval)[idx].astype(idx_dtype)
        goalv = xval[goali]

        # due to uniqueness, we expect the same indices
        assert np.all(xval[np.sort(yival)] == xval[np.sort(goali)])
        utt.assert_allclose(np.sort(yvval), goalv)
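
The slice trick above is compact but easy to misread; a hypothetical numpy_topk helper (an illustration, not part of the test suite) spells out the same reference computation.

import numpy as np

def numpy_topk(x, k):
    # Positive k keeps the k largest values, negative k the |k| smallest,
    # mirroring `slice(-k, None) if k > 0 else slice(-k)` above.
    idx = slice(-k, None) if k > 0 else slice(-k)
    return np.sort(x)[idx]

assert np.array_equal(numpy_topk(np.array([3, 1, 2]), 2), [2, 3])
assert np.array_equal(numpy_topk(np.array([3, 1, 2]), -2), [1, 2])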
Example #17
    def test_reverse_inplace(self):
        mySymbolicMatricesList = TypedListType(
            tt.TensorType(aesara.config.floatX, (False, False)))()

        z = Reverse()(mySymbolicMatricesList)
        m = aesara.compile.mode.get_default_mode().including(
            "typed_list_inplace_opt")
        f = aesara.function(
            [In(mySymbolicMatricesList, borrow=True, mutable=True)],
            z,
            accept_inplace=True,
            mode=m,
        )
        assert f.maker.fgraph.toposort()[0].op.inplace

        x = rand_ranged(-1000, 1000, [100, 101])

        y = rand_ranged(-1000, 1000, [100, 101])

        assert np.array_equal(f([x, y]), [y, x])
Example #18
    def test_multiple_out_crash(self):
        # This test failed up to commit 2faeb62c38
        p0 = self.shared(np.asarray(np.random.random([4, 8]),
                                    dtype=self.dtype))
        p1 = self.shared(np.asarray(np.random.random(8), dtype=self.dtype))
        p2 = self.shared(np.asarray(np.random.random([8, 3]),
                                    dtype=self.dtype))
        p3 = self.shared(np.asarray(np.random.random(3), dtype=self.dtype))
        p = [p0, p1, p2, p3]

        # In the original bug report, these variables came from a scan.
        ften0 = tensor3("ft0", dtype=self.dtype)
        fmat1 = matrix("fm1", dtype=self.dtype)
        ften2 = tensor3("ft2", dtype=self.dtype)
        fmat3 = matrix("fm3", dtype=self.dtype)

        # Keep only the last iteration.
        fsub0 = ften0[-1]
        fsub1 = fmat1[-1]
        fsub2 = ften2[-1]
        fsub3 = fmat3[-1]

        fsub = [fsub0, fsub1, fsub2, fsub3]

        acc = at.constant(1, "int8") >= 0

        new_positions = ifelse(acc, fsub, p)

        new_updates = [(p[0], new_positions[0])]

        f = function([ften0, fmat1, ften2, fmat3], [],
                     updates=new_updates,
                     mode=self.mode)
        self.assertFunctionContains1(f, self.get_ifelse(4))

        i1 = np.asarray(np.random.random([19, 4, 8]), dtype=self.dtype)
        i2 = np.asarray(np.random.random([19, 8]), dtype=self.dtype)
        i3 = np.asarray(np.random.random([19, 8, 3]), dtype=self.dtype)
        i4 = np.asarray(np.random.random([19, 3]), dtype=self.dtype)

        f(i1, i2, i3, i4)
Example #19
def test_multinomial_input_dtype():
    # This tests the MultinomialFromUniform Op directly, not going through the
    # multinomial() call in GPU random generation.

    for idtype in ["float32", "float16", "float64"]:
        for odtype in ["float32", "float16", "float64", "int32"]:

            p = tensor.matrix("p", idtype)
            u = tensor.vector("u", idtype)
            m = aesara.sandbox.multinomial.MultinomialFromUniform(odtype)(p, u)

            # multiplying m by 2 allows the multinomial op to reuse its output
            f = function([p, u],
                         m * 2,
                         allow_input_downcast=True,
                         mode=mode_with_gpu)

            assert any([
                type(node.op) is GPUAMultinomialFromUniform
                for node in f.maker.fgraph.toposort()
            ])

            # test that both the first and the second label can be drawn
            utt.assert_allclose(f([[1, 0], [0, 1]], [0.1, 0.1]),
                                [[2, 0], [0, 2]])

            # test that the second label can be drawn in both rows
            r = f([[0.2, 0.8], [0.3, 0.7]], [0.31, 0.31])
            utt.assert_allclose(r, [[0, 2], [0, 2]])

            # test that a mix of first and second labels can be drawn
            r = f([[0.2, 0.8], [0.3, 0.7]], [0.21, 0.21])
            utt.assert_allclose(r, [[0, 2], [2, 0]])

            # change the size to make sure output gets reallocated ok
            # and also make sure that the GPU version doesn't screw up the
            # transposed-ness
            r = f([[0.2, 0.8]], [0.25])
            utt.assert_allclose(r, [[0, 2]])
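
The expected labels in these checks follow the usual inverse-CDF reading of the uniform draw; a small plain-NumPy sketch (assuming that is the Op's scheme) reproduces them.

import numpy as np

# The chosen label is the first index whose cumulative probability exceeds u.
def label_from_uniform(pvals_row, u):
    return int(np.searchsorted(np.cumsum(pvals_row), u, side="right"))

assert label_from_uniform([0.2, 0.8], 0.21) == 1  # 0.21 > 0.2 -> second label
assert label_from_uniform([0.3, 0.7], 0.21) == 0  # 0.21 < 0.3 -> first label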
Example #20
def test_batch_normalization_train_without_running_averages():
    # compile and run batch_normalization_train without running averages
    utt.seed_rng()

    x, scale, bias, dy = (
        tensor4("x"),
        tensor4("scale"),
        tensor4("bias"),
        tensor4("dy"),
    )
    data_shape = (5, 10, 30, 25)
    param_shape = (1, 10, 30, 25)

    # forward pass
    out, x_mean, x_invstd = batchnorm.batch_normalization_train(
        x, scale, bias, "per-activation"
    )
    # backward pass
    grads = aet.grad(None, wrt=[x, scale, bias], known_grads={out: dy})
    # compile
    f = aesara.function([x, scale, bias, dy], [out, x_mean, x_invstd] + grads)
    # check if the abstract Ops have been replaced
    assert not any(
        isinstance(
            n.op,
            (
                batchnorm.AbstractBatchNormTrain,
                batchnorm.AbstractBatchNormInference,
                batchnorm.AbstractBatchNormTrainGrad,
            ),
        )
        for n in f.maker.fgraph.toposort()
    )
    # run
    X = 4 + 3 * np.random.randn(*data_shape).astype(aesara.config.floatX)
    Dy = -1 + 2 * np.random.randn(*data_shape).astype(aesara.config.floatX)
    Scale = np.random.randn(*param_shape).astype(aesara.config.floatX)
    Bias = np.random.randn(*param_shape).astype(aesara.config.floatX)
    f(X, Scale, Bias, Dy)
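
A rough plain-NumPy sketch of the "per-activation" statistics exercised above (the epsilon value is an assumption, and this is not the library's implementation):

import numpy as np

# "per-activation" normalizes over the batch axis only, so the mean and
# invstd have param_shape, one statistic per activation.
x = np.random.randn(5, 10, 30, 25)
eps = 1e-4  # assumed epsilon
mean = x.mean(axis=0, keepdims=True)                        # (1, 10, 30, 25)
invstd = 1.0 / np.sqrt(x.var(axis=0, keepdims=True) + eps)  # (1, 10, 30, 25)
out = (x - mean) * invstd  # the op then applies scale and bias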
Example #21
    def test_grad_disconnected(self):
        # tests corner cases of gradient for shape and alloc

        x = vector(name="x")
        total = x.sum()
        total.name = "total"
        num_elements = x.shape[0]
        num_elements.name = "num_elements"
        silly_vector = aet.alloc(total / num_elements, num_elements)
        silly_vector.name = "silly_vector"
        cost = silly_vector.sum()
        cost.name = "cost"
        # note that cost simplifies to be the same as "total"
        g = grad(cost, x, add_names=False)
        # we still need to pass in x because it determines the shape of
        # the output
        f = aesara.function([x], g)
        rng = np.random.RandomState([2012, 9, 5])
        x = np.cast[x.dtype](rng.randn(3))
        g = f(x)
        assert np.allclose(g, np.ones(x.shape, dtype=x.dtype))
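
The simplification noted in the comments can be made explicit: with n = len(x) and total = sum_i x_i, the cost is the sum of n copies of total / n, i.e. n * (total / n) = total = sum_i x_i, so d(cost)/d(x_i) = 1 for every i, which is exactly the all-ones gradient asserted above.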
Example #22
    def test_single_or_any_axis(self, axis, op):
        # the following ops can be specified with either a single axis or every
        # axis:
        x = dtensor3()
        a = np.random.random((3, 2, 4))
        # We don't need to test all optimizations and C code, as that is
        # covered by the individual op tests.
        mode = Mode(optimizer="fast_compile", linker="py")

        f = function(
            [x],
            [
                op(x, axis=axis, keepdims=True),
                self.makeKeepDims_local(x, op(x, axis=axis, keepdims=False),
                                        axis),
            ],
            mode=mode,
        )
        ans1, ans2 = f(a)
        assert np.allclose(ans1, ans2)
        assert ans1.shape == ans2.shape
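
The keepdims contract being compared above can be stated directly in NumPy (a sketch, independent of the test):

import numpy as np

# keepdims=True retains each reduced axis as size 1, so the result still
# broadcasts against the original array; keepdims=False drops the axis.
x = np.random.random((3, 2, 4))
assert x.sum(axis=1, keepdims=True).shape == (3, 1, 4)
assert x.sum(axis=1, keepdims=False).shape == (3, 4)
assert x.sum(axis=None, keepdims=True).shape == (1, 1, 1)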
Example #23
def test_trace():
    rng = np.random.RandomState(utt.fetch_seed())
    x = matrix()
    g = trace(x)
    f = aesara.function([x], g)

    for shp in [(2, 3), (3, 2), (3, 3)]:
        m = rng.rand(*shp).astype(config.floatX)
        v = np.trace(m)
        assert v == f(m)

    xx = vector()
    with pytest.raises((TypeError, ValueError)):
        trace(xx)
Example #24
    def test_select_distinct(self):
        # Tests that multinomial_wo_replacement always selects distinct elements

        th_rng = RandomStream(12345)

        p = fmatrix()
        n = iscalar()
        m = th_rng.multinomial_wo_replacement(pvals=p, n=n)

        f = function([p, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        np.random.seed(12345)
        for i in [5, 10, 50, 100, 500, n_elements]:
            pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, i)
            res = np.squeeze(res)
            assert len(res) == i
            assert np.all(np.in1d(np.unique(res), all_indices)), res
Example #25
    def test_select_distinct(self):
        # Tests that ChoiceFromUniform always selects distinct elements

        p = fmatrix()
        u = fvector()
        n = iscalar()
        m = multinomial.ChoiceFromUniform(odtype="auto")(p, u, n)

        f = function([p, u, n], m, allow_input_downcast=True)

        n_elements = 1000
        all_indices = range(n_elements)
        np.random.seed(12345)
        for i in [5, 10, 50, 100, 500, n_elements]:
            uni = np.random.rand(i).astype(config.floatX)
            pvals = np.random.randint(1, 100, (1, n_elements)).astype(config.floatX)
            pvals /= pvals.sum(1)
            res = f(pvals, uni, i)
            res = np.squeeze(res)
            assert len(res) == i, res
            assert np.all(np.in1d(np.unique(res), all_indices)), res
Example #26
    def test_pushout3(self):
        x1 = scalar("x1")
        y1 = scalar("x2")
        y2 = scalar("y2")
        c = iscalar("c")
        two = np.asarray(2, dtype=aesara.config.floatX)
        x, y = ifelse(c, (x1, y1), (two, y2), name="f1")
        o3 = np.asarray(0.3, dtype=aesara.config.floatX)
        o2 = np.asarray(0.2, dtype=aesara.config.floatX)
        z = ifelse(c, o3, o2, name="f2")
        out = x * z * y

        f = function([x1, y1, y2, c], out, allow_input_downcast=True)
        assert isinstance(f.maker.fgraph.toposort()[-1].op, IfElse)
        rng = np.random.RandomState(utt.fetch_seed())
        vx1 = rng.uniform()
        vy1 = rng.uniform()
        vy2 = rng.uniform()

        assert np.allclose(f(vx1, vy1, vy2, 1), vx1 * vy1 * 0.3)
        assert np.allclose(f(vx1, vy1, vy2, 0), 2 * vy2 * 0.2)
Example #27
def test_advinc_subtensor1():
    # Test the second case in the opt local_gpu_advanced_incsubtensor1
    for shp in [(3, 3), (3, 3, 3)]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype="float32").reshape(shp) + 1
        yval = np.empty((2, ) + shp[1:], dtype="float32")
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(dtype="float32",
                          broadcastable=(False, ) * len(shp),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        assert sum(
            isinstance(node.op, GpuAdvancedIncSubtensor1)
            for node in f.maker.fgraph.toposort()
        ) == 1
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [0, 2], yval)
        assert np.allclose(rval, rep)
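
Why the reference uses np.add.at rather than fancy-indexed += (it matters once indices repeat) can be seen in plain NumPy:

import numpy as np

# Fancy-indexed += is buffered: a repeated index is written only once.
buffered = np.zeros(3)
buffered[[0, 0, 2]] += 1.0
assert np.array_equal(buffered, [1.0, 0.0, 1.0])

# np.add.at is unbuffered, like advanced_inc_subtensor1: repeats accumulate.
unbuffered = np.zeros(3)
np.add.at(unbuffered, [0, 0, 2], 1.0)
assert np.array_equal(unbuffered, [2.0, 0.0, 1.0])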
Example #28
def test_deterministic_flag():
    shp = (3, 4)
    for dtype1, dtype2 in [("float32", "int8")]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.empty((2, ) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name="x")
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False, ) * len(yval.shape),
                          name="y")
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = aesara.function([y], expr, mode=mode_with_gpu)
        assert sum(
            isinstance(node.op, GpuAdvancedIncSubtensor1)
            for node in f.maker.fgraph.toposort()
        ) == 1
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [0, 2], yval)
        assert np.allclose(rval, rep)
Example #29
    def test_topk_1d(self, size, k, dtype, sorted):
        if isinstance(k, str):
            k = eval(k.replace("n", str(size)))

        x = vector(name="x", dtype=dtype)
        y = topk(x, k, sorted=sorted)
        fn = aesara.function([x], y, mode=self.mode)
        assert any(
            isinstance(n.op, self.op_class)
            for n in fn.maker.fgraph.apply_nodes)
        # assert local_useless_topk opt is done properly
        assert 1 == len(fn.maker.fgraph.outputs[0].owner.outputs)

        # generate an all-unique array
        xval = gen_unique_vector(size, dtype)
        yval = fn(xval)
        idx = slice(-k, None) if k > 0 else slice(-k)
        goal = np.sort(xval)[idx]

        assert yval.dtype == goal.dtype
        utt.assert_allclose(goal, np.sort(yval))
Example #30
    def test_perform(self):
        x = matrix()
        y = scalar()
        z = iscalar()

        f = function([x, y, z], fill_diagonal_offset(x, y, z))
        for test_offset in (-5, -4, -1, 0, 1, 4, 5):
            for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
                a = np.random.random(shp).astype(config.floatX)
                val = np.cast[config.floatX](np.random.random())
                out = f(a, val, test_offset)
                # We can't use np.fill_diagonal, as it is buggy.
                assert np.allclose(np.diag(out, test_offset), val)
                if test_offset >= 0:
                    assert (out == val).sum() == min(
                        min(a.shape), a.shape[1] - test_offset
                    )
                else:
                    assert (out == val).sum() == min(
                        min(a.shape), a.shape[0] + test_offset
                    )
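
A quick plain-NumPy sanity check (independent of the test) of the diagonal-length formula asserted above:

import numpy as np

# The k-th diagonal of an (r, c) matrix has min(r, c - k) elements for
# k >= 0 and min(c, r + k) for k < 0, matching the counts checked above.
a = np.zeros((5, 8))
for k in (-5, -4, -1, 0, 1, 4, 5):
    r, c = a.shape
    expected = min(r, c - k) if k >= 0 else min(c, r + k)
    assert np.diag(a, k).size == max(expected, 0)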