Example #1
def test_multMatVect():
    A1 = lmatrix("A1")
    s1 = ivector("s1")
    m1 = iscalar("m1")
    A2 = lmatrix("A2")
    s2 = ivector("s2")
    m2 = iscalar("m2")

    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = function([A1, s1, m1, A2, s2, m2], g0)

    i32max = np.iinfo(np.int32).max
    rng = np.random.default_rng(utt.fetch_seed())
    A1 = rng.integers(0, i32max, (3, 3)).astype("int64")
    s1 = rng.integers(0, i32max, 3).astype("int32")
    m1 = np.asarray(rng.integers(i32max), dtype="int32")
    A2 = rng.integers(0, i32max, (3, 3)).astype("int64")
    s2 = rng.integers(0, i32max, 3).astype("int32")
    m2 = np.asarray(rng.integers(i32max), dtype="int32")

    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2

    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
    f0.vm()
    r_b = f0.output_storage[0].value

    assert np.allclose(r_a1, r_b[:3])
    assert np.allclose(r_a2, r_b[3:])
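
For reference, a plain NumPy sketch of the semantics being checked (an assumption based on the behavior asserted above and on the docstring of multMatVect in Example #4: matVecModM is a matrix-vector product reduced modulo m):

import numpy as np

def mat_vec_mod_m_ref(A, s, m):
    # Hypothetical reference for rng_mrg.matVecModM: compute in int64 to
    # avoid overflow, then reduce modulo m.
    return (A.astype(np.int64) @ s.astype(np.int64)) % m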
Example #2
def test_to_one_hot():
    v = ivector()
    o = to_one_hot(v, 10)
    f = aesara.function([v], o)
    out = f([1, 2, 3, 5, 6])
    assert out.dtype == config.floatX
    assert np.allclose(
        out,
        [
            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
        ],
    )

    v = ivector()
    o = to_one_hot(v, 10, dtype="int32")
    f = aesara.function([v], o)
    out = f([1, 2, 3, 5, 6])
    assert out.dtype == "int32"
    assert np.allclose(
        out,
        [
            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
        ],
    )
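
For comparison, a minimal NumPy sketch of what to_one_hot computes (one_hot_ref is our own name; the expected matrices above pin down the behavior):

import numpy as np

def one_hot_ref(v, nb_class, dtype="float64"):
    # Row k is all zeros except for a one at column v[k].
    out = np.zeros((len(v), nb_class), dtype=dtype)
    out[np.arange(len(v)), v] = 1
    return out

one_hot_ref([1, 2, 3, 5, 6], 10)  # matches the expected output above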
Example #3
    def test_pooling_with_tensor_vars(self):
        x = ftensor4()
        window_size = ivector()
        stride = ivector()
        padding = ivector()
        data = np.random.normal(0, 1, (1, 1, 5, 5)).astype("float32")

        # checking variable params vs fixed params
        for ignore_border in [True, False]:
            for mode in ["max", "sum", "average_inc_pad", "average_exc_pad"]:
                y = pool_2d(x, window_size, ignore_border, stride, padding,
                            mode)
                dx = aesara.gradient.grad(y.sum(), x)
                var_fct = aesara.function([x, window_size, stride, padding],
                                          [y, dx])
                for ws in (4, 2, 5):
                    for st in (2, 3):
                        for pad in (0, 1):
                            if (pad > st or st > ws
                                    or (pad != 0 and not ignore_border)
                                    or (mode == "average_exc_pad" and pad != 0)):
                                continue
                            y = pool_2d(x, (ws, ws), ignore_border, (st, st),
                                        (pad, pad), mode)
                            dx = aesara.gradient.grad(y.sum(), x)
                            fix_fct = aesara.function([x], [y, dx])
                            var_y, var_dx = var_fct(data, (ws, ws), (st, st),
                                                    (pad, pad))
                            fix_y, fix_dx = fix_fct(data)
                            utt.assert_allclose(var_y, fix_y)
                            utt.assert_allclose(var_dx, fix_dx)
Example #4
def multMatVect(v, A, m1, B, m2):
    # TODO: needs descriptions for the parameters and return value
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C-code.

    """
    if multMatVect.dot_modulo is None:
        A_sym = lmatrix("A")
        s_sym = ivector("s")
        m_sym = iscalar("m")
        A2_sym = lmatrix("A2")
        s2_sym = ivector("s2")
        m2_sym = iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # Calling the Aesara function this way bypasses the usual Aesara call overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r


# The compiled function is cached as an attribute on multMatVect; it must be
# initialized to None so that the first call triggers compilation.
multMatVect.dot_modulo = None
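
A minimal usage sketch (the concrete values below are illustrative, not from the source; the moduli are the usual MRG31k3p constants):

import numpy as np

v = np.arange(6, dtype="int32")
A = np.eye(3, dtype="int64")
B = 2 * np.eye(3, dtype="int64")
m1 = np.int32(2147483647)
m2 = np.int32(2147462579)
r = multMatVect(v, A, m1, B, m2)
# r[:3] == (A @ v[:3]) % m1 and r[3:] == (B @ v[3:]) % m2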
Example #5
def test_seed_fn():
    idx = ivector()

    for new_seed, same in [(234, True), (None, True), (23, False)]:
        random = MRG_RandomStream(234)
        fn1 = function([], random.uniform((2, 2), dtype="float32"))
        fn2 = function([], random.uniform((3, 3), nstreams=2, dtype="float32"))
        fn3 = function([idx], random.uniform(idx, nstreams=3, ndim=1, dtype="float32"))

        fn1_val0 = fn1()
        fn1_val1 = fn1()
        assert not np.allclose(fn1_val0, fn1_val1)
        fn2_val0 = fn2()
        fn2_val1 = fn2()
        assert not np.allclose(fn2_val0, fn2_val1)
        fn3_val0 = fn3([4])
        fn3_val1 = fn3([4])
        assert not np.allclose(fn3_val0, fn3_val1)
        assert fn1_val0.size == 4
        assert fn2_val0.size == 9

        random.seed(new_seed)

        fn1_val2 = fn1()
        fn1_val3 = fn1()
        fn2_val2 = fn2()
        fn2_val3 = fn2()
        fn3_val2 = fn3([4])
        fn3_val3 = fn3([4])
        assert np.allclose(fn1_val0, fn1_val2) == same
        assert np.allclose(fn1_val1, fn1_val3) == same
        assert np.allclose(fn2_val0, fn2_val2) == same
        assert np.allclose(fn2_val1, fn2_val3) == same
        assert np.allclose(fn3_val0, fn3_val2) == same
        assert np.allclose(fn3_val1, fn3_val3) == same
Example #6
def test__getitem__AdvancedSubtensor_bool():
    x = matrix("x")
    i = TensorType("bool", (False, False))("i")

    z = x[i]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor

    i = TensorType("bool", (False, ))("i")
    z = x[:, i]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor

    i = TensorType("bool", (False, ))("i")
    z = x[..., i]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor

    with pytest.raises(TypeError):
        z = x[[True, False], i]

    z = x[ivector("b"), i]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor
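
For reference, the NumPy boolean-indexing behavior these graphs mirror:

import numpy as np

x = np.arange(12).reshape(3, 4)
rows = np.array([True, False, True])
cols = np.array([True, False, True, False])
x[rows]       # keeps the rows where the mask is True
x[:, cols]    # keeps the columns where the mask is True
x[..., cols]  # same as above for a 2-D array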
Example #7
    def test_subtensor_multiple_slices(self):
        r"""
        This addresses a bug that happens when you have multiple subtensors
        on the output of `Scan`.  The bug requires the reshape to be produced,
        and it has something to do with how the `Subtensor`\s overlap.
        """

        def f_pow2(x_tm1):
            return 2 * x_tm1

        state = vector("state")
        n_steps = iscalar("nsteps")
        output, updates = scan(
            f_pow2,
            [],
            state,
            [],
            n_steps=n_steps,
            truncate_gradient=-1,
            go_backwards=False,
        )
        nw_shape = ivector("nw_shape")
        # Note that the output is reshaped to a 3-dimensional tensor before slicing
        my_f = function(
            [state, n_steps, nw_shape],
            [reshape(output, nw_shape, ndim=3)[:-2], output[:-4]],
            updates=updates,
            allow_input_downcast=True,
        )
        nodes = [x for x in my_f.maker.fgraph.toposort() if isinstance(x.op, Scan)]
        # This assertion fails if the savemem optimization failed on the `Scan` node
        if config.mode != "FAST_COMPILE":
            assert nodes[0].op._scan_savemem_visited
        rng = np.random.default_rng(utt.fetch_seed())
        my_f(rng.uniform(size=(3,)), 4, np.int64([2, 2, 3]))
Example #8
    def test_can_not_infer_nb_dim(self):
        # Regression test for gh-5613: check that we do not crash, and that
        # we raise a proper error in a few other cases found while
        # investigating that issue.

        img = tensor4("img")
        patches = nnet.neighbours.images2neibs(img, [16, 16])
        extractPatches = aesara.function([img], patches, mode=self.mode)

        patsRecovery = matrix("patsRecovery")
        original_size = ivector("original_size")

        for mode in ["valid", "ignore_borders"]:
            out = neibs2images(patsRecovery, (16, 16),
                               original_size,
                               mode=mode)
            f = aesara.function([patsRecovery, original_size],
                                out,
                                mode=self.mode)

            im_val = np.ones((1, 3, 320, 320), dtype=np.float32)
            neibs = extractPatches(im_val)
            f(neibs, im_val.shape)
            # Wrong number of dimensions
            with pytest.raises(ValueError):
                f(neibs, (1, 1, 3, 320, 320))
            # End up with a step of 0
            # This can lead to division by zero in DebugMode
            with pytest.raises((ValueError, ZeroDivisionError)):
                f(neibs, (3, 320, 320, 1))
Example #9
    def test_givens(self):
        x = shared(0)
        assign = pfunc([], x, givens={x: 3})
        assert assign() == 3
        assert x.get_value(borrow=True) == 0

        y = ivector()
        f = pfunc([y], (y * x), givens={x: 6})
        assert np.all(f([1, 1, 1]) == [6, 6, 6])
        assert x.get_value() == 0

        z = ivector()
        c = z * y
        f = pfunc([y], (c + 7), givens={z: _asarray([4, 4, 4], dtype="int32")})
        assert np.all(f([1, 1, 1]) == [11, 11, 11])
        assert x.get_value() == 0
Example #10
    def test_bad_shape(self):
        a = matrix("a")
        shapes = ivector("shapes")
        rng = np.random.RandomState(seed=utt.fetch_seed())
        a_val = rng.uniform(size=(3, 4)).astype(config.floatX)

        # Test reshape to 1 dim
        r = a.reshape(shapes, ndim=1)

        f = self.function([a, shapes], r)
        with pytest.raises(ValueError):
            f(a_val, [13])

        # Test reshape to 2 dim
        r = a.reshape(shapes, ndim=2)

        f = self.function([a, shapes], r)

        with pytest.raises(ValueError):
            f(a_val, [-1, 5])
        with pytest.raises(ValueError):
            f(a_val, [7, -1])
        with pytest.raises(ValueError):
            f(a_val, [7, 5])
        with pytest.raises(ValueError):
            f(a_val, [-1, -1])
Example #11
    def test_bad_shape(self):
        a = matrix("a")
        shapes = ivector("shapes")
        rng = np.random.default_rng(seed=utt.fetch_seed())
        a_val = rng.uniform(size=(3, 4)).astype(config.floatX)

        # Test reshape to 1 dim
        r = a.reshape(shapes, ndim=1)

        f = self.function([a, shapes], r)
        with pytest.raises(ValueError):
            f(a_val, [13])

        # Test reshape to 2 dim
        r = a.reshape(shapes, ndim=2)

        f = self.function([a, shapes], r)

        with pytest.raises(ValueError):
            f(a_val, [-1, 5])
        with pytest.raises(ValueError):
            f(a_val, [7, -1])
        with pytest.raises(ValueError):
            f(a_val, [7, 5])
        with pytest.raises(ValueError):
            f(a_val, [-1, -1])
        with pytest.raises(
                ValueError,
                match=".*Shape argument to Reshape has incorrect length.*"):
            f(a_val, [3, 4, 1])
Example #12
def test_get_vector_length():
    # Test `Shape`s
    x = aesara.shared(np.zeros((2, 3, 4, 5)))
    assert get_vector_length(x.shape) == 4

    # Test `SpecifyShape`
    x = specify_shape(ivector(), (10, ))
    assert get_vector_length(x) == 10
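
When the length cannot be inferred statically, get_vector_length raises instead; a small sketch of the failure case (assuming it raises ValueError, as Aesara's implementation does for an arbitrary vector):

import pytest
from aesara.tensor import get_vector_length, ivector

with pytest.raises(ValueError):
    get_vector_length(ivector())  # no static length is available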
Example #13
    def test_more_shapes(self):
        # TODO: generalize infer_shape to account for tensor variable
        # (non-constant) input shape
        admat = dmatrix()
        ndim = 1
        admat_val = random(3, 4)
        self._compile_and_check([admat], [Reshape(ndim)(admat, [12])],
                                [admat_val], Reshape)

        self._compile_and_check([admat], [Reshape(ndim)(admat, [-1])],
                                [admat_val], Reshape)

        ndim = 2
        self._compile_and_check([admat], [Reshape(ndim)(admat, [4, 3])],
                                [admat_val], Reshape)

        self._compile_and_check([admat], [Reshape(ndim)(admat, [4, -1])],
                                [admat_val], Reshape)

        self._compile_and_check([admat], [Reshape(ndim)(admat, [3, -1])],
                                [admat_val], Reshape)

        self._compile_and_check([admat], [Reshape(ndim)(admat, [-1, 3])],
                                [admat_val], Reshape)
        self._compile_and_check([admat], [Reshape(ndim)(admat, [-1, 4])],
                                [admat_val], Reshape)

        aivec = ivector()
        self._compile_and_check([admat, aivec], [Reshape(ndim)(admat, aivec)],
                                [admat_val, [4, 3]], Reshape)

        self._compile_and_check([admat, aivec], [Reshape(ndim)(admat, aivec)],
                                [admat_val, [4, -1]], Reshape)

        adtens4 = dtensor4()
        ndim = 4
        adtens4_val = random(2, 4, 3, 5)
        self._compile_and_check([adtens4],
                                [Reshape(ndim)(adtens4, [1, -1, 10, 4])],
                                [adtens4_val], Reshape)

        self._compile_and_check([adtens4],
                                [Reshape(ndim)(adtens4, [1, 3, 10, 4])],
                                [adtens4_val], Reshape)

        self._compile_and_check(
            [adtens4, aivec],
            [Reshape(ndim)(adtens4, aivec)],
            [adtens4_val, [1, -1, 10, 4]],
            Reshape,
        )

        self._compile_and_check(
            [adtens4, aivec],
            [Reshape(ndim)(adtens4, aivec)],
            [adtens4_val, [1, 3, 10, 4]],
            Reshape,
        )
Example #14
def test_local_csm_properties_csm():
    data = vector()
    indices, indptr, shape = (ivector(), ivector(), ivector())
    mode = get_default_mode()
    mode = mode.including("specialize", "local_csm_properties_csm")
    for CS, cast in [
        (sparse.CSC, sp.sparse.csc_matrix),
        (sparse.CSR, sp.sparse.csr_matrix),
    ]:
        f = aesara.function(
            [data, indices, indptr, shape],
            sparse.csm_properties(CS(data, indices, indptr, shape)),
            mode=mode,
        )
        assert not any(
            isinstance(node.op, (sparse.CSM, sparse.CSMProperties))
            for node in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40), config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
Example #15
    def test_infer_shape(self):
        rng = np.random.RandomState(3453)
        adtens4 = dtensor4()
        aivec = ivector()
        aivec_val = [3, 4, 2, 5]
        adtens4_val = rng.rand(*aivec_val)
        self._compile_and_check(
            [adtens4, aivec],
            [SpecifyShape()(adtens4, aivec)],
            [adtens4_val, aivec_val],
            SpecifyShape,
        )
Example #16
def test_bug_2009_07_17_borrowed_output():
    # Regression test for a bug where output was borrowed by mistake.
    a = dmatrix()
    b = dmatrix()
    # The output should *NOT* be borrowed.
    g = function([a, b], Out(dot(a, b), borrow=False))

    x = np.zeros((1, 2))
    y = np.ones((2, 5))

    z = g(x, y)
    # print(z)  # Should be zero.
    x.fill(1)
    # print(g(x, y))  # Should be non-zero.
    # print(z)  # Should still be zero.
    assert np.linalg.norm(z) == 0

    # The code above was supposed to fail when it was written (or, more
    # accurately, on the next revision, i.e. when it was merged with the
    # rest of the code, i.e. on revision cac9c9e9f08e).
    # However, for some reason, it no longer fails at this revision.
    # Thus, a new test (below) was added that exhibits the same issue. Note
    # that it may be better to move it into the test_nnet.py test file if it
    # turns out the bug was caused by
    # 'crossentropy_softmax_argmax_1hot_with_bias' and was not a more
    # general issue.
    test_output_activation_no_bias = dmatrix()
    test_b2 = dvector()
    test_target = ivector()
    nll_softmax_argmax = crossentropy_softmax_argmax_1hot_with_bias(
        test_output_activation_no_bias, test_b2, test_target)
    output = nll_softmax_argmax[1]
    g = function(
        [test_output_activation_no_bias, test_b2, test_target],
        Out(output, borrow=False),
    )

    a = np.zeros((1, 5))
    b = np.ones(5)
    c = np.zeros(1, dtype=np.int32)

    z = g(a, b, c)
    z_backup = copy.copy(z)
    id_z = id(z)
    # print(f"Output z after first call: {z}")
    a[0, 0] = 1
    id_other = id(g(a, b, c))
    # print(f"Output z after second call: {z}")
    # Ensure that calling the function again returns a pointer towards a new
    # array.
    assert id_z != id_other
    # Just to be 100% sure, ensure that z was not altered.
    assert (z == z_backup).all()
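
For contrast, a short sketch of what borrow=True would permit: the function may then reuse its internal output buffer, so an array returned by an earlier call can be overwritten by a later one (the import paths below are assumptions for recent Aesara versions):

import numpy as np
from aesara import function
from aesara.compile.io import Out
from aesara.tensor import dmatrix, dot

a, b = dmatrix("a"), dmatrix("b")
h = function([a, b], Out(dot(a, b), borrow=True))
z1 = h(np.zeros((1, 2)), np.ones((2, 5)))
h(np.ones((1, 2)), np.ones((2, 5)))
# z1 is no longer guaranteed to still hold the first result.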
Example #17
def test_local_csm_grad_c():
    data = vector()
    indices, indptr, shape = (ivector(), ivector(), ivector())
    mode = get_default_mode()

    if aesara.config.mode == "FAST_COMPILE":
        mode = Mode(linker="c|py", optimizer="fast_compile")

    mode = mode.including("specialize", "local_csm_grad_c")
    for CS, cast in [
        (sparse.CSC, sp.sparse.csc_matrix),
        (sparse.CSR, sp.sparse.csr_matrix),
    ]:
        cost = aet_sum(sparse.DenseFromSparse()(CS(data, indices, indptr, shape)))
        f = aesara.function(
            [data, indices, indptr, shape], aesara.grad(cost, data), mode=mode
        )
        assert not any(
            isinstance(node.op, sparse.CSMGrad) for node in f.maker.fgraph.toposort()
        )
        v = cast(random_lil((10, 40), config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
Example #18
    def test_op(self, axis, cond, shape):
        cond_var = ivector()
        data = np.random.random(size=shape).astype(config.floatX)
        data_var = matrix()

        f = aesara.function([cond_var, data_var],
                            self.op(cond_var, data_var, axis=axis))

        expected = np.compress(cond, data, axis=axis)
        tested = f(cond, data)

        assert tested.shape == expected.shape
        assert np.allclose(tested, expected)
Example #19
    def test_bad_number_of_shape(self):
        # Test that the number of dimensions provided is good
        specify_shape = SpecifyShape()

        x = vector()
        shape_vec = ivector()
        xval = np.random.random((2)).astype(config.floatX)
        with pytest.raises(AssertionError, match="will never match"):
            specify_shape(x, [])
        with pytest.raises(AssertionError, match="will never match"):
            specify_shape(x, [2, 2])

        f = aesara.function([x, shape_vec], specify_shape(x, shape_vec), mode=self.mode)
        assert isinstance(
            [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
            .inputs[0]
            .type,
            self.input_type,
        )
        expected = r"(Got 1 dimensions \(shape \(2,\)\), expected 0 dimensions with shape \(\).)"
        expected += r"|(Got 1 dimensions, expected 0 dimensions.)"
        with pytest.raises(AssertionError, match=expected):
            f(xval, [])
        expected = r"(Got 1 dimensions \(shape \(2,\)\), expected 2 dimensions with shape \(2, 2\).)"
        expected += r"|(SpecifyShape: Got 1 dimensions, expected 2 dimensions.)"
        with pytest.raises(AssertionError, match=expected):
            f(xval, [2, 2])

        x = matrix()
        xval = np.random.random((2, 3)).astype(config.floatX)
        for shape_ in [(), (1,), (2, 3, 4)]:
            with pytest.raises(AssertionError, match="will never match"):
                specify_shape(x, shape_)
            f = aesara.function(
                [x, shape_vec], specify_shape(x, shape_vec), mode=self.mode
            )
            assert isinstance(
                [
                    n
                    for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, SpecifyShape)
                ][0]
                .inputs[0]
                .type,
                self.input_type,
            )
            s_exp = str(shape_).replace("(", r"\(").replace(")", r"\)")
            expected = rf"(Got 2 dimensions \(shape \(2, 3\)\), expected {len(shape_)} dimensions with shape {s_exp}.)"
            expected += rf"|(SpecifyShape: Got 2 dimensions, expected {len(shape_)} dimensions.)"
            with pytest.raises(AssertionError, match=expected):
                f(xval, shape_)
Example #20
    def test_bad_number_of_shape(self):
        # Test that the number of dimensions provided is good
        specify_shape = SpecifyShape()

        x = vector()
        shape_vec = ivector()
        xval = np.random.rand(2).astype(config.floatX)
        with pytest.raises(AssertionError):
            specify_shape(x, [])
        with pytest.raises(AssertionError):
            specify_shape(x, [2, 2])

        f = aesara.function([x, shape_vec], specify_shape(x, shape_vec), mode=self.mode)
        assert isinstance(
            [n for n in f.maker.fgraph.toposort() if isinstance(n.op, SpecifyShape)][0]
            .inputs[0]
            .type,
            self.input_type,
        )
        with pytest.raises(AssertionError):
            f(xval, [])
        with pytest.raises(AssertionError):
            f(xval, [2, 2])

        x = matrix()
        xval = np.random.rand(2, 3).astype(config.floatX)
        for shape_ in [(), (1,), (2, 3, 4)]:
            with pytest.raises(AssertionError):
                specify_shape(x, shape_)
            f = aesara.function(
                [x, shape_vec], specify_shape(x, shape_vec), mode=self.mode
            )
            assert isinstance(
                [
                    n
                    for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, SpecifyShape)
                ][0]
                .inputs[0]
                .type,
                self.input_type,
            )
            with pytest.raises(AssertionError):
                f(xval, shape_)
Example #21
def test__getitem__AdvancedSubtensor():
    # Make sure we get `AdvancedSubtensor`s for basic indexing operations
    x = matrix("x")
    i = ivector("i")

    # This is a `__getitem__` call that's redirected to `_tensor_py_operators.take`
    z = x[i]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor1

    # This should index nothing (i.e. return an empty copy of `x`)
    # We check that the index is empty
    z = x[[]]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types == [AdvancedSubtensor1]
    assert isinstance(z.owner.inputs[1], TensorConstant)

    # This is also a `__getitem__` call that's redirected to `_tensor_py_operators.take`
    z = x[:, i]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types == [DimShuffle, AdvancedSubtensor1, DimShuffle]

    z = x[..., i, None]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types == [MakeSlice, AdvancedSubtensor]

    z = x[i, None]
    op_types = [
        type(node.op) for node in aesara.graph.basic.io_toposort([x, i], [z])
    ]
    assert op_types[-1] == AdvancedSubtensor
Example #22
    def test_remove_constants_and_unused_inputs_scan_non_seqs(self):
        """Test the rewrite `remove_constants_and_unused_inputs_scan` for non-sequences."""
        W = matrix(name="W")
        v = ivector(name="v")
        y1, _ = scan(
            lambda i, W: W[i], sequences=v, outputs_info=None, non_sequences=[W]
        )
        y2, _ = scan(
            lambda i, _, W: W[i],
            sequences=v,
            outputs_info=None,
            non_sequences=[W[0], W],
        )
        y3, _ = scan(
            lambda i, W, _: W[i],
            sequences=v,
            outputs_info=None,
            non_sequences=[W, W[0]],
        )
        y4, _ = scan(
            lambda i, _, _2, W: W[i],
            sequences=v,
            outputs_info=None,
            non_sequences=[W[0], W[0], W],
        )
        y5, _ = scan(
            lambda i, _, W, _2: W[i],
            sequences=v,
            outputs_info=None,
            non_sequences=[W[0], W, W[0]],
        )
        y6, _ = scan(
            lambda i, W, _, _2: W[i],
            sequences=v,
            outputs_info=None,
            non_sequences=[W, W[0], W[0]],
        )
        # TODO: y7 has a problem at run time. I think it should
        # raise an error during scan construction instead.
        # y7, _ = scan(lambda i, W, _, _2: W[i], sequences=v,
        #                    outputs_info=None, non_sequences=[v, W[0], W])

        W_val = np.random.normal(size=(3, 3)).astype(config.floatX)
        exp_val = W_val[np.r_[1, 2]]

        for out in [y1, y2, y3, y4, y5, y6]:
            f = function([W, v], out, mode=self.mode)

            res = f(W_val, [1, 2])
            assert np.array_equal(res, exp_val)

            scan_nodes = scan_nodes_from_fct(f)
            assert len(scan_nodes) == 1

            scan_node = scan_nodes[0]
            assert len(scan_node.inputs[1:]) == len(set(scan_node.inputs[1:]))
            inp = scan_node.op.inner_non_seqs(scan_node.op.inner_inputs)
            assert len(inp) == 1
            assert len(inp) == len(set(inp))

            inp = scan_node.op.outer_non_seqs(scan_node.inputs)
            assert len(inp) == 1
            assert len(inp) == len(set(inp))
Example #23
    def test_remove_constants_and_unused_inputs_scan_seqs(self):
        """Test the opt remove_constants_and_unused_inputs_scan for sequences."""
        W = matrix(name="W")
        v = ivector(name="v")
        vv = matrix(name="vv")
        y1, _ = scan(
            lambda i, W: W[i], sequences=v, outputs_info=None, non_sequences=[W]
        )
        y2, _ = scan(
            lambda i, _, W: W[i], sequences=[v, v], outputs_info=None, non_sequences=W
        )
        y3, _ = scan(
            lambda i, _, W: W[i],
            sequences=[v, vv[0]],
            outputs_info=None,
            non_sequences=W,
        )
        y4, _ = scan(
            lambda _, i, W: W[i],
            sequences=[vv[0], v],
            outputs_info=None,
            non_sequences=W,
        )
        y5, _ = scan(
            lambda _, i, _2, W: W[i],
            sequences=[vv, v, vv[0]],
            outputs_info=None,
            non_sequences=W,
        )
        y6, _ = scan(
            lambda _, _2, i, W: W[i],
            sequences=[vv[0], vv, v],
            outputs_info=None,
            non_sequences=W,
        )
        y7, _ = scan(
            lambda i, _, _2, W: W[i],
            sequences=[v, vv[0], vv[0]],
            outputs_info=None,
            non_sequences=W,
        )
        y8, _ = scan(
            lambda _, i, W, _2, _3: W[i],
            sequences=[vv[0], v],
            outputs_info=None,
            non_sequences=[W, W[0], W[0]],
        )

        W_val = np.random.normal(size=(3, 3)).astype(config.floatX)
        exp_val = W_val[np.r_[1, 2]]

        for out in [y1, y2, y3, y4, y5, y6, y7, y8]:
            f = function(
                [W, v, vv],
                out,
                on_unused_input="ignore",
                mode=self.mode,
            )

            res = f(W_val, [1, 2], W_val)
            assert np.array_equal(res, exp_val)

            scan_nodes = scan_nodes_from_fct(f)
            assert len(scan_nodes) == 1
            scan_node = scan_nodes[0]

            assert len(scan_node.inputs[1:]) == len(set(scan_node.inputs[1:]))
            inp = scan_node.op.inner_seqs(scan_node.op.inner_inputs)
            assert len(inp) == 1
            inp = scan_node.op.outer_seqs(scan_node.inputs)
            assert len(inp) == 1
            inp = scan_node.op.inner_non_seqs(scan_node.op.inner_inputs)
            assert len(inp) == 1
            inp = scan_node.op.outer_non_seqs(scan_node.inputs)
            assert len(inp) == 1
Example #24
    def test_ravel_multi_index(self):
        def check(shape, index_ndim, mode, order):
            multi_index = np.unravel_index(np.arange(np.prod(shape)),
                                           shape,
                                           order=order)
            # create some invalid indices to test the mode
            if mode in ("wrap", "clip"):
                multi_index = (multi_index[0] - 1, ) + multi_index[1:]
            # test with scalars and higher-dimensional indices
            if index_ndim == 0:
                multi_index = tuple(i[-1] for i in multi_index)
            elif index_ndim == 2:
                multi_index = tuple(i[:, np.newaxis] for i in multi_index)
            multi_index_symb = [aesara.shared(i) for i in multi_index]

            # reference result
            ref = np.ravel_multi_index(multi_index, shape, mode, order)

            def fn(mi, s):
                return function([], ravel_multi_index(mi, s, mode, order))

            # shape given as a tuple
            f_array_tuple = fn(multi_index, shape)
            f_symb_tuple = fn(multi_index_symb, shape)
            np.testing.assert_equal(ref, f_array_tuple())
            np.testing.assert_equal(ref, f_symb_tuple())

            # shape given as an array
            shape_array = np.array(shape)
            f_array_array = fn(multi_index, shape_array)
            np.testing.assert_equal(ref, f_array_array())

            # shape given as an Aesara variable
            shape_symb = aesara.shared(shape_array)
            f_array_symb = fn(multi_index, shape_symb)
            np.testing.assert_equal(ref, f_array_symb())

            # shape testing
            self._compile_and_check(
                [],
                [ravel_multi_index(multi_index, shape_symb, mode, order)],
                [],
                RavelMultiIndex,
            )

        for mode in ("raise", "wrap", "clip"):
            for order in ("C", "F"):
                for index_ndim in (0, 1, 2):
                    check((3, ), index_ndim, mode, order)
                    check((3, 4), index_ndim, mode, order)
                    check((3, 4, 5), index_ndim, mode, order)

        # must provide integers
        with pytest.raises(TypeError):
            ravel_multi_index((fvector(), ivector()), (3, 4))
        with pytest.raises(TypeError):
            ravel_multi_index(((3, 4), ivector()), (3.4, 3.2))

        # dims must be a 1D sequence
        with pytest.raises(TypeError):
            ravel_multi_index(((3, 4), ), ((3, 4), ))
Example #25
    def test_unravel_index(self):
        def check(shape, index_ndim, order):
            indices = np.arange(np.prod(shape))
            # test with scalars and higher-dimensional indices
            if index_ndim == 0:
                indices = indices[-1]
            elif index_ndim == 2:
                indices = indices[:, np.newaxis]
            indices_symb = aesara.shared(indices)

            # reference result
            ref = np.unravel_index(indices, shape, order=order)

            def fn(i, d):
                return function([], unravel_index(i, d, order=order))

            # shape given as a tuple
            f_array_tuple = fn(indices, shape)
            f_symb_tuple = fn(indices_symb, shape)
            np.testing.assert_equal(ref, f_array_tuple())
            np.testing.assert_equal(ref, f_symb_tuple())

            # shape given as an array
            shape_array = np.array(shape)
            f_array_array = fn(indices, shape_array)
            np.testing.assert_equal(ref, f_array_array())

            # shape given as an Aesara variable
            shape_symb = aesara.shared(shape_array)
            f_array_symb = fn(indices, shape_symb)
            np.testing.assert_equal(ref, f_array_symb())

            # shape given as a Shape op (unravel_index will use get_vector_length
            # to infer the number of dimensions)
            indexed_array = aesara.shared(np.random.uniform(size=shape_array))
            f_array_shape = fn(indices, indexed_array.shape)
            np.testing.assert_equal(ref, f_array_shape())

            # shape testing
            self._compile_and_check(
                [],
                unravel_index(indices, shape_symb, order=order),
                [],
                UnravelIndex,
            )

        for order in ("C", "F"):
            for index_ndim in (0, 1, 2):
                check((3, ), index_ndim, order)
                check((3, 4), index_ndim, order)
                check((3, 4, 5), index_ndim, order)

        # must specify ndim if length of dims is not fixed
        with pytest.raises(ValueError):
            unravel_index(ivector(), ivector())

        # must provide integers
        with pytest.raises(TypeError):
            unravel_index(fvector(), (3, 4))
        with pytest.raises(TypeError):
            unravel_index((3, 4), (3.4, 3.2))

        # dims must be a 1D sequence
        with pytest.raises(TypeError):
            unravel_index((3, 4), 3)
        with pytest.raises(TypeError):
            unravel_index((3, 4), ((3, 4), ))
Example #26
def test_mlp():
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron.

    This is demonstrated on MNIST-like data produced by `gen_data`.
    """
    datasets = gen_data()

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    batch_size = 100  # size of the minibatch

    # compute number of minibatches for training, validation and testing
    # n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    # n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    # n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    # print '... building the model'

    # allocate symbolic variables for the data
    index = lscalar()  # index to a [mini]batch
    x = matrix("x")  # the data is presented as rasterized images
    y = ivector("y")  # the labels are presented as 1D vector of
    # [int] labels

    rng = np.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)

    # the cost we minimize during training is the negative log likelihood of
    # the model.
    # We take the mean of the cost over each minibatch.
    cost = classifier.negative_log_likelihood(y).mean()

    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams = []
    for param in classifier.params:
        gparam = grad(cost, param)
        gparams.append(gparam)

    # Some of the optimizations needed are tagged with 'fast_run'
    # TODO: refine that and include only those
    mode = aesara.compile.get_default_mode().including("fast_run")

    updates2 = OrderedDict()

    updates2[classifier.hiddenLayer.params[0]] = grad(
        cost, classifier.hiddenLayer.params[0])
    train_model = aesara.function(
        inputs=[index],
        updates=updates2,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
        },
        mode=mode,
    )
    # print 'MODEL 1'
    # aesara.printing.debugprint(train_model, print_type=True)
    assert any([
        isinstance(i.op, CrossentropySoftmax1HotWithBiasDx)
        for i in train_model.maker.fgraph.toposort()
    ])

    # Even without FeatureShape
    train_model = aesara.function(
        inputs=[index],
        updates=updates2,
        mode=mode.excluding("ShapeOpt"),
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size],
        },
    )
    # print
    # print 'MODEL 2'
    # aesara.printing.debugprint(train_model, print_type=True)
    assert any([
        isinstance(i.op, CrossentropySoftmax1HotWithBiasDx)
        for i in train_model.maker.fgraph.toposort()
    ])
Example #27
def test_h_softmax():
    # Tests the output dimensions of h_softmax when a target is provided
    # and when it is not.

    input_size = 4
    batch_size = 2
    h_softmax_level1_size = 5
    h_softmax_level2_size = 3
    output_size = h_softmax_level1_size * h_softmax_level2_size

    # First level of h_softmax
    W1 = np.asarray(np.random.normal(size=(input_size, h_softmax_level1_size)),
                    dtype=config.floatX)
    W1 = aesara.shared(W1)
    b1 = aesara.shared(
        np.asarray(np.zeros((h_softmax_level1_size, )), dtype=config.floatX))

    # Second level of h_softmax
    W2 = np.asarray(
        np.random.normal(size=(h_softmax_level1_size, input_size,
                               h_softmax_level2_size)),
        dtype=config.floatX,
    )
    W2 = aesara.shared(W2)
    b2 = aesara.shared(
        np.asarray(
            np.zeros((h_softmax_level1_size, h_softmax_level2_size)),
            dtype=config.floatX,
        ))

    x = matrix("x")
    y = ivector("y")

    # This only computes the output corresponding to the target
    y_hat_tg = h_softmax(
        x,
        batch_size,
        output_size,
        h_softmax_level1_size,
        h_softmax_level2_size,
        W1,
        b1,
        W2,
        b2,
        y,
    )

    # This computes all the outputs
    y_hat_all = h_softmax(
        x,
        batch_size,
        output_size,
        h_softmax_level1_size,
        h_softmax_level2_size,
        W1,
        b1,
        W2,
        b2,
    )

    fun_output_tg = aesara.function([x, y], y_hat_tg)
    fun_output = aesara.function([x], y_hat_all)

    x_mat = np.random.normal(size=(batch_size,
                                   input_size)).astype(config.floatX)
    y_mat = np.random.default_rng().integers(0, output_size,
                                             batch_size).astype("int32")
    tg_output = fun_output_tg(x_mat, y_mat)
    all_outputs = fun_output(x_mat)

    assert tg_output.shape == (batch_size, )
    assert all_outputs.shape == (batch_size, output_size)

    # Verifies that the outputs computed by fun_output_tg are the same as those
    # computed by fun_output.
    utt.assert_allclose(all_outputs[np.arange(0, batch_size), y_mat],
                        tg_output)
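
A rough NumPy sketch of the two-level factorization used above (our own reference, assuming class c decomposes as (c // level2_size, c % level2_size)):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def h_softmax_target_ref(x, y, W1, b1, W2, b2, level2_size):
    # p(y | x) = p(group y1 | x) * p(class y2 | x, group y1)
    y1, y2 = y // level2_size, y % level2_size
    p1 = softmax(x @ W1 + b1)                                  # (batch, level1)
    p2 = softmax(np.einsum("bi,bij->bj", x, W2[y1]) + b2[y1])  # (batch, level2)
    rows = np.arange(len(y))
    return p1[rows, y1] * p2[rows, y2]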
Example #28
def test_jax_scan_multiple_output():
    """Test a scan implementation of a SEIR model.

    SEIR model definition:
    S[t+1] = S[t] - B[t]
    E[t+1] = E[t] + B[t] - C[t]
    I[t+1] = I[t] + C[t] - D[t]

    B[t] ~ Binom(S[t], beta)
    C[t] ~ Binom(E[t], gamma)
    D[t] ~ Binom(I[t], delta)
    """
    def binomln(n, k):
        return gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1)

    def binom_log_prob(n, p, value):
        return binomln(n, value) + value * log(p) + (n - value) * log(1 - p)

    # sequences
    aet_C = ivector("C_t")
    aet_D = ivector("D_t")
    # outputs_info (initial conditions)
    st0 = lscalar("s_t0")
    et0 = lscalar("e_t0")
    it0 = lscalar("i_t0")
    logp_c = scalar("logp_c")
    logp_d = scalar("logp_d")
    # non_sequences
    beta = scalar("beta")
    gamma = scalar("gamma")
    delta = scalar("delta")

    # TODO: Use random streams when their JAX conversions are implemented.
    # trng = aesara.tensor.random.RandomStream(1234)

    def seir_one_step(ct0, dt0, st0, et0, it0, logp_c, logp_d, beta, gamma,
                      delta):
        # bt0 = trng.binomial(n=st0, p=beta)
        bt0 = st0 * beta
        bt0 = bt0.astype(st0.dtype)

        logp_c1 = binom_log_prob(et0, gamma, ct0).astype(logp_c.dtype)
        logp_d1 = binom_log_prob(it0, delta, dt0).astype(logp_d.dtype)

        st1 = st0 - bt0
        et1 = et0 + bt0 - ct0
        it1 = it0 + ct0 - dt0
        return st1, et1, it1, logp_c1, logp_d1

    (st, et, it, logp_c_all, logp_d_all), _ = scan(
        fn=seir_one_step,
        sequences=[aet_C, aet_D],
        outputs_info=[st0, et0, it0, logp_c, logp_d],
        non_sequences=[beta, gamma, delta],
    )
    st.name = "S_t"
    et.name = "E_t"
    it.name = "I_t"
    logp_c_all.name = "C_t_logp"
    logp_d_all.name = "D_t_logp"

    out_fg = FunctionGraph(
        [aet_C, aet_D, st0, et0, it0, logp_c, logp_d, beta, gamma, delta],
        [st, et, it, logp_c_all, logp_d_all],
    )

    s0, e0, i0 = 100, 50, 25
    logp_c0 = np.array(0.0, dtype=config.floatX)
    logp_d0 = np.array(0.0, dtype=config.floatX)
    beta_val, gamma_val, delta_val = [
        np.array(val, dtype=config.floatX)
        for val in [0.277792, 0.135330, 0.108753]
    ]
    C = np.array([3, 5, 8, 13, 21, 26, 10, 3], dtype=np.int32)
    D = np.array([1, 2, 3, 7, 9, 11, 5, 1], dtype=np.int32)

    test_input_vals = [
        C,
        D,
        s0,
        e0,
        i0,
        logp_c0,
        logp_d0,
        beta_val,
        gamma_val,
        delta_val,
    ]
    compare_jax_and_py(out_fg, test_input_vals)