Example #1
def test_speed_lazy(linker):
    # TODO FIXME: This isn't a real test.

    def build_graph(x, depth=5):
        z = x
        for d in range(depth):
            z = ifelse(z[0] > 0, -z, z)
        return z

    steps_a = 10
    steps_b = 100
    x = vector()
    a = build_graph(x, steps_a)
    b = build_graph(x, steps_b)

    f_a = function([x], a, mode=Mode(optimizer=None, linker=linker))
    f_b = function([x], b, mode=Mode(optimizer=None, linker=linker))

    f_a([2.0])
    t0 = time.time()
    f_a([2.0])
    t1 = time.time()

    f_b([2.0])

    t2 = time.time()
    f_b([2.0])
    t3 = time.time()

    t_a = t1 - t0
    t_b = t3 - t2

    print(f"{linker} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
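A note on the reported unit (an explanatory sketch, not part of the original test): subtracting the two timings cancels the fixed per-call overhead, so the quotient measures only the marginal cost of the extra ifelse steps.

# Hypothetical numbers illustrating the arithmetic behind the print above:
t_a, t_b = 0.010, 0.100                  # seconds for 10 and 100 steps
per_op = (t_b - t_a) / (100 - 10)        # 0.001 s per extra op
assert abs(1000 * per_op - 1.0) < 1e-9   # 1.0 s per 1000 ops, i.e. "s/Kop"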
Example #2
    def time_linker(name, linker):
        steps_a = 10
        steps_b = 100
        x = vector()
        a = build_graph(x, steps_a)
        b = build_graph(x, steps_b)

        f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
        f_b = function([x], b, mode=Mode(optimizer=None, linker=linker()))

        f_a([2.0])
        t0 = time.time()
        f_a([2.0])
        t1 = time.time()

        f_b([2.0])

        t2 = time.time()
        f_b([2.0])
        t3 = time.time()

        t_a = t1 - t0
        t_b = t3 - t2

        print(f"{name} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
Example #3
def test_CheckAndRaise_basic_c(linker):
    exc_msg = "this is the exception"
    check_and_raise = CheckAndRaise(CustomException, exc_msg)

    conds = at.scalar()
    y = check_and_raise(at.as_tensor(1), conds)
    y_fn = aesara.function([conds], y, mode=Mode(linker))

    with pytest.raises(CustomException, match=exc_msg):
        y_fn(0)

    x = at.vector()
    y = check_and_raise(x, conds)
    y_fn = aesara.function([conds, x],
                           y.shape,
                           mode=Mode(linker, OPT_FAST_RUN))

    x_val = np.array([1.0], dtype=aesara.config.floatX)
    assert np.array_equal(y_fn(0, x_val), x_val)

    y = check_and_raise(x, at.as_tensor(0))
    y_grad = aesara.grad(y.sum(), [x])
    y_fn = aesara.function([x], y_grad, mode=Mode(linker, OPT_FAST_RUN))

    assert np.array_equal(y_fn(x_val), [x_val])
Example #4
def test_nested():
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()

    x1 = scalar("x1")
    x2 = scalar("x2")
    c1 = scalar("c1")
    c2 = scalar("c2")
    t1 = ifelse(c1, x1, notimpl(x2))
    t1.name = "t1"
    t2 = t1 * 10
    t2.name = "t2"
    t3 = ifelse(c2, t2, x1 + t1)
    t3.name = "t3"
    t4 = ifelseifelseif(eq(x1, x2), x1, eq(x1, 5), x2, c2, t3, t3 + 0.5)
    t4.name = "t4"

    linker = aesara.link.vm.VMLinker(lazy=False)
    f = function([c1, c2, x1, x2], t4, mode=Mode(linker=linker, optimizer="fast_run"))
    with pytest.raises(NotImplementedOpException):
        f(1, 0, np.array(10, dtype=x1.dtype), 0)

    linker = aesara.link.vm.VMLinker(lazy=True)
    f = function([c1, c2, x1, x2], t4, mode=Mode(linker=linker, optimizer="fast_run"))
    assert f(1, 0, np.array(10, dtype=x1.dtype), 0) == 20.5
Example #5
def compare_jax_and_py(
    fgraph,
    inputs,
    assert_fn=None,
    must_be_device_array=True,
):
    """Function to compare python graph output and jax compiled output for testing equality

    In the tests below computational graphs are defined in Aesara. These graphs are then passed to
    this function which then compiles the graphs in both jax and python, runs the calculation
    in both and checks if the results are the same

    Parameters
    ----------
    fgraph: FunctionGraph
        Aesara function Graph object
    inputs: iter
        Inputs for function graph
    assert_fn: func, opt
        Assert function used to check for equality between python and jax. If not
        provided uses np.testing.assert_allclose
    must_be_device_array: Bool
        Checks for instance of jax.interpreters.xla.DeviceArray. For testing purposes
        if this device array is found it indicates if the result was computed by jax

    Returns
    -------
    jax_res

    """
    if assert_fn is None:
        assert_fn = partial(np.testing.assert_allclose, rtol=1e-4)

    opts = Query(include=[None], exclude=["cxx_only", "BlasOpt"])
    jax_mode = Mode(JAXLinker(), opts)
    py_mode = Mode("py", opts)

    aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)
    jax_res = aesara_jax_fn(*inputs)

    if must_be_device_array:
        if isinstance(jax_res, list):
            assert all(
                isinstance(res, jax.interpreters.xla.DeviceArray)
                for res in jax_res)
        else:
            assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)

    aesara_py_fn = function(fgraph.inputs, fgraph.outputs, mode=py_mode)
    py_res = aesara_py_fn(*inputs)

    if len(fgraph.outputs) > 1:
        for j, p in zip(jax_res, py_res):
            assert_fn(j, p)
    else:
        assert_fn(jax_res, py_res)

    return jax_res
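A minimal usage sketch (not from the original test suite; it assumes the test module's usual imports, e.g. numpy as np, aesara.tensor as at, FunctionGraph, and config):

# Hypothetical example: build a tiny graph and check that the JAX and
# Python backends agree on it.
x = at.vector("x")
fgraph = FunctionGraph([x], [at.exp(x).sum()])
compare_jax_and_py(fgraph, [np.arange(3).astype(config.floatX)])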
Example #6
    def test_c_nan(self):
        for dtype in ["floatX", "complex64", "complex128"]:
            self.with_mode(Mode(linker="c"), aes.add, dtype=dtype, test_nan=True)
            self.with_mode(Mode(linker="c"), aes.mul, dtype=dtype, test_nan=True)
        for dtype in ["floatX"]:
            self.with_mode(
                Mode(linker="c"), aes.scalar_minimum, dtype=dtype, test_nan=True
            )
            self.with_mode(
                Mode(linker="c"), aes.scalar_maximum, dtype=dtype, test_nan=True
            )
Example #7
    def test_perform(self):
        for dtype in ["bool", "floatX", "complex64", "complex128", "int8", "uint8"]:
            self.with_mode(Mode(linker="py"), aes.add, dtype=dtype)
            self.with_mode(Mode(linker="py"), aes.mul, dtype=dtype)
            self.with_mode(Mode(linker="py"), aes.scalar_maximum, dtype=dtype)
            self.with_mode(Mode(linker="py"), aes.scalar_minimum, dtype=dtype)
            self.with_mode(Mode(linker="py"), aes.and_, dtype=dtype, tensor_op=at_all)
            self.with_mode(Mode(linker="py"), aes.or_, dtype=dtype, tensor_op=at_any)
        for dtype in ["int8", "uint8"]:
            self.with_mode(Mode(linker="py"), aes.or_, dtype=dtype)
            self.with_mode(Mode(linker="py"), aes.and_, dtype=dtype)
            self.with_mode(Mode(linker="py"), aes.xor, dtype=dtype)
Example #8
    def test_argtopk_1d_collision(self, size, k, dtype, sorted):
        # with non-unique kth max value
        if isinstance(k, str):
            k = eval(k.replace("n", str(size)))

        x = vector(name="x", dtype=dtype)
        y = argtopk(x, k, sorted=sorted, idx_dtype="int32")
        # DebugMode won't like the index change on collision on CPU
        # So don't use DebugMode here.
        mode = self.mode
        if isinstance(self.mode, aesara.compile.debugmode.DebugMode):
            mode = Mode(optimizer=mode.optimizer)
        fn = aesara.function([x], y, mode=mode)
        assert any(
            isinstance(n.op, self.op_class) for n in fn.maker.fgraph.apply_nodes
        )
        rng = np.random.default_rng(utt.fetch_seed())
        xval = np.repeat(
            rng.uniform(-100.0, 100.0, size=size // 2).astype(dtype), 2)
        xval = xval[rng.permutation(size)]
        yval = fn(xval)
        idx = slice(-k, None) if k > 0 else slice(-k)
        goal = np.argsort(xval)[idx].astype("int32")
        utt.assert_allclose(np.sort(xval[yval]), np.sort(xval[goal]))
Example #9
def apply_local_opt_to_rv(opt, op_fn, dist_op, dist_params, size, rng):
    dist_params_aet = []
    for p in dist_params:
        p_aet = aet.as_tensor(p).type()
        p_aet.tag.test_value = p
        dist_params_aet.append(p_aet)

    size_aet = []
    for s in size:
        s_aet = iscalar()
        s_aet.tag.test_value = s
        size_aet.append(s_aet)

    dist_st = op_fn(dist_op(*dist_params_aet, size=size_aet, rng=rng))

    f_inputs = [
        p for p in dist_params_aet + size_aet
        if not isinstance(p, (slice, Constant))
    ]

    mode = Mode("py", EquilibriumOptimizer([opt], max_use_ratio=100))

    f_opt = function(
        f_inputs,
        dist_st,
        mode=mode,
    )

    (new_out,) = f_opt.maker.fgraph.outputs

    return new_out, f_inputs, dist_st, f_opt
Example #10
    def test_norm(self, axis):

        x = dtensor3()
        a = np.random.random((3, 2, 4)).astype(aesara.config.floatX)
        mode = Mode(optimizer="fast_compile", linker="py")

        f = function(
            [x],
            [
                x.norm(L=1, axis=axis, keepdims=True),
                self.makeKeepDims_local(
                    x, x.norm(L=1, axis=axis, keepdims=False), axis),
            ],
            mode=mode,
        )

        ans1, ans2 = f(a)
        assert np.allclose(ans1, ans2)
        assert ans1.shape == ans2.shape

        g = function(
            [x],
            [
                x.norm(L=2, axis=axis, keepdims=True),
                self.makeKeepDims_local(
                    x, x.norm(L=2, axis=axis, keepdims=False), axis),
            ],
            mode=mode,
        )

        ans1, ans2 = g(a)
        assert np.allclose(ans1, ans2)
        assert ans1.shape == ans2.shape
Example #11
    def test_modes(self):
        # this is a quick test after the LazyLinker branch merge
        # to check that all the current modes can still be used.
        linker_classes_involved = []

        predef_modes = ["FAST_COMPILE", "FAST_RUN", "DEBUG_MODE"]

        # Linkers to use with regular Mode
        if config.cxx:
            linkers = [
                "py", "c|py", "c|py_nogc", "vm", "vm_nogc", "cvm", "cvm_nogc"
            ]
        else:
            linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc"]
        modes = predef_modes + [Mode(linker, "fast_run") for linker in linkers]

        for mode in modes:
            x = matrix()
            y = vector()
            f = function([x, y], x + y, mode=mode)
            # test that it runs something
            f([[1, 2], [3, 4]], [5, 6])
            linker_classes_involved.append(f.maker.mode.linker.__class__)
            # print 'MODE:', mode, f.maker.mode.linker, 'stop'

        # regression check:
        # there should be
        # - `VMLinker`
        # - OpWiseCLinker (FAST_RUN)
        # - PerformLinker (FAST_COMPILE)
        # - DebugMode's Linker  (DEBUG_MODE)
        assert 4 == len(set(linker_classes_involved))
Example #12
def test_ifelse_lazy_c():
    a = scalar()
    b = generic()
    c = generic()

    notimpl = NotImplementedOp()

    cloops = [True, False]

    if aesara.config.cxx == "":
        cloops = [False]

    for use_cloop in cloops:
        for lazy in [True, None]:
            linker = aesara.link.vm.VMLinker(use_cloop=use_cloop, lazy=lazy)
            f = function(
                [a, b, c],
                ifelse(a, notimpl(b), c),
                mode=Mode(linker=linker, optimizer="fast_run"),
            )

            with pytest.raises(NotImplementedOpException):
                f(1, "a", "b")

            assert f(0, "a", "b") == "b"
Example #13
def test_jax_BatchedDot():
    # tensor3 . tensor3
    a = tensor3("a")
    a.tag.test_value = (
        np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
    )
    b = tensor3("b")
    b.tag.test_value = (
        np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
    )
    out = aet_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

    # A dimension mismatch should raise a TypeError for compatibility
    inputs = [get_test_value(a)[:-1], get_test_value(b)]
    opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
    jax_mode = Mode(JAXLinker(), opts)
    aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)
    with pytest.raises(TypeError):
        aesara_jax_fn(*inputs)

    # matrix . matrix
    a = matrix("a")
    a.tag.test_value = np.linspace(-1, 1, 5 * 3).astype(config.floatX).reshape((5, 3))
    b = matrix("b")
    b.tag.test_value = np.linspace(1, -1, 5 * 3).astype(config.floatX).reshape((5, 3))
    out = aet_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
Example #14
    def test_borrow_output(self):
        a = dmatrix()
        f = function([a], Out(a, borrow=False))
        o = np.ones((3, 3))
        # `function` no longer permits aliasing outputs to inputs
        assert o is not f(o)

        f = function([a], Out(a * 4, borrow=False))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should not clobber the memory used to store four
        assert np.all(four == 4)

        f = function([a],
                     Out(a * 4, borrow=True),
                     mode=Mode("c|py_nogc", "fast_run"))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should clobber the memory used to store four
        if config.cxx:
            assert not np.all(four == 4)
        else:
            # The Elemwise.perform method doesn't reuse memory, as some
            # NumPy versions don't support that correctly.
            assert np.all(four == 4)
Example #15
def test_reallocation():
    x = scalar("x")
    y = scalar("y")
    z = tanh(3 * x + y) + cosh(x + 5 * y)
    # The functionality is currently implemented only for the non-lazy, non-C VM.
    for linker in [
            VMLinker(allow_gc=False, lazy=False, use_cloop=False),
            VMLinker(allow_gc=True, lazy=False, use_cloop=False),
    ]:
        m = get_mode(Mode(linker=linker))
        m = m.excluding("fusion", "inplace")

        f = function([x, y], z, name="test_reduce_memory", mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if storage_map[i][0] and storage_map[i][0] is storage_map[o][0]:
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len({id(v) for v in storage_map.values()}) < len(storage_map)
Example #16
def test_Mode_basic():
    db = SequenceDB()
    mode = Mode(linker="py", optimizer=OptimizationQuery(include=None), db=db)

    assert mode.optdb is db

    assert str(mode).startswith("Mode(linker=py, optimizer=OptimizationQuery")
Example #17
    def test_memory_leak(self, inplace):
        import gc

        n = 100_000

        x = aesara.shared(np.ones(n, dtype=np.float64))

        y = x.dimshuffle([0, "x"])
        y.owner.op.inplace = inplace

        f = aesara.function([], y, mode=Mode(optimizer=None))

        assert len(f.maker.fgraph.apply_nodes) == 2
        assert isinstance(f.maker.fgraph.toposort()[0].op, DimShuffle)

        assert f.maker.fgraph.toposort()[0].op.inplace is inplace

        tracemalloc.start()

        blocks_last = None
        block_diffs = []
        for i in range(50):
            x.set_value(np.ones(n))
            _ = f()
            _ = gc.collect()
            blocks_i, _ = tracemalloc.get_traced_memory()
            if blocks_last is not None:
                blocks_diff = (blocks_i - blocks_last) // 10**3
                block_diffs.append(blocks_diff)
            blocks_last = blocks_i

        tracemalloc.stop()
        assert np.allclose(np.mean(block_diffs), 0)
Example #18
def test_clinker_literal_cache():
    mode = Mode(linker="c")

    A = matrix()
    input1 = vector()

    normal_svd = np.array(
        [
            [5.936276e01, -4.664007e-07, -2.56265e-06],
            [-4.664007e-07, 9.468691e-01, -3.18862e-02],
            [-2.562651e-06, -3.188625e-02, 1.05226e00],
        ],
        dtype=config.floatX,
    )

    orientationi = np.array([59.36276866, 1.06116353, 0.93797339],
                            dtype=config.floatX)

    for out1 in [A - input1[0] * np.identity(3), input1[0] * np.identity(3)]:
        benchmark = function(inputs=[A, input1],
                             outputs=[out1],
                             on_unused_input="ignore",
                             mode=mode)

        out1 = benchmark(normal_svd, orientationi)
Example #19
def test_NoOutputFromInplace():
    x = matrix()
    y = matrix()
    a = dot(x, y)
    b = tanh(a)
    c = tanh(dot(2 * x, y))

    # Ensure that the elemwise op that produces the output is inplace when
    # using a mode that does not include the optimization
    fct_no_opt = function([x, y], [b, c], mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert op.destroy_map and 0 in op.destroy_map
    op = fct_no_opt.maker.fgraph.outputs[1].owner.op
    assert op.destroy_map and 0 in op.destroy_map

    # Ensure that the elemwise op that produces the output is not inplace when
    # using a mode that includes the optimization
    opt = AddFeatureOptimizer(NoOutputFromInplace([1]))
    mode_opt = Mode(linker="py", optimizer="fast_run").register((opt, 49.9))

    fct_opt = function([x, y], [b, c], mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert op.destroy_map and 0 in op.destroy_map
    op = fct_opt.maker.fgraph.outputs[1].owner.op
    assert not op.destroy_map or 0 not in op.destroy_map
Example #20
    def with_linker(self, linker):
        for xsh, shuffle, zsh in [
            ((2, 3), (1, "x", 0), (3, 1, 2)),
            ((1, 2, 3), (1, 2), (2, 3)),
            ((1, 2, 1, 3), (1, 3), (2, 3)),
            ((2, 3, 4), (2, 1, 0), (4, 3, 2)),
            ((2, 3, 4), ("x", 2, 1, 0, "x"), (1, 4, 3, 2, 1)),
            ((1, 4, 3, 2, 1), (3, 2, 1), (2, 3, 4)),
            ((1, 1, 4), (1, 2), (1, 4)),
            ((1, 1, 1), (), ()),
            ((1, ), ("x", "x"), (1, 1)),
        ]:
            ib = [(entry == 1) for entry in xsh]
            x = self.type(self.dtype, ib)("x")
            e = self.op(ib, shuffle)(x)
            f = aesara.function([x], e, mode=Mode(linker=linker))
            assert f(np.ones(xsh, dtype=self.dtype)).shape == zsh
            # test that DimShuffle.infer_shape works correctly
            x = self.type(self.dtype, ib)("x")
            e = self.op(ib, shuffle)(x)
            f = aesara.function([x],
                                e.shape,
                                mode=Mode(linker=linker),
                                on_unused_input="ignore")
            assert np.array_equal(f(np.ones(xsh, dtype=self.dtype)), zsh)

        # Test when we drop an axis that is not broadcastable
        ib = [False, True, False]
        x = self.type(self.dtype, ib)("x")
        with pytest.raises(ValueError):
            self.op(ib, shuffle)

        # Test when we drop an axis that doesn't have shape 1
        ib = [True, True, False]
        x = self.type(self.dtype, ib)("x")
        e = self.op(ib, (1, 2))(x)
        f = aesara.function([x], e.shape, mode=Mode(linker=linker))
        with pytest.raises(TypeError):
            f(np.ones((2, 1, 4)))

        # Test that we can't use the same dimension multiple times
        xsh, shuffle, zsh = ((1, 1, 4), (0, 1, 2, 0), (1, 4))
        ib = [False, True, False]
        x = self.type(self.dtype, ib)("x")
        with pytest.raises(ValueError):
            DimShuffle(ib, shuffle)
Example #21
    def test_input_dimensions_overflow(self):
        # Elemwise.perform used to compute the product of the input shapes
        # to check if there was a zero in them; it overflowed in this case.
        a, b, c, d, e, f = vectors("abcdef")
        s = a + b + c + d + e + f
        g = aesara.function([a, b, c, d, e, f], s, mode=Mode(linker="py"))
        g(*[np.zeros(2**11, config.floatX) for _ in range(6)])
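A quick arithmetic check of why a size of 2**11 exercises the old bug (a sketch, assuming the shape product was accumulated in a signed 64-bit integer):

# Six vectors of shape (2**11,) give a shape product of (2**11)**6 = 2**66,
# which exceeds the largest signed 64-bit integer, 2**63 - 1.
assert (2**11) ** 6 == 2**66 > 2**63 - 1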
Example #22
def test_betainc_derivative_nan():
    a, b, x = at.scalars("a", "b", "x")
    res = betainc_der(a, b, x, True)
    test_func = function([a, b, x], res, mode=Mode("py"))
    assert not np.isnan(test_func(1, 1, 1))
    assert np.isnan(test_func(1, 1, -1))
    assert np.isnan(test_func(1, 1, 2))
    assert np.isnan(test_func(1, -1, 1))
    assert np.isnan(test_func(1, 1, -1))
Example #23
    def check_partial_function(linker_name):
        x = scalar("input")
        y = x**2
        f = function([x], [y + 7, y - 9, y / 14.0],
                     mode=Mode(optimizer=None, linker=linker_name))

        assert f(3, output_subset=[0, 1, 2]) == f(3)
        assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]
        utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))
Example #24
    def test_c(self):
        for dtype in self.dtypes + self.bin_dtypes:
            for op in self.reds:
                self.with_mode(
                    Mode(linker="c", optimizer=mode_with_gpu.optimizer),
                    op,
                    dtype=dtype,
                    pre_scalar_op=self.pre_scalar_op,
                )
Example #25
def test_VMLinker_make_vm_cvm():
    # We don't want this at module level, since CXX might not be present
    from aesara.link.c.cvm import CVM

    a = scalar()
    linker = VMLinker(allow_gc=False, use_cloop=True)

    f = function([a], a, mode=Mode(optimizer=None, linker=linker))
    assert isinstance(f.fn, CVM)
Example #26
def test_grad_abs():
    a = fscalar("a")
    b = aesara.tensor.nnet.relu(a)
    c = aesara.grad(b, a)
    f = aesara.function([a], c, mode=Mode(optimizer=None))
    # Currently Aesara returns 0.5, but that isn't guaranteed and may
    # change in the future.
    ret = f(0.0)
    assert ret == 0.5, ret
Example #27
        def time_linker(name, linker):
            steps_a = 10
            x = dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
            inp = np.random.rand(1000000)
            for i in range(500):
                f_a(inp)
Example #28
def test_shared_input_output():
    # Test bug reported on the mailing list by Alberto Orlandi
    # https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
    # The shared variable is both an input and an output of the function.
    inc = iscalar("inc")
    state = shared(0)
    state.name = "state"
    linker = CLinker()
    mode = Mode(linker=linker)
    f = function([inc], state, updates=[(state, state + inc)], mode=mode)
    g = function([inc], state, updates=[(state, state + inc)])

    # Initial value
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 0, (f0, g0)

    # Increment state via f, returns the previous value.
    f2 = f(2)
    assert f2 == f0, (f2, f0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 2, (f0, g0)

    # Increment state via g, returns the previous value
    g3 = g(3)
    assert g3 == g0, (g3, g0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 5, (f0, g0)

    vstate = shared(np.zeros(3, dtype="int32"))
    vstate.name = "vstate"
    fv = function([inc], vstate, updates=[(vstate, vstate + inc)], mode=mode)
    gv = function([inc], vstate, updates=[(vstate, vstate + inc)])

    # Initial value
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 0), fv0
    assert np.all(gv0 == 0), gv0

    # Increment state via f, returns the previous value.
    fv2 = fv(2)
    assert np.all(fv2 == fv0), (fv2, fv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 2), fv0
    assert np.all(gv0 == 2), gv0

    # Increment state via g, returns the previous value
    gv3 = gv(3)
    assert np.all(gv3 == gv0), (gv3, gv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 5), fv0
    assert np.all(gv0 == 5), gv0
Example #29
def test_DimShuffle_lift(ds_order, lifted, dist_op, dist_params, size, rtol):

    rng = shared(np.random.RandomState(1233532), borrow=False)

    dist_params_aet = []
    for p in dist_params:
        p_aet = aet.as_tensor(p).type()
        p_aet.tag.test_value = p
        dist_params_aet.append(p_aet)

    size_aet = []
    for s in size:
        s_aet = iscalar()
        s_aet.tag.test_value = s
        size_aet.append(s_aet)

    dist_st = dist_op(*dist_params_aet, size=size_aet, rng=rng).dimshuffle(ds_order)

    f_inputs = [
        p for p in dist_params_aet + size_aet if not isinstance(p, (slice, Constant))
    ]

    mode = Mode(
        "py", EquilibriumOptimizer([local_dimshuffle_rv_lift], max_use_ratio=100)
    )

    f_opt = function(
        f_inputs,
        dist_st,
        mode=mode,
    )

    (new_out,) = f_opt.maker.fgraph.outputs

    if lifted:
        assert new_out.owner.op == dist_op
        assert all(
            isinstance(i.owner.op, DimShuffle)
            for i in new_out.owner.inputs[3:]
            if i.owner
        )
    else:
        assert isinstance(new_out.owner.op, DimShuffle)
        return

    f_base = function(
        f_inputs,
        dist_st,
        mode=no_mode,
    )

    arg_values = [p.get_test_value() for p in f_inputs]
    res_base = f_base(*arg_values)
    res_opt = f_opt(*arg_values)

    np.testing.assert_allclose(res_base, res_opt, rtol=rtol)
Example #30
    def test_callback_with_ifelse(self):
        a, b, c = scalars("abc")
        f = function(
            [a, b, c],
            ifelse(a, 2 * b, 2 * c),
            mode=Mode(optimizer=None, linker=VMLinker(callback=self.callback)),
        )

        f(1, 2, 3)
        assert self.n_callbacks["IfElse"] == 2