Example 1
def test_speed_lazy(linker):
    # TODO FIXME: This isn't a real test.

    def build_graph(x, depth=5):
        z = x
        for d in range(depth):
            z = ifelse(z[0] > 0, -z, z)
        return z

    steps_a = 10
    steps_b = 100
    x = vector()
    a = build_graph(x, steps_a)
    b = build_graph(x, steps_b)

    f_a = function([x], a, mode=Mode(optimizer=None, linker=linker))
    f_b = function([x], b, mode=Mode(optimizer=None, linker=linker))

    f_a([2.0])
    t0 = time.time()
    f_a([2.0])
    t1 = time.time()

    f_b([2.0])

    t2 = time.time()
    f_b([2.0])
    t3 = time.time()

    t_a = t1 - t0
    t_b = t3 - t2
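    # (t_b - t_a) / (steps_b - steps_a) estimates the per-step cost; scaling
    # by 1000 expresses it in seconds per thousand ops (s/Kop).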

    print(f"{linker} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
Example 2
    def __init__(self):
        a = scalar()  # the a is for 'anonymous' (un-named).
        x, s = scalars("xs")
        v = vector("v")

        self.s = s
        self.x = x
        self.v = v

        self.e = a * x + s

        self.f1 = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )

        self.f2 = function(
            [
                x,
                In(a, value=1.0, name="a"),
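                # The next input reuses f1's container for s, so updates made
                # through either function are visible to both.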
                In(s, value=self.f1.container[s], update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
Example 3
def test_NoOutputFromInplace():
    x = matrix()
    y = matrix()
    a = dot(x, y)
    b = tanh(a)
    c = tanh(dot(2 * x, y))

    # Ensure that the elemwise ops that produce the outputs are in-place when
    # using a mode that does not include the NoOutputFromInplace feature
    fct_no_opt = function([x, y], [b, c], mode="FAST_RUN")
    op = fct_no_opt.maker.fgraph.outputs[0].owner.op
    assert op.destroy_map and 0 in op.destroy_map
    op = fct_no_opt.maker.fgraph.outputs[1].owner.op
    assert op.destroy_map and 0 in op.destroy_map

    # Ensure that, when the mode includes the NoOutputFromInplace feature for
    # output 1, that output is no longer produced by an in-place op
    opt = AddFeatureOptimizer(NoOutputFromInplace([1]))
    mode_opt = Mode(linker="py", optimizer="fast_run").register((opt, 49.9))

    fct_opt = function([x, y], [b, c], mode=mode_opt)
    op = fct_opt.maker.fgraph.outputs[0].owner.op
    assert op.destroy_map and 0 in op.destroy_map
    op = fct_opt.maker.fgraph.outputs[1].owner.op
    assert not op.destroy_map or 0 not in op.destroy_map
Example 4
def test_seed_fn():
    idx = ivector()

    for new_seed, same in [(234, True), (None, True), (23, False)]:
        random = MRG_RandomStream(234)
        fn1 = function([], random.uniform((2, 2), dtype="float32"))
        fn2 = function([], random.uniform((3, 3), nstreams=2, dtype="float32"))
        fn3 = function([idx], random.uniform(idx, nstreams=3, ndim=1, dtype="float32"))

        fn1_val0 = fn1()
        fn1_val1 = fn1()
        assert not np.allclose(fn1_val0, fn1_val1)
        fn2_val0 = fn2()
        fn2_val1 = fn2()
        assert not np.allclose(fn2_val0, fn2_val1)
        fn3_val0 = fn3([4])
        fn3_val1 = fn3([4])
        assert not np.allclose(fn3_val0, fn3_val1)
        assert fn1_val0.size == 4
        assert fn2_val0.size == 9

        random.seed(new_seed)

        fn1_val2 = fn1()
        fn1_val3 = fn1()
        fn2_val2 = fn2()
        fn2_val3 = fn2()
        fn3_val2 = fn3([4])
        fn3_val3 = fn3([4])
        assert np.allclose(fn1_val0, fn1_val2) == same
        assert np.allclose(fn1_val1, fn1_val3) == same
        assert np.allclose(fn2_val0, fn2_val2) == same
        assert np.allclose(fn2_val1, fn2_val3) == same
        assert np.allclose(fn3_val0, fn3_val2) == same
        assert np.allclose(fn3_val1, fn3_val3) == same
Example 5
    def test_masked_input(self):
        m = matrix("m")
        mt = m.T
        mt.name = "m.T"
        with pytest.raises(UnusedInputError):
            function([m, mt], mt * 2)
        function([m, mt], mt * 2, on_unused_input="ignore")
Example 6
    def time_linker(name, linker):
        steps_a = 5
        steps_b = 100
        x = vector()
        a = build_graph(x, steps_a)
        b = build_graph(x, steps_b)

        f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
        f_b = function([x], b, mode=Mode(optimizer=None, linker=linker()))

        f_a([2.0, 3.0])
        t0 = time.time()
        f_a([2.0, 3.0])
        t1 = time.time()

        f_b([2.0, 3.0])

        t2 = time.time()
        f_b([2.0, 3.0])
        t3 = time.time()

        t_a = t1 - t0
        t_b = t3 - t2

        print(
            f"{name} takes {1000 * (t_b - t_a) / (steps_b - steps_a):f} s/Kop")
Example 7
    def test_f_contiguous(self):
        a = fmatrix("a")
        b = fmatrix("b")
        z = BrokenCImplementationAdd()(a, b)
        # In this test, we do not want z to be an output of the graph.
        out = dot(z, np.eye(7))

        a_val = self.rng.randn(7, 7).astype("float32")
        b_val = self.rng.randn(7, 7).astype("float32")

        # Should work
        mode = DebugMode(check_preallocated_output=["c_contiguous"])

        f = function([a, b], out, mode=mode)
        f(a_val, b_val)
        # print 'out_val =', out_val
        # print out_val.strides

        # Should raise an Exception, since the output buffer is
        # used incorrectly.
        mode = DebugMode(check_preallocated_output=["f_contiguous"])

        f = function([a, b], out, mode=mode)

        if config.cxx:
            with pytest.raises(BadThunkOutput):
                f(a_val, b_val)
        else:
            # The python code of this op is good.
            f(a_val, b_val)
Example 8
    def test_check_isfinite(self):
        x = vector()
        f = function([x], (x + 2) * 5, mode="DEBUG_MODE")
        g = function([x], log(x), mode="DEBUG_MODE")

        # this should work
        f(np.log([3, 4, 5]).astype(config.floatX))

        # If TensorType.filter_checks_isfinite were true, the input filter
        # itself would raise ValueError; since it is not, DebugMode checks the
        # values internally and raises InvalidValueError, which is what the
        # calls below expect.
        with pytest.raises(InvalidValueError):
            f(np.log([3, -4, 5]).astype(config.floatX))
        with pytest.raises(InvalidValueError):
            f((np.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
        with pytest.raises(InvalidValueError):
            f((np.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))

        # generating an invalid value internally should trigger
        # InvalidValueError
        with pytest.raises(InvalidValueError):
            g(np.asarray([3, -4, 5], dtype=config.floatX))

        # this should disable the exception
        TensorType.filter_checks_isfinite = False
        predefined_modes["DEBUG_MODE"].check_isfinite = False
        # insert several Inf
        f(np.asarray(np.asarray([1.0, 1.0, 1.0]) / 0, dtype=config.floatX))
Example 9
    def test_f_contiguous_out(self):
        # Same test as test_f_contiguous, but check that it works
        # even if z _is_ the output of the graph
        a = fmatrix("a")
        b = fmatrix("b")
        out = BrokenCImplementationAdd()(a, b)

        a_val = self.rng.randn(7, 7).astype("float32")
        b_val = self.rng.randn(7, 7).astype("float32")

        # Should work
        mode = DebugMode(check_preallocated_output=["c_contiguous"])

        f = function([a, b], out, mode=mode)
        f(a_val, b_val)
        # print 'out_val =', out_val
        # print out_val.strides

        # Should raise an Exception, since the output buffer is
        # used incorrectly.
        mode = DebugMode(check_preallocated_output=["f_contiguous"])

        f = function([a, b], out, mode=mode)

        if config.cxx:
            with pytest.raises(BadThunkOutput):
                f(a_val, b_val)
        else:
            # The python code of this op is good.
            f(a_val, b_val)
Example 10
def test_stochasticoptimization():

    # this optimization alternates between triggering and not triggering.

    last_time_replaced = [False]

    @local_optimizer([add])
    def insert_broken_add_sometimes(fgraph, node):
        if node.op == add:
            last_time_replaced[0] = not last_time_replaced[0]
            if last_time_replaced[0]:
                return [off_by_half(*node.inputs)]
        return False

    edb = EquilibriumDB()
    edb.register("insert_broken_add_sometimes", insert_broken_add_sometimes,
                 "all")
    opt = edb.query("+all")

    a = dvector()
    b = dvector()
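    # Because the rewrite fires only every other time it is tried, repeated
    # optimization passes (stability_patience >= 2) produce different graphs,
    # which DebugMode reports as a StochasticOrder error.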

    with pytest.raises(StochasticOrder):
        function(
            [a, b],
            add(a, b),
            mode=DebugMode(
                optimizer=opt,
                check_c_code=True,
                stability_patience=max(2, config.DebugMode__patience),
            ),
        )
Example 11
    def test_givens_input_var(self):
        # Ensure error is raised when trying to replace an input variable.

        x = scalar("x")
        y = x * 2
        with pytest.raises(RuntimeError):
            function([x], y, givens={x: x + 1})
Example 12
    def test_constant_output(self):
        # Test that if the output is a constant, we respect the aesara memory interface
        f = function([], aet.constant([4]))
        # print f.maker.fgraph.toposort()
        out = f()
        assert (out == 4).all()
        out[0] = 3
        out2 = f()
        # If the following two asserts fail, it means Aesara broke its memory contract.
        assert out2 is not out
        assert (out2 == 4).all()

        # Test that if the output is a constant and borrow=True, we respect the aesara memory interface
        f = function([], Out(aet.constant([4]), borrow=True))
        # print f.maker.fgraph.toposort()
        out = f()
        assert (out == 4).all()
        out[0] = 3
        out2 = f()

        if isinstance(get_default_mode(), DebugMode):
            # In DebugMode, we don't implement optimization based on borrow on the output.
            assert (out2 == 4).all()
        else:
            assert out2 is out
            assert (out2 == 3).all()
Example 13
    def test_shared_state2(self):
        a = scalar()  # the a is for 'anonymous' (un-named).
        x, s = scalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=False),
            ],
            s + a * x,
        )
        g = function(
            [x, In(a, value=1.0, name="a"), In(s, value=f.container[s])], s + a * x
        )
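        # g reads the value stored in f's container for s but declares no
        # update, so calling g never changes the shared state.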

        f(1, 2)
        assert f[s] == 2
        assert g[s] == 2
        f(1, 2)
        assert f[s] == 4
        assert g[s] == 4
        g(1, 2)  # has no effect on state
        assert f[s] == 4
        assert g[s] == 4
Example 14
    def test_copy_delete_updates(self):
        w = iscalar("w")
        x = fscalar("x")
        # Shared variables for the test; one of them will receive an update
        y = shared(value=1, name="y")
        z = shared(value=2, name="z")
        out = x + y + z

        # Test for different linkers
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = function([x], out, mode=mode, updates={z: z * 2})
            cpy = ori.copy(delete_updates=True)
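            # With the update on z removed, z keeps its value of 2, so cpy(1)
            # is always 1 + 1 + 2 == 4.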

            assert cpy(1)[0] == 4
            assert cpy(1)[0] == 4
            assert cpy(1)[0] == 4

        # Test if unused implicit and explicit inputs from delete_updates
        # are ignored as intended.
        for mode in ["FAST_RUN", "FAST_COMPILE"]:
            ori = function([x], x, mode=mode, updates={z: z * 2})
            cpy = ori.copy(delete_updates=True)

            ori = function([x, w], x, mode=mode, updates={z: z + w})
            cpy = ori.copy(delete_updates=True)
Example 15
    def test_shared_state0(self):
        a = scalar()  # the a is for 'anonymous' (un-named).
        x, s = scalars("xs")

        f = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=0.0, update=s + a * x, mutable=True),
            ],
            s + a * x,
        )
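        # g shares f's container for s, but its update subtracts a * x where
        # f's update adds it.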
        g = function(
            [
                x,
                In(a, value=1.0, name="a"),
                In(s, value=f.container[s], update=s - a * x, mutable=True),
            ],
            s + a * x,
        )

        f(1, 2)
        assert f[s] == 2
        assert g[s] == 2
        g(1, 2)
        assert f[s] == 0
        assert g[s] == 0
Example 16
def test_FunctionMaker_cache_optimizations():

    graph_db_file = os.path.join(config.compiledir, "optimized_graphs.pkl")
    if os.path.exists(graph_db_file):
        os.remove(graph_db_file)

    floatX = "float32"
    mode = config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"

    assert not os.path.exists(graph_db_file)

    with config.change_flags(cache_optimizations=True):
        a = fmatrix("a")
        b = fmatrix("b")
        c = shared(np.ones((10, 10), dtype=floatX))
        d = shared(np.ones((10, 10), dtype=floatX))
        e = aet_sum(aet_sum(aet_sum(a ** 2 + b) + c) + d)
        f1 = function([a, b], e, mode=mode)

        # FIXME: We can do much better about testing this.
        assert os.path.exists(graph_db_file)

        m = fmatrix("x1")
        n = fmatrix("x2")
        p = shared(np.ones((10, 10), dtype=floatX))
        q = shared(np.ones((10, 10), dtype=floatX))
        j = aet_sum(aet_sum(aet_sum(m ** 2 + n) + p) + q)
        f2 = function([m, n], j, mode=mode)

        in1 = np.ones((10, 10), dtype=floatX)
        in2 = np.ones((10, 10), dtype=floatX)
        assert f1(in1, in2) == f2(in1, in2)
Example 17
    def test_swap_SharedVariable_with_given(self):
        # A special test case for logistic_sgd.py from the Deep Learning Tutorial.
        # It asserts that SharedVariables used by different functions share the same storage.

        train_x = shared(value=np.random.rand(10, 10).astype(config.floatX))
        test_x = shared(value=np.random.rand(10, 10).astype(config.floatX))

        train_y = shared(value=np.random.rand(10, 1).astype(config.floatX))
        test_y = shared(value=np.random.rand(10, 1).astype(config.floatX))

        i = iscalar("index")
        x = vector("x")
        y = vector("y")
        # this formula is meaningless; it exists only for the test
        out = (aet_sum(x) - y) ** 2
        train = function(
            [i],
            out,
            givens={x: train_x[i], y: train_y[i]},
            updates={train_x: train_x + 0.1},
        )

        test_def = function([i], out, givens={x: test_x[i], y: test_y[i]})
        test_cpy = train.copy(
            swap={train_x: test_x, train_y: test_y}, delete_updates=True
        )
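        # After the swap, the copied function's shared inputs should share the
        # very same value storage as test_def's inputs.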

        for in1, in2 in zip(test_def.maker.inputs, test_cpy.maker.inputs):
            assert in1.value is in2.value
Example 18
def check_shape_lifted_rv(rv, params, size, rng):
    aet_params = []
    for p in params:
        p_aet = aet.as_tensor(p)
        p_aet = p_aet.type()
        p_aet.tag.test_value = p
        aet_params.append(p_aet)

    aet_size = []
    for s in size:
        s_aet = aet.as_tensor(s)
        s_aet = s_aet.type()
        s_aet.tag.test_value = s
        aet_size.append(s_aet)

    rv = rv(*aet_params, size=aet_size, rng=rng)
    rv_lifted = lift_rv_shapes(rv.owner)
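    # lift_rv_shapes broadcasts the distribution parameters up to the
    # requested size, so the rewritten node no longer needs an explicit size.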

    # Make sure the size input is empty
    assert np.array_equal(rv_lifted.inputs[1].data, [])

    f_ref = function(
        aet_params + aet_size,
        rv,
        mode=no_mode,
    )
    f_lifted = function(
        aet_params + aet_size,
        rv_lifted.outputs[1],
        mode=no_mode,
    )
    f_ref_val = f_ref(*(params + size))
    f_lifted_val = f_lifted(*(params + size))
    assert np.array_equal(f_ref_val, f_lifted_val)
Example 19
    def test_borrow_output(self):
        a = dmatrix()
        f = function([a], Out(a, borrow=False))
        o = np.ones((3, 3))
        # function no longer permits aliasing outputs to inputs
        assert o is not f(o)

        f = function([a], Out(a * 4, borrow=False))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should not clobber the memory used to store four
        assert np.all(four == 4)

        f = function([a],
                     Out(a * 4, borrow=True),
                     mode=Mode("c|py_nogc", "fast_run"))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + 0.1)  # should clobber the memory used to store four
        if config.cxx:
            assert not np.all(four == 4)
        else:
            # The Elemwise.perform method doesn't reuse memory,
            # as some NumPy versions don't support that correctly.
            assert np.all(four == 4)
Example 20
    def test_disconnected_input(self):
        a = scalar("a")
        v = vector("v")
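        # `a` does not appear in the output expression, so function() raises
        # UnusedInputError unless on_unused_input="ignore" is passed.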
        with pytest.raises(UnusedInputError):
            function([a, v], v * 2)

        function([a, v], v * 2, on_unused_input="ignore")
Example 21
    def test_in_shared_variable(self):
        # Ensure that an error is raised if the In wrapper is used to wrap
        # a shared variable
        a = aesara.shared(1.0)
        a_wrapped = In(a, update=a + 1)
        with pytest.raises(TypeError):
            function([a_wrapped])
Example 22
def test_badoptimization_opt_err():
    # This variant of test_badoptimization() replaces the working code
    # with a new apply node that will raise an error.
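    # insert_bigger_b_add doubles the length of one input, so the rewritten
    # add fails with mismatched shapes at run time, and the resulting error
    # message names the offending rewrite.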
    @local_optimizer([add])
    def insert_bigger_b_add(fgraph, node):
        if node.op == add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = at.concatenate((inputs[-1], inputs[-1]))
                return [node.op(*inputs)]
        return False

    @local_optimizer([add])
    def insert_bad_dtype(fgraph, node):
        if node.op == add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                return [node.outputs[0].astype("float32")]
        return False

    edb = EquilibriumDB()
    edb.register("insert_bigger_b_add", insert_bigger_b_add, "all")
    opt = edb.query("+all")
    edb2 = EquilibriumDB()
    edb2.register("insert_bad_dtype", insert_bad_dtype, "all")
    opt2 = edb2.query("+all")

    a = dvector()
    b = dvector()

    f = function([a, b], a + b, mode=DebugMode(optimizer=opt))
    with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
        f(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )

    # Test that a rewrite that makes an illegal change still triggers the error from the graph.
    with pytest.raises(TypeError) as einfo:
        with config.change_flags(on_opt_error="raise"):
            f2 = function(
                [a, b],
                a + b,
                mode=DebugMode(optimizer=opt2, stability_patience=1),
            )
        f2(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )

    # Test that we can reraise the error with an extended message
    with pytest.raises(TypeError):
        e = einfo.value
        new_e = e.__class__("TTT" + str(e))
        exc_type, exc_value, exc_trace = sys.exc_info()
        exc_value = new_e
        raise exc_value.with_traceback(exc_trace)
Example 23
def compare_jax_and_py(
    fgraph,
    inputs,
    assert_fn=None,
    must_be_device_array=True,
):
    """Function to compare python graph output and jax compiled output for testing equality

    In the tests below, computational graphs are defined in Aesara. These
    graphs are then passed to this function, which compiles them with both
    the JAX and Python linkers, runs the computation with both, and checks
    that the results match.

    Parameters
    ----------
    fgraph: FunctionGraph
        Aesara function graph object
    inputs: iter
        Inputs for the function graph
    assert_fn: callable, optional
        Assert function used to check for equality between the Python and JAX
        outputs. If not provided, np.testing.assert_allclose is used.
    must_be_device_array: bool
        If True, check that the JAX results are instances of
        jax.interpreters.xla.DeviceArray, i.e. that they were actually
        computed by JAX.

    Returns
    -------
    jax_res

    """
    if assert_fn is None:
        assert_fn = partial(np.testing.assert_allclose, rtol=1e-4)

    opts = Query(include=[None], exclude=["cxx_only", "BlasOpt"])
    jax_mode = Mode(JAXLinker(), opts)
    py_mode = Mode("py", opts)

    aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)
    jax_res = aesara_jax_fn(*inputs)

    if must_be_device_array:
        if isinstance(jax_res, list):
            assert all(
                isinstance(res, jax.interpreters.xla.DeviceArray)
                for res in jax_res)
        else:
            assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)

    aesara_py_fn = function(fgraph.inputs, fgraph.outputs, mode=py_mode)
    py_res = aesara_py_fn(*inputs)

    if len(fgraph.outputs) > 1:
        for j, p in zip(jax_res, py_res):
            assert_fn(j, p)
    else:
        assert_fn(jax_res, py_res)

    return jax_res
Example 24
def test_NanGuardMode():
    # Tests that NanGuardMode works by intentionally feeding in numpy.inf and
    # numpy.nan values. A working implementation should be able to catch all
    # of these abnormalities.
    rng = np.random.default_rng(2482)
    x = matrix()
    w = shared(rng.standard_normal((5, 7)).astype(config.floatX))
    y = dot(x, w)

    fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
    a = rng.standard_normal((3, 5)).astype(config.floatX)

    with pytest.warns(RuntimeWarning):
        infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 5))

    nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 5))

    biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 5))
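    # 1e20 is above NanGuardMode's default threshold for suspiciously large
    # values, so it should be flagged as well.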

    fun(a)  # normal values

    # Temporarily silence logger
    _logger = logging.getLogger("aesara.compile.nanguardmode")
    try:
        _logger.propagate = False
        with pytest.raises(AssertionError):
            fun(infa)  # INFs
        with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
            fun(nana)  # NANs
        with pytest.raises(AssertionError):
            fun(biga)  # big values
    finally:
        _logger.propagate = True

    # slices
    a = rng.standard_normal((3, 4, 5)).astype(config.floatX)

    with pytest.warns(RuntimeWarning):
        infa = np.tile((np.asarray(100.0) ** 1000000).astype(config.floatX), (3, 4, 5))

    nana = np.tile(np.asarray(np.nan).astype(config.floatX), (3, 4, 5))

    biga = np.tile(np.asarray(1e20).astype(config.floatX), (3, 4, 5))

    x = tensor3()
    y = x[:, at.arange(2), at.arange(2), None]
    fun = function([x], y, mode=NanGuardMode(nan_is_error=True, inf_is_error=True))
    fun(a)  # normal values
    try:
        _logger.propagate = False
        with pytest.raises(AssertionError):
            fun(infa)  # INFs
        with pytest.raises(AssertionError), pytest.warns(RuntimeWarning):
            fun(nana)  # NANs
        with pytest.raises(AssertionError):
            fun(biga)  # big values
    finally:
        _logger.propagate = True
Example 25
def test_shared_input_output():
    # Test bug reported on the mailing list by Alberto Orlandi
    # https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
    # The shared variable is both an input and an output of the function.
    inc = iscalar("inc")
    state = shared(0)
    state.name = "state"
    linker = CLinker()
    mode = Mode(linker=linker)
    f = function([inc], state, updates=[(state, state + inc)], mode=mode)
    g = function([inc], state, updates=[(state, state + inc)])

    # Initial value
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 0, (f0, g0)

    # Increment state via f, returns the previous value.
    f2 = f(2)
    assert f2 == f0, (f2, f0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 2, (f0, g0)

    # Increment state via g, returns the previous value
    g3 = g(3)
    assert g3 == g0, (g3, g0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 5, (f0, g0)

    vstate = shared(np.zeros(3, dtype="int32"))
    vstate.name = "vstate"
    fv = function([inc], vstate, updates=[(vstate, vstate + inc)], mode=mode)
    gv = function([inc], vstate, updates=[(vstate, vstate + inc)])

    # Initial value
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 0), fv0
    assert np.all(gv0 == 0), gv0

    # Increment state via f, returns the previous value.
    fv2 = fv(2)
    assert np.all(fv2 == fv0), (fv2, fv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 2), fv0
    assert np.all(gv0 == 2), gv0

    # Increment state via g, returns the previous value
    gv3 = gv(3)
    assert np.all(gv3 == gv0), (gv3, gv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 5), fv0
    assert np.all(gv0 == 5), gv0
Example 26
def test_empty_givens_updates():
    # Regression test for bug fixed in 8625e03.

    # Empty givens / updates dictionaries were not properly detected before,
    # triggering spurious crashes at compile time.
    x = scalar()
    y = x * 2
    function([In(x)], y, givens={})
    function([In(x)], y, updates={})
Example 27
def test_DimShuffle_lift(ds_order, lifted, dist_op, dist_params, size, rtol):

    rng = shared(np.random.RandomState(1233532), borrow=False)

    dist_params_aet = []
    for p in dist_params:
        p_aet = aet.as_tensor(p).type()
        p_aet.tag.test_value = p
        dist_params_aet.append(p_aet)

    size_aet = []
    for s in size:
        s_aet = iscalar()
        s_aet.tag.test_value = s
        size_aet.append(s_aet)

    dist_st = dist_op(*dist_params_aet, size=size_aet, rng=rng).dimshuffle(ds_order)

    f_inputs = [
        p for p in dist_params_aet + size_aet if not isinstance(p, (slice, Constant))
    ]

    mode = Mode(
        "py", EquilibriumOptimizer([local_dimshuffle_rv_lift], max_use_ratio=100)
    )

    f_opt = function(
        f_inputs,
        dist_st,
        mode=mode,
    )

    (new_out,) = f_opt.maker.fgraph.outputs
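    # When the lift applies, the DimShuffle moves from the RV's output onto
    # its distribution parameters (inputs[3:], i.e. everything after the rng,
    # size, and dtype inputs).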

    if lifted:
        assert new_out.owner.op == dist_op
        assert all(
            isinstance(i.owner.op, DimShuffle)
            for i in new_out.owner.inputs[3:]
            if i.owner
        )
    else:
        assert isinstance(new_out.owner.op, DimShuffle)
        return

    f_base = function(
        f_inputs,
        dist_st,
        mode=no_mode,
    )

    arg_values = [p.get_test_value() for p in f_inputs]
    res_base = f_base(*arg_values)
    res_opt = f_opt(*arg_values)

    np.testing.assert_allclose(res_base, res_opt, rtol=rtol)
Example 28
def compare_jax_and_py(
    fgraph: FunctionGraph,
    test_inputs: iter,
    assert_fn: Optional[callable] = None,
    must_be_device_array: bool = True,
):
    """Function to compare python graph output and jax compiled output for testing equality

    In the tests below, computational graphs are defined in Aesara. These
    graphs are then passed to this function, which compiles them with both
    the JAX and Python linkers, runs the computation with both, and checks
    that the results match.

    Parameters
    ----------
    fgraph: FunctionGraph
        Aesara function graph object
    test_inputs: iter
        Numerical inputs for testing the function graph
    assert_fn: callable, optional
        Assert function used to check for equality between the Python and JAX
        outputs. If not provided, np.testing.assert_allclose is used.
    must_be_device_array: bool
        If True, check that the JAX results are instances of
        jax.interpreters.xla.DeviceArray, i.e. that they were actually
        computed by JAX.

    Returns
    -------
    jax_res

    """
    if assert_fn is None:
        assert_fn = partial(np.testing.assert_allclose, rtol=1e-4)

    fn_inputs = [i for i in fgraph.inputs if not isinstance(i, SharedVariable)]
    aesara_jax_fn = function(fn_inputs, fgraph.outputs, mode=jax_mode)
    jax_res = aesara_jax_fn(*test_inputs)

    if must_be_device_array:
        if isinstance(jax_res, list):
            assert all(
                isinstance(res, jax.interpreters.xla.DeviceArray)
                for res in jax_res)
        else:
            assert isinstance(jax_res, jax.interpreters.xla.DeviceArray)

    aesara_py_fn = function(fn_inputs, fgraph.outputs, mode=py_mode)
    py_res = aesara_py_fn(*test_inputs)

    if len(fgraph.outputs) > 1:
        for j, p in zip(jax_res, py_res):
            assert_fn(j, p)
    else:
        assert_fn(jax_res, py_res)

    return jax_res
Example 29
def test_vm_gc():
    x = vector()
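    # RunOnce is a helper op that (presumably) asserts its perform method runs
    # at most once; with a lazy VM and garbage collection the intermediate `p`
    # must therefore be kept rather than recomputed.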
    p = RunOnce()(x)
    mode = Mode(linker=VMLinker(lazy=True))
    f = function([In(x, mutable=True)], [p + 1, p + 2], mode=mode)
    f([1, 2, 3])

    p = RunOnce()(x)
    pp = p + p
    f = function([x], [pp + pp], mode=mode)
    f([1, 2, 3])
Example 30
def test_bug_2009_07_17_borrowed_output():
    # Regression test for a bug where output was borrowed by mistake.
    a = dmatrix()
    b = dmatrix()
    # The output should *NOT* be borrowed.
    g = function([a, b], Out(dot(a, b), borrow=False))

    x = np.zeros((1, 2))
    y = np.ones((2, 5))

    z = g(x, y)
    # print(z)  # Should be zero.
    x.fill(1)
    # print(g(x, y))  # Should be non-zero.
    # print(z)  # Should still be zero.
    assert np.linalg.norm(z) == 0

    # The code above was supposed to fail when it was written (or, more
    # accurately, on the next revision, i.e. when it was merged with the
    # rest of the code, i.e. on revision cac9c9e9f08e).
    # However, for some reason, it no longer fails at this revision.
    # Thus, a new test (below) was added that exhibits the same issue. Note
    # that it may be better to move it into the test_nnet.py test file if it
    # turns out the bug was caused by
    # 'crossentropy_softmax_argmax_1hot_with_bias' and not a more general issue.
    test_output_activation_no_bias = dmatrix()
    test_b2 = dvector()
    test_target = ivector()
    nll_softmax_argmax = crossentropy_softmax_argmax_1hot_with_bias(
        test_output_activation_no_bias, test_b2, test_target)
    output = nll_softmax_argmax[1]
    g = function(
        [test_output_activation_no_bias, test_b2, test_target],
        Out(output, borrow=False),
    )

    a = np.zeros((1, 5))
    b = np.ones(5)
    c = np.zeros(1, dtype=np.int32)

    z = g(a, b, c)
    z_backup = copy.copy(z)
    id_z = id(z)
    # print(f"Output z after first call: {z}")
    a[0, 0] = 1
    id_other = id(g(a, b, c))
    # print(f"Output z after second call: {z}")
    # Ensure that calling the function again returns a pointer towards a new
    # array.
    assert id_z != id_other
    # Just to be 100% sure, ensure that z was not altered.
    assert (z == z_backup).all()