Example No. 1
    def test_aliased_outputs_bad(self):
        # Here the aliasing between the outputs is not OK: both outputs are
        # views of the same temporary buffer (destroying one destroys the
        # other), and there is no way to declare output-to-output aliasing
        # to Aesara through the view_map mechanism.
        class CustomOp(Op):
            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 1
                c[0] = r[:-1]
                d[0] = r[1:]

        custom_op = CustomOp()

        x = dvector()
        y = dvector()
        bad_xy0, bad_xy1 = custom_op(x, y)
        out = bad_xy0 * 2 + bad_xy1 * 2
        f = aesara.function([x, y], out, mode="DEBUG_MODE")

        with pytest.raises(BadViewMap):
            f([1, 2, 3, 4], [5, 6, 7, 8])
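        # A hedged sketch of one fix (not part of the original test): copy
        # the slices in perform() so the two outputs stop sharing memory.
        class FixedOp(CustomOp):
            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                r = a * 1
                c[0] = r[:-1].copy()  # independent buffers, no hidden views
                d[0] = r[1:].copy()

        ok_xy0, ok_xy1 = FixedOp()(x, y)
        g = aesara.function([x, y], ok_xy0 * 2 + ok_xy1 * 2, mode="DEBUG_MODE")
        g([1, 2, 3, 4], [5, 6, 7, 8])  # passes DebugMode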
Example No. 2
    def test_aliased_outputs_ok(self):
        # Aliased outputs are OK here because both outputs are also declared
        # (via view_map) as views of an input.
        class CustomOp(Op):
            view_map = {0: [0], 1: [0]}

            def make_node(self, a, b):
                c = a.type()
                d = a.type()
                return Apply(self, [a, b], [c, d])

            def perform(self, node, inp, out):
                a, b = inp
                c, d = out
                c[0] = a
                d[0] = a[1:]

        x = dvector("x")
        y = dvector("y")
        f = aesara.function([x, y], CustomOp()(x, y), mode="DEBUG_MODE")

        r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])

        assert np.all(r0 == [1, 2, 3, 4])
        assert np.all(r1 == [2, 3, 4])
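        # Mismatch sketch (an assumption about DebugMode's view_map check,
        # not part of the original test): view_map maps output indices to the
        # input indices they view, so declaring views of the wrong input
        # should make DebugMode raise BadViewMap at call time.
        class WrongViewOp(CustomOp):
            view_map = {0: [1], 1: [1]}  # perform() actually views input 0

        g = aesara.function([x, y], WrongViewOp()(x, y), mode="DEBUG_MODE")
        with pytest.raises(BadViewMap):
            g([1, 2, 3, 4], [5, 6, 7, 8])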
Example No. 3
def test_stochasticoptimization():

    # This optimization alternates between triggering and not triggering;
    # DebugMode's stability check flags this as a stochastic optimization
    # order.

    last_time_replaced = [False]

    @local_optimizer([add])
    def insert_broken_add_sometimes(fgraph, node):
        if node.op == add:
            last_time_replaced[0] = not last_time_replaced[0]
            if last_time_replaced[0]:
                return [off_by_half(*node.inputs)]
        return False

    edb = EquilibriumDB()
    edb.register("insert_broken_add_sometimes", insert_broken_add_sometimes,
                 "all")
    opt = edb.query("+all")

    a = dvector()
    b = dvector()

    with pytest.raises(StochasticOrder):
        aesara.function(
            [a, b],
            add(a, b),
            mode=DebugMode(
                optimizer=opt,
                check_c_code=True,
                stability_patience=max(2, config.DebugMode__patience),
            ),
        )
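    # Sketch (an assumption about stability_patience semantics, not part of
    # the original test): DebugMode detects the alternation by optimizing the
    # graph stability_patience times and comparing the results, so with
    # stability_patience=1 there is nothing to compare against and no
    # StochasticOrder is raised at compile time.
    aesara.function(
        [a, b],
        add(a, b),
        mode=DebugMode(optimizer=opt, stability_patience=1),
    )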
Example No. 4
 def test_goodviewmap(self):
     goodop = self.BadAddRef()
     goodop.view_map = {0: [1]}
     x = dvector()
     y = dvector()
     f = aesara.function([x, y], goodop(x, y), mode="DEBUG_MODE")
     # Shouldn't raise an error
     f([1, 5, 1], [3, 4, 2, 1, 4])
Example No. 5
 def test_badviewmap_slice(self):
     x = dvector()
     y = dvector()
     f = aesara.function([x, y],
                         self.BadAddSlice()(x, y),
                         mode="DEBUG_MODE")
     with pytest.raises(BadViewMap):
         f([1, 2], [3, 4])
Example No. 6
def test_badoptimization_opt_err():
    # This variant of test_badoptimization() replaces the working code with a
    # new Apply node that will raise an error.
    @local_optimizer([add])
    def insert_bigger_b_add(fgraph, node):
        if node.op == add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                inputs[-1] = aet.concatenate((inputs[-1], inputs[-1]))
                return [node.op(*inputs)]
        return False

    @local_optimizer([add])
    def insert_bad_dtype(fgraph, node):
        if node.op == add:
            inputs = list(node.inputs)
            if inputs[-1].owner is None:
                return [node.outputs[0].astype("float32")]
        return False

    edb = EquilibriumDB()
    edb.register("insert_bigger_b_add", insert_bigger_b_add, "all")
    opt = edb.query("+all")
    edb2 = EquilibriumDB()
    edb2.register("insert_bad_dtype", insert_bad_dtype, "all")
    opt2 = edb2.query("+all")

    a = dvector()
    b = dvector()

    f = aesara.function([a, b], a + b, mode=DebugMode(optimizer=opt))
    with pytest.raises(ValueError, match=r"insert_bigger_b_add"):
        f(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )

    # Test that an optimization that makes an illegal change still surfaces
    # the error from the graph.
    with pytest.raises(TypeError) as einfo:
        with config.change_flags(on_opt_error="raise"):
            f2 = aesara.function(
                [a, b],
                a + b,
                mode=DebugMode(optimizer=opt2, stability_patience=1),
            )
        f2(
            [1.0, 2.0, 3.0],
            [2, 3, 4],
        )

    # Test that we can reraise the error with an extended message
    with pytest.raises(TypeError):
        e = einfo.value
        new_e = e.__class__("TTT" + str(e))
        exc_type, exc_value, exc_trace = sys.exc_info()
        exc_value = new_e
        raise exc_value.with_traceback(exc_trace)
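    # Hedged aside: in modern Python the same extended re-raise can be
    # written without touching sys.exc_info (a sketch, not the form used in
    # the test above).
    with pytest.raises(TypeError):
        e = einfo.value
        raise type(e)("TTT" + str(e)) from e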
Example No. 7
def test_op_invalid_input_types():
    class TestOp(aesara.graph.op.Op):
        itypes = [dvector, dvector, dvector]
        otypes = [dvector]

        def perform(self, node, inputs, outputs):
            pass

    msg = r"^Invalid input types for Op.*"
    with pytest.raises(TypeError, match=msg):
        TestOp()(dvector(), dscalar(), dvector())
Example No. 8
def test_op_invalid_input_types():
    class TestOp(aesara.graph.op.Op):
        itypes = [dvector, dvector, dvector]
        otypes = [dvector]

        def perform(self, node, inputs, outputs):
            pass

    msg = r"^Invalid input types for Op TestOp:\nInput 2/3: Expected TensorType\(float64, vector\)"
    with pytest.raises(TypeError, match=msg):
        TestOp()(dvector(), dscalar(), dvector())
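    # Sketch (not in the source test): with matching input types the call
    # succeeds, and the output variable's type comes from otypes.
    out = TestOp()(dvector(), dvector(), dvector())
    assert out.type == dvector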
Example No. 9
    def test_input_aliasing_affecting_inplace_operations(self):

        # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b, you
        # need to make the inputs mutable (so that inplace operations are
        # used) and to break the elemwise composition with some non-elemwise
        # op (here dot).
        x = dvector()
        y = dvector()
        m1 = dmatrix()
        m2 = dmatrix()
        f = function(
            [
                In(x, mutable=True),
                In(y, mutable=True),
                In(m1, mutable=True),
                In(m2, mutable=True),
            ],
            aet.dot((x * 2), m1) + aet.dot((y * 3), m2),
        )
        # Test 1. If the same variable is given twice

        # Compute bogus values
        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray(
            [
                [1, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
            ],
            dtype="float64",
        )
        bogus_vals = f(v, v, m, m)
        # Since inplace operations were used, v and m may be corrupted, so we
        # recreate them.

        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray(
            [
                [1, 0, 0, 0, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 1, 0, 0],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
            ],
            dtype="float64",
        )
        m_copy = m.copy()
        v_copy = v.copy()
        vals = f(v, v_copy, m, m_copy)

        assert np.allclose(vals, bogus_vals)
Example No. 10
 def make_node(self, x, y, rcond):
     x = as_tensor_variable(x)
     y = as_tensor_variable(y)
     rcond = as_tensor_variable(rcond)
     return Apply(
         self,
         [x, y, rcond],
         [
             matrix(),
             dvector(),
             lscalar(),
             dvector(),
         ],
     )
Example No. 11
 def test3(self):
     a = dvector()
     w2 = sort(a)
     f = aesara.function([a], w2)
     gv = f(self.v_val)
     gt = np.sort(self.v_val)
     utt.assert_allclose(gv, gt)
Example No. 12
 def test_multiple_inplace(self):
     skip_if_blas_ldflags_empty()
     x = dmatrix("x")
     y = dvector("y")
     z = dvector("z")
     f = aesara.function([x, y, z], [at.dot(y, x), at.dot(z, x)], mode=mode_blas_opt)
     vx = np.random.random((3, 3))
     vy = np.random.random((3))
     vz = np.random.random((3))
     out = f(vx, vy, vz)
     assert np.allclose(out[0], np.dot(vy, vx))
     assert np.allclose(out[1], np.dot(vz, vx))
     assert (
         len([n for n in f.maker.fgraph.apply_nodes if isinstance(n.op, AllocEmpty)])
         == 2
     )
Example No. 13
def test_baddestroymap_c():
    x = dvector()
    f = aesara.function([x],
                        wb2i(x),
                        mode=debugmode.DebugMode(check_py_code=False))
    with pytest.raises(debugmode.BadDestroyMap):
        assert np.all(f([1, 2]) == [2, 4])
Example No. 14
 def test_in_update_wrong_dtype(self):
      # Ensure that an error is raised if an In-wrapped variable has an
      # update of a different type.
     a = dscalar("a")
     b = dvector("b")
     with pytest.raises(TypeError):
         In(a, update=b)
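      # For contrast (a sketch, not in the source test): an update expression
      # of the same type as the wrapped variable is accepted.
      In(a, update=a + 1)  # dscalar update for a dscalar: no TypeError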
Example No. 15
    def test_fail(self):
        # Test that conv2d fails for dimensions other than 2 or 3.

        with pytest.raises(Exception):
            conv.conv2d(dtensor4(), dtensor3())
        with pytest.raises(Exception):
            conv.conv2d(dtensor3(), dvector())
Example No. 16
 def test_badviewmap_c(self):
     x = dvector()
     f = aesara.function([x],
                         wb1i(x),
                         mode=debugmode.DebugMode(check_py_code=False))
     with pytest.raises(debugmode.BadViewMap):
         f([1, 2])
Example No. 17
    def test_partial_input_aliasing_affecting_inplace_operations(self):

        # Note: to trigger this bug with aesara rev 4586:2bc6fc7f218b, you
        # need to make the inputs mutable (so that inplace operations are
        # used) and to break the elemwise composition with some non-elemwise
        # op (here dot).
        x = dvector()
        y = dvector()
        z = dvector()
        m1 = dmatrix()
        m2 = dmatrix()
        m3 = dmatrix()

        # Test 2. If variables only partially overlap: more exactly, we care
        # about the case where we have a, b, c and a shares memory with b,
        # b shares memory with c, but c does not share memory with a.

        f = aesara.function(
            [
                In(x, mutable=True),
                In(y, mutable=True),
                In(z, mutable=True),
                In(m1, mutable=True),
                In(m2, mutable=True),
                In(m3, mutable=True),
            ],
            (aet.dot((x * 2), m1) + aet.dot((y * 3), m2) + aet.dot(
                (z * 4), m3)),
        )

        # Compute bogus values
        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray([[1, 0], [0, 1]], dtype="float64")
        bogus_vals = f(v[:2], v[1:3], v[2:4], m, m, m)
        # Since inplace operations were used, v and m may be corrupted, so we
        # recreate them.

        v = np.asarray([1, 2, 3, 4, 5], dtype="float64")
        m = np.asarray([[1, 0], [0, 1]], dtype="float64")
        m_copy1 = m.copy()
        v_copy1 = v.copy()
        m_copy2 = m.copy()
        v_copy2 = v.copy()
        vals = f(v[:2], v_copy1[1:3], v_copy2[2:4], m, m_copy1, m_copy2)

        assert np.allclose(vals, bogus_vals)
Example No. 18
def test_argsort():
    # Set up
    rng = np.random.default_rng(seed=utt.fetch_seed())
    m_val = rng.random((3, 2))
    v_val = rng.random((4))

    # Example 1
    a = dmatrix()
    w = argsort(a)
    f = aesara.function([a], w)
    gv = f(m_val)
    gt = np.argsort(m_val)
    utt.assert_allclose(gv, gt)

    # Example 2
    a = dmatrix()
    axis = lscalar()
    w = argsort(a, axis)
    f = aesara.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 3
    a = dvector()
    w2 = argsort(a)
    f = aesara.function([a], w2)
    gv = f(v_val)
    gt = np.argsort(v_val)
    utt.assert_allclose(gv, gt)

    # Example 4
    a = dmatrix()
    axis = lscalar()
    w3 = argsort(a, axis, "mergesort")
    f = aesara.function([a, axis], w3)
    for axis_val in 0, 1:
        gv = f(m_val, axis_val)
        gt = np.argsort(m_val, axis_val)
        utt.assert_allclose(gv, gt)

    # Example 5
    a = dmatrix()
    axis = lscalar()
    a1 = ArgSortOp("mergesort", [])
    a2 = ArgSortOp("quicksort", [])
    # All of the comparisons below should hold
    assert a1 != a2
    assert a1 == ArgSortOp("mergesort", [])
    assert a2 == ArgSortOp("quicksort", [])

    # Example 6: Testing axis=None
    a = dmatrix()
    w2 = argsort(a, None)
    f = aesara.function([a], w2)
    gv = f(m_val)
    gt = np.argsort(m_val, None)
    utt.assert_allclose(gv, gt)
Example No. 19
 def make_node(self, M):
     M = aet.as_tensor_variable(M)
     if M.ndim != 0:
         raise TypeError(f"{self.__class__.__name__} only works on scalar input")
     elif M.dtype not in integer_dtypes:
         # dtype is an Aesara attribute here
         raise TypeError(f"{self.__class__.__name__} only works on integer input")
     return Apply(self, [M], [dvector()])
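 # Usage sketch ("TheOp" is a hypothetical name for the Op that owns this
 # make_node; the snippet does not show it):
 #
 #     with pytest.raises(TypeError):
 #         TheOp()(dvector())           # ndim != 0 -> "only works on scalar input"
 #     with pytest.raises(TypeError):
 #         TheOp()(aet.constant(1.5))   # non-integer dtype -> TypeError as well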
Example No. 20
def test_jax_basic_multiout_omni():
    # Test that a single output of a multi-output `Op` can be used as input to
    # another `Op`
    x = dvector()
    mx, amx = MaxAndArgmax([0])(x)
    out = mx * amx
    out_fg = FunctionGraph([x], [out])
    compare_jax_and_py(out_fg, [np.r_[1, 2]])
Example No. 21
    def test_optimizations_preserved(self):
        a = dvector()  # the a is for 'anonymous' (un-named).
        x = dvector("x")
        s = dvector("s")
        xm = dmatrix("x")
        sm = dmatrix("s")

        f = function(
            [a, x, s, xm, sm],
            ((a.T.T) * (dot(xm, (sm.T.T.T)) + x).T * (x / x) + s),
        )
        old_default_mode = config.mode
        old_default_opt = config.optimizer
        old_default_link = config.linker
        try:
            try:
                str_f = pickle.dumps(f, protocol=-1)
                config.mode = "Mode"
                config.linker = "py"
                config.optimizer = "None"
                g = pickle.loads(str_f)
                # print g.maker.mode
                # print compile.mode.default_mode
            except NotImplementedError as e:
                if str(e).startswith("DebugMode is not pickl"):
                    g = "ok"
        finally:
            config.mode = old_default_mode
            config.optimizer = old_default_opt
            config.linker = old_default_link

        if g == "ok":
            return

        assert f.maker is not g.maker
        assert f.maker.fgraph is not g.maker.fgraph
        tf = f.maker.fgraph.toposort()
        tg = g.maker.fgraph.toposort()
        assert len(tf) == len(tg)
        for nf, ng in zip(tf, tg):
            assert nf.op == ng.op
            assert len(nf.inputs) == len(ng.inputs)
            assert len(nf.outputs) == len(ng.outputs)
            assert [i.type for i in nf.inputs] == [i.type for i in ng.inputs]
            assert [i.type for i in nf.outputs] == [i.type for i in ng.outputs]
Example No. 22
        def time_linker(name, linker):
            steps_a = 10
            x = dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
            inp = np.random.rand(1000000)
            for i in range(500):
                f_a(inp)
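        # How a caller might compare linkers with this helper (a sketch; the
        # linker classes named here are assumptions about Aesara's API, and
        # build_graph comes from the surrounding test module).
        import time

        for name, linker in [("c", aesara.link.c.basic.CLinker),
                             ("vm", aesara.link.vm.VMLinker)]:
            t0 = time.perf_counter()
            time_linker(name, linker)
            print(name, time.perf_counter() - t0)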
Example No. 23
    def test_wrong_input(self):
        # Make sure errors are raised when image and kernel are not 4D tensors

        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5), "valid", input=dmatrix())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5), "valid", filters=dvector())
        with pytest.raises(Exception):
            self.validate((3, 2, 8, 8), (4, 2, 5, 5), "valid", input=dtensor3())
Example No. 24
def test_baddestroymap():
    class BadAdd(Op):
        def make_node(self, a, b):
            c = a.type()
            return Apply(self, [a, b], [c])

        def perform(self, node, inp, out):
            a, b = inp
            (c,) = out
            c[0] = a
            c[0] += b

    x = dvector()
    y = dvector()
    f = aesara.function([x, y], BadAdd()(x, y), mode="DEBUG_MODE")

    with pytest.raises(BadDestroyMap):
        f([1, 2], [3, 4])
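    # The usual fix, sketched here (not part of the original test): declare
    # the destructive reuse so DebugMode knows about it, and mark the input
    # mutable so the graph is allowed to destroy it.
    class DeclaredAdd(BadAdd):
        destroy_map = {0: [0]}  # output 0 reuses (destroys) input 0

    g = aesara.function(
        [aesara.In(x, mutable=True), y], DeclaredAdd()(x, y), mode="DEBUG_MODE"
    )
    g([1, 2], [3, 4])  # no BadDestroyMap raised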
Example No. 25
    def __init__(
        self,
        input=None,
        target=None,
        n_input=1,
        n_hidden=1,
        n_output=1,
        lr=1e-3,
        **kw,
    ):
        super().__init__(**kw)

        if input is None:
            input = dvector("input")
        if target is None:
            target = dvector("target")

        self.input = input
        self.target = target
        self.lr = shared(lr, "learning_rate")
        self.w1 = shared(np.zeros((n_hidden, n_input)), "w1")
        self.w2 = shared(np.zeros((n_output, n_hidden)), "w2")
        # print self.lr.type

        self.hidden = sigmoid(dot(self.w1, self.input))
        self.output = dot(self.w2, self.hidden)
        self.cost = aet_sum((self.output - self.target)**2)

        self.sgd_updates = {
            self.w1: self.w1 - self.lr * grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * grad(self.cost, self.w2),
        }

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates,
        )

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
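    # Usage sketch ("NNet" is a hypothetical name for the class that owns
    # this __init__; the snippet does not show it):
    #
    #     net = NNet(n_input=3, n_hidden=2, n_output=1, lr=0.01)
    #     out, cost = net.sgd_step([1.0, 0.5, -0.2], [0.0])
    #     pred = net.compute_output([1.0, 0.5, -0.2])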
Example No. 26
    def test_on_real_input(self):
        x = dvector()
        rng = np.random.default_rng(23)
        xval = rng.standard_normal((10))
        assert np.all(0 == aesara.function([x], imag(x))(xval))
        assert np.all(xval == aesara.function([x], real(x))(xval))

        x = imatrix()
        xval = np.asarray(rng.standard_normal((3, 3)) * 100, dtype="int32")
        assert np.all(0 == aesara.function([x], imag(x))(xval))
        assert np.all(xval == aesara.function([x], real(x))(xval))
Example No. 27
    def test_on_real_input(self):
        x = dvector()
        rng = np.random.RandomState(23)
        xval = rng.randn(10)
        assert np.all(0 == aesara.function([x], imag(x))(xval))
        assert np.all(xval == aesara.function([x], real(x))(xval))

        x = imatrix()
        xval = np.asarray(rng.randn(3, 3) * 100, dtype="int32")
        assert np.all(0 == aesara.function([x], imag(x))(xval))
        assert np.all(xval == aesara.function([x], real(x))(xval))
Example No. 28
    def test_check_isfinite_disabled(self):
        x = dvector()
        f = function([x], (x + 2) * 5, mode=DebugMode(check_isfinite=False))

        # nan should go through
        f(np.log([3, -4, 5]))

        # inf should go through
        infs = np.asarray([1.0, 1.0, 1.0]) / 0
        # print infs
        f(infs)
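        # Contrast sketch (not in the source test): with the default
        # check_isfinite=True, DebugMode rejects non-finite intermediate
        # values; the broad Exception catch hedges on the exact exception
        # class DebugMode raises.
        f_strict = function([x], (x + 2) * 5, mode=DebugMode(check_isfinite=True))
        with pytest.raises(Exception):
            f_strict(np.log([3, -4, 5]))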
Example No. 29
def test_scan_debugprint1():
    k = iscalar("k")
    A = dvector("A")

    # Symbolic description of the result
    result, updates = aesara.scan(
        fn=lambda prior_result, A: prior_result * A,
        outputs_info=aet.ones_like(A),
        non_sequences=A,
        n_steps=k,
    )

    final_result = result[-1]
    output_str = debugprint(final_result, file="str")
    lines = output_str.split("\n")

    expected_output = """Subtensor{int64} [id A] ''
     |Subtensor{int64::} [id B] ''
     | |for{cpu,scan_fn} [id C] ''
     | | |k [id D]
     | | |IncSubtensor{Set;:int64:} [id E] ''
     | | | |AllocEmpty{dtype='float64'} [id F] ''
     | | | | |Elemwise{add,no_inplace} [id G] ''
     | | | | | |k [id D]
     | | | | | |Subtensor{int64} [id H] ''
     | | | | |   |Shape [id I] ''
     | | | | |   | |Rebroadcast{(0, False)} [id J] ''
     | | | | |   |   |InplaceDimShuffle{x,0} [id K] ''
     | | | | |   |     |Elemwise{second,no_inplace} [id L] ''
     | | | | |   |       |A [id M]
     | | | | |   |       |InplaceDimShuffle{x} [id N] ''
     | | | | |   |         |TensorConstant{1.0} [id O]
     | | | | |   |ScalarConstant{0} [id P]
     | | | | |Subtensor{int64} [id Q] ''
     | | | |   |Shape [id R] ''
     | | | |   | |Rebroadcast{(0, False)} [id J] ''
     | | | |   |ScalarConstant{1} [id S]
     | | | |Rebroadcast{(0, False)} [id J] ''
     | | | |ScalarFromTensor [id T] ''
     | | |   |Subtensor{int64} [id H] ''
     | | |A [id M]
     | |ScalarConstant{1} [id U]
     |ScalarConstant{-1} [id V]

    Inner graphs:

    for{cpu,scan_fn} [id C] ''
     >Elemwise{mul,no_inplace} [id W] ''
     > |<TensorType(float64, vector)> [id X] -> [id E]
     > |A_copy [id Y] -> [id M]"""

    for truth, out in zip(expected_output.split("\n"), lines):
        assert truth.strip() == out.strip()
Example No. 30
def test_pydotprint_long_name():
    # This is only a very partial test: it prints a graph containing variable
    # and apply nodes whose long names differ but whose shortened names do
    # not. Those nodes must not be merged in the dot graph.
    x = dvector()
    mode = aesara.compile.mode.get_default_mode().excluding("fusion")
    f = aesara.function([x], [x * 2, x + x], mode=mode)
    f([1, 2, 3, 4])

    pydotprint(f, max_label_size=5, print_output_file=False)
    pydotprint([x * 2, x + x], max_label_size=5, print_output_file=False)