Example #1
    def time_linker(name, linker):
        # Compare two graphs of different depth to estimate per-op overhead
        # for the given linker.
        steps_a = 10
        steps_b = 100
        x = tensor.vector()
        a = build_graph(x, steps_a)
        b = build_graph(x, steps_b)

        f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
        f_b = function([x], b, mode=Mode(optimizer=None, linker=linker()))

        f_a([2.0])  # warm-up call: exclude compilation and first-run costs
        t0 = time.time()
        f_a([2.0])
        t1 = time.time()

        f_b([2.0])  # warm-up call
        t2 = time.time()
        f_b([2.0])
        t3 = time.time()

        t_a = t1 - t0
        t_b = t3 - t2

        # Report seconds per thousand ops (s/Kop), from the timing difference
        # between the deep and shallow graphs.
        print("{} takes {:f} s/Kop".format(name, (1000 * (t_b - t_a) /
                                                  (steps_b - steps_a))))
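
Examples #1 and #4 call a `build_graph` helper that is not shown in these snippets. Below is a minimal sketch consistent with the chained `sin(-z + 1)` loop that Example #12 builds inline; the parameter name `iters` is an assumption:

    import aesara.tensor as tensor

    def build_graph(x, iters):
        # Hypothetical reconstruction: chain `iters` elementwise ops so that
        # graph depth, and hence total per-op overhead, scales with `iters`.
        # Mirrors the inline loop in Example #12.
        z = x
        for _ in range(iters):
            z = tensor.sin(-z + 1)
        return z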
Example #2
    def test_modes(self):
        # A quick regression test after the LazyLinker branch merge to check
        # that all the current modes can still be used.
        linker_classes_involved = []

        predef_modes = ["FAST_COMPILE", "FAST_RUN", "DEBUG_MODE"]

        # Linkers to use with regular Mode
        if aesara.config.cxx:
            linkers = [
                "py", "c|py", "c|py_nogc", "vm", "vm_nogc", "cvm", "cvm_nogc"
            ]
        else:
            linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc"]
        modes = predef_modes + [Mode(linker, "fast_run") for linker in linkers]

        for mode in modes:
            x = tt.matrix()
            y = tt.vector()
            f = aesara.function([x, y], x + y, mode=mode)
            # test that it runs something
            f([[1, 2], [3, 4]], [5, 6])
            linker_classes_involved.append(f.maker.mode.linker.__class__)
            # print 'MODE:', mode, f.maker.mode.linker, 'stop'

        # regression check:
        # there should be
        # - VM_Linker
        # - OpWiseCLinker (FAST_RUN)
        # - PerformLinker (FAST_COMPILE)
        # - DebugMode's Linker  (DEBUG_MODE)
        assert len(set(linker_classes_involved)) == 4
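
To see which linker class a given mode actually resolves to, it is enough to compile a trivial function and inspect `f.maker.mode.linker`, as the test above does. A minimal standalone sketch, assuming the same `aesara` API as the snippets here (the `Mode` import path may vary between versions):

    import aesara
    import aesara.tensor as tt
    from aesara.compile.mode import Mode

    x = tt.vector("x")
    f = aesara.function([x], 2 * x, mode=Mode(linker="vm", optimizer="fast_run"))
    print(f.maker.mode.linker.__class__)  # the linker class the mode resolved to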
Example #3
    def test_c(self):
        # Exercise every reduction op / dtype combination through the C linker.
        for dtype in self.dtypes + self.bin_dtypes:
            for op in self.reds:
                self.with_mode(
                    Mode(linker="c", optimizer=mode_with_gpu.optimizer),
                    op,
                    dtype=dtype,
                    pre_scalar_op=self.pre_scalar_op,
                )
Example #4
        def time_linker(name, linker):
            steps_a = 10
            x = tensor.dvector()
            a = build_graph(x, steps_a)

            f_a = function([x], a, mode=Mode(optimizer=None, linker=linker()))
            inp = np.random.rand(1000000)
            # Stress repeated execution: 500 calls on a million-element input.
            for i in range(500):
                f_a(inp)
Example #5
    def check_partial_function(linker_name):
        x = tensor.scalar("input")
        y = x**2
        f = aesara.function([x], [y + 7, y - 9, y / 14.0],
                            mode=Mode(optimizer=None, linker=linker_name))

        # output_subset selects which outputs are computed and returned.
        assert f(3, output_subset=[0, 1, 2]) == f(3)
        assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]
        utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))
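
`output_subset`, exercised above, asks a compiled function to compute and return only some of its outputs. A minimal standalone sketch along the same lines (not the original test code):

    import aesara
    import aesara.tensor as tensor
    from aesara.compile.mode import Mode

    x = tensor.scalar("x")
    f = aesara.function([x], [x + 1, x * 2, x**2],
                        mode=Mode(optimizer=None, linker="vm"))
    print(f(3.0))                        # all three outputs: 4.0, 6.0, 9.0
    print(f(3.0, output_subset=[0, 2]))  # only the first and last outputs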
Example #6
    def check_partial_function_output_keys(linker_name):
        x = tensor.scalar("input")
        y = 3 * x
        f = aesara.function(
            [x],
            {"a": y * 5, "b": y - 7},
            mode=Mode(optimizer=None, linker=linker_name),
        )

        # With dict outputs, output_subset selects outputs by key.
        assert f(5, output_subset=["a"])["a"] == f(5)["a"]
Example #7
    def test_callback_with_ifelse(self):
        a, b, c = tensor.scalars("abc")
        f = function(
            [a, b, c],
            ifelse(a, 2 * b, 2 * c),
            mode=Mode(optimizer=None,
                      linker=vm.VM_Linker(callback=self.callback)),
        )

        f(1, 2, 3)
        # The lazy IfElse thunk runs twice per call: once to request its
        # condition, then once to compute the chosen branch.
        assert self.n_callbacks["IfElse"] == 2
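
The callback tests here and in Example #10 rely on a `self.callback` / `self.n_callbacks` harness defined elsewhere in the test class. A plausible reconstruction, assuming VM_Linker's callback signature `(node, thunk, storage_map, compute_map)` and counting invocations per Op class name:

    from collections import defaultdict

    class CallbackCounter:
        # Hypothetical stand-in for the test class's harness.
        def __init__(self):
            self.n_callbacks = defaultdict(int)

        def callback(self, node, thunk, storage_map, compute_map):
            # Invoked by the VM after each thunk it runs.
            self.n_callbacks[node.op.__class__.__name__] += 1

An instance's bound method would then be passed as `vm.VM_Linker(callback=counter.callback)`, matching the usage in the tests.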
Example #8
    def test_c_nan(self):
        # NaN handling is only meaningful for float dtypes.
        for dtype in self.dtypes:
            if not dtype.startswith("float"):
                continue
            for op in self.reds:
                self.with_mode(
                    Mode(linker="c", optimizer=mode_with_gpu.optimizer),
                    op,
                    dtype=dtype,
                    test_nan=True,
                    pre_scalar_op=self.pre_scalar_op,
                )
Example #9
    def check_updates(linker_name):
        x = tensor.lscalar("input")
        y = aesara.shared(np.asarray(1, "int64"), name="global")
        f = aesara.function(
            [x],
            [x, x + 34],
            updates=[(y, x + 1)],
            mode=Mode(optimizer=None, linker=linker_name),
        )
        g = aesara.function(
            [x],
            [x - 6],
            updates=[(y, y + 3)],
            mode=Mode(optimizer=None, linker=linker_name),
        )

        # Updates to the shared variable run on every call, even when
        # output_subset drops some or all of the outputs.
        assert f(3, output_subset=[]) == []
        assert y.get_value() == 4
        assert g(30, output_subset=[0]) == [24]
        assert g(40, output_subset=[]) == []
        assert y.get_value() == 10
Example #10
    def test_callback(self):
        a, b, c = tensor.scalars("abc")
        f = function(
            [a, b, c],
            (a + b) + c,
            mode=Mode(optimizer=None,
                      linker=vm.VM_Linker(callback=self.callback)),
        )

        f(1, 2, 3)
        # The callback fires once per thunk executed, so after one call the
        # counts match the node count, and they double after a second call.
        n_nodes = len(f.maker.fgraph.toposort())
        assert sum(self.n_callbacks.values()) == n_nodes
        f(1, 2, 3)
        assert sum(self.n_callbacks.values()) == n_nodes * 2
Example #11
def test_c_thunks():
    a = tensor.scalars("a")
    b, c = tensor.vectors("bc")
    cases = [False]
    if aesara.config.cxx:
        # Only test C thunks when a C++ compiler is available.
        cases.append(True)
    for c_thunks in cases:
        f = function(
            [a, b, c],
            ifelse(a, a * b, b * c),
            mode=Mode(optimizer=None,
                      linker=vm.VM_Linker(c_thunks=c_thunks, use_cloop=False)),
        )
        f(1, [2], [3, 2])
        with pytest.raises(ValueError):
            # The shape mismatch between b and c raises at run time.
            f(0, [2], [3, 4])
        assert any(hasattr(t, "cthunk") for t in f.fn.thunks) == c_thunks
Example #12
    def test_no_leak_many_graphs():
        # Verify that memory does not leak when creating and deleting many
        # functions. This isn't really a unit test: run it and watch the
        # process in `top` to see whether memory grows.
        for i in range(10000):
            x = tensor.vector()
            z = x
            for d in range(10):
                z = tensor.sin(-z + 1)

            f = function([x], z, mode=Mode(optimizer=None, linker="cvm"))
            if not i % 100:
                print(gc.collect())
            sys.stdout.flush()

            gc.collect()
            f([2.0])
            f([3.0])
            f([4.0])
            f([5.0])
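
To make the eyeball test above semi-automatic, one could sample the process's resident set size between iterations instead of watching `top`. A sketch using only the standard library (Unix-only; note `ru_maxrss` is kilobytes on Linux but bytes on macOS):

    import resource

    def peak_rss():
        # Peak resident set size of this process so far.
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

Sampling this every 100 iterations and asserting that it plateaus would turn the leak check into a rough automated test.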