Code Example #1
File: test_rop.py  Project: ricardoV94/aesara
    def check_rop_lop(self, y, out_shape):
        """
        As check_mat_rop_lop, except the input is self.x which is a
        vector. The output is still a vector.
        """
        # TEST ROP
        vx = np.asarray(self.rng.uniform(size=self.in_shape),
                        aesara.config.floatX)
        vv = np.asarray(self.rng.uniform(size=self.in_shape),
                        aesara.config.floatX)

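        # R-op under test vs. a reference that materializes the Jacobian with scan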
        yv = Rop(y, self.x, self.v)
        rop_f = function([self.x, self.v], yv, on_unused_input="ignore")
        J, _ = aesara.scan(
            lambda i, y, x: grad(y[i], x),
            sequences=aet.arange(y.shape[0]),
            non_sequences=[y, self.x],
        )
        sy = dot(J, self.v)

        scan_f = function([self.x, self.v], sy, on_unused_input="ignore")

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert np.allclose(v1, v2), f"ROP mismatch: {v1} {v2}"

        try:
            Rop(
                aesara.clone_replace(y, replace={self.x: break_op(self.x)}),
                self.x,
                self.v,
            )
        except ValueError:
            pytest.skip("Rop does not handle non-differentiable inputs "
                        "correctly. Bug exposed by fixing Add.grad method.")

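        # TEST LOP: vv now has the output shape, since v left-multiplies the Jacobian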
        vx = np.asarray(self.rng.uniform(size=self.in_shape),
                        aesara.config.floatX)
        vv = np.asarray(self.rng.uniform(size=out_shape), aesara.config.floatX)

        yv = Lop(y, self.x, self.v)
        lop_f = function([self.x, self.v], yv, on_unused_input="ignore")
        J, _ = aesara.scan(
            lambda i, y, x: grad(y[i], x),
            sequences=aet.arange(y.shape[0]),
            non_sequences=[y, self.x],
        )
        sy = dot(self.v, J)

        scan_f = function([self.x, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert np.allclose(v1, v2), f"LOP mismatch: {v1} {v2}"
Code Example #2
File: test_rop.py  Project: ricardoV94/aesara
    def check_mat_rop_lop(self, y, out_shape):
        """
        Test the Rop/Lop when input is a matrix and the output is a vector

        :param y: the output variable of the op applied to self.mx
        :param out_shape: Used to generate a random tensor
                          corresponding to the evaluation point of the Rop
                          (i.e. the tensor with which you multiply the
                          Jacobian). It should be a tuple of ints.

        If the Op has more than 1 input, one of them must be mx, while
        others must be shared variables / constants. We will test only
        against the input self.mx, so you must call
        check_mat_rop_lop/check_rop_lop for the other inputs.

        We expect all inputs/outputs have dtype floatX.

        If you want to test an Op with an output matrix, add a sum
        after the Op you want to test.
        """
        vx = np.asarray(self.rng.uniform(size=self.mat_in_shape),
                        aesara.config.floatX)
        vv = np.asarray(self.rng.uniform(size=self.mat_in_shape),
                        aesara.config.floatX)
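        # R-op under test; the scan contracts each row gradient of y with mv as a reference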
        yv = Rop(y, self.mx, self.mv)
        rop_f = function([self.mx, self.mv], yv, on_unused_input="ignore")
        sy, _ = aesara.scan(
            lambda i, y, x, v: (grad(y[i], x) * v).sum(),
            sequences=aet.arange(y.shape[0]),
            non_sequences=[y, self.mx, self.mv],
        )
        scan_f = function([self.mx, self.mv], sy, on_unused_input="ignore")

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)

        assert np.allclose(v1, v2), f"ROP mismatch: {v1} {v2}"

        self.check_nondiff_rop(
            aesara.clone_replace(y, replace={self.mx: break_op(self.mx)}))

        vv = np.asarray(self.rng.uniform(size=out_shape), aesara.config.floatX)
        yv = Lop(y, self.mx, self.v)
        lop_f = function([self.mx, self.v], yv)

        sy = grad((self.v * y).sum(), self.mx)
        scan_f = function([self.mx, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert np.allclose(v1, v2), f"LOP mismatch: {v1} {v2}"
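
The Lop half of this test leans on the identity Lop(y, x, v) == grad((v * y).sum(), x): the vector-Jacobian product v @ J can be obtained as the plain gradient of a scalar. A standalone sketch, with a row-sum-of-squares as a stand-in op (illustrative only, not part of the test class):

import numpy as np
import aesara
import aesara.tensor as aet
from aesara import function
from aesara.gradient import Lop, grad

mx = aet.matrix("mx")
v = aet.vector("v")
y = (mx ** 2).sum(axis=1)  # stand-in op: matrix in, vector out

# L-op: vector-Jacobian product v @ J(mx)
lhs = Lop(y, mx, v)
# The same quantity as an ordinary gradient of a scalar
rhs = grad((v * y).sum(), mx)

f = function([mx, v], [lhs, rhs])
rng = np.random.default_rng(0)
vx = rng.uniform(size=(3, 4)).astype(aesara.config.floatX)
vv = rng.uniform(size=3).astype(aesara.config.floatX)
a, b = f(vx, vv)
assert np.allclose(a, b)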
Code Example #3
File: test_pool.py  Project: ricardoV94/aesara
def test_pool2d():
    shps = [
        (1, 12),
        (1, 1, 12),
        (1, 1, 1, 12),
        (1, 1, 2, 2),
        (1, 1, 1, 1),
        (1, 1, 4, 4),
        (1, 1, 10, 11),
        (1, 2, 2, 2),
        (3, 5, 4, 4),
        (25, 1, 7, 7),
        (1, 1, 12, 12),
        (1, 1, 2, 14),
        (1, 1, 12, 14),
        (1, 1, 14, 14),
        (1, 1, 16, 16),
        (1, 1, 18, 18),
        (1, 1, 24, 24),
        (1, 6, 24, 24),
        (10, 1, 24, 24),
        (10, 6, 24, 24),
        (30, 6, 12, 12),
        (30, 2, 24, 24),
        (30, 6, 24, 24),
        (10, 10, 10, 11),
        (1, 1, 10, 1025),
        (1, 1, 10, 1023),
        (1, 1, 1025, 10),
        (1, 1, 1023, 10),
        (3, 2, 16, 16, 16),
        (3, 2, 6, 6, 6, 5),
        (3, 2, 6, 6, 6, 5, 7),
    ]

    np.random.RandomState(utt.fetch_seed()).shuffle(shps)
    test_ws = (2, 2), (3, 2), (1, 1)
    test_st = (2, 2), (3, 2), (1, 1)
    test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"]

    ref_mode = copy.copy(mode_without_gpu)
    ref_mode.check_py_code = False
    gpu_mode = mode_with_gpu.excluding("cudnn")
    gpu_mode.check_py_code = False

    for shp in shps:
        for mode, ws, st in itertools.product(test_mode, test_ws, test_st):
            if ws[0] > shp[-2] or ws[1] > shp[-1]:
                continue
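            # pad > 0 is only accepted with ignore_border=True, hence the paired sweep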
            for ignore_border, pad in zip((True, False), [(1, 1), (0, 0)]):
                if pad[0] >= ws[0] or pad[1] >= ws[1]:
                    continue
                if mode == "average_exc_pad" and (pad[0] > 0 or pad[1] > 0):
                    continue
                # print('test_pool2d', shp, ws, st, pad, mode, ignore_border)
                ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)

                a = aesara.shared(rand(*shp), "a")
                a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad)

                f = aesara.function([], a_pooled, mode=gpu_mode)
                f2 = aesara.function([], a_pooled, mode=ref_mode)

                assert any(
                    [isinstance(node.op, GpuPool) for node in f.maker.fgraph.toposort()]
                )
                assert any(
                    [isinstance(node.op, Pool) for node in f2.maker.fgraph.toposort()]
                )
                assert np.allclose(f(), f2()), (shp, ws, st, pad, mode, ignore_border)

                a_pooled_grad = grad(a_pooled.sum(), a)

                g = aesara.function([], a_pooled_grad, mode=gpu_mode)
                g2 = aesara.function([], a_pooled_grad, mode=ref_mode)

                if mode == "max":
                    gop = GpuMaxPoolGrad
                    gop2 = MaxPoolGrad
                else:
                    gop = GpuAveragePoolGrad
                    gop2 = AveragePoolGrad
                assert any(
                    [isinstance(node.op, gop) for node in g.maker.fgraph.toposort()]
                )
                assert any(
                    [isinstance(node.op, gop2) for node in g2.maker.fgraph.toposort()]
                )

                assert np.allclose(g(), g2()), (shp, ws, st, pad, mode, ignore_border)

                # test rop and grad grad for max pooling
                # for average pooling grad grad is just average pooling grad
                if mode != "max":
                    continue

                ea = aesara.shared(rand(*shp), "ea")

                gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode)
                gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode)

                assert any(
                    [
                        isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
                        for node in gr.maker.fgraph.toposort()
                    ]
                )
                assert any(
                    [
                        isinstance(node.op, DownsampleFactorMaxGradGrad)
                        for node in gr2.maker.fgraph.toposort()
                    ]
                )
                assert np.allclose(gr(), gr2()), (shp, ws, st, pad, mode, ignore_border)

                ggf = Lop(grad((a_pooled ** 2).sum(), a), a, a)

                gg = aesara.function([], ggf, mode=gpu_mode)
                gg2 = aesara.function([], ggf, mode=ref_mode)

                assert any(
                    [
                        isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
                        for node in gg.maker.fgraph.toposort()
                    ]
                )
                assert any(
                    [
                        isinstance(node.op, DownsampleFactorMaxGradGrad)
                        for node in gg2.maker.fgraph.toposort()
                    ]
                )
                assert np.allclose(gg(), gg2()), (shp, ws, st, pad, mode, ignore_border)
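
To make the parameter sweep above easier to follow, here is the pooled output-shape rule it exercises, as a hedged pure-Python helper (pool_out_dim is a hypothetical name; the formulas follow Pool's documented shape semantics, and padding is only accepted together with ignore_border=True, which is why the test zips those two parameters):

def pool_out_dim(i, ws, st, pad=0, ignore_border=True):
    """Length of one pooled output dimension for input length i."""
    if ignore_border:
        # only windows that fit entirely inside the (padded) input count
        return (i + 2 * pad - ws) // st + 1
    # without ignore_border, a trailing partial window also produces output
    assert pad == 0, "Pool requires pad == 0 when ignore_border is False"
    if st >= ws:
        return (i + st - 1) // st  # ceil(i / st)
    return max(0, (i - ws + st - 1) // st) + 1

# e.g. a (1, 1, 12, 14) input with ws=(3, 2), st=(2, 2), pad=(0, 0):
assert pool_out_dim(12, 3, 2) == 5 and pool_out_dim(14, 2, 2) == 7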