Example #1
    def check_mat_rop_lop(self, y, out_shape):
        """
        Test Rop/Lop when the input is a matrix and the output is a vector

        :param y: the output variable of the op applied to self.mx
        :param out_shape: Used to generate a random tensor
                          corresponding to the evaluation point of the Rop
                          (i.e. the tensor with which you multiply the
                          Jacobian). It should be a tuple of ints.

        If the Op has more than 1 input, one of them must be mx, while
        others must be shared variables / constants. We will test only
        against the input self.mx, so you must call
        check_mat_rop_lop/check_rop_lop for the other inputs.

        We expect all inputs/outputs to have dtype floatX.

        If you want to test an Op whose output is a matrix, add a sum
        after the Op you want to test, so that the tested output is a
        vector.
        """
        vx = np.asarray(self.rng.uniform(size=self.mat_in_shape),
                        aesara.config.floatX)
        vv = np.asarray(self.rng.uniform(size=self.mat_in_shape),
                        aesara.config.floatX)
        yv = Rop(y, self.mx, self.mv)
        rop_f = function([self.mx, self.mv], yv, on_unused_input="ignore")
        sy, _ = aesara.scan(
            lambda i, y, x, v: (grad(y[i], x) * v).sum(),
            sequences=aet.arange(y.shape[0]),
            non_sequences=[y, self.mx, self.mv],
        )
        scan_f = function([self.mx, self.mv], sy, on_unused_input="ignore")

        v1 = rop_f(vx, vv)
        v2 = scan_f(vx, vv)

        assert np.allclose(v1, v2), f"ROP mismatch: {v1} {v2}"

        self.check_nondiff_rop(
            aesara.clone_replace(y, replace={self.mx: break_op(self.mx)}))

        vv = np.asarray(self.rng.uniform(size=out_shape), aesara.config.floatX)
        yv = Lop(y, self.mx, self.v)
        lop_f = function([self.mx, self.v], yv)

        sy = grad((self.v * y).sum(), self.mx)
        scan_f = function([self.mx, self.v], sy)

        v1 = lop_f(vx, vv)
        v2 = scan_f(vx, vv)
        assert np.allclose(v1, v2), f"LOP mismatch: {v1} {v2}"
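
The test above leans on two identities worth stating explicitly: Rop(y, x, v) is the Jacobian-vector product J(y, x) @ v, which the scan loop reproduces row by row, and Lop(y, x, u) is the vector-Jacobian product u^T J(y, x), which equals grad((u * y).sum(), x). A minimal self-contained sketch of both identities, assuming a standard Aesara install (Rop, Lop, and aesara.tensor are real APIs; the example graph and names are illustrative):

import numpy as np
import aesara
import aesara.tensor as at
from aesara.gradient import Lop, Rop

x = at.vector("x")
v = at.vector("v")
y = x ** 2  # elementwise square: the Jacobian is diag(2 * x)

# R-operator: J @ v == 2 * x * v for this graph
jvp = aesara.function([x, v], Rop(y, x, v))
# L-operator: v.T @ J, also 2 * x * v here because the Jacobian is diagonal
vjp = aesara.function([x, v], Lop(y, x, v))

xval = np.array([1.0, 2.0, 3.0], dtype=aesara.config.floatX)
vval = np.array([0.5, -1.0, 2.0], dtype=aesara.config.floatX)
assert np.allclose(jvp(xval, vval), 2 * xval * vval)
assert np.allclose(vjp(xval, vval), 2 * xval * vval)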
Example #2
    def test_multiple_outputs(self):
        m = matrix("m")
        v = vector("v")
        m_ = matrix("m_")
        v_ = vector("v_")

        mval = self.rng.uniform(size=(3, 7)).astype(aesara.config.floatX)
        vval = self.rng.uniform(size=(7, )).astype(aesara.config.floatX)
        m_val = self.rng.uniform(size=(3, 7)).astype(aesara.config.floatX)
        v_val = self.rng.uniform(size=(7, )).astype(aesara.config.floatX)

        rop_out1 = Rop([m, v, m + v], [m, v], [m_, v_])
        assert isinstance(rop_out1, list)
        assert len(rop_out1) == 3
        rop_out2 = Rop((m, v, m + v), [m, v], [m_, v_])
        assert isinstance(rop_out2, tuple)
        assert len(rop_out2) == 3

        all_outs = []
        for o in rop_out1, rop_out2:
            all_outs.extend(o)
        f = aesara.function([m, v, m_, v_], all_outs)
        f(mval, vval, m_val, v_val)
Example #3
    def test_rop(self, cls_ofg):
        a = vector()
        M = matrix()
        b = dot(a, M)
        op_matmul = cls_ofg([a, M], [b])
        x = vector()
        W = matrix()
        y = op_matmul(x, W)
        du = vector()
        dv = Rop(y, x, du)
        fn = function([x, W, du], dv)
        xval = np.random.rand(16).astype(config.floatX)
        Wval = np.random.rand(16, 16).astype(config.floatX)
        duval = np.random.rand(16).astype(config.floatX)
        dvval = np.dot(duval, Wval)
        dvval2 = fn(xval, Wval, duval)
        assert np.allclose(dvval2, dvval)
Example #4
    def test_rop(self, cls_ofg):
        a = vector()
        M = matrix()
        b = dot(a, M)
        op_matmul = cls_ofg([a, M], [b])
        x = vector()
        W = matrix()
        y = op_matmul(x, W)
        du = vector()
        dv = Rop(y, x, du)
        fn = function([x, W, du], dv)
        xval = np.random.random((16,)).astype(config.floatX)
        Wval = np.random.random((16, 16)).astype(config.floatX)
        duval = np.random.random((16,)).astype(config.floatX)
        dvval = np.dot(duval, Wval)
        dvval2 = fn(xval, Wval, duval)
        np.testing.assert_array_almost_equal(dvval2, dvval, 4)
Example #5
    def test_rop_override(self, cls_ofg):
        x, y = vectors("xy")

        def ro(inps, epts):
            x, y = inps
            u, v = epts
            return [u * y * 2.0 + x * v * 1.5]

        u, v = vectors("uv")
        op_mul_rop = cls_ofg([x, y, u, v], ro([x, y], [u, v]))
        op_mul = cls_ofg([x, y], [x * y], rop_overrides=ro)
        op_mul2 = cls_ofg([x, y], [x * y], rop_overrides=op_mul_rop)

        # single override case
        xx, yy = vector("xx"), vector("yy")
        du, dv = vector("du"), vector("dv")
        for op in [op_mul, op_mul2]:
            zz = op(xx, yy)
            dw = Rop(zz, [xx, yy], [du, dv])
            fn = function([xx, yy, du, dv], dw)
            vals = np.random.rand(4, 32).astype(config.floatX)
            dwval = fn(*vals)
            assert np.allclose(
                dwval, vals[0] * vals[3] * 1.5 + vals[1] * vals[2] * 2.0)
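
The coefficients 2.0 and 1.5 in ro are deliberately not the true derivative of x * y; the assertion passes only if the override, rather than the symbolic gradient, is what Rop dispatches to. For reference, a hedged sketch of the same mechanism written directly against OpFromGraph (the tests parametrize cls_ofg; aesara.compile.builders.OpFromGraph and its rop_overrides argument are the real API, the rest is illustrative), this time with the exact product rule:

import numpy as np
import aesara
import aesara.tensor as at
from aesara.compile.builders import OpFromGraph
from aesara.gradient import Rop

x = at.vector("x")
y = at.vector("y")

def ro(inps, epts):
    # inps: graph inputs; epts: evaluation points, one per input.
    x, y = inps
    u, v = epts
    # exact product rule for z = x * y: dz = u * y + x * v
    return [u * y + x * v]

op_mul = OpFromGraph([x, y], [x * y], rop_overrides=ro)

xx, yy, du, dv = (at.vector(name) for name in ("xx", "yy", "du", "dv"))
dw = Rop(op_mul(xx, yy), [xx, yy], [du, dv])
fn = aesara.function([xx, yy, du, dv], dw)

vals = np.random.rand(4, 8).astype(aesara.config.floatX)
assert np.allclose(fn(*vals), vals[2] * vals[1] + vals[0] * vals[3])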
Example #6
def test_pool2d():
    shps = [
        (1, 12),
        (1, 1, 12),
        (1, 1, 1, 12),
        (1, 1, 2, 2),
        (1, 1, 1, 1),
        (1, 1, 4, 4),
        (1, 1, 10, 11),
        (1, 2, 2, 2),
        (3, 5, 4, 4),
        (25, 1, 7, 7),
        (1, 1, 12, 12),
        (1, 1, 2, 14),
        (1, 1, 12, 14),
        (1, 1, 14, 14),
        (1, 1, 16, 16),
        (1, 1, 18, 18),
        (1, 1, 24, 24),
        (1, 6, 24, 24),
        (10, 1, 24, 24),
        (10, 6, 24, 24),
        (30, 6, 12, 12),
        (30, 2, 24, 24),
        (30, 6, 24, 24),
        (10, 10, 10, 11),
        (1, 1, 10, 1025),
        (1, 1, 10, 1023),
        (1, 1, 1025, 10),
        (1, 1, 1023, 10),
        (3, 2, 16, 16, 16),
        (3, 2, 6, 6, 6, 5),
        (3, 2, 6, 6, 6, 5, 7),
    ]

    np.random.RandomState(utt.fetch_seed()).shuffle(shps)
    test_ws = (2, 2), (3, 2), (1, 1)
    test_st = (2, 2), (3, 2), (1, 1)
    test_mode = ["max", "sum", "average_inc_pad", "average_exc_pad"]

    ref_mode = copy.copy(mode_without_gpu)
    ref_mode.check_py_code = False
    gpu_mode = mode_with_gpu.excluding("cudnn")
    gpu_mode.check_py_code = False

    for shp in shps:
        for mode, ws, st in itertools.product(test_mode, test_ws, test_st):
            if ws[0] > shp[-2] or ws[1] > shp[-1]:
                continue
            for ignore_border, pad in zip((True, False), [(1, 1), (0, 0)]):
                if pad[0] >= ws[0] or pad[1] >= ws[1]:
                    continue
                if mode == "average_exc_pad" and (pad[0] > 0 or pad[1] > 0):
                    continue
                # print('test_pool2d', shp, ws, st, pad, mode, ignore_border)
                ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)

                a = aesara.shared(rand(*shp), "a")
                a_pooled = ds_op(aet.as_tensor_variable(a), ws, st, pad)

                f = aesara.function([], a_pooled, mode=gpu_mode)
                f2 = aesara.function([], a_pooled, mode=ref_mode)

                assert any(
                    [isinstance(node.op, GpuPool) for node in f.maker.fgraph.toposort()]
                )
                assert any(
                    [isinstance(node.op, Pool) for node in f2.maker.fgraph.toposort()]
                )
                assert np.allclose(f(), f2()), (shp, ws, st, pad, mode, ignore_border)

                a_pooled_grad = grad(a_pooled.sum(), a)

                g = aesara.function([], a_pooled_grad, mode=gpu_mode)
                g2 = aesara.function([], a_pooled_grad, mode=ref_mode)

                if mode == "max":
                    gop = GpuMaxPoolGrad
                    gop2 = MaxPoolGrad
                else:
                    gop = GpuAveragePoolGrad
                    gop2 = AveragePoolGrad
                assert any(
                    [isinstance(node.op, gop) for node in g.maker.fgraph.toposort()]
                )
                assert any(
                    [isinstance(node.op, gop2) for node in g2.maker.fgraph.toposort()]
                )

                assert np.allclose(g(), g2()), (shp, ws, st, pad, mode, ignore_border)

                # test rop and grad grad for max pooling
                # for average pooling grad grad is just average pooling grad
                if mode != "max":
                    continue

                ea = aesara.shared(rand(*shp), "ea")

                gr = aesara.function([], Rop(a_pooled, a, ea), mode=gpu_mode)
                gr2 = aesara.function([], Rop(a_pooled, a, ea), mode=ref_mode)

                assert any(
                    [
                        isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
                        for node in gr.maker.fgraph.toposort()
                    ]
                )
                assert any(
                    [
                        isinstance(node.op, DownsampleFactorMaxGradGrad)
                        for node in gr2.maker.fgraph.toposort()
                    ]
                )
                assert np.allclose(gr(), gr2()), (shp, ws, st, pad, mode, ignore_border)

                ggf = Lop(grad((a_pooled ** 2).sum(), a), a, a)

                gg = aesara.function([], ggf, mode=gpu_mode)
                gg2 = aesara.function([], ggf, mode=ref_mode)

                assert any(
                    [
                        isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
                        for node in gg.maker.fgraph.toposort()
                    ]
                )
                assert any(
                    [
                        isinstance(node.op, DownsampleFactorMaxGradGrad)
                        for node in gg2.maker.fgraph.toposort()
                    ]
                )
                assert np.allclose(gg(), gg2()), (shp, ws, st, pad, mode, ignore_border)
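
To pin down the reference semantics the GPU results are checked against, here is a NumPy-only model of the simplest configuration in the loop above: 2x2 max pooling with stride equal to the window and no padding (max_pool_2x2 is an illustrative helper, not a library function; with ignore_border=True, trailing rows and columns that do not fill a complete window are dropped):

import numpy as np

def max_pool_2x2(a):
    # Pool over the two trailing dims of an (..., H, W) array.
    h, w = a.shape[-2] // 2, a.shape[-1] // 2
    trimmed = a[..., : h * 2, : w * 2]  # drop incomplete border windows
    windows = trimmed.reshape(*a.shape[:-2], h, 2, w, 2)
    return windows.max(axis=(-3, -1))

x = np.arange(16.0).reshape(1, 1, 4, 4)
out = max_pool_2x2(x)
assert out.shape == (1, 1, 2, 2)
assert np.allclose(out, [[[[5.0, 7.0], [13.0, 15.0]]]])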