Code example #1
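These snippets appear to be excerpted from pylearn2's probabilistic max-pooling tests and are shown without their imports. A plausible shared preamble is sketched below; the module path for prob_max_pool_c01b is an assumption and may differ across pylearn2 versions.

# Assumed shared preamble for the test snippets below.
import numpy as np
import theano
import theano.tensor as T
from theano import config, function
from pylearn2.expr.probabilistic_max_pooling import max_pool_c01b
# Assumption: the GPU Op wrapper lives in the cuda_convnet sandbox; adjust
# the path for your pylearn2 version.
from pylearn2.sandbox.cuda_convnet.probabilistic_max_pooling import \
    prob_max_pool_c01b

# Compilation modes, defined as in many Theano GPU tests.
mode_with_gpu = theano.compile.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.get_default_mode().excluding('gpu')
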
def test_grad():
    """
    Test the Op's gradients w.r.t. z and top_down against the Theano
    graph implementation.
    """

    rng = np.random.RandomState([2012, 7, 19])
    batch_size_list = [1]
    channels = 16
    rows_list = [2, 24]
    pool_rows_list = [2, 3]

    # TODO: the Theano graph version fails with pool shape (1, 1);
    # try it with the Python version.

    for batch_size in batch_size_list:
        for rows, pool_rows in zip(rows_list, pool_rows_list):
            cols = rows
            pool_cols = pool_rows

            zv = rng.randn(channels, rows, cols,
                           batch_size).astype(config.floatX)
            tv = rng.randn(channels, rows // pool_rows, cols // pool_cols,
                           batch_size).astype(config.floatX)

            z = T.tensor4()
            t = T.tensor4()

            # gpu op
            p, h = prob_max_pool_c01b(z, (pool_rows, pool_cols), top_down=t)
            gh_t = T.grad(h.sum(), t)
            gp_t = T.grad(p.sum(), t)
            gh_z = T.grad(h.sum(), z)
            gp_z = T.grad(p.sum(), z)
            gph_z = T.grad(p.sum() + h.sum(), z)
            gph_t = T.grad(p.sum() + h.sum(), t)

            func = function([z, t], [gh_t, gp_t, gh_z, gp_z, gph_z, gph_t],
                            mode=mode_with_gpu)
            op_rval = func(zv, tv)

            # theano graph
            p, h = max_pool_c01b(z, (pool_rows, pool_cols), top_down=t)
            gh_t = T.grad(h.sum(), t)
            gp_t = T.grad(p.sum(), t)
            gh_z = T.grad(h.sum(), z)
            gp_z = T.grad(p.sum(), z)
            gph_z = T.grad(p.sum() + h.sum(), z)
            gph_t = T.grad(p.sum() + h.sum(), t)

            func = function([z, t], [gh_t, gp_t, gh_z, gp_z, gph_z, gph_t],
                            mode=mode_without_gpu)
            th_rval = func(zv, tv)

            for op, th in zip(op_rval, th_rval):
                assert np.allclose(op, th, rtol=1e-04, atol=1e-06)
Code example #2
def test_top_down_grad_correctness():
    """
    Test the Op's gradient w.r.t. top_down against the Theano graph
    implementation.
    """

    rng = np.random.RandomState([2012, 7, 19])
    batch_size_list = [128]
    channels = 16
    rows_list = [2, 8, 20]
    pool_rows_list = [2, 4, 5]

    # TODO: the Theano graph version fails with pool shape (1, 1);
    # try it with the Python version.

    # TODO: the results don't match for input (30, 30) with pool shape
    # (3, 3); check with verify_grad (see the sketch after this example).

    for batch_size in batch_size_list:
        for rows, pool_rows in zip(rows_list, pool_rows_list):
            cols = rows
            pool_cols = pool_rows

            zv = rng.randn(channels, rows, cols,
                           batch_size).astype(config.floatX)
            tv = rng.randn(channels, rows // pool_rows, cols // pool_cols,
                           batch_size).astype(config.floatX)

            z = T.tensor4()
            t = T.tensor4()

            # gpu op
            p, h = prob_max_pool_c01b(z, (pool_rows, pool_cols), top_down=t)
            gt = T.grad(h.sum() + p.sum(), t)
            func = function([z, t], gt, mode=mode_with_gpu)

            op_gt = func(zv, tv)

            # theano graph
            p, h = max_pool_c01b(z, (pool_rows, pool_cols), top_down=t)
            gt = T.grad(h.sum() + p.sum(), t)
            func = function([z, t], gt, mode=mode_without_gpu)

            th_gt = func(zv, tv)

            print(batch_size, rows, pool_rows)
            assert np.allclose(op_gt, th_gt, rtol=1e-04, atol=1e-06)
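The second TODO above suggests checking with verify_grad; below is a minimal sketch of such a numeric check, assuming the shared preamble from example #1. The helper name check_top_down_grad and its default shapes are hypothetical; theano.gradient.verify_grad compares the symbolic gradient against finite differences.

def check_top_down_grad(rows=8, pool_rows=4, channels=16, batch_size=2):
    # Hypothetical helper sketching the verify_grad check from the TODO.
    rng = np.random.RandomState(0)
    zv = rng.randn(channels, rows, rows, batch_size).astype(config.floatX)
    tv = rng.randn(channels, rows // pool_rows, rows // pool_rows,
                   batch_size).astype(config.floatX)

    # fun receives symbolic variables; verify_grad differentiates its output
    # at the numeric point [zv, tv] and compares with finite differences.
    def fun(z, t):
        p, h = max_pool_c01b(z, (pool_rows, pool_rows), top_down=t)
        return p.sum() + h.sum()

    theano.gradient.verify_grad(fun, [zv, tv], rng=rng)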
Code example #3
def test_top_down_correctness():
    """
    Test the forward pass Op against theano graph implementation
    """

    rng = np.random.RandomState([2012, 7, 19])
    batch_size_list = [1]
    channels = 16
    rows_list = [2, 24]
    pool_rows_list = [2, 3]

    # TODO: the Theano graph version fails with pool shape (1, 1);
    # try it with the Python version.

    for batch_size in batch_size_list:
        for rows, pool_rows in zip(rows_list, pool_rows_list):
            cols = rows
            pool_cols = pool_rows

            zv = rng.randn(channels, rows, cols,
                           batch_size).astype(config.floatX)
            tv = rng.randn(channels, rows // pool_rows, cols // pool_cols,
                           batch_size).astype(config.floatX)

            z = T.tensor4()
            t = T.tensor4()

            # gpu op
            p, h = prob_max_pool_c01b(z, (pool_rows, pool_cols), top_down=t)
            func = function([z, t], [p, h], mode=mode_with_gpu)

            p_op, h_op = func(zv, tv)

            # theano graph
            p, h = max_pool_c01b(z, (pool_rows, pool_cols), top_down=t)
            func = function([z, t], [p, h], mode=mode_without_gpu)

            p_th, h_th = func(zv, tv)

            assert np.allclose(p_op, p_th)
            assert np.allclose(h_op, h_th)
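For reference, under the same assumed preamble, max_pool_c01b returns the pooling layer p (spatial dimensions divided by the pool shape) and the detector layer h (same shape as the input), both in c01b layout; a minimal forward-pass sketch:

# Minimal forward-pass sketch (assumed preamble from example #1).
z = T.tensor4()
p, h = max_pool_c01b(z, (2, 2))
f = function([z], [p, h], mode=mode_without_gpu)
zv = np.random.randn(16, 4, 4, 2).astype(config.floatX)
pv, hv = f(zv)
print(pv.shape)  # expected (16, 2, 2, 2): pooled rows and cols
print(hv.shape)  # expected (16, 4, 4, 2): same shape as the input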
Code example #4
def test_grad_correctness():
    """
    Test the Op's gradient against the Theano graph implementation.
    """

    rng = np.random.RandomState([2012, 7, 19])
    batch_size_list = [1, 5, 128]
    channels = 16
    rows_list = [2, 8, 30]
    pool_rows_list = [2, 4, 3]

    # TODO: the Theano graph version fails with pool shape (1, 1);
    # try it with the Python version.

    for batch_size in batch_size_list:
        for rows, pool_rows in zip(rows_list, pool_rows_list):
            cols = rows
            pool_cols = pool_rows

            zv = rng.randn(channels, rows, cols,
                           batch_size).astype(config.floatX)

            z = T.tensor4()

            # gpu op
            p, h = prob_max_pool_c01b(z, (pool_rows, pool_cols))
            gz = T.grad(h.sum() + p.sum(), z)
            func = function([z], gz, mode=mode_with_gpu)

            op_gz = func(zv)

            # theano graph
            p, h = max_pool_c01b(z, (pool_rows, pool_cols))
            gz = T.grad(h.sum() + p.sum(), z)
            func = function([z], gz, mode=mode_without_gpu)

            th_gz = func(zv)

            assert np.allclose(op_gz, th_gz, rtol=1e-04, atol=1e-06)
Code example #5
File: test.py (project: vd114/galatea)
import numpy as np
from theano import tensor as T
from theano import function
from pylearn2.utils import sharedX
from pylearn2.expr.probabilistic_max_pooling import max_pool_c01b

# Shared input in c01b layout: 16 channels, 34x34 spatial, batch size 2.
X = sharedX(np.zeros((16, 34, 34, 2)))

# Pooling layer P and detector layer H from probabilistic max pooling.
P, H = max_pool_c01b(X, pool_shape=[2, 2])

obj = P.sum() + H.sum()

# Gradient of the scalar objective w.r.t. the shared input.
g = T.grad(obj, X)

f = function([], g)
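Since X is a shared variable, f takes no inputs; a minimal usage sketch (the expected shape follows from the definition of X above):

g_val = f()         # evaluate the gradient at X's current value (all zeros)
print(g_val.shape)  # expected (16, 34, 34, 2), matching X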