import warnings

import numpy as np

import theano.tensor as T
from theano import config, function

# max_pool_python is the pure-Python ground-truth pooling these checkers
# compare against; in pylearn2 it is defined alongside them in
# pylearn2.expr.probabilistic_max_pooling.


def check_correctness(f):
    rng = np.random.RandomState([2012, 7, 19])
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    zv = rng.randn(batch_size, rows, cols, channels).astype(
        config.floatX) * 2. - 3.

    p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols))

    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype=config.floatX)()
    z_th.name = 'z_th'

    p_th, h_th = f(z_th, (pool_rows, pool_cols))

    func = function([z_th], [p_th, h_th])

    pv, hv = func(zv)

    assert p_np.shape == pv.shape
    assert h_np.shape == hv.shape
    if not np.allclose(h_np, hv):
        print((h_np.min(), h_np.max()))
        print((hv.min(), hv.max()))
        assert False
    assert np.allclose(p_np, pv)
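# A minimal usage sketch (an assumption about how these checkers are driven,
# not part of the original tests): any callable with the signature
# f(z, pool_shape) -> (p, h), returning Theano expressions for the pooled
# units and the detector units, can be passed in, e.g. pylearn2's max_pool:
#
#     from pylearn2.expr.probabilistic_max_pooling import max_pool
#     check_correctness(max_pool)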
def check_correctness_c01b(f):
    """
    Tests that the Theano expression emitted by f computes the same values
    as the ground-truth python function.
    Note: to keep the python version as simple as possible (i.e., to make
    sure there are no bugs in the ground truth) it uses the numerically
    unstable version of softmax, so this test does not work with very
    large numbers.
    """

    rng = np.random.RandomState([2013, 5, 6])
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3

    # Do the python ground truth in b01c format
    zv = rng.randn(batch_size, rows, cols,
                   channels).astype(config.floatX) * 1. - 1.5
    top_down_v = rng.randn(batch_size, rows // pool_rows, cols // pool_cols,
                           channels).astype(config.floatX)

    p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)

    # Dimshuffle the inputs into c01b for the theano implementation
    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype=config.floatX)()
    z_th.tag.test_value = zv
    z_th.name = 'z_th'
    zr = z_th.dimshuffle(3, 1, 2, 0)

    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype=config.floatX)()
    top_down_th.name = 'top_down_th'
    top_down_th.tag.test_value = top_down_v
    top_down_r = top_down_th.dimshuffle(3, 1, 2, 0)

    p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)

    func = function([z_th, top_down_th], [p_th.dimshuffle(3, 1, 2, 0),
                                          h_th.dimshuffle(3, 1, 2, 0)])

    pv, hv = func(zv, top_down_v)

    if not p_np.shape == pv.shape:
        raise AssertionError(str((p_np.shape, pv.shape)))
    assert h_np.shape == hv.shape
    if not np.allclose(h_np, hv):
        print(h_np.min(), h_np.max())
        print(hv.min(), hv.max())
        assert False
    if not np.allclose(p_np, pv):
        diff = abs(p_np - pv)
        print('max diff ', diff.max())
        print('min diff ', diff.min())
        print('ave diff ', diff.mean())
        assert False
    warnings.warn("TODO: make sampling tests run on c01b format of pooling.")
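# The "very large numbers" caveat in the docstrings above exists because the
# python ground truth uses the naive softmax. A minimal sketch of the failure
# mode and the standard max-subtraction fix (the helper name is illustrative,
# not part of the original module):
def _softmax_stability_sketch():
    z = np.array([5., 50., 500.], dtype='float32')
    naive = np.exp(z) / np.exp(z).sum()    # exp(500.) overflows float32 -> inf / inf = nan
    shifted = np.exp(z - z.max())          # subtract the max so the largest term is exp(0) = 1
    stable = shifted / shifted.sum()       # mathematically identical, numerically safe
    return naive, stable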
def check_correctness_bc01(f):
    """
    Tests that the Theano expression emitted by f computes the same values
    as the ground-truth python function.
    Note: to keep the python version as simple as possible (i.e., to make
    sure there are no bugs in the ground truth) it uses the numerically
    unstable version of softmax, so this test does not work with very
    large numbers.
    """

    rng = np.random.RandomState([2012, 7, 19])
    batch_size = 5
    rows = 32
    cols = 30
    channels = 3
    pool_rows = 2
    pool_cols = 3
    zv = rng.randn(batch_size, rows, cols, channels).astype(
        config.floatX) * 1. - 1.5
    top_down_v = rng.randn(batch_size, rows // pool_rows, cols // pool_cols,
                           channels).astype(config.floatX)

    p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)

    z_th = T.TensorType(broadcastable=(False, False, False, False),
                        dtype=config.floatX)()
    z_th.name = 'z_th'
    zr = z_th.dimshuffle(0, 3, 1, 2)

    top_down_th = T.TensorType(broadcastable=(False, False, False, False),
                               dtype=config.floatX)()
    top_down_th.name = 'top_down_th'
    top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)

    p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)

    func = function([z_th, top_down_th],
                    [p_th.dimshuffle(0, 2, 3, 1),
                     h_th.dimshuffle(0, 2, 3, 1)])

    pv, hv = func(zv, top_down_v)

    assert p_np.shape == pv.shape
    assert h_np.shape == hv.shape
    if not np.allclose(h_np, hv):
        print((h_np.min(), h_np.max()))
        print((hv.min(), hv.max()))
        assert False
    if not np.allclose(p_np, pv):
        diff = abs(p_np - pv)
        print('max diff ', diff.max())
        print('min diff ', diff.min())
        print('ave diff ', diff.mean())
        assert False
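# The three checkers differ only in memory layout: the ground truth is always
# computed in b01c (batch, rows, cols, channels), and the Theano inputs are
# dimshuffled into the layout under test, with the outputs shuffled back. A
# minimal numpy sketch of the same round trip (the helper name is
# illustrative, not part of the original module):
def _layout_round_trip_sketch(zv):
    c01b = zv.transpose(3, 1, 2, 0)    # b01c -> c01b, mirrors dimshuffle(3, 1, 2, 0)
    bc01 = zv.transpose(0, 3, 1, 2)    # b01c -> bc01, mirrors dimshuffle(0, 3, 1, 2)
    assert np.array_equal(c01b.transpose(3, 1, 2, 0), zv)  # (3, 1, 2, 0) is its own inverse
    assert np.array_equal(bc01.transpose(0, 2, 3, 1), zv)  # bc01 -> b01c uses (0, 2, 3, 1)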