Example #1
def check(input_dim, expected_shape, broadcastable=None,
          conserve_memory=True):
    bn = BatchNormalization(input_dim=input_dim,
                            broadcastable=broadcastable,
                            conserve_memory=conserve_memory)
    if broadcastable is None:
        if not isinstance(input_dim, collections.Sequence):
            b_input_dim = (input_dim,)
        else:
            b_input_dim = input_dim
        input_broadcastable = tuple(False for _ in range(len(b_input_dim)))
    else:
        input_broadcastable = broadcastable
    bn.allocate()
    assert conserve_memory == bn.conserve_memory
    assert input_dim == bn.input_dim
    assert bn.broadcastable == broadcastable
    assert bn.scale.broadcastable == input_broadcastable
    assert bn.shift.broadcastable == input_broadcastable
    assert bn.population_mean.broadcastable == input_broadcastable
    assert bn.population_stdev.broadcastable == input_broadcastable
    assert_allclose(bn.population_mean.get_value(borrow=True), 0.)
    assert_allclose(bn.population_stdev.get_value(borrow=True), 1.)
    assert_equal(bn.scale.get_value(borrow=True).shape, expected_shape)
    assert_equal(bn.shift.get_value(borrow=True).shape, expected_shape)
    assert_equal(bn.population_mean.get_value(borrow=True).shape,
                 expected_shape)
    assert_equal(bn.population_stdev.get_value(borrow=True).shape,
                 expected_shape)
    assert numpy.isnan(bn.shift.get_value(borrow=True)).all()
    assert numpy.isnan(bn.scale.get_value(borrow=True)).all()
    bn.initialize()
    assert_allclose(bn.shift.get_value(borrow=True), 0.)
    assert_allclose(bn.scale.get_value(borrow=True), 1.)
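A minimal driver for the check helper above, assuming the usual test imports (collections, numpy, numpy.testing's assert_allclose/assert_equal and blocks.bricks.BatchNormalization) are in scope; the argument combinations are illustrative, not taken from the original test file:

# Illustrative calls only: with no broadcastable pattern the parameter
# shapes simply mirror input_dim, so expected_shape is input_dim as a tuple.
check(9, (9,))
check((4, 5), (4, 5))
check((4, 5), (4, 5), conserve_memory=False)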
Example #2
def join(small_image,
         big_image,
         n_filter_small,
         n_filter_big,
         big_img_size_in,
         ordering=''):
    """Upsample small_image 2x, batch-normalize both feature maps and
    depth-concatenate them along the channel axis."""
    # upsample small image 2x by repeating rows and columns (nearest neighbour)
    upsampled_small = tensor.repeat(small_image, 2, axis=2)
    upsampled_small = tensor.repeat(upsampled_small, 2, axis=3)

    img_size_small = (n_filter_small, big_img_size_in[0], big_img_size_in[1])
    img_size_big = (n_filter_big, big_img_size_in[0], big_img_size_in[1])

    bn_small = BatchNormalization(img_size_small,
                                  name='bn_small%s' % (ordering, ))
    bn_small.initialize()
    bn_big = BatchNormalization(img_size_big, name='bn_big%s' % (ordering, ))
    bn_big.initialize()

    depth_concat = tensor.concatenate(
        [bn_small.apply(upsampled_small),
         bn_big.apply(big_image)], axis=1)

    return depth_concat
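A hedged sketch of wiring join into a Theano graph; the 4-D (batch, channel, row, column) layout, the filter counts and the spatial size are assumptions for illustration:

import theano
from theano import tensor

# Assumed shapes: the small map is (batch, 16, 8, 8) and the big one is
# (batch, 32, 16, 16), so after 2x upsampling both share a 16x16 grid.
small = tensor.tensor4('small')
big = tensor.tensor4('big')
merged = join(small, big,
              n_filter_small=16, n_filter_big=32,
              big_img_size_in=(16, 16), ordering='_0')
f = theano.function([small, big], merged)  # merged has 16 + 32 = 48 channels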
Example #3
def apply_setup(input_dim, broadcastable, conserve_memory):
    """Common setup code."""
    bn = BatchNormalization(input_dim, broadcastable, conserve_memory,
                            epsilon=1e-4)
    bn.initialize()
    b_len = (len(input_dim) if isinstance(input_dim, collections.Sequence)
             else 1)
    x = tensor.TensorType(theano.config.floatX,
                          [False] * (b_len + 1))()
    return bn, x
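A short usage sketch for apply_setup; the argument values are illustrative, and it assumes that, as in the test below, the brick normalizes with its population statistics unless wrapped in the batch_normalization context manager:

import numpy
import theano

bn, x = apply_setup(9, None, False)   # 1-D features, default broadcasting
y = bn.apply(x)                       # population statistics, not minibatch ones
f = theano.function([x], y)
data = numpy.random.rand(2, 9).astype(theano.config.floatX)
print(f(data).shape)                  # (2, 9)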
Example #4
def test_batch_normalization_simple():
    x = tensor.matrix()
    eps = 1e-4
    bn = BatchNormalization(input_dim=4, epsilon=eps)
    bn.initialize()
    with batch_normalization(bn):
        y = bn.apply(x)
    rng = numpy.random.RandomState((2016, 1, 18))
    x_ = rng.uniform(size=(5, 4)).astype(theano.config.floatX)
    y_ = y.eval({x: x_})
    y_expected = (x_ - x_.mean(axis=0)) / numpy.sqrt(x_.var(axis=0) + eps)
    assert_allclose(y_, y_expected, rtol=1e-4)
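The with-block is what makes this single-minibatch check possible: inside batch_normalization(bn) the brick normalizes with the minibatch mean and variance, so the expected output is plain per-feature standardization. Outside the block the same apply call should fall back to the population statistics, which initialize() left at their defaults; a hedged sketch of that contrast, reusing the names from the test above:

# Assumes the default (inference) mode uses population_mean / population_stdev,
# which are 0 and 1 here, so the brick acts as an identity on this input.
y_inf = bn.apply(x)
y_inf_ = y_inf.eval({x: x_})
assert_allclose(y_inf_, x_, rtol=1e-4)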