Example #1
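These snippets are excerpted test functions and omit their imports. The preamble below is a sketch of what they appear to rely on; the batchnorm_extension module name is a placeholder, since the module that actually provides BatchNormalization, DefaultsSequence, infer_population, get_batchnorm_parameter_values and set_batchnorm_parameter_values is not shown in the excerpts.

import numpy as np
import theano
import theano.tensor as T
from numpy.testing import assert_allclose, assert_equal

from blocks.bricks import Linear, Rectifier
from blocks.bricks.conv import Convolutional, Flattener
from blocks.graph import ComputationGraph
from blocks.initialization import Constant
from blocks.model import Model
from fuel.datasets import IterableDataset
from fuel.streams import DataStream

# Placeholder: these names come from whichever Blocks extension defines the
# batch-normalization brick and helpers exercised below.  Note that Example #1
# passes stride= to Convolutional, whereas the stock Blocks brick calls this
# argument step, so the extension may provide its own Convolutional as well.
from batchnorm_extension import (BatchNormalization, DefaultsSequence,
                                 infer_population,
                                 get_batchnorm_parameter_values,
                                 set_batchnorm_parameter_values)
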
def test_defaults_sequence2():
    seq = DefaultsSequence(input_dim=(3, 4, 4),
                           lists=[
                               Convolutional(num_filters=10,
                                             stride=(2, 2),
                                             filter_size=(3, 3)),
                               BatchNormalization(),
                               Rectifier(),
                               Flattener(),
                               Linear(output_dim=10),
                               BatchNormalization(),
                               Rectifier(),
                               Linear(output_dim=12),
                               BatchNormalization(),
                               Rectifier()
                           ])
    seq.weights_init = Constant(1.0)
    seq.biases_init = Constant(0.0)
    seq.push_allocation_config()
    seq.push_initialization_config()
    seq.initialize()

    x = T.tensor4('input')
    y = seq.apply(x)
    func_ = theano.function([x], [y])

    x_val = np.ones((1, 3, 4, 4), dtype=theano.config.floatX)
    res = func_(x_val)[0]
    assert_allclose(res.shape, (1, 12))
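The asserted (1, 12) shape can be checked by hand. Assuming a 'valid' convolution (no padding), the bookkeeping is:

# Shape bookkeeping behind the (1, 12) assertion, assuming 'valid' convolution.
spatial = (4 - 3) // 2 + 1            # 3x3 filter, stride 2, 4x4 input -> 1
conv_out = (10, spatial, spatial)     # (10, 1, 1) feature maps
flat_dim = 10 * spatial * spatial     # 10 features after Flattener
# Linear(output_dim=10) -> Linear(output_dim=12) gives the final (1, 12) shape.
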
Example #2
def test_batchnorm_training():
    layer = BatchNormalization(input_dim=5)
    layer.initialize()
    x = T.matrix()

    x_val = np.ones((6, 5), dtype=theano.config.floatX)
    x_val[0, 0] = 10.0

    y = layer.apply(x)
    _func = theano.function([x], y)
    ret = _func(x_val)

    assert_allclose(ret[0, 0], 2.23606801)
    assert_allclose(ret[1:5, 0], -0.44721359)

    assert_allclose(ret[0:5, 1:5], 0)
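The expected values can be reproduced by hand. Assuming the brick defaults to a unit scale and zero shift and that its epsilon is negligible, each column is standardized with the batch mean and biased standard deviation:

import numpy as np

# Column 0 of x_val is [10, 1, 1, 1, 1, 1]; the remaining columns are constant.
col0 = np.array([10.0, 1, 1, 1, 1, 1])
mean, std = col0.mean(), col0.std()   # 2.5 and sqrt(11.25)
print((10.0 - mean) / std)            # 2.2360680  (= sqrt(5))
print((1.0 - mean) / std)             # -0.4472136 (= -1/sqrt(5))
# Constant columns deviate from their mean by 0, hence the zero assertion.
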
Example #6
def test_batchnormconv_training():
    layer = BatchNormalization(input_dim=(4, 5, 5))
    layer.initialize()
    x = T.tensor4()

    x_val = np.ones((6, 4, 5, 5), dtype=theano.config.floatX)
    x_val[0, 0, 0, 0] = 10.0

    y = layer.apply(x)
    _func = theano.function([x], y)
    ret = _func(x_val)
    assert_equal(ret.shape, (6, 4, 5, 5))
    assert_allclose(ret[0, 0, 0, 0], 12.20655537)
    assert_allclose(ret[0, 0, 1, 0], -0.08192328)
    assert_allclose(ret[0, 0, 3, 3], -0.08192328)

    assert_allclose(ret[1, 0, 0, 0], -0.08192328)

    assert_allclose(ret[0:6, 1:4, 0:5, 0:5], 0)
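For the convolutional case the statistics are pooled per channel over the batch and both spatial axes (6 * 5 * 5 = 150 values per channel), which is where the figures above come from. Again assuming unit scale, zero shift and negligible epsilon:

import numpy as np

# Channel 0 is 150 ones with a single 10 at position [0, 0, 0, 0].
chan0 = np.ones(6 * 5 * 5)
chan0[0] = 10.0
mean, std = chan0.mean(), chan0.std()   # 1.06 and ~0.7324
print((10.0 - mean) / std)              # ~12.206555
print((1.0 - mean) / std)               # ~-0.081923
# Channels 1-3 are constant, hence the zero assertion on ret[0:6, 1:4, :, :].
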
Example #8
def test_batchnorm_rolling():
    layer = BatchNormalization(input_dim=5, rolling_accumulate=True)
    layer.initialize()
    x = T.matrix()

    x_val = np.ones((6, 5), dtype=theano.config.floatX)
    x_val[0, 0] = 10.0

    y = layer.apply(x)
    cg = ComputationGraph([y])

    _func = cg.get_theano_function()
    for i in range(100):
        ret = _func(x_val)
    u = layer.u.get_value()
    assert_allclose(u[0], 1.58491838)
    assert_allclose(u[1], 0.6339674)

    s = layer.s.get_value()
    assert_allclose(s[0], 7.13214684)
    assert_allclose(s[1], 0.)
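The asserted running statistics are consistent with an exponential moving average of the per-batch mean and biased variance that starts from zero and uses a 0.99 decay; the decay value is inferred from the numbers below, not taken from documentation:

# Column 0 of x_val is [10, 1, 1, 1, 1, 1]: batch mean 2.5, biased variance 11.25.
decay, u, s = 0.99, 0.0, 0.0
for _ in range(100):
    u = decay * u + (1 - decay) * 2.5     # running mean estimate
    s = decay * s + (1 - decay) * 11.25   # running variance estimate
print(u)   # ~1.5849, close to the asserted 1.58491838
print(s)   # ~7.1321, close to the asserted 7.13214684
# Column 1 is constant: u[1] approaches 1 - 0.99 ** 100 (~0.63397) and s[1] stays 0.
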
Example #10
def test_defaults_sequence1():
    seq = DefaultsSequence(input_dim=9,
                           lists=[
                               Linear(output_dim=10),
                               BatchNormalization(),
                               Rectifier(),
                               Linear(output_dim=12),
                               BatchNormalization(),
                               Rectifier()
                           ])
    seq.weights_init = Constant(1.0)
    seq.biases_init = Constant(0.0)
    seq.push_allocation_config()
    seq.push_initialization_config()
    seq.initialize()

    x = T.matrix('input')
    y = seq.apply(x)
    func_ = theano.function([x], [y])

    x_val = np.ones((1, 9), dtype=theano.config.floatX)
    res = func_(x_val)[0]
    assert_allclose(res.shape, (1, 12))
Example #11
def test_batchnorm_infer():
    layer = BatchNormalization(input_dim=(4, 5, 5))
    layer.accumulate = True
    layer.initialize()
    x = T.tensor4("features")
    x_val = [
        np.ones((6, 4, 5, 5), dtype=theano.config.floatX) for _ in range(2)
    ]
    x_val[0][0, 0, 0, 0] = 10.0
    x_val[1][0, 0, 0, 0] = -200.0
    y = layer.apply(x)

    dataset = IterableDataset(dict(features=x_val))
    data_stream = DataStream(dataset)
    cg = ComputationGraph([y])

    infer_population(data_stream, cg, 2)

    assert layer.use_population
    assert_allclose(layer.u.get_value(), np.array([0.72, 2, 2, 2]))
    assert_allclose(layer.n.get_value(), np.array([2]))
Example #12
def test_batchnorm_get_set():
    layer = BatchNormalization(input_dim=(4, 5, 5))
    layer.accumulate = True
    layer.initialize()
    x = T.tensor4("features")
    x_val = [
        np.ones((6, 4, 5, 5), dtype=theano.config.floatX) for _ in range(2)
    ]
    x_val[0][0, 0, 0, 0] = 10.0
    x_val[1][0, 0, 0, 0] = -200.0
    y = layer.apply(x)

    dataset = IterableDataset(dict(features=x_val))
    data_stream = DataStream(dataset)
    cg = Model([y])
    infer_population(data_stream, cg, 2)
    values_dict = get_batchnorm_parameter_values(cg)
    assert len(values_dict.keys()) == 3
    assert_allclose(values_dict['/batchnormalization.n'], np.array([2]))
    values_dict['/batchnormalization.n'] = np.array([5.], dtype="float32")
    set_batchnorm_parameter_values(cg, values_dict)

    assert_allclose(layer.n.get_value(), np.array([5.]))