Example #1
File: test_mlp.py Project: yyc2019/deeppy
import numpy as np
import deeppy as dp
from sklearn.datasets import make_classification


def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_classes=n_classes,
                               n_informative=n_classes * 2,
                               random_state=1)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    n_train = int(0.8 * n_samples)
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_test = x[n_train:]
    y_test = y[n_train:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Setup feeds
    batch_size = 16
    train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
    test_feed = dp.Feed(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(n_epochs=10)

    # Evaluate on test data
    error = np.mean(net.predict(test_feed) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #2
def test_fully_connected():
    confs = itertools.product(batch_sizes, n_ins, n_outs)
    for batch_size, n_in, n_out in confs:
        print('Affine: batch_size=%i, n_in=%i, n_out=%i' %
              (batch_size, n_in, n_out))
        x_shape = (batch_size, n_in)
        x = np.random.normal(size=x_shape).astype(ca.float_)
        w = np.random.normal(size=(n_in, n_out)).astype(ca.float_)
        b = np.random.normal(size=n_out).astype(ca.float_)
        layer = dp.Affine(n_out, weights=w, bias=b)
        layer.setup(x_shape)
        assert layer.y_shape(x_shape) == (batch_size, n_out)

        y = np.array(layer.fprop(ca.array(x)))
        assert allclose(np.dot(x, w) + b, y)

        # Use the layer output itself as the incoming gradient; the
        # expected input gradient is then y_grad dot w.T
        y_grad = y
        x_grad = np.array(layer.bprop(ca.array(y_grad)))
        assert allclose(np.dot(y_grad, w.T), x_grad)

        check_grad(layer, x)
        check_params(layer)
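
The check_grad and check_params helpers are not shown in this listing. A minimal finite-difference input-gradient check in the spirit of check_grad might look like the sketch below; numeric_grad_check is a hypothetical stand-in, not deeppy's actual implementation, and it assumes layer.setup(x.shape) has already been called.

import numpy as np
import cudarray as ca

def numeric_grad_check(layer, x, eps=1e-4, tol=1e-3):
    # Scalar loss: sum(y * y_grad) with a fixed upstream gradient, so the
    # analytic input gradient can be compared entry-wise against a central
    # finite-difference estimate.
    y = np.array(layer.fprop(ca.array(x)))
    y_grad = np.ones_like(y)
    x_grad = np.array(layer.bprop(ca.array(y_grad)))
    approx = np.zeros_like(x)
    x_flat = x.reshape(-1)            # views into x and approx
    approx_flat = approx.reshape(-1)
    for i in range(x_flat.size):
        orig = x_flat[i]
        x_flat[i] = orig + eps
        pos = np.sum(np.array(layer.fprop(ca.array(x))) * y_grad)
        x_flat[i] = orig - eps
        neg = np.sum(np.array(layer.fprop(ca.array(x))) * y_grad)
        x_flat[i] = orig
        approx_flat[i] = (pos - neg) / (2 * eps)
    assert np.allclose(x_grad, approx, atol=tol)

check_params presumably runs the analogous comparison for the layer's weight and bias gradients.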
Example #3
        # (truncated fragment) tail of a pair-labelling loop: y[n] is 1 for
        # a similar pair and 0 for a dissimilar one, matching the
        # dp.ContrastiveLoss used below
        y[n] = 1
    else:
        y[n] = 0
    n += 1

# Prepare network feeds
batch_size = 128
train_feed = dp.SupervisedSiameseFeed(x1, x2, y, batch_size=batch_size)

# Setup network
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
    siamese_layers=[
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=2,
            weights=dp.Parameter(dp.AutoFiller(w_gain)),
        ),
    ],
    loss=dp.ContrastiveLoss(margin=1.0),
)
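
This listing stops at the network definition. Following the training pattern used elsewhere on this page (dp.GradientDescent plus train_epochs), training the siamese network could proceed along these lines; the learning rate and epoch count here are illustrative guesses, not the original values.

learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
trainer = dp.GradientDescent(net, train_feed, learn_rule)
trainer.train_epochs(n_epochs=15)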
Example #4
File: test_mlp.py Project: obinsc/deeppy
def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(
        n_samples=n_samples, n_features=n_features, n_classes=n_classes,
        n_informative=n_classes*2, random_state=1
    )

    n_train = int(0.8 * n_samples)
    n_val = int(0.5 * (n_samples - n_train))

    x_train = x[:n_train]
    y_train = y[:n_train]
    x_val = x[n_train:n_train+n_val]
    y_val = y[n_train:n_train+n_val]
    x_test = x[n_train+n_val:]
    y_test = y[n_train+n_val:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_val = scaler.transform(x_val)
    x_test = scaler.transform(x_test)

    # Setup input
    batch_size = 16
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    val_input = dp.Input(x_val)
    test_input = dp.Input(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    def val_error():
        return np.mean(net.predict(val_input) != y_val)
    trainer = dp.GradientDescent(
        min_epochs=10, learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Evaluate on test data
    error = np.mean(net.predict(test_input) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #5
# (truncated) tail of a pool_layer helper returning a max-pooling layer
        method='max',
    )
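
The conv_layer and pool_layer helpers used below are cut off in this listing; only the method='max' tail of pool_layer survives above. A plausible reconstruction in the style of deeppy's convnet examples is sketched here. The filter shape, pooling window, strides, and filler settings are assumptions, not the original values.

def conv_layer(n_filters):
    # Assumed 5x5 filters with the AutoFiller/weight-decay pattern used
    # in the fully connected layers below.
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.003),
    )

def pool_layer():
    # Assumed 3x3 max-pooling with stride 2.
    return dp.Pool(
        win_shape=(3, 3),
        strides=(2, 2),
        method='max',
    )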


net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(64),
        dp.ReLU(),
        pool_layer(),
        dp.Flatten(),
        dp.Dropout(),
        dp.Affine(n_out=64,
                  weights=dp.Parameter(dp.AutoFiller(gain=1.25),
                                       weight_decay=0.03)),
        dp.ReLU(),
        dp.Affine(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(gain=1.25)),
        )
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

profile(net, train_input)
Example #6
n_epochs = 25
learn_rate = 0.025 / batch_size
learn_rule = dp.Momentum(learn_rate, momentum=0.9)
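# Pre-train each autoencoder greedily, one layer at a time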
for ae in sae.ae_models():
    trainer = dp.GradientDescent(ae, train_feed, learn_rule)
    trainer.train_epochs(n_epochs)

# Train stacked autoencoders
trainer = dp.GradientDescent(sae, train_feed, learn_rule)
trainer.train_epochs(n_epochs)

# Setup neural network using the stacked autoencoder layers
net = dp.NeuralNetwork(
    layers=sae.feedforward_layers() + [
        dp.Affine(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

# Fine-tune neural network
train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
test_feed = dp.Feed(x_test)
trainer = dp.GradientDescent(net, train_feed, learn_rule)
trainer.train_epochs(n_epochs)

# Evaluate on test data
error = np.mean(net.predict(test_feed) != y_test)
print('Test error rate: %.4f' % error)
Example #7
File: mlp_mnist.py Project: yyc2019/deeppy
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)

# Prepare network feeds
batch_size = 128
train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
test_feed = dp.Feed(x_test)

# Setup network
weight_gain = 2.0
weight_decay = 0.0005
net = dp.NeuralNetwork(
    layers=[
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(weight_gain),
                                 weight_decay=weight_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(weight_gain),
                                 weight_decay=weight_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)
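
The listing ends at the network definition. Completing it with the train-and-evaluate pattern from Examples #1 and #6 might look like this; the learning rate and epoch count are illustrative, not taken from the original file.

learn_rule = dp.Momentum(learn_rate=0.1 / batch_size, momentum=0.9)
trainer = dp.GradientDescent(net, train_feed, learn_rule)
trainer.train_epochs(n_epochs=25)
error = np.mean(net.predict(test_feed) != y_test)
print('Test error rate: %.4f' % error)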
Example #8
weight_gain_fc = 1.84
weight_decay_fc = 0.002
net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(64),
        dp.ReLU(),
        pool_layer(),
        dp.Flatten(),
        dp.Dropout(),
        dp.Affine(
            n_out=512,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
                                 weight_decay=weight_decay_fc),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

# Train network with a stage-wise learning-rate schedule
n_epochs = [50, 15, 15]
learn_rate = 0.05 / batch_size
learn_rule = dp.Momentum(momentum=0.9)
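
The loop that consumes n_epochs and learn_rate is cut off here. Given the three-stage epoch list, the listing presumably anneals the learning rate between stages; a sketch of such a loop follows, where the 10x reduction per stage is an assumption.

for i, stage_epochs in enumerate(n_epochs):
    # Assumed schedule: shrink the learning rate by a factor of 10 per stage.
    learn_rule.learn_rate = learn_rate / 10 ** i
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(stage_epochs)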