Example #1
import numpy as np
import deeppy as dp
from sklearn.datasets import make_classification


def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_classes=n_classes,
                               n_informative=n_classes * 2,
                               random_state=1)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    n_train = int(0.8 * n_samples)
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_test = x[n_train:]
    y_test = y[n_train:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Setup feeds
    batch_size = 16
    train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
    test_feed = dp.Feed(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(n_epochs=10)

    # Evaluate on test data
    error = np.mean(net.predict(test_feed) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #2
def test_softmaxcrossentropy():
    # batch_sizes, n_ins, check_grad and ca (cudarray) are module-level names
    # in the surrounding test module.
    confs = itertools.product(batch_sizes, n_ins)
    for batch_size, n_in in confs:
        print('SoftmaxCrossEntropy: batch_size=%i, n_in=%i' %
              (batch_size, n_in))
        x_shape = (batch_size, n_in)
        x = np.random.normal(size=x_shape)
        y = np.random.randint(low=0, high=n_in, size=batch_size)
        loss = dp.SoftmaxCrossEntropy()
        loss._setup(x_shape)
        assert loss.loss(ca.array(x), ca.array(y)).shape == x_shape[:1]
        check_grad(loss, x, y)
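For reference, dp.SoftmaxCrossEntropy produces one loss value per sample: the negative log of the softmax probability assigned to the true class, which is why the assertion above expects shape (batch_size,). A minimal plain-NumPy sketch of that computation (not the deeppy API):

import numpy as np

def softmax_cross_entropy(x, y):
    # x: (batch_size, n_in) scores, y: (batch_size,) integer labels in [0, n_in)
    e = np.exp(x - x.max(axis=1, keepdims=True))  # shift rows for numerical stability
    p = e / e.sum(axis=1, keepdims=True)          # row-wise softmax
    return -np.log(p[np.arange(x.shape[0]), y])   # per-sample negative log-likelihood

x = np.random.normal(size=(16, 8))
y = np.random.randint(low=0, high=8, size=16)
assert softmax_cross_entropy(x, y).shape == (16,)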
Example #3
def train_network(model, x_train, n_epochs=1000, learn_rate=0.2, batch_size=64,
                  seq_size=50, epoch_size=100):
    # RecurrentGraph, OneHot, Reshape and SupervisedSequenceInput are helper
    # classes defined elsewhere in the full example script.
    recurrent_nodes, fc_out = model
    n_classes = fc_out.n_out
    recurrent_graph = RecurrentGraph(
        recurrent_nodes=recurrent_nodes, seq_size=seq_size,
        batch_size=batch_size, cyclic=True, dropout=0.5
    )
    net = dp.NeuralNetwork(
        layers=[
            OneHot(n_classes=n_classes),
            Reshape((seq_size, batch_size, -1)),
            recurrent_graph,
            Reshape((seq_size*batch_size, -1)),
            fc_out,
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )
    net.phase = 'train'

    # Prepare network inputs
    train_input = SupervisedSequenceInput(
        x_train, seq_size=seq_size, batch_size=batch_size,
        epoch_size=epoch_size
    )

    # Train network
    try:
        trainer = dp.StochasticGradientDescent(
            max_epochs=n_epochs, min_epochs=n_epochs,
            learn_rule=dp.RMSProp(learn_rate=learn_rate),
        )
        test_error = None
        trainer.train(net, train_input, test_error)
    except KeyboardInterrupt:
        pass
    return recurrent_nodes, fc_out
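The two Reshape layers bracket the recurrent graph: the one-hot encoded inputs are folded into a time-major (seq_size, batch_size, n_classes) tensor for the recurrent nodes, then flattened back to one row per time step for fc_out. A plain NumPy sketch of that round trip, using the default shapes above (n_classes here is only an illustrative value):

import numpy as np

seq_size, batch_size, n_classes = 50, 64, 26
onehot = np.zeros((seq_size * batch_size, n_classes))    # output of OneHot
time_major = onehot.reshape((seq_size, batch_size, -1))  # input to the recurrent graph
flat = time_major.reshape((seq_size * batch_size, -1))   # input to fc_out
assert flat.shape == onehot.shape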
Example #4
def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(
        n_samples=n_samples, n_features=n_features, n_classes=n_classes,
        n_informative=n_classes*2, random_state=1
    )

    n_train = int(0.8 * n_samples)
    n_val = int(0.5 * (n_samples - n_train))

    x_train = x[:n_train]
    y_train = y[:n_train]
    x_val = x[n_train:n_train+n_val]
    y_val = y[n_train:n_train+n_val]
    x_test = x[n_train+n_val:]
    y_test = y[n_train+n_val:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_val = scaler.transform(x_val)
    x_test = scaler.transform(x_test)

    # Setup input
    batch_size = 16
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    val_input = dp.Input(x_val)
    test_input = dp.Input(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    def val_error():
        return np.mean(net.predict(val_input) != y_val)
    trainer = dp.GradientDescent(
        min_epochs=10, learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Evaluate on test data
    error = np.mean(net.predict(test_input) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #5
# pool_layer, weight_gain_fc, weight_decay_fc, dataset and train_input are
# defined earlier in the full example script.
net = dp.NeuralNetwork(
    layers=[
        # ... earlier layers omitted from this snippet ...
        dp.Activation('relu'),
        pool_layer(),
        dp.Flatten(),
        dp.DropoutFullyConnected(
            n_out=512,
            dropout=0.5,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
                                 weight_decay=weight_decay_fc),
        ),
        dp.Activation('relu'),
        dp.FullyConnected(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

# Train network
n_epochs = [50, 15, 15]
learn_rate = 0.05
momentum = 0.88
for i, epochs in enumerate(n_epochs):
    trainer = dp.StochasticGradientDescent(
        max_epochs=epochs,
        learn_rule=dp.Momentum(learn_rate=learn_rate / 10**i,
                               momentum=momentum),
    )
    trainer.train(net, train_input)
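The loop above anneals the learning rate by a factor of 10 per stage, so the network trains 50 epochs at 0.05, then 15 at 0.005, then 15 at 0.0005; a quick check of the schedule:

n_epochs = [50, 15, 15]
learn_rate = 0.05
for i, epochs in enumerate(n_epochs):
    print('stage %d: %d epochs at learn_rate %g' % (i, epochs, learn_rate / 10**i))
# stage 0: 50 epochs at learn_rate 0.05
# stage 1: 15 epochs at learn_rate 0.005
# stage 2: 15 epochs at learn_rate 0.0005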