Example #1
import numpy as np
import deeppy as dp
from sklearn.datasets import make_classification


def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_classes=n_classes,
                               n_informative=n_classes * 2,
                               random_state=1)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    n_train = int(0.8 * n_samples)
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_test = x[n_train:]
    y_test = y[n_train:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Setup feeds
    batch_size = 16
    train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
    test_feed = dp.Feed(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(n_epochs=10)

    # Evaluate on test data
    error = np.mean(net.predict(test_feed) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #2
import numpy as np
import scipy.io
import deeppy as dp


def vgg_net(path, pool_method='max', border_mode='same'):
    # Load the MatConvNet model file; the nested indexing below unpacks
    # the MATLAB cell arrays as scipy.io.loadmat represents them.
    matconvnet = scipy.io.loadmat(path)
    img_mean = matconvnet['meta'][0][0][2][0][0][2]
    vgg_layers = matconvnet['layers'][0]
    layers = []
    for layer in vgg_layers:
        layer = layer[0][0]
        layer_type = layer[1][0]
        if layer_type == 'conv':
            params = layer[2][0]
            weights = params[0]
            bias = params[1]
            # MatConvNet stores convolution weights as (height, width, n_in,
            # n_out); transpose to deeppy's (n_out, n_in, height, width).
            weights = np.transpose(weights, (3, 2, 0, 1)).astype(dp.float_)
            bias = np.reshape(bias, (1, bias.size, 1, 1)).astype(dp.float_)
            layers.append(conv_layer(weights, bias, border_mode))
        elif layer_type == 'pool':
            layers.append(pool_layer(pool_method, border_mode))
        elif layer_type == 'relu':
            layers.append(dp.ReLU())
        elif layer_type == 'softmax':
            pass
        else:
            raise ValueError('invalid layer type: %s' % layer_type)
    return layers, img_mean
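
The conv_layer and pool_layer helpers are defined elsewhere in the source
file. A minimal sketch of what they might look like, assuming deeppy exposes
Convolution and Pool layers with these keyword arguments (the layer names and
signatures are assumptions, not confirmed by the snippet):

def conv_layer(weights, bias, border_mode):
    # Hypothetical helper: wrap pretrained VGG weights in a deeppy conv layer.
    return dp.Convolution(
        n_filters=weights.shape[0],
        filter_shape=weights.shape[2:],
        border_mode=border_mode,
        weights=weights,
        bias=bias,
    )

def pool_layer(method, border_mode):
    # Hypothetical helper: VGG uses 2x2 pooling with stride 2 throughout.
    return dp.Pool(
        win_shape=(2, 2),
        strides=(2, 2),
        border_mode=border_mode,
        method=method,
    )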
Example #3
    n += 1  # tail of a truncated data-preparation loop; x1, x2, y are built above

# Prepare network feeds
batch_size = 128
train_feed = dp.SupervisedSiameseFeed(x1, x2, y, batch_size=batch_size)

# Setup network
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
    siamese_layers=[
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=2,
            weights=dp.Parameter(dp.AutoFiller(w_gain)),
        ),
    ],
    loss=dp.ContrastiveLoss(margin=1.0),
)

# Train network
learn_rate = 0.01 / batch_size
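
The snippet ends before a trainer is constructed. Following the training
pattern from Example #1, it would plausibly continue along these lines (a
sketch; n_epochs is an illustrative value, not from the source):

learn_rule = dp.Momentum(learn_rate=learn_rate, momentum=0.9)
trainer = dp.GradientDescent(net, train_feed, learn_rule)
trainer.train_epochs(n_epochs=10)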
Example #4
File: test_mlp.py  Project: obinsc/deeppy
import numpy as np
import deeppy as dp
from sklearn.datasets import make_classification


def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(
        n_samples=n_samples, n_features=n_features, n_classes=n_classes,
        n_informative=n_classes*2, random_state=1
    )

    n_train = int(0.8 * n_samples)
    n_val = int(0.5 * (n_samples - n_train))

    x_train = x[:n_train]
    y_train = y[:n_train]
    x_val = x[n_train:n_train+n_val]
    y_val = y[n_train:n_train+n_val]
    x_test = x[n_train+n_val:]
    y_test = y[n_train+n_val:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_val = scaler.transform(x_val)
    x_test = scaler.transform(x_test)

    # Setup input
    batch_size = 16
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    val_input = dp.Input(x_val)
    test_input = dp.Input(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    def val_error():
        return np.mean(net.predict(val_input) != y_val)
    trainer = dp.GradientDescent(
        min_epochs=10, learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Evaluate on test data
    error = np.mean(net.predict(test_input) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2