Example #1
def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    net = dp.NeuralNetwork(
        layers=[
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=dataset.n_classes,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.MultinomialLogReg(),
        ],
    )

    # Train neural network
    def val_error():
        return net.error(test_input)
    trainer = dp.StochasticGradientDescent(
        max_epochs=25,
        learn_rule=dp.Momentum(learn_rate=0.1, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Visualize weights from first layer
    W = next(np.array(layer.params()[0].array) for layer in net.layers
             if isinstance(layer, dp.FullyConnected))
    W = np.reshape(W.T, (-1, 28, 28))
    filepath = os.path.join('mnist', 'mlp_weights.png')
    dp.misc.img_save(dp.misc.img_tile(dp.misc.img_stretch(W)), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
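The snippet above assumes the standard preamble used throughout these deeppy examples. A minimal sketch of the assumed imports and entry point (not part of the original file):

# Assumed preamble for Example #1 (imports inferred from the code above):
import os
import numpy as np
import deeppy as dp

if __name__ == '__main__':
    run()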
Example #2
def test_recurrent():
    n_outs = [1, 4, 5]
    confs = itertools.product(batch_sizes, n_ins, n_hiddens, n_outs)
    for batch_size, n_in, n_hidden, n_out in confs:
        print('Recurrent: batch_size=%i, n_in=%i, n_hidden=%i, n_out=%i' %
              (batch_size, n_in, n_hidden, n_out))
        x_shape = (batch_size, n_in)
        h_shape = (batch_size, n_hidden)
        y_shape = (batch_size, n_out)

        x = np.random.normal(size=x_shape).astype(dp.float_)
        h = np.random.normal(size=h_shape).astype(dp.float_)
        y = np.random.normal(size=y_shape).astype(dp.float_)
        h_next = np.random.normal(size=h_shape).astype(dp.float_)

        node = Recurrent(
            n_hidden=n_hidden,
            n_out=n_out,
            w_xh=dp.AutoFiller(),
            w_hh=dp.AutoFiller(),
            w_hy=dp.AutoFiller(),
        )

        in_arrays = {'x': x, 'h': h}
        grad_arrays = {'y': y, 'h': h_next}

        check_node_grads(node, in_arrays, grad_arrays, rtol=rtol, atol=atol)
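The test iterates over module-level configuration lists (batch_sizes, n_ins, n_hiddens) and tolerances (rtol, atol) that the snippet does not show. A plausible sketch of those globals; all values here are hypothetical placeholders, not the project's actual settings:

# Hypothetical module-level test setup (values assumed for illustration):
import itertools
import numpy as np
import deeppy as dp

batch_sizes = [1, 8]   # assumed
n_ins = [1, 3, 8]      # assumed
n_hiddens = [2, 6]     # assumed
rtol = 1e-04           # assumed gradient-check tolerances
atol = 1e-05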
Example #3
def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_classes=n_classes,
                               n_informative=n_classes * 2,
                               random_state=1)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    n_train = int(0.8 * n_samples)
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_test = x[n_train:]
    y_test = y[n_train:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Setup feeds
    batch_size = 16
    train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
    test_feed = dp.Feed(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(n_epochs=10)

    # Evaluate on test data
    error = np.mean(net.predict(test_feed) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #4
def new_model(n_classes, n_layers, n_hidden):
    # filler = dp.UniformFiller(low=-0.05, high=0.05)
    filler = dp.AutoFiller(gain=1.25)
    def rnn_node():
        return GatedRecurrent(n_hidden=n_hidden, w_x=filler, w_h=filler)
    recurrent_nodes = [rnn_node() for _ in range(n_layers)]
    fc_out = dp.FullyConnected(n_out=n_classes, weights=dp.AutoFiller())
    return recurrent_nodes, fc_out
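A hedged usage sketch of the factory above; the argument values are illustrative only:

# Illustrative only: a 2-layer gated-recurrent stack with a linear readout
recurrent_nodes, fc_out = new_model(n_classes=10, n_layers=2, n_hidden=256)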
Example #5
def affine(n_out, gain, wdecay=0.0, bias=0.0):
    if bias is None:
        return ex.nnet.Linear(
            n_out=n_out,
            weights=dp.Parameter(dp.AutoFiller(gain), weight_decay=wdecay),
        )
    else:
        return ex.nnet.Affine(
            n_out=n_out,
            bias=bias,
            weights=dp.Parameter(dp.AutoFiller(gain), weight_decay=wdecay),
        )
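The helper dispatches on bias: passing bias=None selects a bias-free Linear layer, anything else an Affine layer with that bias value. An illustrative usage sketch:

# Illustrative only:
hidden = affine(n_out=256, gain=1.0, wdecay=1e-4)  # Affine with bias=0.0
logits = affine(n_out=10, gain=1.0, bias=None)     # bias-free Linear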
Example #6
def conv_layer(n_filters):
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        border_mode='full',
        weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.003),
    )
Example #7
def conv_layer(n_filters):
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        border_mode='valid',
        weights=dp.Parameter(dp.AutoFiller(gain=1.39), weight_decay=0.0005),
    )
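Examples #6 and #7 differ mainly in the border mode. Under the standard convolution shape rules (stride 1), a k x k filter on an H x W input gives (H-k+1) x (W-k+1) for 'valid', (H+k-1) x (W+k-1) for 'full', and H x W for 'same'. A quick arithmetic check for the 5 x 5 filters used here:

# Output-size arithmetic for a 5x5 filter on a 28x28 input, stride 1:
k, H = 5, 28
assert H - k + 1 == 24   # 'valid': filter stays fully inside the input
assert H + k - 1 == 32   # 'full': every partial overlap produces an output
# 'same' pads so that the output stays 28x28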
Example #8
def run():
    np.random.seed(3)
    layers = [
        dp.Activation('relu'),
        dp.Activation('sigmoid'),
        dp.Activation('tanh'),
        dp.FullyConnected(
            n_output=3,
            weights=dp.AutoFiller(),
        ),
        dp.Dropout(0.2),
        dp.DropoutFullyConnected(
            n_output=10,
            weights=dp.AutoFiller(),
            dropout=0.5,
        ),
    ]

    input_shape = (1, 5)
    x = np.random.normal(size=input_shape).astype(dp.float_)
    for layer in layers:
        dp.misc.check_bprop(layer, x)

    conv_layers = [
        dp.Convolutional(
            n_filters=32,
            filter_shape=(3, 3),
            border_mode='same',
            weights=dp.AutoFiller(),
        ),
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            border_mode='valid',
            weights=dp.AutoFiller(),
        ),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        )
    ]
    input_shape = (5, 3, 8, 8)
    x = np.random.normal(size=input_shape).astype(dp.float_)
    for layer in conv_layers:
        dp.misc.check_bprop(layer, x)
Example #9
def backconv(n_filters, filter_size, stride=2, gain=1.0, wdecay=0.0, bias=0.0):
    return ex.nnet.BackwardConvolution(
        n_filters=n_filters,
        strides=(stride, stride),
        weights=dp.Parameter(dp.AutoFiller(gain), weight_decay=wdecay),
        bias=bias,
        filter_shape=(filter_size, filter_size),
        border_mode='same',
    )
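BackwardConvolution is used here as a transposed (fractionally strided) convolution: with strides=(2, 2) and border_mode='same', each call roughly doubles the spatial resolution, the usual upsampling step in a decoder or generator. A hedged usage sketch (shape behavior assumed, not verified against the library):

# Illustrative decoder step; exact output shapes depend on the library's padding rules
upsample = backconv(n_filters=64, filter_size=5, stride=2)
# e.g. a (batch, channels, 8, 8) feature map would come out near (batch, 64, 16, 16)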
Example #10
def aae_latent_encoder(n_hidden, n_discriminator=1024, recon_weight=0.025):
    wgain = 1.0
    discriminator = ex.Sequential([
        affine(n_discriminator, wgain, bias=None),
        ex.nnet.BatchNormalization(),
        ex.nnet.ReLU(),
        affine(n_discriminator, wgain, bias=None),
        ex.nnet.BatchNormalization(),
        ex.nnet.ReLU(),
        affine(1, wgain),
        ex.nnet.Sigmoid(),
    ])
    latent_encoder = model.ae.AdversarialEncoder(
        n_hidden,
        discriminator,
        dp.AutoFiller(),
        recon_weight=recon_weight,
    )
    return latent_encoder
Example #11
import numpy as np
import deeppy as dp

net = dp.NeuralNetwork(layers=[
    dp.Convolutional(
        n_filters=32,
        filter_shape=(5, 5),
        weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
    ),
    dp.Activation('relu'),
    dp.Pool(
        win_shape=(3, 3),
        strides=(2, 2),
        method='max',
    ),
    dp.Convolutional(
        n_filters=64,
        filter_shape=(5, 5),
        weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
    ),
    dp.Activation('relu'),
    dp.Pool(
        win_shape=(3, 3),
        strides=(2, 2),
        method='max',
    ),
    dp.Flatten(),
    dp.FullyConnected(
        n_output=128,
        weights=dp.Parameter(dp.AutoFiller()),
    ),
    dp.FullyConnected(
        n_output=10,  # assumed: the snippet is truncated here; mirrors Example #14
        weights=dp.Parameter(dp.AutoFiller()),
    ),
    dp.MultinomialLogReg(),
])
Example #12
    else:
        y[n] = 0
    n += 1

# Prepare network feeds
batch_size = 128
train_feed = dp.SupervisedSiameseFeed(x1, x2, y, batch_size=batch_size)

# Setup network
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
    siamese_layers=[
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.Affine(
            n_out=2,
            weights=dp.Parameter(dp.AutoFiller(w_gain)),
        ),
    ],
    loss=dp.ContrastiveLoss(margin=1.0),
)
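dp.ContrastiveLoss(margin=1.0) follows the standard contrastive formulation: similar pairs (y=1) are penalized by their squared distance, dissimilar pairs (y=0) only while they are closer than the margin. A minimal NumPy sketch of that loss, assuming d holds the Euclidean distances between the two siamese embeddings (illustrative, not deeppy's internal code):

import numpy as np

def contrastive_loss(d, y, margin=1.0):
    # y == 1: similar pair, pull embeddings together
    # y == 0: dissimilar pair, push apart up to the margin
    return np.mean(y * d**2 + (1 - y) * np.maximum(0.0, margin - d)**2)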
Example #13
net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.Activation('relu'),
        pool_layer(),
        conv_layer(32),
        dp.Activation('relu'),
        pool_layer(),
        conv_layer(64),
        dp.Activation('relu'),
        pool_layer(),
        dp.Flatten(),
        dp.DropoutFullyConnected(
            n_out=64,
            weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.03)
        ),
        dp.Activation('relu'),
        dp.FullyConnected(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(gain=1.25)),
        )
    ],
    loss=dp.SoftmaxCrossEntropy(),
)


# Train network
def test_error():
    return np.mean(net.predict(test_input) != y_test)
n_epochs = [8, 8]
Example #14
def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data()
    x = x.astype(dp.float_)[:, np.newaxis, :, :]
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    net = dp.NeuralNetwork(layers=[
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        ),
        dp.Convolutional(
            n_filters=64,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        ),
        dp.Flatten(),
        dp.FullyConnected(
            n_output=128,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.FullyConnected(
            n_output=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.MultinomialLogReg(),
    ])

    # Train neural network
    def val_error():
        return net.error(test_input)

    trainer = dp.StochasticGradientDescent(
        max_epochs=15,
        learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Visualize convolutional filters to disk
    for l, layer in enumerate(net.layers):
        if not isinstance(layer, dp.Convolutional):
            continue
        W = np.array(layer.params()[0].array)
        filepath = os.path.join('mnist', 'conv_layer_%i.png' % l)
        dp.misc.img_save(dp.misc.conv_filter_tile(W), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
Example #15
File: test_mlp.py  Project: obinsc/deeppy
def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(
        n_samples=n_samples, n_features=n_features, n_classes=n_classes,
        n_informative=n_classes*2, random_state=1
    )

    n_train = int(0.8 * n_samples)            # 800 training samples
    n_val = int(0.5 * (n_samples - n_train))  # 100 validation samples (half the remainder)

    x_train = x[:n_train]
    y_train = y[:n_train]
    x_val = x[n_train:n_train+n_val]
    y_val = y[n_train:n_train+n_val]
    x_test = x[n_train+n_val:]
    y_test = y[n_train+n_val:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_val = scaler.transform(x_val)
    x_test = scaler.transform(x_test)

    # Setup input
    batch_size = 16
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    val_input = dp.Input(x_val)
    test_input = dp.Input(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    def val_error():
        return np.mean(net.predict(val_input) != y_val)
    trainer = dp.GradientDescent(
        min_epochs=10, learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Evaluate on test data
    error = np.mean(net.predict(test_input) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #16
weight_gain_fc = 1.84
weight_decay_fc = 0.002
net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.Activation('relu'),
        pool_layer(),
        conv_layer(64),
        dp.Activation('relu'),
        pool_layer(),
        dp.Flatten(),
        dp.DropoutFullyConnected(
            n_out=512,
            dropout=0.5,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
                                 weight_decay=weight_decay_fc),
        ),
        dp.Activation('relu'),
        dp.FullyConnected(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
        ),
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

# Train network
n_epochs = [50, 15, 15]
learn_rate = 0.05
momentum = 0.88
Example #17
def vae_latent_encoder(n_hidden):
    latent_encoder = model.ae.NormalEncoder(n_hidden, dp.AutoFiller())
    return latent_encoder
Example #18
def pool_layer():
    # Reconstructed head: the snippet is truncated; window and stride values
    # are assumed from the identical Pool layers in Examples #8 and #14.
    return dp.Pool(
        win_shape=(3, 3),
        strides=(2, 2),
        method='max',
    )


net = dp.NeuralNetwork(
    layers=[
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(32),
        dp.ReLU(),
        pool_layer(),
        conv_layer(64),
        dp.ReLU(),
        pool_layer(),
        dp.Flatten(),
        dp.Dropout(),
        dp.Affine(n_out=64,
                  weights=dp.Parameter(dp.AutoFiller(gain=1.25),
                                       weight_decay=0.03)),
        dp.ReLU(),
        dp.Affine(
            n_out=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller(gain=1.25)),
        )
    ],
    loss=dp.SoftmaxCrossEntropy(),
)

profile(net, train_input)
Example #19
def run():
    # Prepare MNIST data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Generate image pairs
    n_pairs = 100000
    x1 = np.empty((n_pairs, 28 * 28), dtype=dp.float_)
    x2 = np.empty_like(x1, dtype=dp.float_)
    y = np.empty(n_pairs, dtype=dp.int_)
    n_imgs = x_train.shape[0]
    n = 0
    while n < n_pairs:
        i = random.randint(0, n_imgs - 1)
        j = random.randint(0, n_imgs - 1)
        if i == j:
            continue
        x1[n, ...] = x_train[i]
        x2[n, ...] = x_train[j]
        if y_train[i] == y_train[j]:
            y[n] = 1
        else:
            y[n] = 0
        n += 1

    # Input to network
    train_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup network
    net = dp.SiameseNetwork(
        siamese_layers=[
            dp.Dropout(),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=2,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
        ],
        loss_layer=dp.ContrastiveLoss(margin=0.5),
    )

    # Train network
    trainer = dp.StochasticGradientDescent(
        max_epochs=10,
        learn_rule=dp.RMSProp(learn_rate=0.001),
    )
    trainer.train(net, train_input)

    # Visualize feature space
    feat = net.features(test_input)
    colors = [
        'tomato', 'lawngreen', 'royalblue', 'gold', 'saddlebrown', 'violet',
        'turquoise', 'mediumpurple', 'darkorange', 'darkgray'
    ]
    plt.figure()
    for i in range(10):
        plt.scatter(feat[y_test == i, 0],
                    feat[y_test == i, 1],
                    s=3,
                    c=colors[i],
                    linewidths=0)
    plt.legend([str(i) for i in range(10)], scatterpoints=1, markerscale=4)
    if not os.path.exists('mnist'):
        os.makedirs('mnist')
    plt.savefig(os.path.join('mnist', 'siamese_dists.png'), dpi=200)
Example #20
def affine(n_out, gain):
    return expr.nnet.Affine(n_out=n_out, weights=dp.AutoFiller(gain))
Example #21
        ])
        for i in range(numberOfModels):
            cor[i] = np.corrcoef(cnn_rep[:, i], pred_train[:, i])[0, 1]

        svm_model = OneVsRestClassifier(LinearSVC(random_state=0))
        svm_model.fit(cnn_rep[:, 0:numberOfModels], cnn_targets)
        prediction = svm_model.predict(pred)

        acc = np.sum(prediction == y_test) / float(np.size(y_test))

        # Setup neural network using the stacked autoencoder layers
        net = dp.NeuralNetwork(
            [
                dp.FullyConnected(
                    n_out=100,  # neuronNum[-1]
                    weights=dp.Parameter(dp.AutoFiller()),
                ),
                dp.Sigmoid(),
                dp.FullyConnected(
                    n_out=10,
                    weights=dp.Parameter(dp.AutoFiller()),
                ),
            ],
            loss=dp.loss.MeanSquaredError(),
        )

        # Fine-tune neural network
        train_input = dp.SupervisedInput(brain_rep,
                                         cnn_rep,
                                         batch_size=batch_size)
        test_input = dp.Input(x_test)