Example #1
import os

import numpy as np
import deeppy as dp


def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    net = dp.NeuralNetwork(
        layers=[
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=dataset.n_classes,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
            ),
            dp.MultinomialLogReg(),
        ],
    )

    # Train neural network
    def val_error():
        return net.error(test_input)
    trainer = dp.StochasticGradientDescent(
        max_epochs=25,
        learn_rule=dp.Momentum(learn_rate=0.1, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Visualize weights from first layer
    W = next(np.array(layer.params()[0].array) for layer in net.layers
             if isinstance(layer, dp.FullyConnected))
    W = np.reshape(W.T, (-1, 28, 28))
    if not os.path.exists('mnist'):
        os.makedirs('mnist')
    filepath = os.path.join('mnist', 'mlp_weights.png')
    dp.misc.img_save(dp.misc.img_tile(dp.misc.img_stretch(W)), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
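The transpose before the reshape is the subtle step in the visualization: judging from the shapes in the code above, the first FullyConnected layer's weight matrix has one column per hidden unit, so W.T lines the 800 filters up along the first axis before they are cut into 28x28 tiles. A toy numpy illustration (random stand-in data, shapes inferred from the example):

import numpy as np

# Stand-in for the first layer's weights: 784 input pixels x 800 hidden units.
W = np.random.randn(28 * 28, 800)

# Each column of W is one unit's filter over the 784 pixels; transposing
# first gives (800, 784), which reshapes to one 28x28 tile per unit.
tiles = np.reshape(W.T, (-1, 28, 28))
assert tiles.shape == (800, 28, 28)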
Example #2
import os

import numpy as np
import deeppy as dp


def run():
    # Prepare data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data()
    x = x.astype(dp.float_)[:, np.newaxis, :, :]
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup neural network
    net = dp.NeuralNetwork(layers=[
        dp.Convolutional(
            n_filters=32,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        ),
        dp.Convolutional(
            n_filters=64,
            filter_shape=(5, 5),
            weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.0001),
        ),
        dp.Activation('relu'),
        dp.Pool(
            win_shape=(3, 3),
            strides=(2, 2),
            method='max',
        ),
        dp.Flatten(),
        dp.FullyConnected(
            n_output=128,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.FullyConnected(
            n_output=dataset.n_classes,
            weights=dp.Parameter(dp.AutoFiller()),
        ),
        dp.MultinomialLogReg(),
    ])

    # Train neural network
    def val_error():
        return net.error(test_input)

    trainer = dp.StochasticGradientDescent(
        max_epochs=15,
        learn_rule=dp.Momentum(learn_rate=0.01, momentum=0.9),
    )
    trainer.train(net, train_input, val_error)

    # Visualize convolutional filters to disk
    if not os.path.exists('mnist'):
        os.makedirs('mnist')
    for l, layer in enumerate(net.layers):
        if not isinstance(layer, dp.Convolutional):
            continue
        W = np.array(layer.params()[0].array)
        filepath = os.path.join('mnist', 'conv_layer_%i.png' % l)
        dp.misc.img_save(dp.misc.conv_filter_tile(W), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
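The trained network only lives for the duration of run(). deeppy does not expose a documented save API in this example, so one hedged option for keeping the model around is plain pickling, assuming the network object happens to be picklable (hypothetical file name):

import pickle

# Sketch only: assumes `net` (the trained dp.NeuralNetwork) is picklable.
with open('mnist_convnet.pkl', 'wb') as f:
    pickle.dump(net, f)

# Later, reload it for inference:
with open('mnist_convnet.pkl', 'rb') as f:
    net = pickle.load(f)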
Example #3
import os
import random

import numpy as np
import matplotlib.pyplot as plt
import deeppy as dp


def run():
    # Prepare MNIST data
    dataset = dp.datasets.MNIST()
    x, y = dataset.data(flat=True)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(high=255.)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Generate image pairs
    n_pairs = 100000
    x1 = np.empty((n_pairs, 28 * 28), dtype=dp.float_)
    x2 = np.empty_like(x1, dtype=dp.float_)
    y = np.empty(n_pairs, dtype=dp.int_)
    n_imgs = x_train.shape[0]
    n = 0
    while n < n_pairs:
        i = random.randint(0, n_imgs - 1)
        j = random.randint(0, n_imgs - 1)
        if i == j:
            continue
        x1[n, ...] = x_train[i]
        x2[n, ...] = x_train[j]
        # Label pairs: 1 if the two images share a class, 0 otherwise
        if y_train[i] == y_train[j]:
            y[n] = 1
        else:
            y[n] = 0
        n += 1

    # Input to network
    train_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)
    test_input = dp.SupervisedInput(x_test, y_test)

    # Setup network
    net = dp.SiameseNetwork(
        siamese_layers=[
            dp.Dropout(),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=800,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=2,
                weights=dp.Parameter(dp.AutoFiller(), weight_decay=0.00001),
            ),
        ],
        loss_layer=dp.ContrastiveLoss(margin=0.5),
    )

    # Train network
    trainer = dp.StochasticGradientDescent(
        max_epochs=10,
        learn_rule=dp.RMSProp(learn_rate=0.001),
    )
    trainer.train(net, train_input)

    # Visualize feature space
    feat = net.features(test_input)
    colors = [
        'tomato', 'lawngreen', 'royalblue', 'gold', 'saddlebrown', 'violet',
        'turquoise', 'mediumpurple', 'darkorange', 'darkgray'
    ]
    plt.figure()
    for i in range(10):
        plt.scatter(feat[y_test == i, 0],
                    feat[y_test == i, 1],
                    s=3,
                    c=colors[i],
                    linewidths=0)
    plt.legend([str(i) for i in range(10)], scatterpoints=1, markerscale=4)
    if not os.path.exists('mnist'):
        os.makedirs('mnist')
    plt.savefig(os.path.join('mnist', 'siamese_dists.png'), dpi=200)
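The scatter plot only shows the embedding qualitatively. A quick quantitative check, not part of the original example, is a leave-one-out 1-nearest-neighbour error on the 2-D features; the sketch below is pure numpy and reuses feat and y_test as computed above:

import numpy as np

def nn_error(feat, labels):
    # Leave-one-out 1-NN error, computed row by row to keep memory flat.
    errors = 0
    for i in range(len(feat)):
        d = np.linalg.norm(feat - feat[i], axis=1)
        d[i] = np.inf                     # exclude the query point itself
        errors += labels[np.argmin(d)] != labels[i]
    return errors / float(len(feat))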
Example #4
        numberOfAugmentation = 100
        for i in range(numberOfAugmentation):
            for j in range(len(test_index)):
                # Mark every augmented copy of each test sample as NaN
                # (864 appears to be the size of the base dataset, so copy i
                # of sample k lives at row i * 864 + k).
                features[i * 864 + test_index[j], :] = np.nan
                targets[0, i * 864 + test_index[j]] = np.nan

        # Drop every NaN-marked row in one pass; note that
        # np.delete(features, test_index) would only remove a single copy
        # of each test sample, not its augmented duplicates.
        x_train = features[~np.all(np.isnan(features), axis=1)]
        y_train = targets[~np.isnan(targets)]
        y_train = y_train.astype('int')

        # Normalize pixel intensities
        scaler = dp.UniformScaler()
        x_train = scaler.fit_transform(x_train)
        x_test = scaler.transform(x_test)

        # Shuffle the training set
        ind = np.arange(len(y_train))
        np.random.shuffle(ind)
        y_train = y_train[ind]
        x_train = x_train[ind, :]

        # Prepare autoencoder input
        batch_size = 50
        train_input = dp.Input(x_train, batch_size=batch_size)

        # Setup autoencoders
        sae = dp.StackedAutoencoder(layers=[
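The NaN-masking idiom above is easier to follow on toy data: rows to exclude are overwritten with NaN, then removed in a single boolean-mask pass. A self-contained illustration with made-up values:

import numpy as np

features = np.arange(12, dtype=float).reshape(4, 3)
targets = np.array([0., 1., 2., 3.])
test_index = [1, 3]

# Mark the held-out rows as NaN, then filter them out.
features[test_index, :] = np.nan
targets[test_index] = np.nan

x_train = features[~np.all(np.isnan(features), axis=1)]
y_train = targets[~np.isnan(targets)].astype(int)
assert x_train.shape == (2, 3) and list(y_train) == [0, 2]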
Example #5
import os

import numpy as np
import deeppy as dp


def run():
    # Prepare data
    dataset = dp.datasets.CIFAR10()
    x, y = dataset.data()
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    train_idx, test_idx = dataset.split()
    x_train = x[train_idx]
    y_train = y[train_idx]
    x_test = x[test_idx]
    y_test = y[test_idx]

    scaler = dp.UniformScaler(feature_wise=True)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
    test_input = dp.SupervisedInput(x_test, y_test, batch_size=batch_size)

    # Setup neural network
    pool_kwargs = {
        'win_shape': (3, 3),
        'strides': (2, 2),
        'border_mode': 'same',
        'method': 'max',
    }
    net = dp.NeuralNetwork(
        layers=[
            dp.Convolutional(
                n_filters=32,
                filter_shape=(5, 5),
                border_mode='same',
                weights=dp.Parameter(dp.NormalFiller(sigma=0.0001),
                                     weight_decay=0.004, monitor=True),
            ),
            dp.Activation('relu'),
            dp.Pool(**pool_kwargs),
            dp.Convolutional(
                n_filters=32,
                filter_shape=(5, 5),
                border_mode='same',
                weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                     weight_decay=0.004, monitor=True),
            ),
            dp.Activation('relu'),
            dp.Pool(**pool_kwargs),
            dp.Convolutional(
                n_filters=64,
                filter_shape=(5, 5),
                border_mode='same',
                weights=dp.Parameter(dp.NormalFiller(sigma=0.01),
                                     weight_decay=0.004, monitor=True),
            ),
            dp.Activation('relu'),
            dp.Pool(**pool_kwargs),
            dp.Flatten(),
            dp.FullyConnected(
                n_output=64,
                weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                     weight_decay=0.004, monitor=True),
            ),
            dp.Activation('relu'),
            dp.FullyConnected(
                n_output=dataset.n_classes,
                weights=dp.Parameter(dp.NormalFiller(sigma=0.1),
                                     weight_decay=0.004, monitor=True),
            ),
            dp.MultinomialLogReg(),
        ],
    )

    # Train neural network
    def val_error():
        return net.error(test_input)
    # Two stages of 8 epochs; the learning rate drops by a factor of 10
    # between stages (0.001, then 0.0001).
    n_epochs = [8, 8]
    learn_rate = 0.001
    for i, max_epochs in enumerate(n_epochs):
        lr = learn_rate / 10 ** i
        trainer = dp.StochasticGradientDescent(
            max_epochs=max_epochs,
            learn_rule=dp.Momentum(learn_rate=lr, momentum=0.9),
        )
        trainer.train(net, train_input, val_error)

    # Visualize convolutional filters to disk
    if not os.path.exists('cifar10'):
        os.makedirs('cifar10')
    for l, layer in enumerate(net.layers):
        if not isinstance(layer, dp.Convolutional):
            continue
        W = np.array(layer.params()[0].array)
        filepath = os.path.join('cifar10', 'conv_layer_%i.png' % l)
        dp.misc.img_save(dp.misc.conv_filter_tile(W), filepath)

    # Evaluate on test data
    error = net.error(test_input)
    print('Test error rate: %.4f' % error)
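Each example defines run() without calling it. Assuming every example is saved as its own script, a minimal entry point is:

if __name__ == '__main__':
    run()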