Example #1
import numpy as np
from sklearn.datasets import make_classification
import deeppy as dp


def test_classification():
    # Make dataset
    n_classes = 2
    n_samples = 1000
    n_features = 48
    x, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_classes=n_classes,
                               n_informative=n_classes * 2,
                               random_state=1)
    x = x.astype(dp.float_)
    y = y.astype(dp.int_)
    n_train = int(0.8 * n_samples)
    x_train = x[:n_train]
    y_train = y[:n_train]
    x_test = x[n_train:]
    y_test = y[n_train:]

    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Setup feeds
    batch_size = 16
    train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
    test_feed = dp.Feed(x_test)

    # Setup neural network
    weight_decay = 1e-03
    net = dp.NeuralNetwork(
        layers=[
            dp.Affine(
                n_out=32,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=64,
                weights=dp.Parameter(dp.AutoFiller(),
                                     weight_decay=weight_decay),
            ),
            dp.ReLU(),
            dp.Affine(
                n_out=n_classes,
                weights=dp.Parameter(dp.AutoFiller()),
            ),
        ],
        loss=dp.SoftmaxCrossEntropy(),
    )

    # Train neural network
    learn_rule = dp.Momentum(learn_rate=0.01 / batch_size, momentum=0.9)
    trainer = dp.GradientDescent(net, train_feed, learn_rule)
    trainer.train_epochs(n_epochs=10)

    # Evaluate on test data
    error = np.mean(net.predict(test_feed) != y_test)
    print('Test error rate: %.4f' % error)
    assert error < 0.2
Example #2
def feeds(img_size,
          batch_size,
          epoch_size,
          n_augment=int(6e5),
          with_attributes=False,
          split='val'):
    dataset = dp.dataset.CelebA()
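    # Pick training/test index sets for the requested evaluation split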
    if split == 'val':
        train_idxs = dataset.train_idxs
        test_idxs = dataset.val_idxs
    elif split == 'test':
        train_idxs = np.hstack((dataset.train_idxs, dataset.val_idxs))
        test_idxs = dataset.test_idxs
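    # Load the training images (with augmentation) and transpose them to bc01 layout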
    x_train = celeba_imgs(img_size, img_idxs=train_idxs, n_augment=n_augment)
    x_train = np.transpose(x_train, (0, 3, 1, 2))
    x_test = celeba_imgs(img_size, img_idxs=test_idxs)
    x_test = img_transform(x_test, to_bc01=True)
    attributes = dataset.attributes.astype(dp.float_)
    y_train = attributes[train_idxs]
    y_test = attributes[test_idxs]
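    # Tile the attribute labels so they match the augmented image count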
    if n_augment > 0:
        y_train = y_train[np.arange(n_augment) % len(y_train)]

    if with_attributes:
        train_feed = SupervisedAugmentedFeed(x_train,
                                             y_train,
                                             batch_size=batch_size,
                                             epoch_size=epoch_size)
        test_feed = dp.SupervisedFeed(x_test, y_test, batch_size=batch_size)
    else:
        train_feed = AugmentedFeed(x_train, batch_size, epoch_size)
        test_feed = dp.Feed(x_test, batch_size)

    return train_feed, test_feed
Example #3
def feeds(split='test',
          batch_size=128,
          epoch_size=None,
          preprocessing='',
          augmentation='',
          supervised=False):
    x_train, y_train, x_val, y_val, x_test, y_test = arrays(split)
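    # Wrap the arrays in feeds; the supervised variants also carry the labels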
    if supervised:
        train_feed = ShuffledSupervisedFeed(x_train,
                                            y_train,
                                            batch_size=batch_size,
                                            epoch_size=epoch_size)
        val_feed = dp.SupervisedFeed(x_val, y_val, batch_size=batch_size)
        test_feed = dp.SupervisedFeed(x_test, y_test, batch_size=batch_size)
    else:
        train_feed = ShuffledFeed(x_train,
                                  batch_size=batch_size,
                                  epoch_size=epoch_size)
        val_feed = dp.Feed(x_val, batch_size=batch_size)
        test_feed = dp.Feed(x_test, batch_size=batch_size)
    return train_feed, val_feed, test_feed
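
For context, the returned feeds plug into the same trainer machinery shown in Example #1. A minimal sketch, assuming the supervised variant, a 10-class problem, and the dataset arrays available to `arrays()`; the single-layer `net` is only a placeholder, not the network used in the original project:

import deeppy as dp

train_feed, val_feed, test_feed = feeds(split='test', batch_size=128,
                                        supervised=True)
# Placeholder network: one affine layer with softmax loss (assumes 10 classes)
net = dp.NeuralNetwork(
    layers=[dp.Affine(n_out=10, weights=dp.Parameter(dp.AutoFiller()))],
    loss=dp.SoftmaxCrossEntropy(),
)
learn_rule = dp.Momentum(learn_rate=0.01 / 128, momentum=0.9)
trainer = dp.GradientDescent(net, train_feed, learn_rule)
trainer.train_epochs(n_epochs=5)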
Example #4
def supervised_feed(img_size, batch_size=128, epoch_size=250, val_fold=None):
    x_train, y_train = arrays('train')
    x_test, y_test = arrays('test')
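    # Resize the images and convert them to bc01 layout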
    x_train = resize_imgs(x_train, img_size)
    x_test = resize_imgs(x_test, img_size)
    x_train = img_transform(x_train, to_bc01=True)
    x_test = img_transform(x_test, to_bc01=True)

    # TODO use folds
    train_feed = ShuffledSupervisedFeed(
        x_train, y_train, batch_size=batch_size, epoch_size=epoch_size
    )
    test_feed = dp.SupervisedFeed(x_test, y_test, batch_size=batch_size)
    return train_feed, test_feed
Example #5
def feeds(alignment,
          crop_size,
          rescale_size,
          batch_size,
          epoch_size,
          n_augment=int(1e5),
          with_attributes=False,
          split='val'):
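    # Map the evaluation split to the corresponding LFW train/test partitions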
    if split == 'val':
        train_split = 'valtrain'
        test_split = 'val'
    elif split == 'test':
        train_split = 'testtrain'
        test_split = 'test'
    x_train, y_train = lfw_imgs_split(alignment, train_split)

    # Shuffle training images
    idxs = np.random.permutation(len(x_train))
    x_train = x_train[idxs]
    y_train = y_train[idxs]

    if n_augment > 0:
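        # Repeat the labels to match the number of augmented training images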
        y_train = y_train[np.arange(n_augment) % len(x_train)]
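    # Crop/rescale the training images (with augmentation) and transpose to bc01 layout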
    x_train = resize_imgs(x_train, crop_size, rescale_size, n_augment)
    x_train = np.transpose(x_train, (0, 3, 1, 2))

    x_test, y_test = lfw_imgs_split(alignment, test_split)
    x_test = resize_imgs(x_test, crop_size, rescale_size)
    x_test = img_transform(x_test, to_bc01=True)

    if with_attributes:
        train_feed = SupervisedAugmentedFeed(x_train,
                                             y_train,
                                             batch_size=batch_size,
                                             epoch_size=epoch_size)
        test_feed = dp.SupervisedFeed(x_test, y_test, batch_size=batch_size)
    else:
        train_feed = AugmentedFeed(x_train, batch_size, epoch_size)
        test_feed = dp.Feed(x_test, batch_size)

    return train_feed, test_feed
Example #6
import matplotlib.pyplot as plt
import deeppy as dp

# Fetch CIFAR10 data
dataset = dp.dataset.CIFAR10()
x_train, y_train, x_test, y_test = dataset.arrays(dp_dtypes=True)

# Normalize pixel intensities
scaler = dp.StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

# Prepare network feeds
batch_size = 128
train_feed = dp.SupervisedFeed(x_train, y_train, batch_size=batch_size)
test_feed = dp.Feed(x_test, batch_size=batch_size)


# Setup network
def conv_layer(n_filters):
    return dp.Convolution(
        n_filters=n_filters,
        filter_shape=(5, 5),
        border_mode='full',
        weights=dp.Parameter(dp.AutoFiller(gain=1.25), weight_decay=0.003),
    )


def pool_layer():
    return dp.Pool(