# Load raw MNIST and double the training set: each image is paired with an
# elastically-distorted copy (a standard MNIST augmentation trick).
mnist = MnistDataset()

logging.info("transforming images with elastic distortion")

expanded_train_set = []

for img, label in mnist.train_set():
    # Keep the original sample.
    expanded_train_set.append((img, label))
    # Images arrive flattened and scaled down by 256; elastic_distortion
    # works on a 2-D pixel grid, so rescale and reshape to 28x28 before
    # distorting, then undo both transforms afterwards.
    original_img = (img * 256).reshape((28, 28))
    transformed_img = (elastic_distortion(original_img) / 256).flatten()
    expanded_train_set.append((transformed_img, label))

# Shuffle so originals and their distorted twins are not adjacent.
# Use global_rand for consistency with the rest of this file (the other
# shuffle calls below use it), instead of env.numpy_rand.
global_rand.shuffle(expanded_train_set)

expanded_mnist = BasicDataset(train=expanded_train_set, valid=mnist.valid_set(), test=mnist.test_set())

# Lazy %-args: logging formats only if the record is actually emitted.
logging.info("expanded training data size: %d", len(expanded_train_set))

if __name__ == '__main__':
    # Two-hidden-layer MLP over flattened 28x28 images, ending in a
    # 10-way softmax over digit classes.
    model = NeuralClassifier(input_dim=28 * 28)
    model.stack(Dense(256, 'relu'),
                Dense(256, 'relu'),
                Dense(10, 'linear'),
                Softmax())

    trainer = MomentumTrainer(model)

    # Decays the learning rate over the course of training.
    annealer = LearningRateAnnealer()

    mnist = MiniBatches(expanded_mnist, batch_size=20)

    # The original block built the trainer but never started it, so the
    # script exited without training; kick off the training loop here
    # (mirrors the complete copy of this script further down the file).
    trainer.run(mnist, controllers=[annealer])
# Load raw MNIST and double the training set: each image is paired with an
# elastically-distorted copy (a standard MNIST augmentation trick).
mnist = MnistDataset()

logging.info("transforming images with elastic distortion")

expanded_train_set = []

for img, label in mnist.train_set():
    # Keep the original sample.
    expanded_train_set.append((img, label))
    # Images arrive flattened and scaled down by 256; elastic_distortion
    # works on a 2-D pixel grid, so rescale and reshape to 28x28 before
    # distorting, then undo both transforms afterwards.
    original_img = (img * 256).reshape((28, 28))
    transformed_img = (elastic_distortion(original_img) / 256).flatten()
    expanded_train_set.append((transformed_img, label))

# Shuffle so originals and their distorted twins are not adjacent.
global_rand.shuffle(expanded_train_set)

expanded_mnist = BasicDataset(train=expanded_train_set, valid=mnist.valid_set(), test=mnist.test_set())

# Lazy %-args: logging formats only if the record is actually emitted.
logging.info("expanded training data size: %d", len(expanded_train_set))

if __name__ == '__main__':
    # Two-hidden-layer MLP over flattened 28x28 images, ending in a
    # 10-way softmax over digit classes.
    model = NeuralClassifier(input_dim=28 * 28)
    model.stack(Dense(256, 'relu'),
                Dense(256, 'relu'),
                Dense(10, 'linear'),
                Softmax())

    trainer = MomentumTrainer(model)

    # Decays the learning rate over the course of training.
    annealer = LearningRateAnnealer()

    mnist = MiniBatches(expanded_mnist, batch_size=20)

    # The original block built the trainer but never started it, so the
    # script exited without training; kick off the training loop here
    # (mirrors the complete copy of this script further down the file).
    trainer.run(mnist, controllers=[annealer])
# Re-load raw MNIST before augmenting: by this point `mnist` may have been
# rebound to a MiniBatches wrapper (inside the __main__ guard above), whose
# train_set() yields batches rather than single images — iterating that here
# would break the per-image reshape below. The sibling copies of this
# section each start from a fresh MnistDataset; do the same.
mnist = MnistDataset()

logging.info("transforming images with elastic distortion")

expanded_train_set = []

for img, label in mnist.train_set():
    # Keep the original sample.
    expanded_train_set.append((img, label))
    # Images arrive flattened and scaled down by 256; elastic_distortion
    # works on a 2-D pixel grid, so rescale and reshape to 28x28 before
    # distorting, then undo both transforms afterwards.
    original_img = (img * 256).reshape((28, 28))
    transformed_img = (elastic_distortion(original_img) / 256).flatten()
    expanded_train_set.append((transformed_img, label))

# Shuffle so originals and their distorted twins are not adjacent.
global_rand.shuffle(expanded_train_set)

expanded_mnist = BasicDataset(train=expanded_train_set,
                              valid=mnist.valid_set(),
                              test=mnist.test_set())

# Lazy %-args: logging formats only if the record is actually emitted.
logging.info("expanded training data size: %d", len(expanded_train_set))

if __name__ == '__main__':
    # Feed-forward digit classifier: two ReLU hidden layers feeding a
    # 10-way softmax, trained with momentum SGD on 20-sample minibatches.
    model = NeuralClassifier(input_dim=28 * 28)
    layers = [Dense(256, 'relu'), Dense(256, 'relu'),
              Dense(10, 'linear'), Softmax()]
    model.stack(*layers)

    # Learning-rate annealing controller for the momentum trainer.
    annealer = LearningRateAnnealer()
    trainer = MomentumTrainer(model)

    mnist = MiniBatches(expanded_mnist, batch_size=20)

    trainer.run(mnist, controllers=[annealer])