Example #1
 def loadDataset(self):
     print('load MNIST dataset')
     dataset = data.load_mnist_data()
     dataset['data'] = dataset['data'].astype(np.float32)
     dataset['data'] /= 255    # scale pixels to [0, 1]; the result is plain NumPy arrays
     dataset['target'] = dataset['target'].astype(np.int32)
     return dataset
Example #2
 def test8_evaluate(self):
     (train_data, valid_data) = load_mnist_data()
     net = nnet.Network([784, 5, 10], debug=True)
     nt = net.evaluate(train_data[:100])
     self.assertEqual(nt, 14)
     nv = net.evaluate(valid_data[9000:])
     self.assertEqual(nv, 102)
Example #3
 def test_load_data(self):
     mnist = data.load_mnist_data()
     self.assertEqual(len(mnist['data']), 70000)
     self.assertEqual(len(mnist['data'][0]), 784)
     self.assertEqual(mnist['data'][0][0], 0)
     self.assertEqual(mnist['data'][0][156], 126)
     self.assertEqual(len(mnist['target']), 70000)
     self.assertEqual(mnist['target'][0], 5)
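
The assertions above pin down this loader's contract: a dict with 70,000 flattened 784-pixel images under 'data' and integer labels under 'target'. A minimal sketch of such a loader, assuming scikit-learn's fetch_openml as the backend (the original data module may fetch MNIST differently):

import numpy as np
from sklearn.datasets import fetch_openml

def load_mnist_data():
    # Fetch MNIST as 70,000 flat rows of 784 pixels each
    mnist = fetch_openml('mnist_784', version=1, as_frame=False)
    return {
        'data': mnist.data.astype(np.uint8),      # shape (70000, 784), values 0..255
        'target': mnist.target.astype(np.int32),  # shape (70000,), labels 0..9
    }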
Example #4
 def test9_train(self):
     (train_data, valid_data) = load_mnist_data()
     # reduce data sets for faster speed:
     train_data = train_data[:1000]
     valid_data = valid_data[:1000]
     net = nnet.Network([784, 12, 10], debug=True)
     net.train(train_data, valid_data, epochs=1, mini_batch_size=8, alpha=5)
     nv = net.evaluate(valid_data)
     self.assertEqual(nv, 503)
Example #5
 def test7_update_minibatch(self):
     (train_data, _) = load_mnist_data()
     train_data_vec = [(x, nnet.unit(y, 10)) for x, y in train_data]
     mini_batch = train_data_vec[10:20]
     net = nnet.Network([784, 15, 10], debug=True)
     net.update_mini_batch(mini_batch, 5.5)
     self.assertEqual(net.biases[1].shape[0], 10)
     self.assertEqual(net.biases[1].shape[1], 1)
     self.assertAlmostEqual(net.biases[1][9, 0], -0.537912934538857)
     self.assertEqual(net.weights[0].shape[0], 15)
     self.assertEqual(net.weights[0].shape[1], 784)
     self.assertAlmostEqual(net.weights[0][9, 333], 0.099262147771176)
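
The nnet tests in Examples #2, #4 and #5 use a different, tuple-returning load_mnist_data(): each split is a sequence of (x, y) pairs with x a flattened 784-pixel image and y an integer label. A hypothetical re-creation on top of the dict-returning loader, with the 60,000/10,000 split sizes inferred from the tests:

def load_mnist_pairs():
    # Hypothetical: wrap the dict-returning loader into (train, valid) pair lists
    mnist = load_mnist_data()
    xs = mnist['data'].astype(np.float32) / 255    # scale pixels to [0, 1]
    pairs = list(zip(xs, mnist['target']))
    return pairs[:60000], pairs[60000:]            # (train_data, valid_data)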
Example #6
    def read_dataset(self):
        print('load MNIST dataset')
        mnist = data.load_mnist_data()
        mnist['data'] = mnist['data'].astype(np.float32)
        mnist['data'] /= 255
        mnist['target'] = mnist['target'].astype(np.int32)

        # Number of training samples
        self.N = 60000
        self.x_train, self.x_test = np.split(mnist['data'],   [self.N])
        self.y_train, self.y_test = np.split(mnist['target'], [self.N])
        # Number of test samples
        self.N_test = self.y_test.size
Example #7
def main():
    # Load data
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and ngpu > 0) else "cpu")
    dataloader = load_mnist_data(classes=[0, 1],
                                 batch_size=batch_size,
                                 image_size=image_size)

    # Plot some training images
    #plot_data_batch(device, dataloader)

    # Create the generator
    netG = Generator_Quantum(n_qubits, q_depth, q_delta).to(device)

    # Create the Discriminator
    netD = Discriminator_FCNN(image_size * image_size, ngpu).to(device)

    train(device, netG, netD, dataloader)
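
Here load_mnist_data() is a project-specific helper that returns a PyTorch DataLoader restricted to the requested digit classes (0 and 1 above). A sketch of what it might look like with torchvision; the original helper is not shown, so treat this as an assumption:

from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms

def load_mnist_data(classes, batch_size, image_size):
    transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
    ])
    dataset = datasets.MNIST(root='./data', train=True, download=True,
                             transform=transform)
    # Keep only the samples whose label is in the requested classes
    idx = [i for i, y in enumerate(dataset.targets) if int(y) in classes]
    return DataLoader(Subset(dataset, idx), batch_size=batch_size, shuffle=True)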
Example #8
from chainer import optimizers

import numpy as np
from pylab import *
import time

import data

# Training parameters
batchsize = 100
n_epoch = 20


# Prepare the dataset
print("load MNIST dataset")
mnist = data.load_mnist_data()
mnist["data"] = mnist["data"].astype(np.float32)
mnist["data"] /= 255
mnist["target"] = mnist["target"].astype(np.int32)

# Split into training and test data
N = 60000
x_train, x_test = np.split(mnist["data"], [N])
y_train, y_test = np.split(mnist["target"], [N])
N_test = y_test.size

# Reshape the images into 4D tensors of shape (nsample, channel, height, width);
# MNIST has a single channel, so a plain reshape is enough
X_train = x_train.reshape((len(x_train), 1, 28, 28))
X_test = x_test.reshape((len(x_test), 1, 28, 28))
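
A quick sanity check on the reshape above, assuming the standard 60,000/10,000 MNIST split used here:

assert X_train.shape == (60000, 1, 28, 28)
assert X_test.shape == (10000, 1, 28, 28)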
Example #9
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Chainer-BriCA integration")
    parser.add_argument("--gpu", "-g", default=-1, type=int, help="GPU ID")

    args = parser.parse_args()

    use_gpu = False
    if args.gpu >= 0:
        print "Using gpu: {}".format(args.gpu)
        use_gpu = True
        cuda.get_device(args.gpu).use()

    batchsize = 100

    mnist = data.load_mnist_data()
    mnist['data'] = mnist['data'].astype(np.float32)
    mnist['data'] /= 255
    mnist['target'] = mnist['target'].astype(np.int32)

    N_train = 60000
    x_train, x_test = np.split(mnist['data'], [N_train])
    y_train, y_test = np.split(mnist['target'], [N_train])
    N_test = y_test.size

    with open('sda.pkl', 'rb') as f:
        stacked_autoencoder = pickle.load(f)

    scheduler = brica1.VirtualTimeSyncScheduler()
    agent = brica1.Agent(scheduler)
Example #10
from data import load_mnist_data
import nnet
import numpy as np

from matplotlib import pyplot as plt


def show(x):
    """ visualize a single training example """
    im = plt.imshow(np.reshape(1 - x, (28, 28)))
    im.set_cmap('gray')


print("loading MNIST dataset")
(train_data, valid_data) = load_mnist_data()

# the full data sets are used here; slice them (e.g. train_data[:1000]) for faster runs

# to see a training example, uncomment:
#x, y = train_data[123]
#show(x)
#plt.title("label = %d" % y)

# some initial params, not necessarily good ones
net = nnet.Network([784, 80, 10])

print("training")
net.train(train_data, valid_data, epochs=10, mini_batch_size=8, alpha=0.5)
Example #11
 def __init__(self):
     self._data = data.load_mnist_data()
     self._data['data'] = self._data['data'].astype(np.float32)
     self._data['data'] /= 255
     self._data['target'] = self._data['target'].astype(np.int32)
     self._last = 0
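
The _last counter suggests the class serves mini-batches sequentially. A hypothetical companion method, not part of the original class, might look like:

 def next_batch(self, batchsize):
     # Hypothetical: return the next `batchsize` samples, wrapping around
     start, stop = self._last, self._last + batchsize
     x = self._data['data'][start:stop]
     y = self._data['target'][start:stop]
     self._last = stop % len(self._data['target'])
     return x, y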
Example #12
                    help='Use tiny datasets for quick tests')
args = parser.parse_args()

batchsize = args.batchsize
n_epoch = args.epoch
n_latent = args.dimz

print('GPU: {}'.format(args.gpu))
print('# dim z: {}'.format(args.dimz))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')

# Prepare dataset
print('load MNIST dataset')
mnist = data.load_mnist_data(args.test)
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255
mnist['target'] = mnist['target'].astype(np.int32)

if args.test:
    N = 30
else:
    N = 60000

x_train, x_test = np.split(mnist['data'], [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size

# Prepare VAE model, defined in net.py
model = net.VAE(784, n_latent, 500)
Example #13
def main():
    # Reproducibility
    seed = settings["seed"]
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    print("Loading datasets...")
    circle_train_input, circle_train_target, \
        circle_test_input, circle_test_target = generate_circle_classification_dataset()
    mnist_train_input, mnist_train_target, mnist_test_input, mnist_test_target = load_mnist_data()
    fashion_mnist_train_input, fashion_mnist_train_target, \
        fashion_mnist_test_input, fashion_mnist_test_target = load_fashion_mnist_data()
    print("Done!")

    results_dir = settings["results_dir"]

    # Whether to run the mini-batch size and learning rate experiment (False if it was already completed)
    # WARNING: Takes a very long time
    run_mini_batch_size_lr_experiments = False

    if run_mini_batch_size_lr_experiments:
        print("Running the mini-batch size and learning rate experiments...")
        circle_experiment_log, circle_training_logs = \
            run_mini_batch_size_experiment("circle", circle_train_input, circle_train_target,
                                           circle_test_input, circle_test_target)
        circle_experiment_log.to_csv(results_dir + "circle_mini_batch_size_lr_experiment_log.csv",
                                     sep=",", header=True, index=False, encoding="utf-8")
        circle_training_logs.to_csv(results_dir + "circle_mini_batch_size_lr_training_logs.csv",
                                    sep=",", header=True, index=False, encoding="utf-8")

        mnist_experiment_log, mnist_training_logs = \
            run_mini_batch_size_experiment("mnist", mnist_train_input, mnist_train_target,
                                           mnist_test_input, mnist_test_target)
        mnist_experiment_log.to_csv(results_dir + "mnist_mini_batch_size_lr_experiment_log.csv",
                                    sep=",", header=True, index=False, encoding="utf-8")
        mnist_training_logs.to_csv(results_dir + "mnist_mini_batch_size_lr_training_logs.csv",
                                   sep=",", header=True, index=False, encoding="utf-8")

        fashion_mnist_experiment_log, fashion_mnist_training_logs = \
            run_mini_batch_size_experiment("fashion_mnist", fashion_mnist_train_input, fashion_mnist_train_target,
                                           fashion_mnist_test_input, fashion_mnist_test_target)
        fashion_mnist_experiment_log.to_csv(results_dir + "fashion_mnist_mini_batch_size_lr_experiment_log.csv",
                                            sep=",", header=True, index=False, encoding="utf-8")
        fashion_mnist_training_logs.to_csv(results_dir + "fashion_mnist_mini_batch_size_lr_training_logs.csv",
                                           sep=",", header=True, index=False, encoding="utf-8")
        print("Done!")

    else:
        print("Loading the first experiment logs...")
        circle_experiment_log = pd.read_csv(results_dir + "circle_mini_batch_size_lr_experiment_log.csv",
                                            sep=",", header=0, index_col=None, encoding="utf-8")
        circle_training_logs = pd.read_csv(results_dir + "circle_mini_batch_size_lr_training_logs.csv",
                                           sep=",", header=0, index_col=None, encoding="utf-8")
        mnist_experiment_log = pd.read_csv(results_dir + "mnist_mini_batch_size_lr_experiment_log.csv",
                                           sep=",", header=0, index_col=None, encoding="utf-8")
        mnist_training_logs = pd.read_csv(results_dir + "mnist_mini_batch_size_lr_training_logs.csv",
                                          sep=",", header=0, index_col=None, encoding="utf-8")
        fashion_mnist_experiment_log = pd.read_csv(results_dir + "fashion_mnist_mini_batch_size_lr_experiment_log.csv",
                                                   sep=",", header=0, index_col=None, encoding="utf-8")
        fashion_mnist_training_logs = pd.read_csv(results_dir + "fashion_mnist_mini_batch_size_lr_training_logs.csv",
                                                  sep=",", header=0, index_col=None, encoding="utf-8")
        print("Done!")

    print("Computing combined score for experiment conditions...")
    circle_experiment_log = compute_combined_score_for_experiment_conditions(circle_experiment_log)
    mnist_experiment_log = compute_combined_score_for_experiment_conditions(mnist_experiment_log)
    fashion_mnist_experiment_log = compute_combined_score_for_experiment_conditions(fashion_mnist_experiment_log)
    print("Done!")

    print("Finding optimal mini-batch sizes and learning rates...")
    circle_best_mini_batch_sizes, circle_best_lrs = \
        extract_optimal_parameters_from_experiment_log(circle_experiment_log)
    mnist_best_mini_batch_sizes, mnist_best_lrs = \
        extract_optimal_parameters_from_experiment_log(mnist_experiment_log)
    fashion_mnist_best_mini_batch_sizes, fashion_mnist_best_lrs = \
        extract_optimal_parameters_from_experiment_log(fashion_mnist_experiment_log)
    print("Done!")

    def merge_experiment_data_from_different_datasets(circle_data, mnist_data, fashion_mnist_data):
        """
        Helper function to merge experiment results from the different datasets into one combined log

        :param circle_data: results from an experiment on the 2D circle dataset, pandas.DataFrame
        :param mnist_data: results from an experiment on the MNIST dataset, pandas.DataFrame
        :param fashion_mnist_data: results from an experiment on the FashionMNIST dataset, pandas.DataFrame

        :returns: merged experiment log, pandas.DataFrame
        """

        return pd.concat((circle_data, mnist_data, fashion_mnist_data), axis=0, ignore_index=True)

    print("Generating the plots for the first experiment...")
    experiment_log = merge_experiment_data_from_different_datasets(circle_experiment_log,
                                                                   mnist_experiment_log,
                                                                   fashion_mnist_experiment_log)
    training_logs = merge_experiment_data_from_different_datasets(circle_training_logs,
                                                                  mnist_training_logs,
                                                                  fashion_mnist_training_logs)
    visualize_results_first_experiment(experiment_log, training_logs)
    print("Done!")

    # Whether to run the convergence region experiment (False if it was already completed)
    # WARNING: Takes a long time
    run_convergence_region_experiments = False

    if run_convergence_region_experiments:
        print("Running the convergence region experiments...")
        circle_experiment_log = run_convergence_region_experiment("circle", circle_train_input, circle_train_target,
                                                                  circle_best_mini_batch_sizes, circle_best_lrs)
        circle_experiment_log.to_csv(results_dir + "circle_convergence_region_experiment_log.csv",
                                     sep=",", header=True, index=False, encoding="utf-8")

        mnist_experiment_log = run_convergence_region_experiment("mnist", mnist_train_input, mnist_train_target,
                                                                 mnist_best_mini_batch_sizes, mnist_best_lrs)
        mnist_experiment_log.to_csv(results_dir + "mnist_convergence_region_experiment_log.csv",
                                    sep=",", header=True, index=False, encoding="utf-8")

        fashion_mnist_experiment_log = \
            run_convergence_region_experiment("fashion_mnist", fashion_mnist_train_input, fashion_mnist_train_target,
                                              fashion_mnist_best_mini_batch_sizes, fashion_mnist_best_lrs)
        fashion_mnist_experiment_log.to_csv(results_dir + "fashion_mnist_convergence_region_experiment_log.csv",
                                            sep=",", header=True, index=False, encoding="utf-8")
        print("Done!")
    else:
        print("Loading the second experiment logs...")
        circle_experiment_log = pd.read_csv(results_dir + "circle_convergence_region_experiment_log.csv",
                                            sep=",", header=0, index_col=None, encoding="utf-8")
        mnist_experiment_log = pd.read_csv(results_dir + "mnist_convergence_region_experiment_log.csv",
                                           sep=",", header=0, index_col=None, encoding="utf-8")
        fashion_mnist_experiment_log = pd.read_csv(results_dir + "fashion_mnist_convergence_region_experiment_log.csv",
                                                   sep=",", header=0, index_col=None, encoding="utf-8")
        print("Done!")

    print("Generating the plots for the second experiment...")
    experiment_log = merge_experiment_data_from_different_datasets(circle_experiment_log,
                                                                   mnist_experiment_log,
                                                                   fashion_mnist_experiment_log)
    visualize_results_second_experiment(experiment_log)
    print("Done!")
Example #14
def train(trainL, trainA, testL, testA):
    trainLoss = []
    trainAccuracy = []
    testLoss = []
    testAccuracy = []
    l1w = []
    l2w = []
    l3w = []
    print('load MNIST dataset')
    mnist = data.load_mnist_data()
    mnist['data'] = mnist['data'].astype(np.float32)
    mnist['data'] /= 255
    mnist['target'] = mnist['target'].astype(np.int32)

    N = 1000
    lsizes = [784, 50, 50, 10]
    x_train, x_test = np.split(mnist['data'], [N])
    y_train, y_test = np.split(mnist['target'], [N])
    N_test = y_test.size

    # Prepare multi-layer perceptron model, defined in net.py
    if args.net == 'simple':
        #model = net.MnistMLP(lsizes)
        model = net.MnistMLP(layer_sizes=lsizes)
        if args.gpu >= 0:
            cuda.get_device(args.gpu).use()
            model.to_gpu()
        xp = np if args.gpu < 0 else cuda.cupy
    elif args.net == 'parallel':
        cuda.check_cuda_available()
        model = L.Classifier(net.MnistMLPParallel(784, n_units, 10))
        xp = cuda.cupy
    # Setup optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    # Init/Resume
    if args.initmodel:
        print('Load model from', args.initmodel)
        serializers.load_npz(args.initmodel, model)
    if args.resume:
        print('Load optimizer state from', args.resume)
        serializers.load_npz(args.resume, optimizer)
    # Pretrain loop
    print("start pretrain")
    epo = p_epoch
    for j in six.moves.range(1, len(lsizes)):
        if j == len(lsizes) - 1:
            model.setfinetuning()
            print("start finetuning")
            epo = n_epoch
        for epoch in six.moves.range(1, epo + 1):
            print('layer ', j, 'p_epoch ', epoch)
            perm = np.random.permutation(N)
            sum_accuracy = 0
            sum_loss = 0
            for i in six.moves.range(0, N, batchsize):
                x = chainer.Variable(xp.asarray(x_train[perm[i:i +
                                                             batchsize]]))
                t = chainer.Variable(xp.asarray(y_train[perm[i:i +
                                                             batchsize]]))
                optimizer.update(model, x, t, j)
                sum_loss += float(model.loss.data) * len(t.data)
                if not model.pretrain:
                    sum_accuracy += float(model.accuracy.data) * len(t.data)
            if model.pretrain:
                print('Pretrain: train mean loss={}'.format(sum_loss / N))
            else:
                print('Finetune: train mean loss={}, accuracy={}'.format(
                    sum_loss / N, sum_accuracy / N))
            trainLoss.append(sum_loss / N)
            trainAccuracy.append(sum_accuracy / N)
            # evaluation
            sum_accuracy = 0
            sum_loss = 0
            model.train = False
            for i in six.moves.range(0, N_test, batchsize):
                x = chainer.Variable(xp.asarray(x_test[i:i + batchsize]),
                                     volatile='on')
                t = chainer.Variable(xp.asarray(y_test[i:i + batchsize]),
                                     volatile='on')
                loss = model(x, t, j)
                sum_loss += float(loss.data) * len(t.data)
                if not model.pretrain:
                    sum_accuracy += float(model.accuracy.data) * len(t.data)
            if model.pretrain:
                print('Pretrain: test  mean loss={}'.format(sum_loss / N_test))
            else:
                print('Finetune: test  mean loss={}, accuracy={}'.format(
                    sum_loss / N_test, sum_accuracy / N_test))
            testLoss.append(sum_loss / N_test)
            testAccuracy.append(sum_accuracy / N_test)
            model.train = True

    # Save the training curves (CSV), then the model and the optimizer
    savecsv(trainLoss, trainL)
    savecsv(trainAccuracy, trainA)
    savecsv(testLoss, testL)
    savecsv(testAccuracy, testA)

    print('save the model')
    serializers.save_npz('mlp.model', model)
    print('save the optimizer')
    serializers.save_npz('mlp.state', optimizer)
Example #15
 def load_mnist_binary_data(self):
     mnist = data.load_mnist_data()
     mnist['data'] = mnist['data'].astype(np.float32)
     mnist['data'] /= 255
     mnist['target'] = mnist['target'].astype(np.int32)
     return mnist