Example #1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# get_mnist and gaussian_classifier are repository-local helpers; their
# import paths are not shown in the source (a sketch of gaussian_classifier
# follows this example).


def main():
    (x_train, y_train), (x_test, y_test) = get_mnist("data/").load_data()

    # 4.1
    print("Gaussian classifier with mnist dataset reduced by PCA")
    pca_components = list(range(1, 15))
    pca_results = []
    for pca_component in pca_components:
        pca = PCA(n_components=pca_component)
        pca.fit(x_train)
        x_train_tmp = pca.transform(x_train)
        x_test_tmp = pca.transform(x_test)
        gauss = gaussian_classifier()
        gauss.train(x_train_tmp, y_train)
        yhat = gauss.predict(x_test_tmp)
        pca_results.append(np.mean(y_test != yhat) * 100)
    # Save plot
    fig, ax = plt.subplots()
    plt.title("PCA components vs error rate in Gaussian classifier")
    plt.plot(pca_components,
             pca_results,
             label="Error rate on test set alpha=1.0")
    plt.legend(loc="upper left")
    plt.xticks(pca_components, rotation=70)
    plt.xlabel("PCA Dimensions")
    plt.ylabel("Eror rate")
    plt.savefig("scripts/results/gc_pca_4-1.png")
    # 4.2
    plt.clf()  # start a fresh figure so 4.2 does not draw over the 4.1 plot
    print(
        "Gaussian classifier with mnist dataset reduced by PCA with smoothing")
    alphas = [0.01, 0.5, 0.9]
    plt.title(
        "PCA components vs error rate in Gaussian classifier with smoothing")
    plt.plot(pca_components,
             pca_results,
             label="Error rate on test set alpha=1.0")
    for alpha in alphas:
        pca_smooth_results = []
        for pca_component in pca_components:
            pca = PCA(n_components=pca_component)
            pca.fit(x_train)
            x_train_tmp = pca.transform(x_train)
            x_test_tmp = pca.transform(x_test)
            gauss = gaussian_classifier()
            gauss.train(x_train_tmp, y_train, alpha=alpha)
            yhat = gauss.predict(x_test_tmp)
            pca_smooth_results.append(np.mean(y_test != yhat) * 100)
        plt.plot(pca_components,
                 pca_smooth_results,
                 marker="o",
                 label=f"Error rate on test set alpha={alpha}")
    # Save plot
    plt.legend(loc="upper left")
    plt.xlabel("PCA Dimensions")
    plt.ylabel("Eror rate")
    plt.xticks(pca_components, rotation=70)
    plt.savefig("scripts/results/gc_pca_4-2.png")
Example #2
import numpy as np
from keras import backend as K
from keras.layers import Input
from keras.models import Model
# get_mnist, Dataset, data_generator, discriminator_model and
# generator_model are repository-local helpers (not shown here).


def wgan_gp_loss(inputs, lamb=10.0):
    # The enclosing signature is an assumption; only the body appeared
    # in the source. `lamb` weights the WGAN-GP gradient penalty.
    x, x_score, x_real_score, x_fake_score, x_fake_ng_score, y_pred = inputs
    grad = K.gradients(x_score, [x])[0]
    grad_norm = K.sqrt(K.sum(K.square(grad), axis=[1, 2, 3]))
    grad_pen = K.mean(K.square(grad_norm - 1)) * lamb
    d_loss = K.mean(x_real_score - x_fake_ng_score)
    g_loss = K.mean(x_fake_score - x_fake_ng_score)
    return K.mean(grad_pen + d_loss + g_loss)

if __name__ == '__main__':
    batch_size = 128
    init_lr = 1e-5
    img_size = (28, 28, 1)
    dst_img_size = (140, 140)
    latent_dim = 100

    (X_train, Y_train), _ = get_mnist()
    X_train = X_train[Y_train == 8]  # keep only the digit "8"
    X_train = X_train.astype('float32') / 127.5 - 1  # scale to [-1, 1]
    X_train = np.expand_dims(X_train, 3)  # add the channel axis
    dataset = Dataset(X_train)
    generator = data_generator(dataset, batch_size=batch_size, shuffle=True)

    d_input = Input(shape=img_size, dtype='float32')
    d_out = discriminator_model(d_input)
    d_model = Model(d_input, d_out)

    g_input = Input(shape=(latent_dim, ), dtype='float32')
    g_out = generator_model(g_input)
    g_model = Model(g_input, g_out)

    x_in = Input(shape=img_size, dtype='float32')
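
The loss above penalizes the gradient of the critic score with respect to its input x, following WGAN-GP. In the usual formulation the penalty is evaluated at random interpolations between real and generated samples; here is a minimal sketch of that construction (the function name and wiring are assumptions; it assumes Keras 2.x on the TensorFlow 1.x backend, where K.gradients traces a static graph):

def gradient_penalty(d_model, x_real, x_fake, lamb=10.0):
    # One random interpolation coefficient per example in the batch.
    eps = K.random_uniform(shape=[K.shape(x_real)[0], 1, 1, 1])
    x_interp = eps * x_real + (1.0 - eps) * x_fake
    score = d_model(x_interp)
    # Penalize deviations of the critic's input-gradient norm from 1.
    grad = K.gradients(score, [x_interp])[0]
    grad_norm = K.sqrt(K.sum(K.square(grad), axis=[1, 2, 3]))
    return lamb * K.mean(K.square(grad_norm - 1.0))
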
Example #3
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# get_mnist and gmm_classifier are repository-local helpers; their import
# paths are not shown in the source (a sketch of gmm_classifier follows
# this example).


def main():
    (x_train, y_train), (x_test, y_test) = get_mnist("data/").load_data()
    # 5.2
    pca_components = list(range(2, 30))
    print("GMM with mnist dataset reduced by PCA with smoothing")
    alphas = [0.1, 0.5, 0.9]
    for alpha in alphas:
        pca_smooth_results = []
        for pca_component in pca_components:
            pca = PCA(n_components=pca_component)
            pca.fit(x_train)
            x_train_tmp = pca.transform(x_train)
            x_test_tmp = pca.transform(x_test)
            gmm = gmm_classifier()
            gmm.train(x_train_tmp,
                      y_train,
                      x_test_tmp,
                      y_test,
                      1,
                      alpha=alpha)
            yhat = gmm.predict(x_test_tmp)
            pca_smooth_results.append(np.mean(y_test != yhat) * 100)
        plt.plot(pca_components,
                 pca_smooth_results,
                 marker="o",
                 label=f"Error rate on test set with alpha={alpha} and k=1")
    # Save plot
    plt.title("PCA components vs error rate in GMM - K=1")
    plt.legend(loc="upper left")
    plt.xticks(pca_components, rotation=70)
    plt.xlabel("PCA Dimensions")
    plt.ylabel("Eror rate")
    plt.savefig("scripts/results/gmm_pca_5-2.png")
    # 5.3
    plt.clf()
    print("GMM with mnist dataset reduced by PCA, varying the number of mixtures")
    ks = [1, 2, 3, 4, 5, 6, 7]
    pca_smooth_results = []
    # PCA is fixed at 30 components, so fit it once outside the loop.
    pca = PCA(n_components=30)
    pca.fit(x_train)
    x_train_tmp = pca.transform(x_train)
    x_test_tmp = pca.transform(x_test)
    for k in ks:
        gmm = gmm_classifier()
        gmm.train(x_train_tmp, y_train, x_test_tmp, y_test, k, alpha=0.9)
        yhat = gmm.predict(x_test_tmp)
        pca_smooth_results.append(np.mean(y_test != yhat) * 100)
    plt.plot(ks,
             pca_smooth_results,
             marker="o",
             label="Error rate on test set with alpha=0.9")
    # Save plot
    plt.title(
        "Number of mixtures vs error rate in GMM - Alpha=0.9 - PCA to 30dim")
    plt.legend(loc="upper left")
    plt.xticks(ks, rotation=70)
    plt.xlabel("Number of mixtures")
    plt.ylabel("Eror rate")
    plt.savefig("scripts/results/gmm_pca_5-3.png")
Example #4
import argparse

import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
# MLP and mnist_dataset are repository-local (a sketch of MLP follows
# this example).


def main():
    parser = argparse.ArgumentParser(description='ChainerMN example: MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--communicator',
                        type=str,
                        default='pure_nccl',
                        help='Type of communicator')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true', help='Use GPU')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()

    # Prepare ChainerMN communicator.

    if args.gpu:
        if args.communicator == 'naive':
            print("Error: 'naive' communicator does not support GPU.\n")
            exit(-1)
        comm = chainermn.create_communicator(args.communicator)
        device = comm.intra_rank
    else:
        if args.communicator != 'naive':
            print('Warning: using naive communicator '
                  'because only naive supports CPU-only execution')
        comm = chainermn.create_communicator('naive')
        device = -1

    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num units: {}'.format(args.unit))
        print('Minibatch size: {}'.format(args.batchsize))
        print('Num epochs: {}'.format(args.epoch))
        print('==========================================')

    model = L.Classifier(MLP(args.unit, 10))
    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()

    # Create a multi node optimizer from a standard Chainer optimizer.
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)

    # Split and distribute the dataset. Only worker 0 loads the whole dataset.
    # Datasets of worker 0 are evenly split and distributed to all workers.
    if comm.rank == 0:
        train, test = mnist_dataset.get_mnist()
    else:
        train, test = None, None
    train = chainermn.scatter_dataset(train, comm, shuffle=True)
    test = chainermn.scatter_dataset(test, comm, shuffle=True)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Create a multi node evaluator from a standard Chainer evaluator.
    evaluator = extensions.Evaluator(test_iter, model, device=device)
    evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
    trainer.extend(evaluator)

    # Some display and output extensions are necessary only for one worker.
    # (Otherwise, there would just be repeated outputs.)
    if comm.rank == 0:
        trainer.extend(extensions.dump_graph('main/loss'))
        trainer.extend(extensions.LogReport())
        trainer.extend(
            extensions.PrintReport([
                'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
                'validation/main/accuracy', 'elapsed_time'
            ]))
        # trainer.extend(extensions.ProgressBar())

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
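
MLP is repository-local and shared by the Chainer examples here (passed to L.Classifier as MLP(args.unit, 10)). A minimal sketch following the standard Chainer MNIST example, under the assumption that the repository uses the same three-layer architecture:

import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            # Input size is inferred on the first forward pass (None).
            self.l1 = L.Linear(None, n_units)
            self.l2 = L.Linear(None, n_units)
            self.l3 = L.Linear(None, n_out)

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)

ChainerMN runs one MPI process per worker, so this script is typically launched with something like mpiexec -n 4 python <this script> --gpu (assuming an MPI installation and one GPU per process; comm.intra_rank then assigns each process a distinct local GPU).
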
Example #5
import argparse

import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
# MLP and mnist_dataset are repository-local helpers (see the MLP sketch
# after Example #4).


def main():
    # This script is almost identical to train_mnist.py. The only difference is
    # that this script uses data-parallel computation on two GPUs.
    # See train_mnist.py for more details.
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=400,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', action='store_true', help='Use gpu')
    parser.add_argument('--gpu_number',
                        '-n',
                        type=int,
                        default=1,
                        help='Number of gpus')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()

    if args.gpu:
        print("Using the GPU environment; set the number of GPUs "
              "to use with --gpu_number.")
        if args.gpu_number == 2:
            print('===================================')
            use_gpu = {'main': 0, 'second': 1}
            print('# use GPU: 2')
            print('# unit: {}'.format(args.unit))
            print('# minibatch-size: {}'.format(args.batchsize))
            print('# epoch: {}'.format(args.epoch))
            print('===================================')
        elif args.gpu_number == 1:
            print('===================================')
            use_gpu = {'main': 0}
            print('# use GPU: 1')
            print('# unit: {}'.format(args.unit))
            print('# minibatch-size: {}'.format(args.batchsize))
            print('# epoch: {}'.format(args.epoch))
            print('===================================')
        else:
            raise ValueError(
                'please set the correct number of gpus you need to use!')
    else:
        raise ValueError('This script requires a GPU; run it with --gpu.')

    chainer.backends.cuda.get_device_from_id(0).use()

    model = L.Classifier(MLP(args.unit, 10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train, test = mnist_dataset.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # ParallelUpdater implements the data-parallel gradient computation on
    # multiple GPUs. It accepts "devices" argument that specifies which GPU to
    # use.
    updater = training.updaters.ParallelUpdater(
        train_iter,
        optimizer,
        # The device named 'main' is used as the master; the others are
        # workers. Names other than 'main' are arbitrary.
        devices=use_gpu,
    )
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    trainer.extend(extensions.Evaluator(test_iter, model, device=0))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))
    # trainer.extend(extensions.ProgressBar())

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
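
ParallelUpdater splits each minibatch of args.batchsize across the devices in the dict, computes gradients on every GPU in parallel, and accumulates them on the 'main' device before the optimizer step, so the effective batch size is unchanged. A plausible invocation under the flags defined above (the script file name is an assumption): python train_mnist_data_parallel.py --gpu --gpu_number 2.
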
Example #6
import argparse

import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
# MLP and mnist_dataset are repository-local helpers (see the MLP sketch
# after Example #4).


def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    parser.add_argument('--noplot',
                        dest='plot',
                        action='store_false',
                        help='Disable PlotReport extension')
    args = parser.parse_args()

    print('=============================================')
    if args.gpu < 0:
        print('# gpu = {}, running on the CPU'.format(args.gpu))
    else:
        print('# gpu = {}, running on the GPU'.format(args.gpu))
    print('# number of units: {}'.format(args.unit))
    print('# minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('=============================================')

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = L.Classifier(MLP(args.unit, 10))
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = mnist_dataset.get_mnist()

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # Set up a trainer
    updater = training.updaters.StandardUpdater(train_iter,
                                                optimizer,
                                                device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir (controlled by --noplot)
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    # Print a progress bar to stdout
    # trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
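
A plausible invocation under the flags defined above (the script file name is an assumption): python train_mnist.py --gpu 0 --epoch 20. Omit --gpu to run on the CPU; with the default --frequency -1, a snapshot is taken once, at the final epoch.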