Example #1
def test_cgan_fashion_mnist(show_figure=False, block_figure_on_end=False):
    print("========== Test CGAN on Fashion-MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_fashion_mnist()
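    # Assuming demo.load_fashion_mnist() returns pixel values in [0, 1], the
    # rescaling below maps them to [-1, 1] (x / 0.5 - 1. is the same as 2*x - 1).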
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    loss_display = Display(layout=(1, 1),
                           dpi='auto',
                           show=show_figure,
                           block_on_end=block_figure_on_end,
                           monitor=[
                               {
                                   'metrics': ['d_loss', 'g_loss'],
                                   'type': 'line',
                                   'labels': ["discriminator loss", "generator loss"],
                                   'title': "CGAN: Losses",
                                   'xlabel': "epoch",
                                   'ylabel': "loss",
                               },
                           ])
    sample_display = Display(layout=(1, 1),
                             dpi='auto',
                             figsize=(10, 10),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['x_samples'],
                                     'title': "CGAN: Generated data",
                                     'type': 'img',
                                     'num_samples': 100,
                                     'tile_shape': (10, 10),
                                 },
                             ])

    model = CGAN(
        model_name="CGAN_Fashion-MNIST",
        num_z=10,  # set to 100 for a full run
        z_prior=Uniform1D(low=-1.0, high=1.0),
        img_size=(28, 28, 1),
        batch_size=64,  # already the full-run value
        num_conv_layers=3,  # already the full-run value
        num_gen_feature_maps=2,  # set to 64 for a full run
        num_dis_feature_maps=2,  # set to 64 for a full run
        metrics=['d_loss', 'g_loss'],
        callbacks=[loss_display, sample_display],
        num_epochs=1,  # set to 100 for a full run
        random_state=random_seed(),
        verbose=1)

    model.fit(x_train, y_train)
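
    # Hedged note: unlike the unconditional GAN examples below, the CGAN is fit
    # on both the images and their class labels, which supply the conditioning
    # signal. A quick label sanity check with plain NumPy (no male API assumed):
    num_classes = len(np.unique(y_train))
    print("Number of classes = {}".format(num_classes))  # Fashion-MNIST has 10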
Example #2
def test_wgan_gp_fashion_mnist(show_figure=False, block_figure_on_end=False):
    print("========== Test WGAN-GP on Fashion-MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_fashion_mnist()
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    root_dir = os.path.join(model_dir(), "male/WGAN-GP/FashionMNIST")
    loss_display = Display(layout=(1, 1),
                           dpi='auto',
                           show=show_figure,
                           block_on_end=block_figure_on_end,
                           filepath=[os.path.join(root_dir, "loss/loss_{epoch:04d}.png"),
                                     os.path.join(root_dir, "loss/loss_{epoch:04d}.pdf")],
                           monitor=[{'metrics': ['d_loss', 'g_loss'],
                                     'type': 'line',
                                     'labels': ["discriminator loss", "generator loss"],
                                     'title': "Losses",
                                     'xlabel': "epoch",
                                     'ylabel': "loss",
                                     },
                                    ])
    sample_display = Display(layout=(1, 1),
                             dpi='auto',
                             figsize=(10, 10),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[{'metrics': ['x_samples'],
                                       'title': "Generated data",
                                       'type': 'img',
                                       'num_samples': 100,
                                       'tile_shape': (10, 10),
                                       },
                                      ])

    model = WGAN_GP(model_name="WGAN_GP_FashionMNIST",
                    num_z=10,  # set to 100 for a full run
                    z_prior=Uniform1D(low=-1.0, high=1.0),
                    img_size=(28, 28, 1),
                    batch_size=16,  # set to 64 for a full run
                    num_conv_layers=3,  # already the full-run value
                    num_gen_feature_maps=4,  # set to 64 for a full run
                    num_dis_feature_maps=4,  # set to 64 for a full run
                    metrics=['d_loss', 'g_loss'],
                    callbacks=[loss_display, sample_display],
                    num_epochs=4,  # set to 100 for a full run
                    random_state=random_seed(),
                    log_path=os.path.join(root_dir, "logs"),
                    verbose=1)

    model.fit(x_train)

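    # Second run: the same reduced architecture, but with the latent code z
    # drawn from a standard Gaussian prior instead of the uniform box prior above.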
    model = WGAN_GP(model_name="WGAN_GP_FashionMNIST",
                    num_z=10,  # set to 100 for a full run
                    z_prior=Gaussian1D(mu=0.0, sigma=1.0),
                    img_size=(28, 28, 1),
                    batch_size=32,  # set to 64 for a full run
                    num_conv_layers=3,  # already the full-run value
                    num_gen_feature_maps=4,  # set to 64 for a full run
                    num_dis_feature_maps=4,  # set to 64 for a full run
                    metrics=['d_loss', 'g_loss'],
                    callbacks=[loss_display, sample_display],
                    num_epochs=4,  # set to 100 for a full run
                    random_state=random_seed(),
                    log_path=os.path.join(root_dir, "logs"),
                    verbose=1)

    model.fit(x_train)
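
    # Hedged sketch: the loss-figure filepaths above are str.format-style
    # templates; the Display callback is assumed to substitute the current
    # epoch, so epoch 3 would land in ".../loss/loss_0003.png" and ".pdf".
    example_path = os.path.join(root_dir, "loss/loss_{epoch:04d}.png")
    print(example_path.format(epoch=3))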
Example #3
def test_fashion_mnist(show=False, block_figure_on_end=False):
    print("========== Test Fashion-MNIST dataset using GLM ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_fashion_mnist()
    print("Number of training samples = {}".format(x_train.shape[0]))
    print("Number of testing samples = {}".format(x_test.shape[0]))

    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    early_stopping = EarlyStopping(monitor='val_err', patience=2, verbose=1)
    filepath = os.path.join(
        model_dir(),
        "male/datasets/fashion_mnist_{epoch:04d}_{val_err:.6f}.pkl")
    checkpoint = ModelCheckpoint(filepath,
                                 mode='min',
                                 monitor='val_err',
                                 verbose=0,
                                 save_best_only=True)
    loss_display = Display(title="Learning curves",
                           dpi='auto',
                           layout=(3, 1),
                           freq=1,
                           show=show,
                           block_on_end=block_figure_on_end,
                           monitor=[
                               {
                                   'metrics': ['loss', 'val_loss'],
                                   'type': 'line',
                                   'labels': ["training loss", "validation loss"],
                                   'title': "Learning losses",
                                   'xlabel': "epoch",
                                   'ylabel': "loss",
                               },
                               {
                                   'metrics': ['err', 'val_err'],
                                   'type': 'line',
                                   'title': "Learning errors",
                                   'xlabel': "epoch",
                                   'ylabel': "error",
                               },
                               {
                                   'metrics': ['err'],
                                   'type': 'line',
                                   'labels': ["training error"],
                                   'title': "Learning errors",
                                   'xlabel': "epoch",
                                   'ylabel': "error",
                               },
                           ])

    weight_display = Display(title="Filters",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(3, 24),
                             freq=1,
                             show=show,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['weights'],
                                     'title': "Learned weights",
                                     'type': 'img',
                                     'disp_dim': (28, 28),
                                     'tile_shape': (10, 1),
                                 },
                             ])

    clf = GLM(
        model_name="GLM_softmax_cv",
        link='softmax',
        loss='softmax',
        optimizer='sgd',
        num_epochs=4,
        batch_size=10,
        task='classification',
        metrics=['loss', 'err'],
        callbacks=[early_stopping, checkpoint, loss_display, weight_display],
        cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
        random_state=random_seed(),
        verbose=1)

    clf.fit(x, y)

    train_err = 1.0 - clf.score(x_train, y_train)
    test_err = 1.0 - clf.score(x_test, y_test)
    print("Training error = %.4f" % train_err)
    print("Testing error = %.4f" % test_err)
Example #4
def test_gan_fashion_mnist(show_figure=False, block_figure_on_end=False):
    print("========== Test GAN on Fashion-MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_fashion_mnist()
    x_train = x_train.astype(np.float32)
    x_test = x_test.astype(np.float32)

    loss_display = Display(layout=(1, 1),
                           dpi='auto',
                           show=show_figure,
                           block_on_end=block_figure_on_end,
                           monitor=[
                               {
                                   'metrics': ['d_loss', 'g_loss'],
                                   'type': 'line',
                                   'labels': ["discriminator loss", "generator loss"],
                                   'title': "Losses",
                                   'xlabel': "epoch",
                                   'ylabel': "loss",
                               },
                           ])
    sample_display = Display(layout=(1, 1),
                             dpi='auto',
                             figsize=(10, 10),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['x_samples'],
                                     'title': "Generated data",
                                     'type': 'img',
                                     'num_samples': 100,
                                     'disp_dim': (28, 28),
                                     'tile_shape': (10, 10),
                                 },
                             ])

    # <editor-fold desc="Working example">
    # model = GAN(num_x=784,
    #             num_discriminator_hiddens=(128,),
    #             discriminator_batchnorm=False,
    #             discriminator_act_funcs=('lrelu',),
    #             discriminator_learning_rate=0.001,
    #             num_z=100,
    #             generator_distribution=Uniform(low=(-1.0,) * 100, high=(1.0,) * 100),
    #             generator_batchnorm=False,
    #             num_generator_hiddens=(128,),
    #             generator_act_funcs=('lrelu',),
    #             generator_out_func='sigmoid',
    #             generator_learning_rate=0.001,
    #             batch_size=32,
    #             metrics=['d_loss', 'g_loss'],
    #             callbacks=[loss_display, sample_display],
    #             num_epochs=100,
    #             random_state=random_seed(),
    #             verbose=1)
    # </editor-fold>

    # <editor-fold desc="Testing example">
    NUM_Z = 10  # set to 100 for a full run
    model = GAN(
        num_x=784,
        num_discriminator_hiddens=(16, ),  # set to 128 for a full run
        discriminator_batchnorm=False,
        discriminator_act_funcs=('lrelu', ),
        discriminator_dropouts=(0.99, ),
        discriminator_learning_rate=0.001,
        num_z=NUM_Z,
        generator_distribution=Uniform(low=(-1.0, ) * NUM_Z,
                                       high=(1.0, ) * NUM_Z),
        generator_batchnorm=False,
        num_generator_hiddens=(16, 16),  # set to (128, 128) for a full run
        generator_act_funcs=('lrelu', 'lrelu'),
        generator_out_func='sigmoid',
        generator_learning_rate=0.001,
        batch_size=32,
        metrics=['d_loss', 'g_loss'],
        callbacks=[loss_display, sample_display],
        num_epochs=4,  # set to 100 for a full run
        random_state=random_seed(),
        verbose=1)
    # </editor-fold>

    model.fit(x_train)
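
    # Hedged sketch, for intuition only: generator_distribution above defines a
    # NUM_Z-dimensional uniform box prior over [-1, 1]; a plain-NumPy draw of a
    # latent batch of matching shape would look like this (the model itself
    # samples internally through its generator_distribution).
    z_batch = np.random.uniform(low=-1.0, high=1.0, size=(32, NUM_Z))
    print("Latent batch shape: {}".format(z_batch.shape))  # (32, 10) here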