Example #1
0
def test_bbrbm_logpartition():
    """Train a tiny Bernoulli-Bernoulli RBM on MNIST and compare exact and
    CSL-based log-likelihood estimates of the test set."""
    print(
        "========== Test Computing log-partition function of BernoulliBernoulliRBM =========="
    )

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    rbm = BernoulliBernoulliRBM(
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        momentum_method='sudden',
        weight_cost=2e-4,
        metrics=['recon_err'],
        random_state=random_seed(),
        verbose=1,
    )
    rbm.fit(x_train)

    # Exact computation is tractable only because num_hidden is tiny (15).
    logz = rbm.get_logpartition(method='exact')
    print("Exact log-partition function = %.4f" % logz)

    exact_ll = rbm.get_loglik(x_test, method='exact').mean()
    print("Exact log-likelihood of testing data = %.4f" % exact_ll)

    csl_ll = rbm.get_loglik(x_test,
                            method='csl',
                            num_hidden_samples=100,
                            num_steps=100).mean()
    print("CSL log-likelihood of testing data = %.4f" % csl_ll)
Example #2
0
def test_bbrbm_pipeline():
    """Chain an RBM feature extractor with a 1-NN classifier in a
    scikit-learn Pipeline and report train/test error rates."""
    print(
        "========== Test the pipeline of "
        "BernoulliBernoulliRBM followed by k-nearest-neighbors (kNN) =========="
    )

    np.random.seed(random_seed())

    from sklearn.pipeline import Pipeline
    from sklearn.neighbors import KNeighborsClassifier

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    rbm = BernoulliBernoulliRBM(
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        momentum_method='sudden',
        weight_cost=2e-4,
        random_state=random_seed(),
        verbose=0,
    )
    pipe = Pipeline([('rbm', rbm),
                     ('knn', KNeighborsClassifier(n_neighbors=1))])

    pipe.fit(x_train, y_train)

    # Pipeline.score returns accuracy; report the complement as error.
    train_err = 1.0 - pipe.score(x_train, y_train)
    test_err = 1.0 - pipe.score(x_test, y_test)
    print("Training error = %.4f" % train_err)
    print("Testing error = %.4f" % test_err)
Example #3
0
def test_cgan_mnist(show_figure=False, block_figure_on_end=False):
    """Smoke-test CGAN (conditional GAN) training on MNIST.

    Parameters
    ----------
    show_figure : bool
        Whether to display the monitoring figures during training.
    block_figure_on_end : bool
        Whether the figures block the process when training ends.
    """
    print("========== Test CGAN on MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # x / 0.5 - 1. == 2*x - 1: rescales pixels to [-1, 1] assuming inputs
    # lie in [0, 1] — TODO confirm demo.load_mnist() scaling.
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    # Line plot of discriminator/generator losses over epochs.
    loss_display = Display(layout=(1, 1),
                           dpi='auto',
                           show=show_figure,
                           block_on_end=block_figure_on_end,
                           monitor=[
                               {
                                   'metrics': ['d_loss', 'g_loss'],
                                   'type':
                                   'line',
                                   'labels':
                                   ["discriminator loss", "generator loss"],
                                   'title':
                                   "Losses",
                                   'xlabel':
                                   "epoch",
                                   'ylabel':
                                   "loss",
                               },
                           ])
    # 10x10 tile of 100 generated samples, refreshed every epoch (freq=1).
    sample_display = Display(layout=(1, 1),
                             dpi='auto',
                             figsize=(10, 10),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['x_samples'],
                                     'title': "Generated data",
                                     'type': 'img',
                                     'num_samples': 100,
                                     'tile_shape': (10, 10),
                                 },
                             ])

    # Deliberately tiny architecture/epoch count to keep the test fast;
    # the inline notes give the full-run values.
    model = CGAN(
        model_name="CGAN_MNIST",
        num_z=10,  # set to 100 for a full run
        z_prior=Uniform1D(low=-1.0, high=1.0),
        img_size=(28, 28, 1),
        batch_size=64,  # set to 64 for a full run
        num_conv_layers=3,  # set to 3 for a full run
        num_gen_feature_maps=2,  # set to 32 for a full run
        num_dis_feature_maps=2,  # set to 32 for a full run
        metrics=['d_loss', 'g_loss'],
        callbacks=[loss_display, sample_display],
        num_epochs=1,  # set to 100 for a full run
        random_state=random_seed(),
        verbose=1)

    # Labels are passed so the GAN can condition generation on the class.
    model.fit(x_train, y_train)
Example #4
0
def test_bbrbm_csl():
    """Train a small Bernoulli-Bernoulli RBM on MNIST and estimate train
    and test log-likelihoods with the CSL estimator."""
    print("========== Test Conservative Sampling-based Likelihood (CSL) "
          "of BernoulliBernoulliRBM ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    rbm = BernoulliBernoulliRBM(
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        momentum_method='sudden',
        weight_cost=2e-4,
        metrics=['recon_err'],
        random_state=random_seed(),
        verbose=1,
    )
    rbm.fit(x_train)

    # Same CSL settings for both splits: 100 hidden samples, 10 steps.
    train_ll = rbm.get_loglik(x_train,
                              method='csl',
                              num_hidden_samples=100,
                              num_steps=10).mean()
    print("Training log-likelihood computed using CSL = %.4f" % train_ll)

    test_ll = rbm.get_loglik(x_test,
                             method='csl',
                             num_hidden_samples=100,
                             num_steps=10).mean()
    print("Testing log-likelihood computed using CSL = %.4f" % test_ll)
Example #5
0
def test_dfm_save_and_load(show_figure=False, block_figure_on_end=False):
    """Train a small DFM on MNIST, save it, reload it, and resume training.

    Parameters
    ----------
    show_figure : bool
        Whether to display the monitoring figures during training.
    block_figure_on_end : bool
        Whether the figures block the process when training ends.
    """
    print("========== Test Save and Load functions of DFM on MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # x / 0.5 - 1. == 2*x - 1: rescales pixels to [-1, 1] assuming inputs
    # lie in [0, 1] — TODO confirm demo.load_mnist() scaling.
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    # Line plot of discriminator/generator losses over epochs.
    loss_display = Display(layout=(1, 1),
                           dpi='auto',
                           show=show_figure,
                           block_on_end=block_figure_on_end,
                           monitor=[{'metrics': ['d_loss', 'g_loss'],
                                     'type': 'line',
                                     'labels': ["discriminator loss", "generator loss"],
                                     'title': "Losses",
                                     'xlabel': "epoch",
                                     'ylabel': "loss",
                                     },
                                    ])
    # 10x10 tile of 100 generated samples, refreshed every epoch (freq=1).
    sample_display = Display(layout=(1, 1),
                             dpi='auto',
                             figsize=(10, 10),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[{'metrics': ['x_samples'],
                                       'title': "Generated data",
                                       'type': 'img',
                                       'num_samples': 100,
                                       'tile_shape': (10, 10),
                                       },
                                      ])

    # Deliberately tiny configuration for a fast test; inline notes give
    # the full-run values.
    model = DFM(model_name="DFM_MNIST_SaveLoad",
                num_z=10,  # set to 100 for a full run
                img_size=(28, 28, 1),
                batch_size=32,  # set to 64 for a full run
                num_conv_layers=3,  # set to 3 for a full run
                num_gen_feature_maps=4,  # set to 32 for a full run
                num_dis_feature_maps=4,  # set to 32 for a full run
                alpha=0.03 / 10,  # 0.03 / 1024
                noise_std=1.0,
                num_dfm_layers=1,  # 2
                num_dfm_hidden=10,  # 1024
                metrics=['d_loss', 'g_loss'],
                callbacks=[loss_display, sample_display],
                num_epochs=4,  # set to 100 for a full run
                random_state=random_seed(),
                verbose=1)

    model.fit(x_train)

    # Round-trip: persist, reload as a generic TensorFlowModel, and
    # continue training with a higher epoch budget.
    save_file_path = model.save()

    model1 = TensorFlowModel.load_model(save_file_path)
    model1.num_epochs = 10
    model1.fit(x_train)
Example #6
0
def test_srbm_gridsearch():
    """Grid-search SupervisedRBM hyper-parameters over a predefined split;
    the model also keeps its own validation fold for early stopping."""
    print("========== Tune parameters for Supervised RBM ==========")

    from sklearn.model_selection import PredefinedSplit
    from sklearn.model_selection import GridSearchCV

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # The test block is stacked twice: the first copy feeds the model's
    # internal validation fold (via cv=...), the second is the fold
    # GridSearchCV scores on.
    x = np.vstack([x_train, x_test, x_test])
    y = np.concatenate([y_train, y_test, y_test])

    early_stopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1)

    # Hyper-parameter grid explored by the search.
    param_grid = {
        'batch_size': [64, 100],
        'learning_rate': [0.1, 0.01],
        'weight_cost': [0.001, 0.0001]
    }

    srbm = SupervisedRBM(num_hidden=15,
                         num_visible=784,
                         batch_size=100,
                         num_epochs=4,
                         learning_rate=0.01,
                         momentum_method='sudden',
                         weight_cost=0.0,
                         inference_engine='variational_inference',
                         approx_method='first_order',
                         metrics=['loss'],
                         callbacks=[early_stopping],
                         cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
                         random_state=random_seed(),
                         verbose=0)

    # -1 rows never appear in a test fold; the trailing 1s form the fold.
    split = PredefinedSplit(test_fold=[-1] * x_train.shape[0] +
                            [-1] * x_test.shape[0] + [1] * x_test.shape[0])

    searcher = GridSearchCV(srbm,
                            param_grid,
                            cv=split,
                            n_jobs=-1,
                            refit=False,
                            verbose=True)
    searcher.fit(x, y)

    print("Best error {} @ params {}".format(1.0 - searcher.best_score_,
                                             searcher.best_params_))
Example #7
0
def test_bbrbm_gridsearch():
    """Jointly tune RBM and kNN hyper-parameters of a Pipeline with
    GridSearchCV over a predefined train/test split."""
    print(
        "========== Tuning parameters for the pipeline of "
        "BernoulliBernoulliRBM followed by k-nearest-neighbors (kNN) =========="
    )

    np.random.seed(random_seed())

    from sklearn.pipeline import Pipeline
    from sklearn.model_selection import GridSearchCV
    from sklearn.model_selection import PredefinedSplit
    from sklearn.neighbors import KNeighborsClassifier

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    rbm = BernoulliBernoulliRBM(
        num_hidden=500,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        momentum_method='sudden',
        weight_cost=2e-4,
        random_state=random_seed(),
        verbose=0,
    )
    pipe = Pipeline([('rbm', rbm),
                     ('knn', KNeighborsClassifier(n_neighbors=4))])

    # Grid keys use the scikit-learn <step>__<param> convention.
    param_grid = {
        'rbm__num_hidden': [10, 15],
        'rbm__batch_size': [64, 100],
        'knn__n_neighbors': [1, 2],
    }

    # -1 marks training rows, 1 the single evaluation fold (the test set).
    split = PredefinedSplit(test_fold=[-1] * x_train.shape[0] +
                            [1] * x_test.shape[0])

    searcher = GridSearchCV(pipe, param_grid, cv=split, n_jobs=-1,
                            refit=False, verbose=True)
    searcher.fit(x, y)

    print("Best error {} @ params {}".format(1.0 - searcher.best_score_,
                                             searcher.best_params_))
Example #8
0
def test_bbrbm_generate_data(show_figure=False, block_figure_on_end=False):
    """Train a small Bernoulli-Bernoulli RBM on MNIST, draw 25 samples from
    it, and display them as a 5x5 tiled image."""
    print(
        "========== Test Data Generation of BernoulliBernoulliRBM ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    rbm = BernoulliBernoulliRBM(
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        momentum_method='sudden',
        weight_cost=2e-4,
        metrics=['recon_err'],
        random_state=random_seed(),
        verbose=1,
    )
    rbm.fit(x_train)

    num_samples = 25
    samples = rbm.generate_data(num_samples=num_samples)

    import matplotlib.pyplot as plt
    plt.style.use('ggplot')
    from male.utils.disp_utils import tile_raster_images

    # Lay the 784-dim sample rows out as a 5x5 grid of 28x28 images.
    tiled = tile_raster_images(samples,
                               img_shape=(28, 28),
                               tile_shape=(5, 5),
                               tile_spacing=(1, 1),
                               scale_rows_to_unit_interval=False,
                               output_pixel_vals=False)
    plt.figure()
    _ = plt.imshow(tiled, aspect='auto', cmap='Greys_r', interpolation='none')
    plt.title("Generate {} samples".format(num_samples))
    plt.colorbar()
    plt.axis('off')
    plt.tight_layout()
    if show_figure:
        plt.show(block=block_figure_on_end)
Example #9
0
def test_image_saver_callback():
    """Train a GLM on MNIST while two ImageSaver callbacks write 10x10
    tiles of fixed input images (MNIST and CIFAR-10) to disk each epoch."""
    np.random.seed(random_seed())

    (x_train, y_train), (_, _) = demo.load_mnist()
    (cifar10_train, _), (_, _) = demo.load_cifar10()

    # Saves the first 100 MNIST digits every epoch; the filepath embeds the
    # epoch number via the {epoch:04d} template.
    imgsaver1 = ImageSaver(freq=1,
                           filepath=os.path.join(
                               model_dir(), "male/callbacks/imagesaver/"
                               "mnist/mnist_{epoch:04d}.png"),
                           monitor={
                               'metrics': 'x_data',
                               'img_size': (28, 28, 1),
                               'tile_shape': (10, 10),
                               'images': x_train[:100].reshape([-1, 28, 28, 1])
                           })
    # Same idea for CIFAR-10; note these are fixed input images supplied via
    # 'images', not model outputs.
    imgsaver2 = ImageSaver(freq=1,
                           filepath=os.path.join(
                               model_dir(), "male/callbacks/imagesaver/"
                               "cifar10/cifar10_{epoch:04d}.png"),
                           monitor={
                               'metrics':
                               'x_data',
                               'img_size': (32, 32, 3),
                               'tile_shape': (10, 10),
                               'images':
                               cifar10_train[:100].reshape([-1, 32, 32, 3])
                           })

    # The classifier is only a vehicle to drive the callbacks for 4 epochs.
    optz = SGD(learning_rate=0.001)
    clf = GLM(model_name="imagesaver_callback",
              link='softmax',
              loss='softmax',
              optimizer=optz,
              num_epochs=4,
              batch_size=100,
              task='classification',
              callbacks=[imgsaver1, imgsaver2],
              random_state=random_seed(),
              verbose=1)
    clf.fit(x_train, y_train)
Example #10
0
def test_keras_vae_mnist_bin():
    """Fit a Keras VAE on MNIST scaled to [0, 1], using the test set as a
    validation fold via the `cv` argument."""
    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # Scale raw pixel intensities into [0, 1].
    x_train = x_train.astype(np.float32) / 255.0
    x_test = x_test.astype(np.float32) / 255.0

    # -1 marks training rows, 0 marks validation rows.
    cv = [-1] * x_train.shape[0] + [0] * x_test.shape[0]

    x = np.vstack((x_train, x_test))
    y = np.concatenate((y_train, y_test))

    vae = KerasVAE(num_visible=784,
                   num_hiddens=[500],
                   act_funcs=['sigmoid'],
                   cv=cv,
                   num_z=200,
                   z_init=1.0,
                   batch_size=128,
                   num_epochs=1000)
    vae.fit(x)
Example #11
0
def test_gank_image_saver():
    """Train GANK (logit loss) first on MNIST and then on CIFAR-10, saving
    a 10x10 tile of generated samples to disk after every epoch."""
    print("========== Test GANK-Logit with Image Saver ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # x / 0.5 - 1. == 2*x - 1: rescales pixels to [-1, 1] assuming inputs
    # lie in [0, 1] — TODO confirm demo.load_mnist() scaling.
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    # Dumps 100 generated samples per epoch; filepath embeds {epoch:04d}.
    imgsaver = ImageSaver(freq=1,
                          filepath=os.path.join(
                              model_dir(), "male/GANK/imagesaver/"
                              "mnist/mnist_{epoch:04d}.png"),
                          monitor={
                              'metrics': 'x_samples',
                              'num_samples': 100,
                              'tile_shape': (10, 10),
                          })

    # Tiny configuration for test speed; inline notes give full-run values.
    model = GANK(
        model_name="GANK_MNIST",
        num_random_features=50,  # set to 1000 for a full run
        gamma_init=0.01,
        loss='logit',
        num_z=10,  # set to 100 for a full run
        img_size=(28, 28, 1),
        batch_size=32,  # set to 64 for a full run
        num_conv_layers=3,  # set to 3 for a full run
        num_gen_feature_maps=4,  # set to 32 for a full run
        num_dis_feature_maps=4,  # set to 32 for a full run
        metrics=['d_loss', 'g_loss'],
        callbacks=[imgsaver],
        num_epochs=4,  # set to 100 for a full run
        random_state=random_seed(),
        verbose=1)
    model.fit(x_train)

    # Second phase: repeat the same experiment on CIFAR-10 (32x32 RGB).
    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_cifar10()
    x_train = x_train.astype(np.float32).reshape([-1, 32, 32, 3]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 32, 32, 3]) / 0.5 - 1.

    imgsaver = ImageSaver(freq=1,
                          filepath=os.path.join(
                              model_dir(), "male/GANK/imagesaver/"
                              "cifar10/cifar10_{epoch:04d}.png"),
                          monitor={
                              'metrics': 'x_samples',
                              'num_samples': 100,
                              'tile_shape': (10, 10),
                          })
    model = GANK(
        model_name="GANK_CIFAR10",
        num_random_features=50,  # set 1000 for a full run
        gamma_init=0.01,
        loss='logit',
        num_z=10,  # set to 100 for a full run
        img_size=(32, 32, 3),
        batch_size=32,  # set to 64 for a full run
        num_conv_layers=3,  # set to 3 for a full run
        num_gen_feature_maps=4,  # set to 32 for a full run
        num_dis_feature_maps=4,  # set to 32 for a full run
        metrics=['d_loss', 'g_loss'],
        callbacks=[imgsaver],
        num_epochs=4,  # set to 500 for a full run
        random_state=random_seed(),
        verbose=1)
    model.fit(x_train)
Example #12
0
def test_dcgan_save_and_load(show_figure=False, block_figure_on_end=False):
    """Train a small DCGAN on MNIST, save a checkpoint, reload it, and
    resume training.

    Parameters
    ----------
    show_figure : bool
        Whether to display the monitoring figures during training.
    block_figure_on_end : bool
        Whether the figures block the process when training ends.
    """
    print(
        "========== Test Save and Load functions of DCGAN on MNIST data =========="
    )

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # x / 0.5 - 1. == 2*x - 1: rescales pixels to [-1, 1] assuming inputs
    # lie in [0, 1] — TODO confirm demo.load_mnist() scaling.
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    # All logs and checkpoints go under this directory.
    root_dir = os.path.join(model_dir(), "male/DCGAN/MNIST")
    # Line plot of discriminator/generator losses over epochs.
    loss_display = Display(layout=(1, 1),
                           dpi='auto',
                           show=show_figure,
                           block_on_end=block_figure_on_end,
                           monitor=[
                               {
                                   'metrics': ['d_loss', 'g_loss'],
                                   'type':
                                   'line',
                                   'labels':
                                   ["discriminator loss", "generator loss"],
                                   'title':
                                   "Losses",
                                   'xlabel':
                                   "epoch",
                                   'ylabel':
                                   "loss",
                               },
                           ])
    # 10x10 tile of 100 generated samples, refreshed every epoch (freq=1).
    sample_display = Display(layout=(1, 1),
                             dpi='auto',
                             figsize=(10, 10),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['x_samples'],
                                     'title': "Generated data",
                                     'type': 'img',
                                     'num_samples': 100,
                                     'tile_shape': (10, 10),
                                 },
                             ])

    # Deliberately tiny configuration for a fast test; inline notes give
    # the full-run values.
    model = DCGAN(
        model_name="DCGAN_MNIST_SaveLoad",
        num_z=10,  # set to 100 for a full run
        img_size=(28, 28, 1),
        batch_size=16,  # set to 64 for a full run
        num_conv_layers=3,  # set to 3 for a full run
        num_gen_feature_maps=4,  # set to 32 for a full run
        num_dis_feature_maps=4,  # set to 32 for a full run
        metrics=['d_loss', 'g_loss'],
        callbacks=[loss_display, sample_display],
        num_epochs=2,  # set to 100 for a full run
        log_path=os.path.join(root_dir, "logs"),
        random_state=random_seed(),
        verbose=1)

    model.fit(x_train)

    # Round-trip: persist, reload as a generic TensorFlowModel, and resume
    # training with a larger epoch budget.
    print("Saving model...")
    save_file_path = model.save(os.path.join(root_dir, "checkpoints/ckpt"))
    print("Reloading model...")
    model1 = TensorFlowModel.load_model(save_file_path)
    model1.num_epochs = 4
    model1.fit(x_train)
    print("Done!")
Example #13
0
def test_ssrbm_classification(show_figure=False, block_figure_on_end=False):
    """Train a Semi-Supervised RBM on MNIST with most labels removed, then
    compare its predictions with a kNN trained on the learned features.

    Parameters
    ----------
    show_figure : bool
        Whether to display the monitoring figures during training.
    block_figure_on_end : bool
        Whether the figures block the process when training ends.
    """
    print("========== Test Semi-Supervised RBM for Classification ==========")

    # Number of training points that keep their labels.
    num_labeled_data = 1000

    from sklearn.metrics import accuracy_score
    from sklearn.neighbors import KNeighborsClassifier

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # remove some labels
    # StratifiedShuffleSplit with test_size=num_labeled_data: idx_test picks
    # the 1000 points that keep their labels; every label indexed by
    # idx_train is overwritten with the sentinel 10**8 ("unlabeled").
    idx_train, idx_test = next(
        iter(
            StratifiedShuffleSplit(n_splits=1,
                                   test_size=num_labeled_data,
                                   random_state=random_seed()).split(
                                       x_train, y_train)))
    y_train[idx_train] = 10**8

    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    # Three stacked line plots: reconstruction error, loss, prediction
    # error — each with its validation counterpart.
    learning_display = Display(
        title="Learning curves",
        dpi='auto',
        layout=(3, 1),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[
            {
                'metrics': ['recon_err', 'val_recon_err'],
                'type': 'line',
                'labels': ["training recon error", "validation recon error"],
                'title': "Reconstruction Errors",
                'xlabel': "epoch",
                'ylabel': "error",
            },
            {
                'metrics': ['loss', 'val_loss'],
                'type': 'line',
                'labels': ["training loss", "validation loss"],
                'title': "Learning Losses",
                'xlabel': "epoch",
                'ylabel': "loss",
            },
            {
                'metrics': ['err', 'val_err'],
                'type': 'line',
                'labels': ["training error", "validation error"],
                'title': "Prediction Errors",
                'xlabel': "epoch",
                'ylabel': "error",
            },
            # {'metrics': ['loglik_csl', 'val_loglik_csl'],
            #  'type': 'line',
            #  'labels': ["training loglik (CSL)", "validation loglik (CSL)"],
            #  'title': "Loglikelihoods using CSL",
            #  'xlabel': "epoch",
            #  'ylabel': "loglik",
            #  },
        ])

    # 3x5 tile showing 15 learned receptive fields as 28x28 images.
    filter_display = Display(title="Receptive Fields",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(8, 8),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['filters'],
                                     'title': "Receptive Fields",
                                     'type': 'img',
                                     'num_filters': 15,
                                     'disp_dim': (28, 28),
                                     'tile_shape': (3, 5),
                                 },
                             ])

    # Hidden-unit activations for the first 100 training points.
    hidden_display = Display(title="Hidden Activations",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(8, 8),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['hidden_activations'],
                                     'title': "Hidden Activations",
                                     'type': 'img',
                                     'data': x_train[:100],
                                 },
                             ])

    # Stop on stalled validation loss; checkpoint only improving epochs.
    early_stopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
    filepath = os.path.join(model_dir(),
                            "male/ssRBM/mnist_{epoch:04d}_{val_loss:.6f}.pkl")
    checkpoint = ModelCheckpoint(filepath,
                                 mode='min',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True)
    # cv: -1 marks training rows, 0 marks the validation fold (test block).
    model = SemiSupervisedRBM(num_hidden=15,
                              num_visible=784,
                              batch_size=100,
                              num_epochs=4,
                              learning_rate=0.1,
                              w_init=0.1,
                              momentum_method='sudden',
                              weight_cost=0.0,
                              inference_engine='variational_inference',
                              approx_method='first_order',
                              metrics=['recon_err', 'loss', 'err'],
                              callbacks=[
                                  filter_display, learning_display,
                                  hidden_display, early_stopping, checkpoint
                              ],
                              cv=[-1] * x_train.shape[0] +
                              [0] * x_test.shape[0],
                              random_state=random_seed(),
                              verbose=1)

    model.fit(x, y)

    print("Test reconstruction error = %.4f" %
          model.get_reconstruction_error(x_test).mean())

    print("=========== Predicted by Semi-Supervised RBM ============")
    print("Train accuracy = {0:>1.4f}\tTest accuracy = {1:>1.4f}".format(
        accuracy_score(y_train, model.predict(x_train)),
        accuracy_score(y_test, model.predict(x_test))))

    # Baseline: kNN on the RBM's hidden representation.
    x_train1 = model.transform(x_train)
    x_test1 = model.transform(x_test)

    clf = KNeighborsClassifier(n_neighbors=4)
    clf.fit(x_train1, y_train)

    print("=========== Predicted by kNN ============")
    print("Train accuracy = {0:>1.4f}\tTest accuracy = {1:>1.4f}".format(
        accuracy_score(y_train, clf.predict(x_train1)),
        accuracy_score(y_test, clf.predict(x_test1))))
Example #14
0
def test_nrbm_general(show_figure=False, block_figure_on_end=False):
    """Train a Nonnegative RBM on MNIST with monitoring displays, then
    report free energies, reconstruction log-likelihoods, and the error of
    a 1-NN classifier on the learned features.

    Parameters
    ----------
    show_figure : bool
        Whether to display the monitoring figures during training.
    block_figure_on_end : bool
        Whether the figures block the process when training ends.
    """
    print("========== Test NonnegativeRBM in General ==========")

    np.random.seed(random_seed())

    from sklearn.metrics import accuracy_score
    from sklearn.neighbors import KNeighborsClassifier

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    # 2x2 grid of learning-curve panels.
    learning_display = Display(
        title="Learning curves",
        dpi='auto',
        layout=(2, 2),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[
            {
                'metrics': ['recon_err', 'val_recon_err'],
                'type': 'line',
                'labels': ["training recon error", "validation recon error"],
                'title': "Reconstruction Errors",
                'xlabel': "epoch",
                'ylabel': "error",
            },
            {
                # No 'labels' entry here — presumably Display falls back to
                # the metric names; confirm against Display's implementation.
                'metrics': ['free_energy', 'val_free_energy'],
                'type': 'line',
                'title': "Free Energies",
                'xlabel': "epoch",
                'ylabel': "energy",
            },
            {
                'metrics': ['recon_loglik', 'val_recon_loglik'],
                'type': 'line',
                'labels': ["training recon loglik", "validation recon loglik"],
                'title': "Reconstruction Loglikelihoods",
                'xlabel': "epoch",
                'ylabel': "loglik",
            },
            {
                # NOTE(review): identical to the previous panel — apparently
                # just filling the 4th cell of the 2x2 layout; confirm intent.
                'metrics': ['recon_loglik', 'val_recon_loglik'],
                'type': 'line',
                'labels': ["training recon loglik", "validation recon loglik"],
                'title': "Reconstruction Loglikelihoods",
                'xlabel': "epoch",
                'ylabel': "loglik",
            },
            # {'metrics': ['loglik_csl', 'val_loglik_csl'],
            #  'type': 'line',
            #  'labels': ["training loglik (CSL)", "validation loglik (CSL)"],
            #  'title': "Loglikelihoods using CSL",
            #  'xlabel': "epoch",
            #  'ylabel': "loglik",
            #  },
        ])

    # 3x5 tile showing 15 learned receptive fields as 28x28 images.
    filter_display = Display(title="Receptive Fields",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(8, 8),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['filters'],
                                     'title': "Receptive Fields",
                                     'type': 'img',
                                     'num_filters': 15,
                                     'disp_dim': (28, 28),
                                     'tile_shape': (3, 5),
                                 },
                             ])

    # cv: -1 marks training rows, 0 the validation fold (the test block).
    # random_state is a fixed literal here, unlike the random_seed() used
    # by the sibling tests.
    model = NonnegativeRBM(
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        momentum_method='sudden',
        weight_cost=2e-4,
        nonnegative_type='barrier',
        nonnegative_cost=0.001,
        random_state=6789,
        metrics=['recon_err', 'free_energy', 'recon_loglik'],
        callbacks=[learning_display, filter_display],
        cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
        verbose=1)

    model.fit(x)

    print("Train free energy = %.4f" % model.get_free_energy(x_train).mean())
    print("Test free energy = %.4f" % model.get_free_energy(x_test).mean())

    print("Train reconstruction likelihood = %.4f" %
          model.get_reconstruction_loglik(x_train).mean())
    print("Test reconstruction likelihood = %.4f" %
          model.get_reconstruction_loglik(x_test).mean())

    # Evaluate the learned representation with a 1-NN classifier.
    x_train1 = model.transform(x_train)
    x_test1 = model.transform(x_test)

    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(x_train1, y_train)

    print("Error = %.4f" % (1 - accuracy_score(y_test, clf.predict(x_test1))))
Example #15
0
def test_srbm_regression(show_figure=False, block_figure_on_end=False):
    """Train a SupervisedRBM in regression mode on MNIST and compare its
    predictive MSE against a plain LinearRegression baseline.

    :param show_figure: whether to render the monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    print("========== Test Supervised RBM for Regression ==========")

    from sklearn.metrics import mean_squared_error
    from sklearn.linear_model import LinearRegression

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # Train and test rows are stacked; the `cv` argument below marks which
    # rows are training (-1) and which are validation (0).
    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    # Three stacked panels: reconstruction error, loss, prediction error.
    curve_display = Display(
        title="Learning curves",
        dpi='auto',
        layout=(3, 1),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[
            dict(metrics=['recon_err', 'val_recon_err'],
                 type='line',
                 labels=["training recon error", "validation recon error"],
                 title="Reconstruction Errors",
                 xlabel="epoch",
                 ylabel="error"),
            dict(metrics=['loss', 'val_loss'],
                 type='line',
                 labels=["training loss", "validation loss"],
                 title="Learning Losses",
                 xlabel="epoch",
                 ylabel="loss"),
            dict(metrics=['err', 'val_err'],
                 type='line',
                 labels=["training error", "validation error"],
                 title="Prediction Errors",
                 xlabel="epoch",
                 ylabel="error"),
            # A CSL log-likelihood panel used to live here; it is disabled.
        ])

    # Learned receptive fields rendered as a 3x5 grid of 28x28 images.
    rf_display = Display(
        title="Receptive Fields",
        dpi='auto',
        layout=(1, 1),
        figsize=(8, 8),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[
            dict(metrics=['filters'],
                 title="Receptive Fields",
                 type='img',
                 num_filters=15,
                 disp_dim=(28, 28),
                 tile_shape=(3, 5)),
        ])

    # Hidden-unit activations for the first 100 training images.
    activation_display = Display(
        title="Hidden Activations",
        dpi='auto',
        layout=(1, 1),
        figsize=(8, 8),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[
            dict(metrics=['hidden_activations'],
                 title="Hidden Activations",
                 type='img',
                 data=x_train[:100]),
        ])

    # Stop after 2 epochs without val_loss improvement; keep the best model.
    stopper = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
    ckpt_path = os.path.join(model_dir(),
                             "male/sRBM/mnist_{epoch:04d}_{val_loss:.6f}.pkl")
    saver = ModelCheckpoint(ckpt_path,
                            mode='min',
                            monitor='val_loss',
                            verbose=0,
                            save_best_only=True)

    model = SupervisedRBM(task='regression',
                          num_hidden=15,
                          num_visible=784,
                          batch_size=100,
                          num_epochs=4,
                          w_init=0.01,
                          learning_rate=0.01,
                          momentum_method='sudden',
                          weight_cost=0.0,
                          inference_engine='variational_inference',
                          approx_method='first_order',
                          metrics=['recon_err', 'loss', 'err'],
                          callbacks=[
                              rf_display, curve_display, activation_display,
                              stopper, saver
                          ],
                          cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
                          random_state=random_seed(),
                          verbose=1)

    model.fit(x, y)

    print("Test reconstruction error = %.4f" %
          model.get_reconstruction_error(x_test).mean())
    print("Test loss = %.4f" % model.get_loss(x_test, y_test))

    print("=========== Predicted by sRBM ============")
    print("Train MSE = {0:>1.4f}\tTest MSE = {1:>1.4f}".format(
        -model.score(x_train, y_train), -model.score(x_test, y_test)))

    # Baseline: ordinary least squares on the raw pixels.
    baseline = LinearRegression()
    baseline.fit(x_train, y_train)
    print("=========== Predicted by Linear Regressor ============")
    print("Train MSE = {0:>1.4f}\tTest MSE = {1:>1.4f}".format(
        mean_squared_error(y_train, baseline.predict(x_train)),
        mean_squared_error(y_test, baseline.predict(x_test))))
Example #16
0
def test_efrbm_mnist(visible_layer_type='binary',
                     hidden_layer_type='continuous',
                     show_figure=False,
                     block_figure_on_end=False):
    """Train an exponential-family RBM (EFRBM) on a small MNIST subset and
    evaluate the learned features with a logistic-regression probe.

    :param visible_layer_type: exponential-family type of the visible layer
        (a key of SUFFICIENT_STATISTICS_DIM, e.g. 'binary' or 'continuous').
    :param hidden_layer_type: exponential-family type of the hidden layer.
    :param show_figure: whether to render monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # Keep the run fast by sub-sampling both splits.
    num_train = 1000
    num_test = 500

    x_train = x_train[:num_train]
    y_train = y_train[:num_train]
    x_test = x_test[:num_test]
    y_test = y_test[:num_test]

    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    learning_display = Display(
        title="Learning curves",
        dpi='auto',
        layout=(1, 2),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[{
            'metrics': ['recon_err', 'val_recon_err'],
            'type': 'line',
            'labels': ["training recon error", "validation recon error"],
            'title': "Reconstruction Errors",
            'xlabel': "epoch",
            'ylabel': "error",
        }, {
            'metrics': ['free_energy', 'val_free_energy'],
            'type': 'line',
            'title': "Free Energies",
            'xlabel': "epoch",
            'ylabel': "energy",
        }])

    # NOTE(review): gen_display is built but never passed to `callbacks`
    # below — kept for parity with the original; wire it up if generated
    # samples should be monitored.
    gen_display = Display(title="Generated data",
                          dpi='auto',
                          layout=(1, 1),
                          figsize=(8, 8),
                          freq=1,
                          show=show_figure,
                          block_on_end=block_figure_on_end,
                          monitor=[
                              {
                                  'metrics': ['generated_data'],
                                  'title': "Generated data",
                                  'type': 'img',
                                  'num_filters': 100,
                                  'disp_dim': (28, 28),
                                  'tile_shape': (10, 10),
                              },
                          ])

    recon_display = Display(title="Reconstructed data",
                            dpi='auto',
                            layout=(1, 1),
                            figsize=(8, 8),
                            freq=1,
                            show=show_figure,
                            block_on_end=block_figure_on_end,
                            monitor=[
                                {
                                    'metrics': ['reconstruction'],
                                    'title': "Reconstructed data",
                                    'type': 'img',
                                    'data': x_train,
                                    'num_filters': 100,
                                    'disp_dim': (28, 28),
                                    'tile_shape': (10, 10),
                                },
                            ])

    suf_stat_dim_vis = SUFFICIENT_STATISTICS_DIM[visible_layer_type]
    suf_stat_dim_hid = SUFFICIENT_STATISTICS_DIM[hidden_layer_type]
    # Defaults for purely binary layers.
    w_init = 0.1
    # BUG FIX: the original read `learning_rate = 0.1,` — the stray trailing
    # comma made this a 1-tuple, which was then passed as the learning rate.
    learning_rate = 0.1
    Gaussian_layer_trainable_sigmal2 = True
    # BUG FIX: the original compared against the misspelled 'continous', so
    # a continuous *visible* layer alone never triggered this branch.
    if visible_layer_type == 'continuous' or hidden_layer_type == 'continuous':
        # Gaussian (continuous) layers need smaller initial weights and a
        # smaller learning rate; sigma^2 is kept fixed (not trainable).
        Gaussian_layer_trainable_sigmal2 = False
        w_init = 0.001
        learning_rate = 0.01

    model = EFRBM(
        suf_stat_dim_vis=suf_stat_dim_vis,
        visible_layer_type=visible_layer_type,
        suf_stat_dim_hid=suf_stat_dim_hid,
        hidden_layer_type=hidden_layer_type,
        Gaussian_layer_trainable_sigmal2=Gaussian_layer_trainable_sigmal2,
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=5,
        momentum_method='sudden',
        weight_cost=2e-4,
        w_init=w_init,
        learning_rate=learning_rate,
        metrics=['recon_err', 'free_energy'],
        callbacks=[learning_display, recon_display],
        cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
        random_state=random_seed(),
        verbose=1)

    # Unsupervised fit; `y` is intentionally unused here (kept for parity
    # with the sibling examples).
    model.fit(x)

    print("Running Logistic Regression...")

    # Probe: classify from the learned hidden representation.
    x_train1 = model.transform(x_train)
    x_test1 = model.transform(x_test)

    clf = LogisticRegression()
    clf.fit(x_train1, y_train)

    y_test_pred = clf.predict(x_test1)

    print("Error = %.4f" % (1 - accuracy_score(y_test, y_test_pred)))
Example #17
0
def test_bbrbm_pcd(show_figure=False, block_figure_on_end=False):
    """Train a BernoulliBernoulliRBM with Persistent Contrastive Divergence
    (PCD) on MNIST, then evaluate the learned representation with 1-NN.

    :param show_figure: whether to render monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    print("========== Test BernoulliBernoulliRBM using "
          "Persistent Contrastive Divergence ==========")

    np.random.seed(random_seed())

    from sklearn.metrics import accuracy_score
    from sklearn.neighbors import KNeighborsClassifier

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # Train and test rows are stacked; the `cv` argument below marks which
    # rows are training (-1) and which are validation (0).
    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    learning_display = Display(
        title="Learning curves",
        dpi='auto',
        layout=(2, 2),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[
            {
                'metrics': ['recon_err', 'val_recon_err'],
                'type': 'line',
                'labels': ["training recon error", "validation recon error"],
                'title': "Reconstruction Errors",
                'xlabel': "epoch",
                'ylabel': "error",
            },
            {
                'metrics': ['free_energy', 'val_free_energy'],
                'type': 'line',
                'title': "Free Energies",
                'xlabel': "epoch",
                'ylabel': "energy",
            },
            # BUG FIX: the original listed this reconstruction-loglik panel
            # twice (byte-identical copy-paste duplicate); one copy removed.
            {
                'metrics': ['recon_loglik', 'val_recon_loglik'],
                'type': 'line',
                'labels': ["training recon loglik", "validation recon loglik"],
                'title': "Reconstruction Loglikelihoods",
                'xlabel': "epoch",
                'ylabel': "loglik",
            },
            # {'metrics': ['loglik_csl', 'val_loglik_csl'],
            #  'type': 'line',
            #  'labels': ["training loglik (CSL)", "validation loglik (CSL)"],
            #  'title': "Loglikelihoods using CSL",
            #  'xlabel': "epoch",
            #  'ylabel': "loglik",
            #  },
        ])

    # Learned receptive fields rendered as a 3x5 grid of 28x28 images.
    filter_display = Display(title="Receptive Fields",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(8, 8),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['filters'],
                                     'title': "Receptive Fields",
                                     'type': 'img',
                                     'num_filters': 15,
                                     'disp_dim': (28, 28),
                                     'tile_shape': (3, 5),
                                 },
                             ])

    # Hidden-unit activations for the first 1000 training images.
    hidden_display = Display(title="Hidden Activations",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(8, 8),
                             freq=1,
                             show=show_figure,
                             block_on_end=block_figure_on_end,
                             monitor=[
                                 {
                                     'metrics': ['hidden_activations'],
                                     'title': "Hidden Activations",
                                     'type': 'img',
                                     'data': x_train[:1000],
                                 },
                             ])

    model = BernoulliBernoulliRBM(
        num_hidden=15,
        num_visible=784,
        batch_size=100,
        num_epochs=4,
        learning_method='pcd',
        num_pcd=5,
        num_chains=10,
        momentum_method='sudden',
        weight_cost=2e-4,
        metrics=['recon_err', 'free_energy', 'recon_loglik'],
        callbacks=[learning_display, filter_display, hidden_display],
        cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
        random_state=random_seed(),
        verbose=1)

    # Unsupervised fit; `y` is intentionally unused here.
    model.fit(x)

    train_free_energy = model.get_free_energy(x_train).mean()
    test_free_energy = model.get_free_energy(x_test).mean()
    print("Train free energy = %.4f" % train_free_energy)
    print("Test free energy = %.4f" % test_free_energy)

    train_recon_loglik = model.get_reconstruction_loglik(x_train).mean()
    test_recon_loglik = model.get_reconstruction_loglik(x_test).mean()
    print("Train reconstruction likelihood = %.4f" % train_recon_loglik)
    print("Test reconstruction likelihood = %.4f" % test_recon_loglik)

    # Probe: 1-nearest-neighbor on the learned hidden representation.
    x_train1 = model.transform(x_train)
    x_test1 = model.transform(x_test)

    clf = KNeighborsClassifier(n_neighbors=1)
    clf.fit(x_train1, y_train)

    test_pred_err = 1.0 - accuracy_score(y_test, clf.predict(x_test1))
    print("BBRBM->kNN: test error = %.4f" % test_pred_err)
Example #18
0
def test_gan_save_and_load(show_figure=False, block_figure_on_end=False):
    """Fit a tiny GAN on MNIST, save it to disk, reload it, and resume
    training to exercise the save/load round trip.

    :param show_figure: whether to render the monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    print(
        "========== Test Save and Load functions of GAN on MNIST data =========="
    )

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # Rescale pixels from [0, 1] to [-1, 1] and reshape to NHWC images.
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    root_dir = os.path.join(model_dir(), "male/GAN/MNIST")
    loss_plot = Display(
        layout=(1, 1),
        dpi='auto',
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[dict(metrics=['d_loss', 'g_loss'],
                      type='line',
                      labels=["discriminator loss", "generator loss"],
                      title="Losses",
                      xlabel="epoch",
                      ylabel="loss")])
    sample_plot = Display(
        layout=(1, 1),
        dpi='auto',
        figsize=(10, 10),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[dict(metrics=['x_samples'],
                      title="Generated data",
                      type='img',
                      num_samples=100,
                      tile_shape=(10, 10))])

    gan = GAN(model_name="GAN_MNIST_SaveLoad",
              num_x=784,
              num_discriminator_hiddens=(16, ),
              discriminator_batchnorm=False,
              discriminator_act_funcs=('lrelu', ),
              discriminator_learning_rate=0.001,
              num_z=8,
              generator_distribution=Uniform(low=(-1.0, ) * 8,
                                             high=(1.0, ) * 8),
              generator_batchnorm=False,
              num_generator_hiddens=(16, ),
              generator_act_funcs=('lrelu', ),
              generator_out_func='sigmoid',
              generator_learning_rate=0.001,
              batch_size=32,
              metrics=['d_loss', 'g_loss'],
              callbacks=[loss_plot, sample_plot],
              num_epochs=2,
              log_path=os.path.join(root_dir, "logs"),
              random_state=random_seed(),
              verbose=1)

    gan.fit(x_train)

    print("Saving model...")
    ckpt = gan.save(os.path.join(root_dir, "checkpoints/ckpt"))
    print("Reloading model...")
    reloaded = TensorFlowModel.load_model(ckpt)
    # Resume the restored model for two more epochs (2 -> 4).
    reloaded.num_epochs = 4
    reloaded.fit(x_train)
    print("Done!")
Example #19
0
def test_pytorch_mlp_v1(show=False, block_figure_on_end=False):
    """Train the PyTorchMLP classifier (arch 'MLPv1') on MNIST with several
    Display callbacks and report train/test error rates.

    :param show: whether to render the monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    print("========== Test PytorchMLPv1 ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # Shuffle both splits and cast to the dtypes the model expects.
    train_order = np.random.permutation(x_train.shape[0])
    x_train = x_train[train_order].astype(np.float32)
    y_train = y_train[train_order].astype(np.uint8)
    print("Number of training samples = {}".format(x_train.shape[0]))

    test_order = np.random.permutation(x_test.shape[0])
    x_test = x_test[test_order].astype(np.float32)
    y_test = y_test[test_order].astype(np.uint8)
    print("Number of testing samples = {}".format(x_test.shape[0]))

    # Train and test rows are stacked; the `cv` argument below marks which
    # rows are training (-1) and which are validation (0).
    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    error_plot = Display(
        title="Error curves",
        dpi='auto',
        layout=(1, 1),
        freq=1,
        show=show,
        block_on_end=block_figure_on_end,
        monitor=[dict(metrics=['err', 'val_err'],
                      type='line',
                      title="Learning errors",
                      xlabel="epoch",
                      ylabel="error")])

    loss_plot = Display(
        title="Learning curves",
        dpi='auto',
        layout=(3, 1),
        freq=1,
        show=show,
        block_on_end=block_figure_on_end,
        # Each epoch's figure is also written to disk as PNG and PDF.
        filepath=[
            os.path.join(model_dir(),
                         "male/PyTorchMLP/loss/loss_{epoch:04d}.png"),
            os.path.join(model_dir(),
                         "male/PyTorchMLP/loss/loss_{epoch:04d}.pdf")
        ],
        monitor=[
            dict(metrics=['loss', 'val_loss'],
                 type='line',
                 labels=["training loss", "validation loss"],
                 title="Learning losses",
                 xlabel="epoch",
                 xlabel_params=dict(fontsize=50),
                 ylabel="loss"),
            dict(metrics=['err', 'val_err'],
                 type='line',
                 title="Learning errors",
                 xlabel="epoch",
                 ylabel="error"),
            dict(metrics=['err'],
                 type='line',
                 labels=["training error"],
                 title="Learning errors",
                 xlabel="epoch",
                 ylabel="error"),
        ])

    weight_plot = Display(
        title="Filters",
        dpi='auto',
        layout=(1, 1),
        figsize=(6, 15),
        freq=1,
        show=show,
        block_on_end=block_figure_on_end,
        filepath=os.path.join(model_dir(),
                              "male/PyTorchMLP/weights/weights_{epoch:04d}.png"),
        monitor=[dict(metrics=['weights'],
                      title="Learned weights",
                      type='img',
                      tile_shape=(5, 2))])

    mlp = PyTorchMLP(model_name='PyTorchMLP',
                     arch='MLPv1',
                     num_epochs=4,
                     batch_size=100,
                     metrics=['loss', 'err'],
                     callbacks=[loss_plot, error_plot, weight_plot],
                     cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
                     random_state=random_seed(),
                     verbose=1)

    mlp.fit(x, y)
    print("Training error = %.4f" % (1.0 - mlp.score(x_train, y_train)))
    print("Testing error = %.4f" % (1.0 - mlp.score(x_test, y_test)))
Example #20
0
def test_wgan_gp_mnist(show_figure=False, block_figure_on_end=False):
    """Train two small WGAN-GP models on MNIST: one with a uniform latent
    prior and one with a Gaussian latent prior.

    :param show_figure: whether to render the monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    print("========== Test WGAN-GP on MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # Rescale pixels from [0, 1] to [-1, 1] and reshape to NHWC images.
    x_train = x_train.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    root_dir = os.path.join(model_dir(), "male/WGAN-GP/MNIST")
    loss_plot = Display(
        layout=(1, 1),
        dpi='auto',
        show=show_figure,
        block_on_end=block_figure_on_end,
        # Each epoch's loss figure is also written to disk as PNG and PDF.
        filepath=[os.path.join(root_dir, "loss/loss_{epoch:04d}.png"),
                  os.path.join(root_dir, "loss/loss_{epoch:04d}.pdf")],
        monitor=[dict(metrics=['d_loss', 'g_loss'],
                      type='line',
                      labels=["discriminator loss", "generator loss"],
                      title="Losses",
                      xlabel="epoch",
                      ylabel="loss")])
    sample_plot = Display(
        layout=(1, 1),
        dpi='auto',
        figsize=(10, 10),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[dict(metrics=['x_samples'],
                      title="Generated data",
                      type='img',
                      num_samples=100,
                      tile_shape=(10, 10))])

    # First run: uniform latent prior. All sizes are deliberately tiny for
    # a quick smoke test; the inline hints give full-run values.
    wgan = WGAN_GP(model_name="WGAN_GP_MNIST_z_uniform",
                   num_z=10,  # set to 100 for a full run
                   z_prior=Uniform1D(low=-1.0, high=1.0),
                   img_size=(28, 28, 1),
                   batch_size=16,  # set to 64 for a full run
                   num_conv_layers=3,  # set to 3 for a full run
                   num_gen_feature_maps=4,  # set to 32 for a full run
                   num_dis_feature_maps=4,  # set to 32 for a full run
                   metrics=['d_loss', 'g_loss'],
                   callbacks=[loss_plot, sample_plot],
                   num_epochs=4,  # set to 100 for a full run
                   # summary_freq=1,  # uncomment this for a full run
                   random_state=random_seed(),
                   log_path=os.path.join(root_dir, "logs"),
                   verbose=1)
    wgan.fit(x_train)

    # Second run: Gaussian latent prior, larger batch.
    wgan = WGAN_GP(model_name="WGAN_GP_MNIST_z_Gaussian",
                   num_z=10,  # set to 100 for a full run
                   z_prior=Gaussian1D(mu=0.0, sigma=1.0),
                   img_size=(28, 28, 1),
                   batch_size=32,  # set to 64 for a full run
                   num_conv_layers=3,  # set to 3 for a full run
                   num_gen_feature_maps=4,  # set to 32 for a full run
                   num_dis_feature_maps=4,  # set to 32 for a full run
                   metrics=['d_loss', 'g_loss'],
                   callbacks=[loss_plot, sample_plot],
                   num_epochs=4,  # set to 100 for a full run
                   # summary_freq=1,  # uncomment this for a full run
                   random_state=random_seed(),
                   log_path=os.path.join(root_dir, "logs"),
                   verbose=1)
    wgan.fit(x_train)
Example #21
0
def test_gan_mnist(show_figure=False, block_figure_on_end=False):
    """Train a small fully-connected GAN on MNIST with loss and generated-
    sample monitoring.

    :param show_figure: whether to render the monitoring figures on screen.
    :param block_figure_on_end: whether figures block at the end of training.
    """
    print("========== Test GAN on MNIST data ==========")

    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    x_train = x_train.astype(np.float32)
    x_test = x_test.astype(np.float32)

    loss_plot = Display(
        layout=(1, 1),
        dpi='auto',
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[dict(metrics=['d_loss', 'g_loss'],
                      type='line',
                      labels=["discriminator loss", "generator loss"],
                      title="Losses",
                      xlabel="epoch",
                      ylabel="loss")])
    sample_plot = Display(
        layout=(1, 1),
        dpi='auto',
        figsize=(10, 10),
        freq=1,
        show=show_figure,
        block_on_end=block_figure_on_end,
        monitor=[dict(metrics=['x_samples'],
                      title="Generated data",
                      type='img',
                      num_samples=100,
                      disp_dim=(28, 28),
                      tile_shape=(10, 10))])

    # Sizes below are deliberately tiny for a quick smoke test; for a full
    # run use num_z=100, 128-unit hidden layers, and ~100 epochs.
    num_z = 10
    gan = GAN(num_x=784,
              num_discriminator_hiddens=(16, ),
              discriminator_batchnorm=False,
              discriminator_act_funcs=('lrelu', ),
              discriminator_dropouts=(0.99, ),
              discriminator_learning_rate=0.001,
              num_z=num_z,
              generator_distribution=Uniform(low=(-1.0, ) * num_z,
                                             high=(1.0, ) * num_z),
              generator_batchnorm=False,
              num_generator_hiddens=(16, 16),
              generator_act_funcs=('lrelu', 'lrelu'),
              generator_out_func='sigmoid',
              generator_learning_rate=0.001,
              batch_size=32,
              metrics=['d_loss', 'g_loss'],
              callbacks=[loss_plot, sample_plot],
              num_epochs=4,
              random_state=random_seed(),
              verbose=1)

    gan.fit(x_train)
Example #22
0
def test_dcgan_image_saver():
    """Train tiny DCGANs on MNIST and CIFAR-10, saving a grid of generated
    samples to disk each epoch via the ImageSaver callback."""
    print("========== Test DCGAN with Image Saver ==========")

    np.random.seed(random_seed())

    num_data = 128
    (x_train, y_train), (x_test, y_test) = demo.load_mnist()
    # Rescale pixels from [0, 1] to [-1, 1]; keep only a small subset.
    x_train = (x_train[:num_data].astype(np.float32)
               .reshape([-1, 28, 28, 1]) / 0.5 - 1.)
    x_test = x_test.astype(np.float32).reshape([-1, 28, 28, 1]) / 0.5 - 1.

    root_dir = os.path.join(model_dir(), "male/DCGAN/imagesaver/mnist")
    saver = ImageSaver(freq=1,
                       filepath=os.path.join(root_dir,
                                             "mnist_{epoch:04d}.png"),
                       monitor=dict(metrics='x_samples',
                                    num_samples=100,
                                    tile_shape=(10, 10)))

    dcgan = DCGAN(model_name="DCGAN_MNIST",
                  num_z=10,  # set to 100 for a full run
                  img_size=(28, 28, 1),
                  batch_size=16,  # set to 64 for a full run
                  num_conv_layers=3,  # set to 3 for a full run
                  num_gen_feature_maps=4,  # set to 32 for a full run
                  num_dis_feature_maps=4,  # set to 32 for a full run
                  metrics=['d_loss', 'g_loss'],
                  callbacks=[saver],
                  num_epochs=4,  # set to 100 for a full run
                  random_state=random_seed(),
                  log_path=os.path.join(root_dir, "logs"),
                  verbose=1)
    dcgan.fit(x_train)

    # Repeat the same exercise on CIFAR-10 (32x32 RGB images).
    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_cifar10()
    x_train = (x_train[:num_data].astype(np.float32)
               .reshape([-1, 32, 32, 3]) / 0.5 - 1.)
    x_test = x_test.astype(np.float32).reshape([-1, 32, 32, 3]) / 0.5 - 1.

    root_dir = os.path.join(model_dir(), "male/DCGAN/imagesaver/cifar10")
    saver = ImageSaver(freq=1,
                       filepath=os.path.join(root_dir,
                                             "cifar10_{epoch:04d}.png"),
                       monitor=dict(metrics='x_samples',
                                    num_samples=100,
                                    tile_shape=(10, 10)))

    dcgan = DCGAN(model_name="DCGAN_CIFAR10",
                  num_z=10,  # set to 100 for a full run
                  img_size=(32, 32, 3),
                  batch_size=16,  # set to 64 for a full run
                  num_conv_layers=3,  # set to 3 for a full run
                  num_gen_feature_maps=4,  # set to 32 for a full run
                  num_dis_feature_maps=4,  # set to 32 for a full run
                  metrics=['d_loss', 'g_loss'],
                  callbacks=[saver],
                  num_epochs=4,  # set to 100 for a full run
                  random_state=random_seed(),
                  log_path=os.path.join(root_dir, "logs"),
                  verbose=1)
    dcgan.fit(x_train)
Example #23
0
def test_display_callbacks(show=False, block_figure_on_end=False):
    """Train a softmax GLM on MNIST to exercise the Display callbacks.

    Builds three Display callbacks (error curves, learning curves saved to
    disk, and learned-weight images), then fits a GLM classifier with them
    attached and reports train/test error.

    Parameters
    ----------
    show : bool
        Whether the Display figures are shown interactively.
    block_figure_on_end : bool
        Whether figures block at the end of training.
    """
    np.random.seed(random_seed())

    (x_train, y_train), (x_test, y_test) = demo.load_mnist()

    # Shuffle each split independently before merging them.
    train_order = np.random.permutation(x_train.shape[0])
    x_train, y_train = x_train[train_order], y_train[train_order]
    print("Number of training samples = {}".format(x_train.shape[0]))

    test_order = np.random.permutation(x_test.shape[0])
    x_test, y_test = x_test[test_order], y_test[test_order]
    print("Number of testing samples = {}".format(x_test.shape[0]))

    x = np.vstack([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    # Panel configs are built up-front and passed to the Display callbacks.
    err_panel = {
        'metrics': ['err', 'val_err'],
        'type': 'line',
        'title': "Learning errors",
        'xlabel': "epoch",
        'ylabel': "error",
    }
    err_display = Display(title="Error curves",
                          dpi='auto',
                          layout=(1, 1),
                          freq=1,
                          show=show,
                          block_on_end=block_figure_on_end,
                          monitor=[err_panel])

    loss_panel = {
        'metrics': ['loss', 'val_loss'],
        'type': 'line',
        'labels': ["training loss", "validation loss"],
        'title': "Learning losses",
        'xlabel': "epoch",
        'xlabel_params': {'fontsize': 50},
        'ylabel': "loss",
    }
    # Separate dict with the same contents as err_panel (not shared, so a
    # callback mutating one cannot affect the other).
    both_err_panel = {
        'metrics': ['err', 'val_err'],
        'type': 'line',
        'title': "Learning errors",
        'xlabel': "epoch",
        'ylabel': "error",
    }
    train_err_panel = {
        'metrics': ['err'],
        'type': 'line',
        'labels': ["training error"],
        'title': "Learning errors",
        'xlabel': "epoch",
        'ylabel': "error",
    }
    loss_paths = [
        os.path.join(model_dir(),
                     "male/callbacks/display/loss/loss_{epoch:04d}.png"),
        os.path.join(model_dir(),
                     "male/callbacks/display/loss/loss_{epoch:04d}.pdf"),
    ]
    loss_display = Display(title="Learning curves",
                           dpi='auto',
                           layout=(3, 1),
                           freq=1,
                           show=show,
                           block_on_end=block_figure_on_end,
                           filepath=loss_paths,
                           monitor=[loss_panel, both_err_panel,
                                    train_err_panel])

    weight_panel = {
        'metrics': ['weights'],
        'title': "Learned weights",
        'type': 'img',
        'tile_shape': (5, 2),
    }
    weight_path = os.path.join(
        model_dir(),
        "male/callbacks/display/weights/weights_{epoch:04d}.png")
    weight_display = Display(title="Filters",
                             dpi='auto',
                             layout=(1, 1),
                             figsize=(6, 15),
                             freq=1,
                             show=show,
                             block_on_end=block_figure_on_end,
                             filepath=weight_path,
                             monitor=[weight_panel])

    # cv marks the training rows with -1 and the test rows with 0 so the
    # model treats the appended test split as the validation fold.
    clf = GLM(model_name="display_callbacks",
              link='softmax',
              loss='softmax',
              optimizer=SGD(learning_rate=0.001),
              num_epochs=20,
              batch_size=100,
              task='classification',
              metrics=['loss', 'err'],
              callbacks=[loss_display, weight_display, err_display],
              cv=[-1] * x_train.shape[0] + [0] * x_test.shape[0],
              random_state=random_seed(),
              verbose=1)

    clf.fit(x, y)
    print("Training error = %.4f" % (1.0 - clf.score(x_train, y_train)))
    print("Testing error = %.4f" % (1.0 - clf.score(x_test, y_test)))