Example #1
def __denoising_autoencoder_mnist__():
    X_train, y_train, X_val, y_val, X_test, y_test = load_mnist()
    X_train = X_train.reshape(X_train.shape[0], -1)
    X_val   = X_val.reshape(X_val.shape[0], -1)
    X_test  = X_test.reshape(X_test.shape[0], -1)    
    
    print('Train data shape:  ', X_train.shape)
    print('Train labels shape:', y_train.shape)
    print('Test data shape:   ', X_test.shape)
    print('Test labels shape: ', y_test.shape)
    print('')
    
    ninput  = 28*28
    nhidden = 100

    net = denoising_autoencoder(layer_units=(ninput, nhidden, ninput), bias=True,
                                act_func = 'sigmoid', loss_type='euclidean', seed=12)
    tic = time.time()
    stats = net.train_with_SGD_with_noise(X_train, noise=gaussiannoise(rate=0.3, sd=0.3), learning_rate=0.1, 
                                          learning_rate_decay=0.95, reg=0.001, num_iters=2000, batchsize=128, mu=0.9)
    toc = time.time()
    print(toc - tic, 'sec elapsed')
    print('overall loss: ', net.loss_with_noise(X_train, X_train, reg=0.01, opt='test'))
    
    plot_net_output(net, stats, X_train)
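
Note: `gaussiannoise(rate=0.3, sd=0.3)` is defined elsewhere in this project. A minimal sketch of the usual denoising-autoencoder corruption step, assuming `rate` is the fraction of entries to corrupt and `sd` the standard deviation of the additive noise:

import numpy as np

def corrupt_with_gaussian_noise(X, rate=0.3, sd=0.3, rng=None):
    # Illustrative guess at gaussiannoise(): add zero-mean Gaussian noise
    # to a random subset of entries and leave the rest untouched.
    rng = np.random.default_rng() if rng is None else rng
    mask = rng.random(X.shape) < rate
    noise = rng.normal(0.0, sd, size=X.shape)
    return np.where(mask, X + noise, X)
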
Example #2
def train(args):
    # load data
    train_dataloader, test_dataloader = load_mnist()

    # load model
    model = CapsuleNet(args).cuda()

    # define loss and optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    sched = torch.optim.lr_scheduler.ExponentialLR(optimizer,
                                                   gamma=args.lr_decay)

    # start training
    for epoch in range(args.epochs):
        model.train()  # back to training mode (model.eval() is set below each epoch)
        for (i, data) in tqdm(enumerate(train_dataloader),
                              total=len(train_dataloader),
                              smoothing=0.9):
            img, label = data
            img, label = img.cuda(), label.cuda()
            label = F.one_hot(label, num_classes=args.num_class).float()
            optimizer.zero_grad()

            pred, recon = model(img, label)
            loss = margin_loss(label, pred, recon, img, args.lam_recon)
            loss.backward()
            optimizer.step()
        sched.step()  # apply the exponential learning-rate decay once per epoch
        model.eval()
        correct = 0
        for (i, data) in tqdm(enumerate(test_dataloader),
                              total=len(test_dataloader),
                              smoothing=0.9):
            img, label = data
            img, label = img.cuda(), label.cuda()
            label = F.one_hot(label, num_classes=args.num_class).float()
            pred, recon = model(img, label)
            y_pred = pred.data.max(dim=1)[1]
            y_true = label.data.max(dim=1)[1]
            correct += (y_pred.eq(y_true).cpu().sum())
        print(correct)
        print(len(test_dataloader.dataset))
        OA = correct.data.item() / len(test_dataloader.dataset)
        print('Test acc:', OA)
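
`margin_loss` is defined elsewhere in this project. A minimal sketch of the capsule-network margin loss from Sabour et al. (2017) with a scaled reconstruction term, assuming `pred` holds per-class capsule lengths in [0, 1] and `recon` the reconstructed image:

import torch
import torch.nn.functional as F

def margin_loss_sketch(label, pred, recon, img, lam_recon=0.0005):
    # Present classes should have long capsules (m+ = 0.9), absent ones
    # short capsules (m- = 0.1); the absent term is down-weighted by 0.5.
    present = label * torch.clamp(0.9 - pred, min=0.0) ** 2
    absent = 0.5 * (1.0 - label) * torch.clamp(pred - 0.1, min=0.0) ** 2
    margin = (present + absent).sum(dim=1).mean()
    # Reconstruction regularizer, scaled so it does not dominate the margin.
    recon_mse = F.mse_loss(recon, img.view(recon.shape))
    return margin + lam_recon * recon_mse
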
Example #3
    if TRAIN_SCRATCH and avg_loss < BEST_VAL:
        BEST_VAL = avg_loss
        torch.save(autoencoder.state_dict(), './history/conv_autoencoder.pt')
        print('Save Best Model in HISTORY\n')


if __name__ == '__main__':
    EPOCHS = 100
    BATCH_SIZE = 128
    LEARNING_RATE = 1e-3
    WEIGHT_DECAY = 1e-5
    LOG_INTERVAL = 100
    TRAIN_SCRATCH = False  # whether to train a model from scratch
    BEST_VAL = float('inf')  # record the best val loss

    train_loader, test_loader = data_utils.load_mnist(BATCH_SIZE)

    conv_autoencoder = ConvAutoencoder()
    cuda = torch.cuda.is_available()
    device = torch.device('cuda' if cuda else 'cpu')
    if cuda: conv_autoencoder.to(device)

    if TRAIN_SCRATCH:
        for epoch in range(EPOCHS):
            starttime = datetime.datetime.now()
            model_training(conv_autoencoder, train_loader, epoch)
            endtime = datetime.datetime.now()
            print(f'Trained an epoch in {(endtime - starttime).seconds} seconds')
            # evaluate on test set and save best model
            evaluation(conv_autoencoder, test_loader)
        print('Training complete with best validation loss {:.4f}'.format(
            BEST_VAL))
    else:
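
The snippet above is truncated after the `else:` branch. Its checkpointing idea, tracking the best validation loss and saving weights only on improvement, can be sketched as:

import torch

def save_if_best(model, avg_loss, best_val, path='./history/conv_autoencoder.pt'):
    # Persist the model only when validation loss improves; return the new best.
    if avg_loss < best_val:
        torch.save(model.state_dict(), path)
        return avg_loss
    return best_val
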
Example #4
def train(cat_dim,
          noise_dim,
          batch_size,
          n_batch_per_epoch,
          nb_epoch,
          dset="mnist"):
    """
    Train model

    Load the whole train data in memory for faster operations

    args: **kwargs (dict) keyword arguments that specify the model hyperparameters
    """
    general_utils.setup_logging("IG")
    # Load and rescale data
    if dset == "mnist":
        print("loading mnist data")
        X_real_train, Y_real_train, X_real_test, Y_real_test = data_utils.load_mnist(
        )
        # pick 1000 samples for testing
        # X_real_test = X_real_test[-1000:]
        # Y_real_test = Y_real_test[-1000:]

    img_dim = X_real_train.shape[-3:]
    epoch_size = n_batch_per_epoch * batch_size

    try:

        # Create optimizers
        opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        opt_discriminator = Adam(lr=2E-4,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08)
        # opt_discriminator = SGD(lr=1E-4, momentum=0.9, nesterov=True)

        # Load generator model
        generator_model = models.load("generator_deconv",
                                      cat_dim,
                                      noise_dim,
                                      img_dim,
                                      batch_size,
                                      dset=dset)
        # Load discriminator model
        discriminator_model = models.load("DCGAN_discriminator",
                                          cat_dim,
                                          noise_dim,
                                          img_dim,
                                          batch_size,
                                          dset=dset)

        generator_model.compile(loss='mse', optimizer=opt_discriminator)
        # stop the discriminator from learning while the generator is learning
        discriminator_model.trainable = False

        DCGAN_model = models.DCGAN(generator_model, discriminator_model,
                                   cat_dim, noise_dim)

        list_losses = ['binary_crossentropy', 'categorical_crossentropy']
        list_weights = [1, 1]
        DCGAN_model.compile(loss=list_losses,
                            loss_weights=list_weights,
                            optimizer=opt_dcgan)

        # Multiple discriminator losses
        # allow the discriminator to learn again
        discriminator_model.trainable = True
        discriminator_model.compile(loss=list_losses,
                                    loss_weights=list_weights,
                                    optimizer=opt_discriminator)
        # Start training
        print("Start training")
        for e in range(nb_epoch + 1):
            # Initialize progbar and batch counter
            # progbar = generic_utils.Progbar(epoch_size)
            batch_counter = 1
            start = time.time()
            print("Epoch: {}".format(e))
            for X_real_batch, Y_real_batch in zip(
                    data_utils.gen_batch(X_real_train, batch_size),
                    data_utils.gen_batch(Y_real_train, batch_size)):

                # Create a batch to feed the discriminator model
                X_disc_fake, y_disc_fake, noise_sample = data_utils.get_disc_batch(
                    X_real_batch,
                    Y_real_batch,
                    generator_model,
                    batch_size,
                    cat_dim,
                    noise_dim,
                    type="fake")
                X_disc_real, y_disc_real = data_utils.get_disc_batch(
                    X_real_batch,
                    Y_real_batch,
                    generator_model,
                    batch_size,
                    cat_dim,
                    noise_dim,
                    type="real")

                # Update the discriminator
                disc_loss_fake = discriminator_model.train_on_batch(
                    X_disc_fake, [y_disc_fake, Y_real_batch])
                disc_loss_real = discriminator_model.train_on_batch(
                    X_disc_real, [y_disc_real, Y_real_batch])
                disc_loss = disc_loss_fake + disc_loss_real
                # Create a batch to feed the generator model
                # X_noise, y_gen = data_utils.get_gen_batch(batch_size, cat_dim, noise_dim)

                # Freeze the discriminator
                discriminator_model.trainable = False
                gen_loss = DCGAN_model.train_on_batch(
                    [Y_real_batch, noise_sample], [y_disc_real, Y_real_batch])
                # Unfreeze the discriminator
                discriminator_model.trainable = True
                # training validation
                p_real_batch, p_Y_batch = discriminator_model.predict(
                    X_real_batch, batch_size=batch_size)
                acc_train = data_utils.accuracy(p_Y_batch, Y_real_batch)
                batch_counter += 1
                # progbar.add(batch_size, values=[("D tot", disc_loss[0]),
                #                                 ("D cat", disc_loss[2]),
                #                                 ("G tot", gen_loss[0]),
                #                                 ("G cat", gen_loss[2]),
                #                                 ("P Real:", p_real_batch),
                #                                 ("Q acc", acc_train)])

                # Save images for visualization
                if batch_counter % (n_batch_per_epoch //
                                    2) == 0 and e % 10 == 0:
                    data_utils.plot_generated_batch(X_real_batch,
                                                    generator_model,
                                                    batch_size, cat_dim,
                                                    noise_dim, e)
                if batch_counter >= n_batch_per_epoch:
                    break

            print("")
            print('Epoch %s/%s, Time: %s' %
                  (e + 1, nb_epoch, time.time() - start))
            _, p_Y_test = discriminator_model.predict(
                X_real_test, batch_size=X_real_test.shape[0])
            acc_test = data_utils.accuracy(p_Y_test, Y_real_test)
            print("Epoch: {} Accuracy: {}".format(e + 1, acc_test))
            if e % 1000 == 0:
                gen_weights_path = os.path.join(
                    '../../models/IG/gen_weights.h5')
                generator_model.save_weights(gen_weights_path, overwrite=True)

                disc_weights_path = os.path.join(
                    '../../models/IG/disc_weights.h5')
                discriminator_model.save_weights(disc_weights_path,
                                                 overwrite=True)

                DCGAN_weights_path = os.path.join(
                    '../../models/IG/DCGAN_weights.h5')
                DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)

    except KeyboardInterrupt:
        pass
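
`data_utils.gen_batch` is this project's batch iterator. Because the loop above zips two generators over X and Y, a sketch that slices sequentially (an assumption about the real implementation) keeps images and labels aligned, which independent random sampling would not:

def gen_batch(X, batch_size):
    # Yield consecutive minibatches forever, rewinding at the end of the
    # array; the caller breaks out after n_batch_per_epoch batches.
    i = 0
    while True:
        if i + batch_size > X.shape[0]:
            i = 0
        yield X[i:i + batch_size]
        i += batch_size
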
Example #5
def train(**kwargs):
    """
    Train model

    Load the whole train data in memory for faster operations

    args: **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    batch_size = kwargs["batch_size"]
    n_batch_per_epoch = kwargs["n_batch_per_epoch"]
    nb_epoch = kwargs["nb_epoch"]
    generator = kwargs["generator"]
    model_name = kwargs["model_name"]
    image_data_format = kwargs["image_data_format"]
    img_dim = kwargs["img_dim"]
    bn_mode = kwargs["bn_mode"]
    label_smoothing = kwargs["label_smoothing"]
    label_flipping = kwargs["label_flipping"]
    noise_scale = kwargs["noise_scale"]
    dset = kwargs["dset"]
    use_mbd = kwargs["use_mbd"]
    epoch_size = n_batch_per_epoch * batch_size

    # Setup environment (logging directory etc)
    general_utils.setup_logging(model_name)

    # Load and rescale data
    if dset == "celebA":
        X_real_train = data_utils.load_celebA(img_dim, image_data_format)
    if dset == "mnist":
        X_real_train, _, _, _ = data_utils.load_mnist(image_data_format)
    img_dim = X_real_train.shape[-3:]
    noise_dim = (100, )

    try:

        # Create optimizers
        opt_dcgan = Adam(lr=1E-3, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
        opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)

        # Load generator model
        generator_model = models.load("generator_%s" % generator,
                                      noise_dim,
                                      img_dim,
                                      bn_mode,
                                      batch_size,
                                      dset=dset,
                                      use_mbd=use_mbd)
        # Load discriminator model
        discriminator_model = models.load("DCGAN_discriminator",
                                          noise_dim,
                                          img_dim,
                                          bn_mode,
                                          batch_size,
                                          dset=dset,
                                          use_mbd=use_mbd)

        generator_model.compile(loss='mse', optimizer=opt_discriminator)
        discriminator_model.trainable = False

        DCGAN_model = models.DCGAN(generator_model, discriminator_model,
                                   noise_dim, img_dim)

        loss = ['binary_crossentropy']
        loss_weights = [1]
        DCGAN_model.compile(loss=loss,
                            loss_weights=loss_weights,
                            optimizer=opt_dcgan)

        discriminator_model.trainable = True
        discriminator_model.compile(loss='binary_crossentropy',
                                    optimizer=opt_discriminator)

        gen_loss = 100
        disc_loss = 100

        # Start training
        print("Start training")
        for e in range(nb_epoch):
            # Initialize progbar and batch counter
            progbar = generic_utils.Progbar(epoch_size)
            batch_counter = 1
            start = time.time()

            for X_real_batch in data_utils.gen_batch(X_real_train, batch_size):

                # Create a batch to feed the discriminator model
                X_disc, y_disc = data_utils.get_disc_batch(
                    X_real_batch,
                    generator_model,
                    batch_counter,
                    batch_size,
                    noise_dim,
                    noise_scale=noise_scale,
                    label_smoothing=label_smoothing,
                    label_flipping=label_flipping)

                # Update the discriminator
                disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)

                # Create a batch to feed the generator model
                X_gen, y_gen = data_utils.get_gen_batch(
                    batch_size, noise_dim, noise_scale=noise_scale)

                # Freeze the discriminator
                discriminator_model.trainable = False
                gen_loss = DCGAN_model.train_on_batch(X_gen, y_gen)
                # Unfreeze the discriminator
                discriminator_model.trainable = True

                batch_counter += 1
                progbar.add(batch_size,
                            values=[("D logloss", disc_loss),
                                    ("G logloss", gen_loss)])

                # Save images for visualization
                if batch_counter % 100 == 0:
                    data_utils.plot_generated_batch(X_real_batch,
                                                    generator_model,
                                                    batch_size, noise_dim,
                                                    image_data_format)

                if batch_counter >= n_batch_per_epoch:
                    break

            print("")
            print('Epoch %s/%s, Time: %s' %
                  (e + 1, nb_epoch, time.time() - start))

            if e % 5 == 0:
                gen_weights_path = os.path.join(
                    '../../models/%s/gen_weights_epoch%s.h5' % (model_name, e))
                generator_model.save_weights(gen_weights_path, overwrite=True)

                disc_weights_path = os.path.join(
                    '../../models/%s/disc_weights_epoch%s.h5' %
                    (model_name, e))
                discriminator_model.save_weights(disc_weights_path,
                                                 overwrite=True)

                DCGAN_weights_path = os.path.join(
                    '../../models/%s/DCGAN_weights_epoch%s.h5' %
                    (model_name, e))
                DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)

    except KeyboardInterrupt:
        pass
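
`get_disc_batch` takes `label_smoothing` and `label_flipping` knobs. A hedged sketch of the two standard GAN tricks those names refer to, assuming smoothing softens the "real" targets and flipping swaps a fraction of labels:

import numpy as np

def disc_targets(batch_size, real, label_smoothing=False, label_flipping=0.0):
    # Discriminator targets: 1 for real images, 0 for generated ones.
    if real and label_smoothing:
        # One-sided smoothing: soft real targets fight overconfidence.
        y = np.random.uniform(0.8, 1.0, size=batch_size)
    else:
        y = np.full(batch_size, 1.0 if real else 0.0)
    if label_flipping > 0:
        # Occasionally flip labels so the discriminator never saturates.
        flip = np.random.rand(batch_size) < label_flipping
        y[flip] = 1.0 - y[flip]
    return y
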
Example #6
import pickle
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sys
sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist

train_len = 1000
start = 0

the_colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'darkblue',
              'lawngreen', 'orange', 'violet']

showSequence = True

data_set = load_mnist()[0]


target_values = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

chosens = [(index,data_set[1][index])  for index in range(start, start + train_len) if data_set[1][index] in target_values]

sorted_chosens = np.asarray(sorted(chosens, key=lambda target: target[1]))
X_data = np.asarray(data_set[0][sorted_chosens[:,0]])
y_data = np.asarray([data_set[1][sorted_chosens[:,0]]])[0]

clrs = [the_colors[k] for i in range(train_len) for k in target_values if y_data[i] == k]
if not showSequence:
    params = {'legend.fontsize': 6}
    plt.rcParams.update(params)
patches = []
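
The snippet is cut off right after `patches = []`. A plausible continuation, building one legend patch per digit class with the colors defined above (an assumption about where the original was heading):

import matplotlib.patches as mpatches
import matplotlib.pyplot as plt

# One legend entry per digit, colored consistently with the scatter points.
patches = [mpatches.Patch(color=the_colors[k], label=str(k))
           for k in target_values]
plt.legend(handles=patches, loc='best')
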
Example #7
import urllib.request
from urllib.parse import urlparse
import sys

# Gets your bucket name from command line
try:
    bucket = str(sys.argv[1])
except IndexError:
    print("Please pass your bucket name as a commandline argument")
    sys.exit(1)

# Download dataset from pinned commit
url = "https://github.com/aws/amazon-sagemaker-examples/raw/af6667bd0be3c9cdec23fecda7f0be6d0e3fa3ea/sagemaker-debugger/xgboost_realtime_analysis/data_utils.py"
urllib.request.urlretrieve(url, "data_utils.py")

from data_utils import load_mnist, upload_to_s3

prefix = "sagemaker/xgboost"
train_file, validation_file = load_mnist()
upload_to_s3(train_file, bucket, f"{prefix}/train/mnist.train.libsvm")
upload_to_s3(validation_file, bucket,
             f"{prefix}/validation/mnist.validation.libsvm")

# Remove downloaded file
import os

os.remove("data_utils.py")
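
`upload_to_s3` comes from the downloaded `data_utils.py`; the pinned script's actual code is not shown here. An equivalent sketch in plain boto3:

import boto3

def upload_to_s3(filename, bucket, key):
    # Copy a local file to s3://<bucket>/<key>.
    boto3.client("s3").upload_file(filename, bucket, key)
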
Example #8
from __future__ import division

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
from cv2 import matchShapes
from skimage.metrics import structural_similarity as compare_ssim
import sys
sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist


train_data, verification_data, test_data = load_mnist()


index_1 = 7
index_2 = 10
img_arr_1 = train_data[0][index_1].reshape((28, 28))
img_val_1 = train_data[1][index_1]
img_arr_2 = train_data[0][index_2].reshape((28, 28))
img_val_2 = train_data[1][index_2]
ret, thresh_1 = cv2.threshold(np.uint8(img_arr_1 * 255).copy(), 127, 255, cv2.THRESH_BINARY)
ret, thresh_2 = cv2.threshold(np.uint8(img_arr_2 * 255).copy(), 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh_1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt1 = contours[0]
contours, hierarchy = cv2.findContours(thresh_2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt2 = contours[0]
fmt = "{:4.2f}"
match_I1 = fmt.format(matchShapes(cnt1, cnt2, cv2.CONTOURS_MATCH_I1, 0))
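
`compare_ssim` moved in recent scikit-image releases; with the current API, the same structural comparison of the two digits reads as follows (a sketch, assuming both images are floats in [0, 1]):

from skimage.metrics import structural_similarity

ssim_score = structural_similarity(img_arr_1, img_arr_2, data_range=1.0)
print("SSIM: {:4.2f}".format(ssim_score))
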
Example #9
def eval(**kwargs):

    # Roll out the parameters
    batch_size = kwargs["batch_size"]
    generator = kwargs["generator"]
    model_name = kwargs["model_name"]
    image_data_format = kwargs["image_data_format"]
    img_dim = kwargs["img_dim"]
    cont_dim = (kwargs["cont_dim"],)
    cat_dim = (kwargs["cat_dim"],)
    noise_dim = (kwargs["noise_dim"],)
    bn_mode = kwargs["bn_mode"]
    noise_scale = kwargs["noise_scale"]
    dset = kwargs["dset"]
    eval_epoch = kwargs["eval_epoch"]

    # Setup environment (logging directory etc)
    general_utils.setup_logging(**kwargs)

    # Load and rescale data
    if dset == "RGZ":
        X_real_train = data_utils.load_RGZ(img_dim, image_data_format)
    if dset == "mnist":
        X_real_train, _, _, _ = data_utils.load_mnist(image_data_format)
    img_dim = X_real_train.shape[-3:]

    # Load generator model
    generator_model = models.load("generator_%s" % generator,
                                  cat_dim,
                                  cont_dim,
                                  noise_dim,
                                  img_dim,
                                  bn_mode,
                                  batch_size,
                                  dset=dset)

    # Load colorization model
    generator_model.load_weights("../../models/%s/gen_weights_epoch%05d.h5" %
                                 (model_name, eval_epoch))

    X_plot = []
    # Vary the categorical variable
    for i in range(cat_dim[0]):
        X_noise = data_utils.sample_noise(noise_scale, batch_size, noise_dim)
        X_cont = data_utils.sample_noise(noise_scale, batch_size, cont_dim)
        X_cont = np.repeat(X_cont[:1, :], batch_size, axis=0)  # fix continuous noise
        X_cat = np.zeros((batch_size, cat_dim[0]), dtype='float32')
        X_cat[:, i] = 1  # always the same categorical value

        X_gen = generator_model.predict([X_cat, X_cont, X_noise])
        X_gen = data_utils.inverse_normalization(X_gen)

        if image_data_format == "channels_first":
            X_gen = X_gen.transpose(0,2,3,1)

        X_gen = [X_gen[i] for i in range(len(X_gen))]
        X_plot.append(np.concatenate(X_gen, axis=1))
    X_plot = np.concatenate(X_plot, axis=0)

    plt.figure(figsize=(8,10))
    if X_plot.shape[-1] == 1:
        plt.imshow(X_plot[:, :, 0], cmap="gray")
    else:
        plt.imshow(X_plot)
    plt.xticks([])
    plt.yticks([])
    plt.ylabel("Varying categorical factor", fontsize=28, labelpad=60)

    plt.annotate('', xy=(-0.05, 0), xycoords='axes fraction', xytext=(-0.05, 1),
                 arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
    plt.tight_layout()
    plt.savefig(os.path.join("../../figures", model_name, "varying_categorical.png"))
    plt.clf()
    plt.close()

    # Vary the continuous variables
    X_plot = []
    # First get the extent of the noise sampling
    x = np.ravel(data_utils.sample_noise(noise_scale, batch_size * 20000, cont_dim))
    # Define interpolation points
    x = np.linspace(x.min(), x.max(), num=batch_size)
    for i in range(batch_size):
        X_noise = data_utils.sample_noise(noise_scale, batch_size, noise_dim)
        X_cont = np.concatenate([np.array([x[i], x[j]]).reshape(1, -1) for j in range(batch_size)], axis=0)
        X_cat = np.zeros((batch_size, cat_dim[0]), dtype='float32')
        X_cat[:, 1] = 1  # always the same categorical value

        X_gen = generator_model.predict([X_cat, X_cont, X_noise])
        X_gen = data_utils.inverse_normalization(X_gen)
        if image_data_format == "channels_first":
            X_gen = X_gen.transpose(0,2,3,1)
        X_gen = [X_gen[i] for i in range(len(X_gen))]
        X_plot.append(np.concatenate(X_gen, axis=1))
    X_plot = np.concatenate(X_plot, axis=0)

    plt.figure(figsize=(10,10))
    if X_plot.shape[-1] == 1:
        plt.imshow(X_plot[:, :, 0], cmap="gray")
    else:
        plt.imshow(X_plot)
    plt.xticks([])
    plt.yticks([])
    plt.ylabel("Varying continuous factor 1", fontsize=28, labelpad=60)
    plt.annotate('', xy=(-0.05, 0), xycoords='axes fraction', xytext=(-0.05, 1),
                 arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
    plt.xlabel("Varying continuous factor 2", fontsize=28, labelpad=60)
    plt.annotate('', xy=(1, -0.05), xycoords='axes fraction', xytext=(0, -0.05),
                 arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
    plt.tight_layout()
    plt.savefig(os.path.join("../../figures", model_name, "varying_continuous.png"))
    plt.clf()
    plt.close()
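
`data_utils.sample_noise(noise_scale, batch_size, dim)` is assumed here to draw bounded, symmetric noise; a minimal sketch consistent with how the code above estimates its extent by sampling many values and taking the min/max:

import numpy as np

def sample_noise(noise_scale, batch_size, dim):
    # Uniform noise in [-noise_scale, noise_scale), shape (batch_size, *dim).
    return np.random.uniform(-noise_scale, noise_scale,
                             size=(batch_size,) + tuple(dim)).astype('float32')
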
Example #10
def train(**kwargs):
    """
    Train model

    Load the whole train data in memory for faster operations

    args: **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    batch_size = kwargs["batch_size"]
    n_batch_per_epoch = kwargs["n_batch_per_epoch"]
    nb_epoch = kwargs["nb_epoch"]
    generator = kwargs["generator"]
    model_name = kwargs["model_name"]
    image_data_format = kwargs["image_data_format"]
    celebA_img_dim = kwargs["celebA_img_dim"]
    cont_dim = (kwargs["cont_dim"], )
    cat_dim = (kwargs["cat_dim"], )
    noise_dim = (kwargs["noise_dim"], )
    label_smoothing = kwargs["label_smoothing"]
    label_flipping = kwargs["label_flipping"]
    noise_scale = kwargs["noise_scale"]
    dset = kwargs["dset"]
    use_mbd = kwargs["use_mbd"]
    load_from_dir = kwargs["load_from_dir"]
    target_size = kwargs["target_size"]
    save_weights_every_n_epochs = kwargs["save_weights_every_n_epochs"]
    save_only_last_n_weights = kwargs["save_only_last_n_weights"]
    visualize_images_every_n_epochs = kwargs["visualize_images_every_n_epochs"]
    epoch_size = n_batch_per_epoch * batch_size

    # Setup environment (logging directory etc)
    general_utils.setup_logging(**kwargs)

    # Load and rescale data
    if dset == "celebA":
        X_real_train = data_utils.load_celebA(celebA_img_dim,
                                              image_data_format)
    elif dset == "mnist":
        X_real_train, _, _, _ = data_utils.load_mnist(image_data_format)
    else:
        X_batch_gen = data_utils.data_generator_from_dir(
            dset, target_size, batch_size)
        X_real_train = next(X_batch_gen)
    img_dim = X_real_train.shape[-3:]

    try:

        # Create optimizers
        opt_dcgan = Adam(lr=1E-4, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
        opt_discriminator = Adam(lr=1E-4,
                                 beta_1=0.5,
                                 beta_2=0.999,
                                 epsilon=1e-08)
        # opt_discriminator = SGD(lr=1E-4, momentum=0.9, nesterov=True)

        # Load generator model
        generator_model = models.load("generator_%s" % generator,
                                      cat_dim,
                                      cont_dim,
                                      noise_dim,
                                      img_dim,
                                      batch_size,
                                      dset=dset,
                                      use_mbd=use_mbd)
        # Load discriminator model
        discriminator_model = models.load("DCGAN_discriminator",
                                          cat_dim,
                                          cont_dim,
                                          noise_dim,
                                          img_dim,
                                          batch_size,
                                          dset=dset,
                                          use_mbd=use_mbd)

        generator_model.compile(loss='mse', optimizer=opt_discriminator)
        discriminator_model.trainable = False

        DCGAN_model = models.DCGAN(generator_model, discriminator_model,
                                   cat_dim, cont_dim, noise_dim)

        list_losses = [
            'binary_crossentropy', 'categorical_crossentropy', gaussian_loss
        ]
        list_weights = [1, 1, 1]
        DCGAN_model.compile(loss=list_losses,
                            loss_weights=list_weights,
                            optimizer=opt_dcgan)

        # Multiple discriminator losses
        discriminator_model.trainable = True
        discriminator_model.compile(loss=list_losses,
                                    loss_weights=list_weights,
                                    optimizer=opt_discriminator)

        gen_loss = 100
        disc_loss = 100

        if not load_from_dir:
            X_batch_gen = data_utils.gen_batch(X_real_train, batch_size)

        # Start training
        print("Start training")

        disc_total_losses = []
        disc_log_losses = []
        disc_cat_losses = []
        disc_cont_losses = []
        gen_total_losses = []
        gen_log_losses = []
        gen_cat_losses = []
        gen_cont_losses = []

        start = time.time()

        for e in range(nb_epoch):

            print('--------------------------------------------')
            print('[{0:%Y/%m/%d %H:%M:%S}] Epoch {1:d}/{2:d}\n'.format(
                datetime.datetime.now(), e + 1, nb_epoch))

            # Initialize progbar and batch counter
            progbar = generic_utils.Progbar(epoch_size)
            batch_counter = 1

            disc_total_loss_batch = 0
            disc_log_loss_batch = 0
            disc_cat_loss_batch = 0
            disc_cont_loss_batch = 0
            gen_total_loss_batch = 0
            gen_log_loss_batch = 0
            gen_cat_loss_batch = 0
            gen_cont_loss_batch = 0

            for batch_counter in range(n_batch_per_epoch):

                # Load data
                X_real_batch = next(X_batch_gen)

                # Create a batch to feed the discriminator model
                X_disc, y_disc, y_cat, y_cont = data_utils.get_disc_batch(
                    X_real_batch,
                    generator_model,
                    batch_counter,
                    batch_size,
                    cat_dim,
                    cont_dim,
                    noise_dim,
                    noise_scale=noise_scale,
                    label_smoothing=label_smoothing,
                    label_flipping=label_flipping)

                # Update the discriminator
                disc_loss = discriminator_model.train_on_batch(
                    X_disc, [y_disc, y_cat, y_cont])

                # Create a batch to feed the generator model
                X_gen, y_gen, y_cat, y_cont, y_cont_target = data_utils.get_gen_batch(
                    batch_size,
                    cat_dim,
                    cont_dim,
                    noise_dim,
                    noise_scale=noise_scale)

                # Freeze the discriminator
                discriminator_model.trainable = False
                gen_loss = DCGAN_model.train_on_batch(
                    [y_cat, y_cont, X_gen], [y_gen, y_cat, y_cont_target])
                # Unfreeze the discriminator
                discriminator_model.trainable = True

                progbar.add(batch_size,
                            values=[("D tot", disc_loss[0]),
                                    ("D log", disc_loss[1]),
                                    ("D cat", disc_loss[2]),
                                    ("D cont", disc_loss[3]),
                                    ("G tot", gen_loss[0]),
                                    ("G log", gen_loss[1]),
                                    ("G cat", gen_loss[2]),
                                    ("G cont", gen_loss[3])])

                disc_total_loss_batch += disc_loss[0]
                disc_log_loss_batch += disc_loss[1]
                disc_cat_loss_batch += disc_loss[2]
                disc_cont_loss_batch += disc_loss[3]
                gen_total_loss_batch += gen_loss[0]
                gen_log_loss_batch += gen_loss[1]
                gen_cat_loss_batch += gen_loss[2]
                gen_cont_loss_batch += gen_loss[3]

                # # Save images for visualization
                # if batch_counter % (n_batch_per_epoch / 2) == 0:
                #     data_utils.plot_generated_batch(X_real_batch, generator_model, e,
                #                                     batch_size, cat_dim, cont_dim, noise_dim,
                #                                     image_data_format, model_name)

            disc_total_losses.append(disc_total_loss_batch / n_batch_per_epoch)
            disc_log_losses.append(disc_log_loss_batch / n_batch_per_epoch)
            disc_cat_losses.append(disc_cat_loss_batch / n_batch_per_epoch)
            disc_cont_losses.append(disc_cont_loss_batch / n_batch_per_epoch)
            gen_total_losses.append(gen_total_loss_batch / n_batch_per_epoch)
            gen_log_losses.append(gen_log_loss_batch / n_batch_per_epoch)
            gen_cat_losses.append(gen_cat_loss_batch / n_batch_per_epoch)
            gen_cont_losses.append(gen_cont_loss_batch / n_batch_per_epoch)

            # Save images for visualization
            if (e + 1) % visualize_images_every_n_epochs == 0:
                data_utils.plot_generated_batch(X_real_batch, generator_model,
                                                e, batch_size, cat_dim,
                                                cont_dim, noise_dim,
                                                image_data_format, model_name)
                data_utils.plot_losses(disc_total_losses, disc_log_losses,
                                       disc_cat_losses, disc_cont_losses,
                                       gen_total_losses, gen_log_losses,
                                       gen_cat_losses, gen_cont_losses,
                                       model_name)

            if (e + 1) % save_weights_every_n_epochs == 0:

                print("Saving weights...")

                # Delete all but the last n weights
                general_utils.purge_weights(save_only_last_n_weights,
                                            model_name)

                # Save weights
                gen_weights_path = os.path.join(
                    '../../models/%s/gen_weights_epoch%05d.h5' %
                    (model_name, e))
                generator_model.save_weights(gen_weights_path, overwrite=True)

                disc_weights_path = os.path.join(
                    '../../models/%s/disc_weights_epoch%05d.h5' %
                    (model_name, e))
                discriminator_model.save_weights(disc_weights_path,
                                                 overwrite=True)

                DCGAN_weights_path = os.path.join(
                    '../../models/%s/DCGAN_weights_epoch%05d.h5' %
                    (model_name, e))
                DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)

            end = time.time()
            print("")
            print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, end - start))
            start = end

    except KeyboardInterrupt:
        pass

    gen_weights_path = '../../models/%s/generator_latest.h5' % (model_name)
    print("Saving", gen_weights_path)
    generator_model.save(gen_weights_path, overwrite=True)
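
`gaussian_loss` is InfoGAN's mutual-information term for the continuous codes: the negative log-likelihood of the sampled code under the Gaussian predicted by the auxiliary head. One common formulation, sketched in NumPy (the project's Keras version would operate on tensors):

import numpy as np

def gaussian_nll(code, mean, log_sigma):
    # -log N(code; mean, exp(log_sigma)^2), averaged over batch and dims,
    # dropping the constant 0.5 * log(2 * pi) term.
    return np.mean(log_sigma + 0.5 * ((code - mean) / np.exp(log_sigma)) ** 2)
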
Example #11
import pickle
import sys
sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist


train_set, valid_set, test_set = load_mnist()


chosen_index = 1250

test_x_chosen = test_set[0][chosen_index]
test_y_chosen = test_set[1][chosen_index]

test_chosen = [test_set[0][chosen_index], test_set[1][chosen_index]]

filepath = "../data/savedImage_" + str(test_y_chosen) + ".p"

pickle.dump(test_chosen, open(filepath, "wb"))

test_saved = pickle.load(open(filepath, "rb"))

test_x_saved, test_y_saved = test_saved

import matplotlib.cm as cm
import matplotlib.pyplot as plt

plt.title(test_y_saved, fontsize=24)
plt.imshow(test_x_saved.reshape((28, 28)), cmap=cm.Greys_r)
plt.show()
Example #12
from time import time

import numpy as np
from sklearn import manifold
import sys
sys.path.insert(0, '../mlp_test')  # import path assumed, matching the other examples
from data_utils import load_mnist


# Spectral embedding of the digits dataset
def cse(X, nr_components=2):
    print("Computing Spectral embedding")
    embedder = manifold.SpectralEmbedding(n_components=nr_components, random_state=0,
                                          eigen_solver="arpack")
    return embedder.fit_transform(X)


def tsne(X, nr_components=2):
    print("Computing t-SNE embedding")
    tsne = manifold.TSNE(n_components=nr_components, init='random', random_state=0)
    return tsne.fit_transform(X)


if __name__ == '__main__':
    train_data = load_mnist()[0]

    chosens = [index for index in range(start, start + testlen) if train_data[1][index] in target_values]

    indexes = np.asarray([i for i in chosens])
    X_data = np.asarray([train_data[0][i] for i in chosens])
    y_data = np.asarray([train_data[1][i] for i in chosens])


    if showAll:
        t0 = time()
        plot_embedding(tsne(X_data), y_data,
                       "t-SNE embedding of the digits (time %.2fs)" %
                       (time() - t0))
        t0 = time()
        plot_embedding(cse(X_data), y_data,
Example #13
import pywt
import sys
sys.path.insert(0, '../mlp_test')
from data_utils import load_mnist

wavelet = 'sym2'  # pywt.wavedec2's second argument is the wavelet name, not the mode
level = None

direction = ['h', 'v', 'd']

test_data = load_mnist()[2]

chosen_index = 7

test_x_chosen = test_data[0][chosen_index]
test_y_chosen = test_data[1][chosen_index]

pic_arr = test_x_chosen.reshape((28, 28))

pic_wts = pywt.wavedec2(pic_arr, wavelet, level=level)

length = len(pic_wts)

import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator


ax = plt.subplot(length, 3, 2)
ax.get_yaxis().set_major_locator(MaxNLocator(integer=True))
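
`pywt.wavedec2` returns the coarsest approximation followed by one (horizontal, vertical, diagonal) detail tuple per level, which is what the `direction = ['h', 'v', 'd']` labels above index into:

import numpy as np
import pywt

coeffs = pywt.wavedec2(np.zeros((28, 28)), 'sym2')
cA = coeffs[0]  # coarsest approximation coefficients
for lvl, (cH, cV, cD) in enumerate(coeffs[1:], start=1):
    # One ('h', 'v', 'd') detail triple per decomposition level.
    print(lvl, cH.shape, cV.shape, cD.shape)
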
Example #14
def train(**kwargs):
    """
    Train model

    Load the whole train data in memory for faster operations

    args: **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    batch_size = kwargs["batch_size"]
    n_batch_per_epoch = kwargs["n_batch_per_epoch"]
    nb_epoch = kwargs["nb_epoch"]
    generator = kwargs["generator"]
    model_name = kwargs["model_name"]
    image_dim_ordering = kwargs["image_dim_ordering"]
    img_dim = kwargs["img_dim"]
    bn_mode = kwargs["bn_mode"]
    label_smoothing = kwargs["label_smoothing"]
    label_flipping = kwargs["label_flipping"]
    noise_scale = kwargs["noise_scale"]
    dset = kwargs["dset"]
    use_mbd = kwargs["use_mbd"]
    epoch_size = n_batch_per_epoch * batch_size

    # Setup environment (logging directory etc)
    general_utils.setup_logging(model_name)

    # Load and rescale data
    if dset == "celebA":
        X_real_train = data_utils.load_celebA(img_dim, image_dim_ordering)
    if dset == "mnist":
        X_real_train, _, _, _ = data_utils.load_mnist(image_dim_ordering)
    img_dim = X_real_train.shape[-3:]
    noise_dim = (100,)

    try:

        # Create optimizers
        opt_dcgan = Adam(lr=1E-3, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
        opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)

        # Load generator model
        generator_model = models.load("generator_%s" % generator,
                                      noise_dim,
                                      img_dim,
                                      bn_mode,
                                      batch_size,
                                      dset=dset,
                                      use_mbd=use_mbd)
        # Load discriminator model
        discriminator_model = models.load("DCGAN_discriminator",
                                          noise_dim,
                                          img_dim,
                                          bn_mode,
                                          batch_size,
                                          dset=dset,
                                          use_mbd=use_mbd)

        generator_model.compile(loss='mse', optimizer=opt_discriminator)
        discriminator_model.trainable = False

        DCGAN_model = models.DCGAN(generator_model,
                                   discriminator_model,
                                   noise_dim,
                                   img_dim)

        loss = ['binary_crossentropy']
        loss_weights = [1]
        DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)

        discriminator_model.trainable = True
        discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)

        gen_loss = 100
        disc_loss = 100

        # Start training
        print("Start training")
        for e in range(nb_epoch):
            # Initialize progbar and batch counter
            progbar = generic_utils.Progbar(epoch_size)
            batch_counter = 1
            start = time.time()

            for X_real_batch in data_utils.gen_batch(X_real_train, batch_size):

                # Create a batch to feed the discriminator model
                X_disc, y_disc = data_utils.get_disc_batch(X_real_batch,
                                                           generator_model,
                                                           batch_counter,
                                                           batch_size,
                                                           noise_dim,
                                                           noise_scale=noise_scale,
                                                           label_smoothing=label_smoothing,
                                                           label_flipping=label_flipping)

                # Update the discriminator
                disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)

                # Create a batch to feed the generator model
                X_gen, y_gen = data_utils.get_gen_batch(batch_size, noise_dim, noise_scale=noise_scale)

                # Freeze the discriminator
                discriminator_model.trainable = False
                gen_loss = DCGAN_model.train_on_batch(X_gen, y_gen)
                # Unfreeze the discriminator
                discriminator_model.trainable = True

                batch_counter += 1
                progbar.add(batch_size, values=[("D logloss", disc_loss),
                                                ("G logloss", gen_loss)])

                # Save images for visualization
                if batch_counter % 100 == 0:
                    data_utils.plot_generated_batch(X_real_batch, generator_model,
                                                    batch_size, noise_dim, image_dim_ordering)

                if batch_counter >= n_batch_per_epoch:
                    break

            print("")
            print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))

            if e % 5 == 0:
                gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%s.h5' % (model_name, e))
                generator_model.save_weights(gen_weights_path, overwrite=True)

                disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%s.h5' % (model_name, e))
                discriminator_model.save_weights(disc_weights_path, overwrite=True)

                DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%s.h5' % (model_name, e))
                DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)

    except KeyboardInterrupt:
        pass
Example #15
import numpy as np
import sys
sys.path.insert(0, '../mlp_test')  # import path assumed, matching the other examples
from data_utils import load_mnist

train_len = 2000
start = 0


target_values = np.array([1])
metrics = ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
           'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
           'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
           'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
           'yule']

# The metrics 'mahalanobis', 'seuclidean', 'cosine' are not directly usable

train_data, validation_data, test_data = load_mnist()

chosens = [(index, train_data[1][index]) for index in range(start, start + train_len) if train_data[1][index] in target_values]

sorted_chosens = np.asarray(sorted(chosens, key=lambda target: target[1]))
X_data = train_data[0][start:start + train_len]
y_data = train_data[1][start:start + train_len]

X_test = test_data[0]
y_test = test_data[1]

len_test = len(test_data[1])
from scipy.spatial.distance import cdist

def closest_node(node, nodes, distance='euclidean'):
    return cdist([node], nodes, metric=distance).argmin()
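
With `closest_node` in place, a one-nearest-neighbour classifier over the data above is a few lines; a sketch of the intended usage:

# Predict each test digit as the label of its nearest training image.
correct = sum(
    y_data[closest_node(X_test[i], X_data, distance='euclidean')] == y_test[i]
    for i in range(len_test))
print('1-NN accuracy:', correct / len_test)
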
Example #16
def eval(**kwargs):

    # Roll out the parameters
    batch_size = kwargs["batch_size"]
    generator = kwargs["generator"]
    model_name = kwargs["model_name"]
    image_dim_ordering = kwargs["image_dim_ordering"]
    img_dim = kwargs["img_dim"]
    cont_dim = (kwargs["cont_dim"],)
    cat_dim = (kwargs["cat_dim"],)
    noise_dim = (kwargs["noise_dim"],)
    bn_mode = kwargs["bn_mode"]
    noise_scale = kwargs["noise_scale"]
    dset = kwargs["dset"]
    epoch = kwargs["epoch"]

    # Setup environment (logging directory etc)
    general_utils.setup_logging(model_name)

    # Load and rescale data
    if dset == "RGZ":
        X_real_train = data_utils.load_RGZ(img_dim, image_dim_ordering)
    if dset == "mnist":
        X_real_train, _, _, _ = data_utils.load_mnist(image_dim_ordering)
    img_dim = X_real_train.shape[-3:]

    # Load generator model
    generator_model = models.load("generator_%s" % generator,
                                  cat_dim,
                                  cont_dim,
                                  noise_dim,
                                  img_dim,
                                  bn_mode,
                                  batch_size,
                                  dset=dset)

    # Load colorization model
    generator_model.load_weights("../../models/%s/gen_weights_epoch%s.h5" %
                                 (model_name, epoch))

    X_plot = []
    # Vary the categorical variable
    for i in range(cat_dim[0]):
        X_noise = data_utils.sample_noise(noise_scale, batch_size, noise_dim)
        X_cont = data_utils.sample_noise(noise_scale, batch_size, cont_dim)
        X_cont = np.repeat(X_cont[:1, :], batch_size, axis=0)  # fix continuous noise
        X_cat = np.zeros((batch_size, cat_dim[0]), dtype='float32')
        X_cat[:, i] = 1  # always the same categorical value

        X_gen = generator_model.predict([X_cat, X_cont, X_noise])
        X_gen = data_utils.inverse_normalization(X_gen)

        if image_dim_ordering == "th":
            X_gen = X_gen.transpose(0,2,3,1)

        X_gen = [X_gen[i] for i in range(len(X_gen))]
        X_plot.append(np.concatenate(X_gen, axis=1))
    X_plot = np.concatenate(X_plot, axis=0)

    plt.figure(figsize=(8,10))
    if X_plot.shape[-1] == 1:
        plt.imshow(X_plot[:, :, 0], cmap="gray")
    else:
        plt.imshow(X_plot)
    plt.xticks([])
    plt.yticks([])
    plt.ylabel("Varying categorical factor", fontsize=28, labelpad=60)

    plt.annotate('', xy=(-0.05, 0), xycoords='axes fraction', xytext=(-0.05, 1),
                 arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
    plt.tight_layout()
    plt.savefig("../../figures/varying_categorical.png")
    plt.clf()
    plt.close()

    # Vary the continuous variables
    X_plot = []
    # First get the extent of the noise sampling
    x = np.ravel(data_utils.sample_noise(noise_scale, batch_size * 20000, cont_dim))
    # Define interpolation points
    x = np.linspace(x.min(), x.max(), num=batch_size)
    for i in range(batch_size):
        X_noise = data_utils.sample_noise(noise_scale, batch_size, noise_dim)
        X_cont = np.concatenate([np.array([x[i], x[j]]).reshape(1, -1) for j in range(batch_size)], axis=0)
        X_cat = np.zeros((batch_size, cat_dim[0]), dtype='float32')
        X_cat[:, 1] = 1  # always the same categorical value

        X_gen = generator_model.predict([X_cat, X_cont, X_noise])
        X_gen = data_utils.inverse_normalization(X_gen)
        if image_dim_ordering == "th":
            X_gen = X_gen.transpose(0,2,3,1)
        X_gen = [X_gen[i] for i in range(len(X_gen))]
        X_plot.append(np.concatenate(X_gen, axis=1))
    X_plot = np.concatenate(X_plot, axis=0)

    plt.figure(figsize=(10,10))
    if X_plot.shape[-1] == 1:
        plt.imshow(X_plot[:, :, 0], cmap="gray")
    else:
        plt.imshow(X_plot)
    plt.xticks([])
    plt.yticks([])
    plt.ylabel("Varying continuous factor 1", fontsize=28, labelpad=60)
    plt.annotate('', xy=(-0.05, 0), xycoords='axes fraction', xytext=(-0.05, 1),
                 arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
    plt.xlabel("Varying continuous factor 2", fontsize=28, labelpad=60)
    plt.annotate('', xy=(1, -0.05), xycoords='axes fraction', xytext=(0, -0.05),
                 arrowprops=dict(arrowstyle="-|>", color='k', linewidth=4))
    plt.tight_layout()
    plt.savefig("../../figures/varying_continuous.png")
    plt.clf()
    plt.close()
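
Functions like `eval(**kwargs)` above are driven entirely by a keyword dictionary. A hypothetical invocation (every value below is illustrative, not taken from the source):

params = {
    "batch_size": 32, "generator": "upsampling", "model_name": "IG_mnist",
    "image_dim_ordering": "tf", "img_dim": 28, "cont_dim": 2, "cat_dim": 10,
    "noise_dim": 64, "bn_mode": 2, "noise_scale": 0.5,
    "dset": "mnist", "epoch": 40,
}
eval(**params)
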