    def save_model(self, keep_all_checkpoints=False):
        """Save the model to disk.

        If keep_all_checkpoints is True, a full copy of the model's weights
        and optimizer state is preserved for every checkpoint, with the epoch
        number embedded in the file name. If False, only the latest model is
        kept on disk, saving considerable space at the risk of losing a good
        earlier model to overtraining."""
        model_epoch_filename = "model_epoch_{0:0>4}.hdf5".format(self.epochs_completed)

        if keep_all_checkpoints:
            chkpoint_model_path = os.path.join(settings.CHECKPOINTS_DIR, model_epoch_filename)
            print_positive("Saving model after epoch #{} to disk:\n{}.".format(self.epochs_completed, chkpoint_model_path))
            self.keras_model.save(chkpoint_model_path)
            force_symlink("../checkpoints/{}".format(model_epoch_filename), self.model_path)
        else:
            print_positive("Overwriting latest model after epoch #{} to disk:\n{}.".format(self.epochs_completed, self.model_path))
            self.keras_model.save(self.model_path)
        return True
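
# A minimal sketch of the force_symlink() helper used above (its real
# implementation is not shown in this snippet; this is the usual pattern of
# replacing any existing link before creating the new one):
def force_symlink(target, link_path):
    try:
        os.symlink(target, link_path)
    except OSError:
        # A link (or file) already exists at link_path: replace it.
        os.remove(link_path)
        os.symlink(target, link_path)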
### Example 2
    def read_checkpoint_file(self):
        """Read the 'checkpoint.json' file and update the class variables accordingly."""
        checkpoint = None
        if os.path.isfile(self.path_checkpoint_file):
            print_positive("Found checkpoint file: {}".format(
                self.path_checkpoint_file))
            print_info("Verifying integrity of checkpoint file...")
            try:
                with open(self.path_checkpoint_file, "r") as fp:
                    try:
                        checkpoint = json.load(fp)
                    except ValueError as e:
                        handle_error(
                            "Failed to parse checkpoint file '{0}'. ".format(
                                self.path_checkpoint_file) +
                            "It does not appear to be a valid JSON file.", e)
                        checkpoint = None
            except IOError as e:
                handle_error(
                    "Unable to open checkpoint file '{}' for reading.".format(
                        self.path_checkpoint_file), e)
        if checkpoint is not None:
            ### Successfully loaded the checkpoint file; gather its data
            print_positive(
                "Successfully loaded checkpoint! Reading its data...")
            if checkpoint['model'] != settings.MODEL:
                print_warning(
                    "Inconsistency detected: the checkpoint model '{0}' does not match the command line argument '{1}'."
                    .format(checkpoint['model'], settings.MODEL))
                print_info("Discarding checkpoint and starting from scratch.")
                return None
            if checkpoint['exp_name'] != settings.EXP_NAME:
                print_warning(
                    "Inconsistency detected: the checkpoint experiment name '{0}' does not match the command line argument '{1}'."
                    .format(checkpoint['exp_name'], settings.EXP_NAME))
                print_info("Discarding checkpoint and starting from scratch.")
                return None

            # Read the checkpoint's state only after it has passed validation,
            # so a discarded checkpoint leaves no stale values behind.
            self.epochs_completed = checkpoint['epochs_completed']
            self.wall_time = checkpoint['wall_time']
            self.process_time = checkpoint['process_time']
        else:
            ### No checkpoint was found, or it could not be parsed; reset counters
            self.epochs_completed = 0
            self.wall_time = 0
            self.process_time = 0

        return checkpoint
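
    # For reference, a checkpoint file that satisfies the reader above might
    # look like this (hypothetical values; the fields are exactly those the
    # method consumes):
    #
    #   {
    #       "model": "conv_mlp",
    #       "exp_name": "my_experiment",
    #       "epochs_completed": 25,
    #       "wall_time": 10427.3,
    #       "process_time": 9876.5
    #   }
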
    def load_model(self):
        """Return True if a valid model was found and correctly loaded. Return False if no model was loaded."""
        from shutil import copyfile
        from keras.models import load_model

        chosen_model_path = None
        best_model_path = None
        best_model_glob = glob.glob(settings.MODELS_DIR + "/best_model_*.hdf5")
        if len(best_model_glob) > 0:
            best_model_path = best_model_glob[0]
            chosen_model_path = best_model_path
            print_positive("Loading *best* model with the lowest validation score: {}"
                           .format(best_model_path))
        if best_model_path is None:
            model_epoch_filename = "model_epoch_{0:0>4}.hdf5".format(self.epochs_completed)
            most_recent_model_path = os.path.join(settings.CHECKPOINTS_DIR, model_epoch_filename)
            chosen_model_path = self.model_path
            if not os.path.isfile(chosen_model_path):
                if not os.path.isfile(most_recent_model_path):
                    print_warning("Unexpected problem: cannot find the model's HDF5 file anymore at path:\n'{}'".format(most_recent_model_path))
                    return False
                else:
                    chosen_model_path = most_recent_model_path

            print_positive("Loading last known valid model (this includes the complete architecture, all weights, optimizer's state and so on)!")

        # Check that the file is readable first (it is a binary HDF5 file)
        try:
            open(chosen_model_path, "rb").close()
        except Exception as e:
            handle_error("Lacking permission to *open for reading* the HDF5 model located at\n{}."
                         .format(chosen_model_path), e)
            return False
        
        # Load the actual HDF5 model file
        try:
            self.keras_model = load_model(chosen_model_path)
        except Exception as e:
            handle_error("Unfortunately, the model did not parse as a valid HDF5 Keras model and cannot be loaded for an unkown reason. A backup of the model will be created, after which training will restart from scratch.".format(chosen_model_path), e)
            try:
                copyfile(chosen_model_path, "{}.backup".format(chosen_model_path))
            except Exception as e2:
                handle_error("Looks like you're having a bad day. The copy operation failed for an unknown reason. We will exit before causing any serious damage ;). Better luck next time. Please verify your directory permissions and your default umask!", e2)
                sys.exit(-3)
            return False
        return True
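
# A minimal sketch of how the two methods above could be wired together in the
# resume_last_checkpoint() method referenced later in this project (assumed
# composition; 'load_hyperparams' is a hypothetical helper, not shown here):
#
#   def resume_last_checkpoint(self):
#       checkpoint = self.read_checkpoint_file()
#       hyperparams = self.load_hyperparams()
#       resumed = checkpoint is not None and self.load_model()
#       return checkpoint, hyperparams, resumed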
### Example 4
    def _load_jpgs_and_captions_npy(self):
        print_positive(
            "Found the project datasets encoded as a 4-tensor in '.npy' format. Attempting to load..."
        )
        prefix = ""
        if settings.MAX_TRAINING_SAMPLES != -1:
            prefix = "subset_"
        try:
            for i, filename in enumerate([
                    self._images_filename, self._test_images_filename,
                    self._captions_ids_filename, self._captions_dict_filename
            ]):
                path = os.path.join(settings.MSCOCO_DIR, prefix + filename)
                if i == 0:
                    self.images = np.load(path)
                    if (settings.MAX_TRAINING_SAMPLES != -1 and
                            self.images.shape[0] != settings.MAX_TRAINING_SAMPLES):
                        raise Exception("Incorrect number of images")
                elif i == 1:
                    self.test_images = np.load(path)
                elif i == 2:
                    self.captions_ids = np.load(path)
                elif i == 3:
                    self.captions_dict = np.load(path)
                print_info("Loaded: {}".format(path))
            log("")
            print_positive("Successfully loaded entire datasets!")
            self._is_dataset_loaded = True
        except Exception as e:
            handle_warning(
                "Unable to load some of the '.npy' dataset files. Going back to loading '.jpg' files one at a time.",
                e)
            self._is_dataset_loaded = False
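
    # Note: the captions dictionary above is a pickled Python object stored in
    # '.npy' format. On NumPy >= 1.16.3, np.load() rejects pickled payloads by
    # default, so on a modern stack that load would need an explicit opt-in:
    #
    #   self.captions_dict = np.load(path, allow_pickle=True)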
### Example 5
    def load_model(self):
        """Return True if a valid model was found and correctly loaded. Return False if no model was loaded."""
        import numpy as np
        from lasagne.layers import set_all_param_values

        if os.path.isfile(self.full_gen_path) and os.path.isfile(
                self.full_disc_path):
            print_positive(
                "Found the latest '.npz' model weights files saved to disk at paths:\n{}\n{}"
                .format(self.full_gen_path, self.full_disc_path))
        else:
            print_info(
                "Cannot resume from checkpoint. Could not find the '.npz' weights files, either {} or {}."
                .format(self.full_gen_path, self.full_disc_path))
            return False

        try:
            ### Load the generator model's weights
            print_info("Attempting to load generator model: {}".format(
                self.full_gen_path))
            with np.load(self.full_gen_path) as fp:
                param_values = [fp['arr_%d' % i] for i in range(len(fp.files))]
            set_all_param_values(self.generator, param_values)

            ### Load the discriminator model's weights
            print_info("Attempting to load generator model: {}".format(
                self.full_disc_path))
            with np.load(self.full_disc_path) as fp:
                param_values = [fp['arr_%d' % i] for i in range(len(fp.files))]
            set_all_param_values(self.discriminator, param_values)
        except Exception as e:
            handle_error(
                "Failed to read or parse the '.npz' weights files, either {} or {}."
                .format(self.full_gen_path, self.full_disc_path), e)
            return False
        return True
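
    # The matching saver for this loader is not shown in this snippet; it would
    # be the standard Lasagne pattern, something along these lines:
    #
    #   np.savez(self.full_gen_path,
    #            *lasagne.layers.get_all_param_values(self.generator))
    #   np.savez(self.full_disc_path,
    #            *lasagne.layers.get_all_param_values(self.discriminator))
    #
    # np.savez() names positional arrays arr_0, arr_1, ..., which is exactly
    # what the 'arr_%d' indexing above reads back.
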
    def train(self, Dataset):
        from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback
        
        ### Get the datasets
        X_train, X_test, Y_train, Y_test, id_train, id_val = Dataset.return_train_data()

        #### Print model summary
        #print_info("Model summary:")
        #self.keras_model.summary()

        #### Compile the model (if necessary)
        self._compile()
        
        #### Fit the model
        
        # We fit the model iteratively, typically in more than one pass, in order to
        # create frequent checkpoints. For example, if EPOCHS_PER_CHECKPOINT is set to 5
        # and NUM_EPOCHS is set to 33, then the loop will iterate 7 times: the first 6
        # iterations perform 5 epochs of training each via the 'fit' method, and the 7th
        # performs the remaining 3 epochs to reach NUM_EPOCHS. Finally, a checkpoint is
        # always created once training is complete, even if the next EPOCHS_PER_CHECKPOINT
        # multiple was not reached yet.
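        #
        # A quick sanity check of that schedule (illustrative values only):
        #   >>> n, k = 33, 5
        #   >>> [min(k, n - i) for i in range(0, n, k)]
        #   [5, 5, 5, 5, 5, 5, 3]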

        ### Print the major params again, for convenience
        print_info("Starting training from epoch {0} to epoch {1} {2}, creating checkpoints every {3} epochs."
                   .format(self.epochs_completed + 1,
                           self.epochs_completed + settings.NUM_EPOCHS,
                           "(i.e. training an extra {0} epochs)".format(settings.NUM_EPOCHS),
                           settings.EPOCHS_PER_CHECKPOINT))

        # Define training callbacks
        early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='min')
        best_model_path = os.path.join(settings.MODELS_DIR,
                                       "best_model_epoch.{epoch:03d}_loss.{val_loss:.4f}.hdf5")
        checkpointer = ModelCheckpoint(filepath=best_model_path,
                                       verbose=1, save_best_only=True)
        epoch_complete = LambdaCallback(on_epoch_end = self.increment_epochs_completed)

        # Ready to train!
        print_positive("Starting to train model!...")
        verbose = settings.VERBOSE
        if verbose == 2:
            verbose = 0 # If verbose == 2, the 'epoch_complete' callback already prints the same
        # Note: with a non-zero 'initial_epoch', Keras trains only until the
        # epoch index reaches 'epochs', so the target must be offset by the
        # number of epochs already completed to train NUM_EPOCHS more epochs.
        self.keras_model.fit(X_train, Y_train,
                             validation_split=0.1,
                             epochs = self.epochs_completed + settings.NUM_EPOCHS,
                             batch_size = settings.BATCH_SIZE,
                             verbose = verbose,
                             initial_epoch = self.epochs_completed,
                             callbacks=[early_stopping, checkpointer, epoch_complete])
        
        ### Training complete
        print_positive("Training complete!")

        # Checkpoint time (save hyper parameters, model and checkpoint file)
        print_positive("CHECKPOINT AT EPOCH {}. Updating 'checkpoint.json' file...".format(self.epochs_completed))
        self.update_checkpoint(False)

        ### Evaluate the model's performance
        print_info("Evaluating model...")
        train_scores = self.keras_model.evaluate(X_train, Y_train, batch_size = settings.BATCH_SIZE, verbose = 0)
        test_scores = self.keras_model.evaluate(X_test, Y_test, batch_size = settings.BATCH_SIZE, verbose = 0)
        metric = self.keras_model.metrics_names[1]
        print_positive("Training score {0: >6}: {1:.5f}".format(metric, train_scores[1]))
        print_positive("Testing score  {0: >6}: {1:.5f}".format(metric, train_scores[1]))

        ### Save the model's performance to disk
        path_model_score = os.path.join(settings.PERF_DIR, "score.txt")
        print_info("Saving performance to file '{}'".format(path_model_score))
        with open(path_model_score, "w") as fd:
            fd.write("Performance statistics\n")
            fd.write("----------------------\n")
            fd.write("Model = {}\n".format(settings.MODEL))
            fd.write("Cumulative number of training epochs = {0}\n".format(self.epochs_completed))
            fd.write("Training score (metric: {0: >6}) = {1:.5f}\n".format(metric, train_scores[1]))
            fd.write("Testing score  (metric: {0: >6}) = {1:.5f}\n".format(metric, train_scores[1]))
def run_experiment():
    log("Welcome! This is my final project for the course:")
    log('    IFT6266-H2017 (a.k.a. "Deep Learning")')
    log("         Prof. Aaron Courville")
    log("")
    log("This program is copyrighted 2017 Philippe Paradis. All Rights Reserved."
        )
    log("")
    log("Enjoy!")
    log("")
    model = None

    if settings.MODEL == "conv_mlp_vgg16":
        settings.MODEL = "conv_mlp"
        settings.USE_VGG16_LOSS = True

    # Define model's specific settings
    if settings.MODEL == "test":
        from keras_models import Test_Model
        model = Test_Model()
    elif settings.MODEL == "mlp":
        from keras_models import MLP_Model
        model = MLP_Model()
    elif settings.MODEL == "conv_mlp":
        from keras_models import Conv_MLP
        model = Conv_MLP()
    elif settings.MODEL == "conv_deconv":
        from keras_models import Conv_Deconv
        model = Conv_Deconv()
    elif settings.MODEL == "lasagne_conv_deconv":
        from lasagne_models import Lasagne_Conv_Deconv
        model = Lasagne_Conv_Deconv(use_dropout=False)
    elif settings.MODEL == "lasagne_conv_deconv_dropout":
        from lasagne_models import Lasagne_Conv_Deconv
        model = Lasagne_Conv_Deconv(use_dropout=True)
        settings.MODEL = "lasagne_conv_deconv"
    elif settings.MODEL == "dcgan":
        model = models.DCGAN_Model()
    elif settings.MODEL == "wgan":
        from wgan import WGAN_Model
        model = WGAN_Model()
    elif settings.MODEL == "lsgan":
        from lsgan import LSGAN_Model
        model = LSGAN_Model()
    elif settings.MODEL == "vgg16":
        if not os.path.isfile("vgg16.pkl"):
            log("Could not find VGG-16 pre-trained weights file 'vgg16.pkl'. Downloading..."
                )
            download_vgg16_weights()
        from vgg16 import VGG16_Model
        model = VGG16_Model()
    else:
        raise NotImplementedError()

    ### Check if --force flag was passed
    if settings.FORCE_RUN:
        stopfile = os.path.join(settings.BASE_DIR, "STOP")
        if os.path.isfile(stopfile):
            os.remove(stopfile)

    ### Check for STOP file in BASE_DIR. Who knows, this experiment could
    ### be a baddy which we certainly don't want to waste precious GPU time on! Oh no!
    if model.check_stop_file():
        print_error(
            "Oh dear, it looks like a STOP file is present in this experiment's base directory, located here:\n{}\nIf you think the STOP file was added in error and you would like to pursue this experiment further, feel free to delete this file (which is empty, anyway)."
            .format(os.path.join(settings.BASE_DIR, "STOP")))
        sys.exit(-2)
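
    # check_stop_file() presumably just tests for that empty STOP file, along
    # the lines of (assumed implementation):
    #
    #   os.path.isfile(os.path.join(settings.BASE_DIR, "STOP"))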

    ### Load checkpoint (if any). This will also load the hyper parameters file.
    ### This will also load the model's architecture, weights, optimizer states,
    ### that is, everything necessary to resume training.
    print_info(
        "Checking for a valid checkpoint. If so, load hyper parameters and all data from the last known state..."
    )
    checkpoint, hyperparams, resume_from_checkpoint = model.resume_last_checkpoint(
    )

    if resume_from_checkpoint:
        print_positive(
            "Found checkpoint, hyper parameters and model data all passing the integrity tests! "
            "Ready to resume training!")
        log("")
        print_info("State of last checkpoint:")
        for key in checkpoint:
            log(" * {0: <20} = {1}".format(str(key), str(checkpoint[key])))
        log("")
    else:
        print_info(
            "No valid checkpoint found for this experiment. Building and training model from scratch."
        )
        ### Build model's architecture
        model.build()
        ### Save hyperparameters to a file
        model.save_hyperparams()

    if settings.NUM_EPOCHS == 0 and not settings.PERFORM_PREDICT_ONLY:
        log("Okay, we specified 0 epochs, so we only created the experiment directory:\n{}\nand the hyper parameters file 'hyperparameters.json' within that directory."
            .format(settings.BASE_DIR))
        sys.exit(0)

    ###
    ### Make sure the dataset has been downloaded and extracted correctly on disk
    ###
    if not check_mscoco_dir(settings.MSCOCO_DIR):
        log("(!) The project dataset based on MSCOCO was not found in its expected location '{}' or the symlink is broken."
            .format(settings.MSCOCO_DIR))
        log("Attempting to download the dataset...")
        rc = download_dataset()
        if rc != 0:
            log("(!) Failed to download the project dataset, exiting...")
            sys.exit(rc)

    if settings.TINY_DATASET:
        tiny_dataset_dir = os.path.join(settings.THIS_DIR, "mscoco_small/")
        if not check_mscoco_dir(tiny_dataset_dir):
            create_tiny_dataset()
        # Point the experiment at the tiny dataset whether or not it had to be created
        settings.MSCOCO_DIR = tiny_dataset_dir

    verbosity_level = "Low"
    if settings.VERBOSE == 1:
        verbosity_level = "High"
    elif settings.VERBOSE == 2:
        verbosity_level = "Medium"

    # Print info about our settings
    log("============================================================")
    print_info("Experiment name    = %s" % settings.EXP_NAME)
    log("============================================================")
    log("")
    print_info("Experiment settings and options:")
    log(" * Model type            = " + str(settings.MODEL))
    log(" * Training epochs       = " + str(settings.NUM_EPOCHS))
    log(" * Batch size            = " + str(settings.BATCH_SIZE))
    log(" * Learning rate         = " + str(settings.LEARNING_RATE))
    log(" * Epochs per checkpoint = " + str(settings.EPOCHS_PER_CHECKPOINT))
    log(" * Epochs per samples    = " + str(settings.EPOCHS_PER_SAMPLES))
    log(" * Feature Matching Loss = " + str(settings.FEATURE_MATCHING))
    log(" * Keep model's data for every checkpoint  = " +
        str(settings.KEEP_ALL_CHECKPOINTS))
    log(" * Verbosity             = " + str(settings.VERBOSE) +
        " ({})".format(verbosity_level))
    log(" * Data augmentation     = " + str(settings.DATASET_AUGMENTATION))
    log(" * Load greyscale images = " +
        str(settings.LOAD_BLACK_AND_WHITE_IMAGES))
    log("")

    if settings.MODEL in ["dcgan", "wgan", "lsgan"]:
        print_info("GAN-specific settings:")
        log(" * Type of GAN used      = " + str(settings.MODEL))
        log(" * Generator/critic updates per epoch = " +
            str(settings.UPDATES_PER_EPOCH))
        log(" * GAN learning rate     = " + str(settings.GAN_LEARNING_RATE))
        if settings.MODEL == "lsgan":
            log(" * LSGAN architecture #  = " +
                str(settings.LSGAN_ARCHITECTURE))
        log("")

    ### Print hyperparameters, as loaded from existing file or as initialized for new experiment
    print_info("Hyper parameters:")
    for key in model.hyper:
        log(" * {0: <20} = {1}".format(str(key), str(model.hyper[key])))
    log("")

    #######################################
    # Info about the dataset
    #######################################
    # The data is already split into training and validation datasets
    # The training dataset has:
    # - 82782 items
    # - 984 MB of data
    # The validation dataset has:
    # - 40504 items
    # - 481 MB of data
    #
    # There is also a pickled dictionary that maps image filenames (minus the
    # .jpg extension) to a list of 5 strings (the 5 human-generated captions).
    # This dictionary is an OrderedDict with 123286 entries.
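    #
    # Inspecting it interactively would look something like this (illustrative
    # key; actual keys are the dataset's image file names):
    #
    #   >>> captions = captions_dict["COCO_train2014_000000000009"]
    #   >>> len(captions)
    #   5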

    import dataset

    ### Create and initialize an empty InpaintingDataset object
    Dataset = dataset.ColorsFirstDataset(settings.IMAGE_WIDTH,
                                         settings.IMAGE_HEIGHT)

    ### Load dataset
    Dataset.load_dataset()

    log("")
    print_info("Summary of data within dataset:")
    log(" * images.shape        = " + str(Dataset.images.shape))
    log(" * captions_ids.shape  = " + str(Dataset.captions_ids.shape))
    log(" * captions_dict.shape = " + str(Dataset.captions_dict.shape))
    log("")

    ### Train the model (computation intensive)
    if settings.MODEL == "mlp" or settings.MODEL == "test" or settings.MODEL == "conv_mlp":
        Dataset.preprocess()
        Dataset.normalize()
        Dataset.preload()
        model.train(Dataset)
        Dataset.denormalize()

        ### Produce predictions
        Y_test_pred = model.predict(Dataset.get_data(X=True, Test=True),
                                    batch_size=model.hyper['batch_size'])

        ### Reshape predictions to a 2d image and denormalize data
        Y_test_pred = dataset.denormalize_data(Y_test_pred)
        num_rows = Y_test_pred.shape[0]
        Y_test_pred_2d = unflatten_to_4tensor(Y_test_pred,
                                              num_rows,
                                              32,
                                              32,
                                              is_colors_channel_first=True)
        Y_test_pred_2d = transpose_colors_channel(Y_test_pred_2d,
                                                  from_first_to_last=True)

        ### Create dataset with colors channel last
        NewDataset = dataset.ColorsLastDataset(settings.IMAGE_WIDTH,
                                               settings.IMAGE_HEIGHT)
        NewDataset.load_dataset()
        NewDataset.preprocess(model="conv_deconv")
        NewDataset.preload(model="conv_deconv")

        ### Save predictions to disk
        save_keras_predictions(Y_test_pred_2d,
                               Dataset.id_val,
                               NewDataset,
                               num_images=50)
        create_html_results_page("results.html", "assets/", num_images=50)
    elif settings.MODEL == "conv_deconv":
        Dataset.preprocess()
        Dataset.normalize()
        Dataset.preload()
        model.train(Dataset)
        Dataset.denormalize()

        ### Produce predictions
        Y_test_pred_2d = model.predict(Dataset.get_data(X=True, Test=True),
                                       batch_size=model.hyper['batch_size'])

        ### Reshape predictions
        Y_test_pred_2d = dataset.denormalize_data(Y_test_pred_2d)
        Y_test_pred_2d = transpose_colors_channel(Y_test_pred_2d,
                                                  from_first_to_last=True)

        ### Create dataset with colors channel last
        NewDataset = dataset.ColorsLastDataset(settings.IMAGE_WIDTH,
                                               settings.IMAGE_HEIGHT)
        NewDataset.load_dataset()
        NewDataset.preprocess(model="conv_deconv")
        NewDataset.preload(model="conv_deconv")

        ### Save predictions to disk
        save_keras_predictions(Y_test_pred_2d,
                               Dataset.id_val,
                               NewDataset,
                               num_images=50)
        create_html_results_page("results.html", "assets/", num_images=50)
    elif settings.MODEL == "lasagne_conv_deconv" or settings.MODEL == "vgg16":
        Dataset.preprocess()
        Dataset.normalize()
        Dataset.preload()
        model.train(Dataset)
    elif settings.MODEL == "dcgan":
        from lasagne.utils import floatX
        Dataset.preprocess()
        Dataset.normalize()
        Dataset.preload()
        generator, discriminator, train_fn, gen_fn = model.train(
            Dataset,
            num_epochs=settings.NUM_EPOCHS,
            epochsize=10,
            batchsize=64,
            initial_eta=8e-5)
        Dataset.denormalize()

        settings.touch_dir(settings.SAMPLES_DIR)
        for i in range(100):
            samples = gen_fn(floatX(np.random.rand(10 * 10, 100)))
            path = os.path.join(settings.EPOCHS_DIR, 'samples_%i.png' % i)
            samples = dataset.denormalize_data(samples)
            Image.fromarray(
                samples.reshape(10, 10, 3, 64,
                                64).transpose(0, 3, 1, 4,
                                              2).reshape(10 * 64, 10 * 64,
                                                         3)).save(path)
            sample = gen_fn(floatX(np.random.rand(1, 100)))
            sample = dataset.denormalize_data(sample)
            path = os.path.join(settings.SAMPLES_DIR, 'one_sample_%i.png' % i)
            Image.fromarray(
                sample.reshape(3, 64, 64).transpose(1, 2,
                                                    0).reshape(64, 64,
                                                               3)).save(path)
    elif settings.MODEL == "wgan":
        import wgan
        from lasagne.utils import floatX

        Dataset.preprocess()
        Dataset.normalize()
        Dataset.preload()
        generator, critic, generator_train_fn, critic_train_fn, gen_fn = wgan.train(
            Dataset, num_epochs=settings.NUM_EPOCHS)
        Dataset.denormalize()

        settings.touch_dir(settings.SAMPLES_DIR)
        for i in range(100):
            samples = gen_fn(floatX(np.random.rand(10 * 10, 100)))
            path = os.path.join(settings.EPOCHS_DIR, 'samples_%i.png' % i)
            samples = dataset.denormalize_data(samples)
            Image.fromarray(
                samples.reshape(10, 10, 3, 64,
                                64).transpose(0, 3, 1, 4,
                                              2).reshape(10 * 64, 10 * 64,
                                                         3)).save(path)
            sample = gen_fn(floatX(np.random.rand(1, 100)))
            sample = dataset.denormalize_data(sample)
            path = os.path.join(settings.SAMPLES_DIR, 'one_sample_%i.png' % i)
            Image.fromarray(
                sample.reshape(3, 64, 64).transpose(1, 2,
                                                    0).reshape(64, 64,
                                                               3)).save(path)
    elif settings.MODEL == "lsgan":
        from lasagne.utils import floatX
        Dataset.preprocess()
        Dataset.normalize()
        Dataset.preload()

        generator, critic, gen_fn = model.train(
            Dataset,
            num_epochs=settings.NUM_EPOCHS,
            epochsize=settings.UPDATES_PER_EPOCH,
            batchsize=settings.BATCH_SIZE,
            initial_eta=settings.GAN_LEARNING_RATE,
            architecture=settings.LSGAN_ARCHITECTURE)
        Dataset.denormalize()
        for i in range(100):
            samples = gen_fn(floatX(np.random.rand(10 * 10, 100)))
            path = os.path.join(settings.SAMPLES_DIR,
                                "samples_{:0>3}.png".format(i))
            samples = dataset.denormalize_data(samples)
            Image.fromarray(
                samples.reshape(10, 10, 3, 64,
                                64).transpose(0, 3, 1, 4,
                                              2).reshape(10 * 64, 10 * 64,
                                                         3)).save(path)
            sample = gen_fn(floatX(np.random.rand(1, 100)))
            sample = dataset.denormalize_data(sample)
            path = os.path.join(settings.SAMPLES_DIR,
                                "one_sample_{:0>3}.png".format(i))
            Image.fromarray(
                sample.reshape(3, 64, 64).transpose(1, 2,
                                                    0).reshape(64, 64,
                                                               3)).save(path)

    ### Success...? Well, at least we didn't crash :P
    log("Exiting normally. That's typically a good sign :-)")
    sys.exit(0)
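
# All three GAN branches above tile a 10x10 batch of generated samples into a
# single image using the same reshape/transpose chain. A small helper capturing
# that trick (a sketch; 'tile_samples' is not part of the original code):
def tile_samples(samples, rows, cols, size=64):
    """Tile a batch of rows*cols color images into one (rows*size, cols*size, 3) array."""
    return (samples.reshape(rows, cols, 3, size, size)
                   .transpose(0, 3, 1, 4, 2)
                   .reshape(rows * size, cols * size, 3))

# With it, each repeated block would reduce to, e.g.:
#   Image.fromarray(tile_samples(samples, 10, 10)).save(path)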
### Example 8
    def train(self, dataset):
        log("Fetching data...")
        X_train, X_val, y_train, y_val, ind_train, ind_val = dataset.return_train_data(
        )
        X_test, y_test = dataset.return_test_data()

        # Variance of the prediction can be maximized to obtain sharper images.
        # If this coefficient is set to 0, the loss is just the L2 loss.
        StdevCoef = 0

        # Prepare Theano variables for inputs and targets
        input_var = T.tensor4('inputs')
        target_var = T.tensor4('targets')

        # Create neural network model
        log("Building model and compiling functions...")
        self.build_network(input_var, target_var)

        # See if we can resume from previously saved model
        print_info("Looking for existing model to resume training from...")
        model_path = os.path.join(settings.MODELS_DIR,
                                  settings.EXP_NAME + ".npz")
        if self.load_model(model_path):
            print_positive(
                "Loaded saved model weights found on disk at: {}!".format(
                    model_path))
            print_positive("Resuming training from exisiting weights!")
        else:
            print_info(
                "Unable to find exisiting or load valid model file. Starting training from scratch!"
            )

        # Build loss function
        train_loss = self.build_loss(input_var, target_var)

        # Update expressions
        from theano import shared
        eta = shared(lasagne.utils.floatX(settings.LEARNING_RATE))
        params = lasagne.layers.get_all_params(self.network_out,
                                               trainable=True)
        updates = lasagne.updates.adam(train_loss, params, learning_rate=eta)

        # Train loss function
        train_fn = theano.function([input_var, target_var],
                                   train_loss,
                                   updates=updates)

        # Test/validation Loss expression (disable dropout and so on...)
        test_loss = self.build_loss(input_var, target_var, deterministic=True)

        # Validation loss function
        val_test_fn = theano.function([input_var, target_var], test_loss)

        # Predict function
        predict_fn = theano.function([input_var],
                                     lasagne.layers.get_output(
                                         self.network_out, deterministic=True))

        ##########################################
        # Finally, launch the training loop.
        ##########################################
        keyboard_interrupt = False
        completed_epochs = 0
        try:
            log("Starting training...")
            batch_size = settings.BATCH_SIZE
            best_val_loss = np.inf
            best_val_loss_epoch = -1
            for epoch in range(settings.NUM_EPOCHS):
                start_time = time.time()
                train_losses = []
                for batch in self.iterate_minibatches(X_train,
                                                      y_train,
                                                      batch_size,
                                                      shuffle=True):
                    inputs, targets = batch
                    train_losses.append(train_fn(inputs, targets))

                val_losses = []
                for batch in self.iterate_minibatches(X_val,
                                                      y_val,
                                                      batch_size,
                                                      shuffle=False):
                    inputs, targets = batch
                    val_losses.append(val_test_fn(inputs, targets))

                completed_epochs += 1

                # Print the results for this epoch
                mean_train_loss = np.mean(train_losses)
                mean_val_loss = np.mean(val_losses)
                log("Epoch {} of {} took {:.3f}s".format(
                    epoch + 1, settings.NUM_EPOCHS,
                    time.time() - start_time))
                log(" - training loss:    {:.6f}".format(mean_train_loss))
                log(" - validation loss:  {:.6f}".format(mean_val_loss))

                create_checkpoint = False
                stop_file_found = False

                if self.check_stop_file():
                    stop_file_found = True
                    create_checkpoint = True

                if epoch >= 8 and mean_val_loss < best_val_loss:
                    best_val_loss_epoch = epoch + 1
                    best_val_loss = mean_val_loss
                    create_checkpoint = True
                    print_positive(
                        "New best val loss = {:.6f}!!! Creating model checkpoint!"
                        .format(best_val_loss))
                elif (epoch + 1) % settings.EPOCHS_PER_CHECKPOINT == 0:
                    create_checkpoint = True
                    print_info(
                        "Time for model checkpoint (every {} epochs)...".
                        format(settings.EPOCHS_PER_CHECKPOINT))

                if create_checkpoint:
                    # Save checkpoint
                    # Name the checkpoint after this epoch's validation loss
                    # (not the best seen so far, which may belong to an earlier epoch)
                    model_checkpoint_filename = "model_checkpoint-val_loss.{:.6f}-epoch.{:0>3}.npz".format(
                        mean_val_loss, epoch + 1)
                    model_checkpoint_path = os.path.join(
                        settings.CHECKPOINTS_DIR, model_checkpoint_filename)
                    print_info("Saving model checkpoint: {}".format(
                        model_checkpoint_path))
                    # Save model
                    self.save_model(model_checkpoint_path)

                # Save samples for this epoch
                if (epoch + 1) % settings.EPOCHS_PER_SAMPLES == 0:
                    num_samples = 100
                    num_rows = 10
                    num_cols = 10
                    samples = self.create_samples(X_val, y_val, batch_size,
                                                  num_samples, predict_fn)
                    samples = denormalize_data(samples)
                    samples_path = os.path.join(
                        settings.EPOCHS_DIR,
                        'samples_epoch_{0:0>5}.png'.format(epoch + 1))
                    print_info(
                        "Time for saving sample images (every {} epochs)... ".
                        format(settings.EPOCHS_PER_SAMPLES) +
                        "Saving {} sample images predicted from validation set inputs here: {}"
                        .format(num_samples, samples_path))
                    try:
                        import PIL.Image as Image
                        Image.fromarray(
                            samples.reshape(
                                num_rows, num_cols, 3, 32, 32).transpose(
                                    0, 3, 1, 4,
                                    2).reshape(num_rows * 32, num_cols * 32,
                                               3)).save(samples_path)
                    except ImportError as e:
                        print_warning(
                            "Cannot import module 'PIL.Image', which is necessary for the Lasagne model to output its sample images. You should really install it!"
                        )

                if stop_file_found:
                    print_critical(
                        "STOP file found. Ending training here! Still producing results..."
                    )
                    break
        except KeyboardInterrupt:
            print_critical("Training interrupted by KeyboardInterrupt (^C)!")
            print_info(
                "Before shutting down, attempting to save a valid checkpoint, produce preliminary results, and so on."
            )
            keyboard_interrupt = True

        print_info("Training complete!")

        # Save model
        self.save_model(
            os.path.join(settings.MODELS_DIR, settings.EXP_NAME + ".npz"))

        # Compute 'num_images' predictions to visualize and print the test error
        num_images = 100
        test_losses = []
        num_iter = 0
        num_predictions = min(X_test.shape[0], num_images)
        indices = np.arange(X_test.shape[0])
        if num_images < X_test.shape[0]:
            indices = indices[0:num_images]
        X_test_small = X_test[indices, :, :, :]
        y_test_small = y_test[indices, :, :, :]
        preds = np.zeros((num_predictions, 3, 32, 32))
        for batch in self.iterate_minibatches(X_test_small,
                                              y_test_small,
                                              batch_size,
                                              shuffle=False):
            inputs, targets = batch
            preds[num_iter * batch_size:(num_iter + 1) *
                  batch_size] = predict_fn(inputs)
            test_losses.append(val_test_fn(inputs, targets))
            num_iter += 1
        log("Final results:")
        log(" - test loss:        {:.6f}".format(np.mean(test_losses)))

        # Save predictions and create HTML page to visualize them
        test_images_original = np.copy(normalize_data(dataset.test_images))
        denormalize_and_save_jpg_results(preds, X_test, y_test,
                                         test_images_original, num_images)
        create_html_results_page(num_images)

        # Save model's performance
        path_model_score = os.path.join(settings.PERF_DIR, "score.txt")
        print_info("Saving performance to file '{}'".format(path_model_score))
        metric = settings.LOSS_FUNCTION
        log("")
        log("Performance statistics")
        log("----------------------")
        log(" * Model = {}".format(settings.MODEL))
        log(" * Number of training epochs = {0}".format(completed_epochs))
        log(" * Final training score (metric: {0: >6})    = {1:.5f}".format(
            metric, np.mean(train_losses)))
        log(" * Final validation score  (metric: {0: >6}) = {1:.5f}".format(
            metric, np.mean(val_losses)))
        log(" * Best validation score   (metric: {0: >6}) = {1:.5f}".format(
            metric, best_val_loss))
        log(" * Epoch for best validation score           = {}".format(
            best_val_loss_epoch))
        log(" * Testing dataset  (metric: {0: >6})        = {1:.5f}".format(
            metric, np.mean(test_losses)))
        log("")
        with open(path_model_score, "w") as fd:
            fd.write("Performance statistics\n")
            fd.write("----------------------\n")
            fd.write("Model = {}\n".format(settings.MODEL))
            fd.write(
                "Number of training epochs = {0}\n".format(completed_epochs))
            fd.write(
                "Final training score (metric: {0: >6})    = {1:.5f}\n".format(
                    metric, np.mean(train_losses)))
            fd.write(
                "Final validation score  (metric: {0: >6}) = {1:.5f}\n".format(
                    metric, np.mean(val_losses)))
            fd.write(
                "Best validation score   (metric: {0: >6}) = {1:.5f}\n".format(
                    metric, best_val_loss))
            fd.write("Epoch for best validation score           = {}\n".format(
                best_val_loss_epoch))
            fd.write(
                "Testing dataset  (metric: {0: >6})        = {1:.5f}\n".format(
                    metric, np.mean(test_losses)))

        if keyboard_interrupt:
            raise KeyboardInterrupt
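
# The iterate_minibatches() helper used throughout this snippet (as a method
# here) is not shown. The classic version from the Lasagne examples is a
# reasonable guess at what this code expects: optional shuffling, and the last
# partial batch is dropped (which the prediction loop above relies on):
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    indices = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        excerpt = indices[start_idx:start_idx + batchsize]
        yield inputs[excerpt], targets[excerpt]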