Example #1
    def get_model(self) -> Model:
        """Get selected model.

        Returns
        -------
        Model
            Keras model object, compiled.

        """
        # Build the model
        optimizer = RMSprop(self.learning_rate)
        if self.model_type == "unet":
            model = models.unet(self.input_shape, self.classes, optimizer,
                                self.loss)
        elif self.model_type == "unet_large":
            model = models.unet_large(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcdensenet":
            model = models.fcdensenet(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcn_small":
            model = models.fcn_small(self.input_shape, self.classes, optimizer,
                                     self.loss)
        else:
            raise ValueError("Unknown model_type: %s" % self.model_type)
        model.summary()
        return model
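
This get_model variant builds one of four segmentation architectures and compiles it inside the model factory. A minimal usage sketch, assuming a hypothetical Trainer class (only the attributes read by get_model appear in the snippet; the class name and argument values are assumptions, with shapes taken from main() further below):

# Hypothetical usage sketch -- the Trainer class name and constructor are
# assumptions; only the attributes read inside get_model are from the snippet.
trainer = Trainer(model_type="unet",
                  input_shape=(240, 240, 4),
                  classes=5,
                  learning_rate=0.001,
                  loss="crossentropy")
model = trainer.get_model()  # builds, compiles, and prints a summary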
Example #2
    def get_model(self) -> Model:
        """Get selected model.

        Returns
        -------
        Model
            Keras model object, compiled.

        """
        # Build the model
        optimizer = RMSprop(self.learning_rate)
        if self.model_type == "unet":
            model = models.unet(self.input_shape, self.classes, optimizer,
                                self.loss)
        elif self.model_type == "unet_large":
            model = models.unet_large(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcdensenet":
            model = models.fcdensenet(self.input_shape, self.classes,
                                      optimizer, self.loss)
        elif self.model_type == "fcn_small":
            model = models.fcn_small(self.input_shape, self.classes, optimizer,
                                     self.loss)
        else:
            raise ValueError(f"Unknown model_type: {self.model_type}")
        if self.preload_weights:
            logger.info(
                "=====================================================")
            logger.info(f"Using weights from {self.preload_weights}")
            logger.info(
                "=====================================================")
            model.load_weights(self.preload_weights)
        model.run_eagerly = True
        model.summary()
        return model
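
Example #2 adds optional weight preloading and sets run_eagerly = True, which disables graph tracing so per-batch Python code runs eagerly (useful for debugging, slower for training). When a checkpoint only partially matches the architecture, Keras HDF5 checkpoints can also be matched by layer name; a minimal sketch (the path is a placeholder, not from the snippet):

# Sketch: partial weight loading that matches layers by name; this works
# for HDF5 checkpoints, and "pretrained.h5" is a placeholder path.
model.load_weights("pretrained.h5", by_name=True)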
Example #3
    def get_model(self) -> Model:
        """Get selected model.

        Returns
        -------
        Model
            Keras model object, compiled.

        """
        # Build the model
        optimizer = RMSprop(self.learning_rate)
        if self.model_type == "unet":
            model = models.unet(
                self.input_shape,
                self.classes,
                self.loss,
            )
        elif self.model_type == "unet_large":
            model = models.unet_large(
                self.input_shape,
                self.classes,
                self.loss,
            )
        elif self.model_type == "fcdensenet":
            model = models.fcdensenet(
                self.input_shape,
                self.classes,
                self.loss,
            )
        elif self.model_type == "fcn_small":
            model = models.fcn_small(self.input_shape, self.classes, self.loss)
        else:
            raise ValueError(f"Unknown model_type: {self.model_type}")
        if self.preload_weights:
            logger.info(
                "=====================================================")
            logger.info(f"Using weights from {self.preload_weights}")
            logger.info(
                "=====================================================")
            model.load_weights(self.preload_weights)
            if config.FINE_TUNE_AT > 0:
                for layer in model.layers[:config.FINE_TUNE_AT]:
                    layer.trainable = False
                logger.info(
                    "=====================================================")
                logger.info(f"Fine tuning from %d layer" % config.FINE_TUNE_AT)
                logger.info(
                    "=====================================================")

        model = models.compile_model(model, self.loss, optimizer)

        model.run_eagerly = True
        model.summary()
        return model
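
Example #3 defers compilation until after the fine-tuning freeze, which matters: in Keras, changes to layer.trainable only take effect at the next compile(). The compile_model helper itself is not shown; a plausible sketch under that assumption:

# Hypothetical sketch of models.compile_model -- the real helper is not
# shown above; this only illustrates the freeze-then-compile ordering.
def compile_model(model, loss, optimizer):
    model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
    return model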
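
The main() function below drives the full training pipeline. It starts by calling do_args, which is not shown; a plausible argparse-based reconstruction, with flag names inferred from the attributes main() reads (defaults, types, and required flags are assumptions):

import argparse

# Hypothetical reconstruction of do_args -- flag names are inferred from the
# args.* attributes read in main(); defaults and types are assumptions.
def do_args(arg_list, name):
    parser = argparse.ArgumentParser(prog=name)
    parser.add_argument("--verbose", type=int, default=1)
    parser.add_argument("--output", required=True)
    parser.add_argument("--name", required=True)
    parser.add_argument("--data_dir", required=True)
    parser.add_argument("--training_states", nargs="+", required=True)
    parser.add_argument("--validation_states", nargs="+", required=True)
    parser.add_argument("--superres_states", nargs="*", default=[])
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--model_type", default="unet")
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--time_budget", type=int, default=0)
    parser.add_argument("--loss", default="crossentropy")
    parser.add_argument("--color", action="store_true")
    return parser.parse_args(arg_list)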
def main():
    prog_name = sys.argv[0]
    args = do_args(sys.argv[1:], prog_name)

    verbose = args.verbose
    output = args.output
    name = args.name

    data_dir = args.data_dir
    training_states = args.training_states
    validation_states = args.validation_states
    superres_states = args.superres_states

    num_epochs = args.epochs
    model_type = args.model_type
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    time_budget = args.time_budget
    loss = args.loss
    do_color_aug = args.color
    do_superres = loss == "superres"

    log_dir = os.path.join(output, name)

    assert os.path.exists(log_dir), "Output directory doesn't exist"

    with open(os.path.join(log_dir, "args.txt"), "w") as f:
        for k, v in args.__dict__.items():
            f.write("%s,%s\n" % (str(k), str(v)))

    print("Starting %s at %s" % (prog_name, str(datetime.datetime.now())))
    start_time = time.time()

    #------------------------------
    # Step 1, load data
    #------------------------------

    training_patches = []
    for state in training_states:
        print("Adding training patches from %s" % (state))
        fn = os.path.join(data_dir, "%s_extended-train_patches.csv" % (state))
        df = pd.read_csv(fn)
        for patch_fn in df["patch_fn"].values:
            training_patches.append((os.path.join(data_dir, patch_fn), state))

    validation_patches = []
    for state in validation_states:
        print("Adding validation patches from %s" % (state))
        fn = os.path.join(data_dir, "%s_extended-val_patches.csv" % (state))
        df = pd.read_csv(fn)
        for patch_fn in df["patch_fn"].values:
            validation_patches.append((os.path.join(data_dir, patch_fn), state))

    print("Loaded %d training patches and %d validation patches" %
          (len(training_patches), len(validation_patches)))

    if do_superres:
        print("Using %d states in superres loss:" % (len(superres_states)))
        print(superres_states)

    #------------------------------
    # Step 2, run experiment
    #------------------------------
    # The step counts are hard-coded below; the commented-out lines would
    # instead derive them from the dataset and batch sizes.
    #training_steps_per_epoch = len(training_patches) // batch_size
    #validation_steps_per_epoch = len(validation_patches) // batch_size

    training_steps_per_epoch = 300
    validation_steps_per_epoch = 39

    print("Number of training/validation steps per epoch: %d/%d" %
          (training_steps_per_epoch, validation_steps_per_epoch))

    # Build the model
    optimizer = RMSprop(learning_rate)
    if model_type == "unet":
        model = models.unet((240, 240, 4), 5, optimizer, loss)
    elif model_type == "unet_large":
        model = models.unet_large((240, 240, 4), 5, optimizer, loss)
    elif model_type == "fcdensenet":
        model = models.fcdensenet((240, 240, 4), 5, optimizer, loss)
    elif model_type == "fcn_small":
        model = models.fcn_small((240, 240, 4), 5, optimizer, loss)
    else:
        raise ValueError("Unknown model_type: %s" % model_type)
    model.summary()

    validation_callback = utils.LandcoverResults(log_dir=log_dir,
                                                 time_budget=time_budget,
                                                 verbose=verbose)
    learning_rate_callback = LearningRateScheduler(utils.schedule_stepped,
                                                   verbose=verbose)

    model_checkpoint_callback = ModelCheckpoint(os.path.join(
        log_dir, "model_{epoch:02d}.h5"),
                                                verbose=verbose,
                                                save_best_only=False,
                                                save_weights_only=False,
                                                period=20)
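
    # Note: on newer tf.keras, ModelCheckpoint's `period` argument (counted
    # in epochs) is deprecated in favor of save_freq, which counts batches,
    # so every-20-epochs checkpointing becomes e.g. (a sketch):
    #   ModelCheckpoint(os.path.join(log_dir, "model_{epoch:02d}.h5"),
    #                   verbose=verbose, save_best_only=False,
    #                   save_weights_only=False,
    #                   save_freq=20 * training_steps_per_epoch)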

    training_generator = datagen.DataGenerator(
        training_patches,
        batch_size,
        training_steps_per_epoch,
        240,
        240,
        4,
        do_color_aug=do_color_aug,
        do_superres=do_superres,
        superres_only_states=superres_states)
    validation_generator = datagen.DataGenerator(validation_patches,
                                                 batch_size,
                                                 validation_steps_per_epoch,
                                                 240,
                                                 240,
                                                 4,
                                                 do_color_aug=do_color_aug,
                                                 do_superres=do_superres,
                                                 superres_only_states=[])

    model.fit_generator(
        training_generator,
        steps_per_epoch=training_steps_per_epoch,
        #epochs=10**6,
        epochs=num_epochs,
        verbose=verbose,
        validation_data=validation_generator,
        validation_steps=validation_steps_per_epoch,
        max_queue_size=256,
        workers=4,
        use_multiprocessing=True,
        callbacks=[
            validation_callback,
            #learning_rate_callback,
            model_checkpoint_callback
        ],
        initial_epoch=0)
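
    # Note: fit_generator was deprecated in TensorFlow 2.1 and later removed;
    # current tf.keras accepts the Sequence directly in fit() with the same
    # arguments, e.g. (a sketch):
    #   model.fit(training_generator,
    #             steps_per_epoch=training_steps_per_epoch,
    #             epochs=num_epochs,
    #             validation_data=validation_generator,
    #             validation_steps=validation_steps_per_epoch,
    #             callbacks=[validation_callback, model_checkpoint_callback])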

    #------------------------------
    # Step 3, save models
    #------------------------------
    model.save(os.path.join(log_dir, "final_model.h5"))

    model_json = model.to_json()
    with open(os.path.join(log_dir, "final_model.json"), "w") as json_file:
        json_file.write(model_json)
    model.save_weights(os.path.join(log_dir, "final_model_weights.h5"))

    print("Finished in %0.4f seconds" % (time.time() - start_time))
    del model, training_generator, validation_generator
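
The three artifacts saved above can be reloaded with standard Keras calls; a sketch (if the compiled loss is custom, for example the superres loss, load_model would also need custom_objects, which is an assumption about this codebase):

from tensorflow.keras.models import load_model, model_from_json

# Sketch: reloading the artifacts saved by main(). compile=False skips
# deserializing the optimizer and loss, so custom losses do not have to
# be registered just to run inference.
model = load_model(os.path.join(log_dir, "final_model.h5"), compile=False)

# Or rebuild the architecture from the JSON file, then restore weights.
with open(os.path.join(log_dir, "final_model.json")) as f:
    model = model_from_json(f.read())
model.load_weights(os.path.join(log_dir, "final_model_weights.h5"))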
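
datagen.DataGenerator is not shown either; a skeleton of a keras.utils.Sequence matching the constructor signature used in main() (the patch loading, color augmentation, and superres handling are omitted; everything beyond the signature is an assumption):

import numpy as np
from tensorflow import keras

# Hypothetical skeleton of datagen.DataGenerator -- only the constructor
# signature comes from the call sites in main(); the rest is assumed.
class DataGenerator(keras.utils.Sequence):
    def __init__(self, patches, batch_size, steps_per_epoch, width, height,
                 channels, do_color_aug=False, do_superres=False,
                 superres_only_states=None):
        self.patches = patches
        self.batch_size = batch_size
        self.steps_per_epoch = steps_per_epoch
        self.shape = (height, width, channels)

    def __len__(self):
        # Number of batches yielded per epoch.
        return self.steps_per_epoch

    def __getitem__(self, idx):
        # Placeholder batch; a real implementation would read the patch
        # files, apply color augmentation, and build superres targets.
        x = np.zeros((self.batch_size,) + self.shape, dtype=np.float32)
        y = np.zeros((self.batch_size, self.shape[0], self.shape[1], 5),
                     dtype=np.float32)  # 5 classes, matching main() above
        return x, y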