Example No. 1
    def evaluate(self, test_data_path, weights):
        """Evaluate a trained model on the test dataset
        
        Args:
            test_data_path (str): path to directory containing images for testing
            weights (str): name of the tensorflow checkpoint (weights) to evaluate
        """
        test_data_list = get_filepaths_from_dir(test_data_path)
        if not test_data_list:
            raise ValueError(
                "No test data found in folder {}".format(test_data_path))
        elif len(test_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of test data = {})"
                .format(self.batch_size, len(test_data_list)))
        self.is_exr = is_exr(test_data_list[0])

        # Get and create test dataset
        ds_test = self.get_data(test_data_list, self.batch_size, 1)
        for x, y in ds_test.take(1):  # take one batch from ds_test
            testX, testY = x, y
        print_("Number of test data: {}\n".format(len(test_data_list)), 'm')
        print("Input shape {}, target shape: {}".format(
            testX.shape, testY.shape))

        # Build model
        model = self.get_compiled_model(testX.shape[1:])

        # Load model weights
        print_("Loading trained model for testing...\n", 'm')
        model.load_weights(os.path.join(self.ckpt_dir,
                                        weights)).expect_partial()
        print_("...Checkpoint {} loaded\n".format(weights), 'm')

        # Test final model on this unseen dataset
        results = model.evaluate(ds_test)
        print("test loss, test acc:", results)
        print_("--------End of testing--------\n", 'm')
Example No. 2
    def __init__(self, args):
        # Training hyperparameters
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.epoch = args.epoch
        self.no_resume = args.no_resume
        # A random seed (!=None) allows you to reproduce your training results
        self.seed = args.seed
        if self.seed is not None:
            # Set all seeds necessary for deterministic training
            enable_deterministic_training(self.seed, args.no_gpu_patch)
        self.crop_size = 256
        self.n_levels = 3
        self.scale = 0.5
        self.channels = 3  # input / output channels
        # Training and validation dataset paths
        train_in_data_path = './data/train/input'
        train_gt_data_path = './data/train/groundtruth'
        val_in_data_path = './data/validation/input'
        val_gt_data_path = './data/validation/groundtruth'

        # Where to save and load model weights (=checkpoints)
        self.checkpoints_dir = './checkpoints'
        if not os.path.exists(self.checkpoints_dir):
            os.makedirs(self.checkpoints_dir)
        self.ckpt_save_name = args.ckpt_save_name
        # Maximum number of recent checkpoint files to keep
        self.max_ckpts_to_keep = 50
        # In addition keep one checkpoint file for every N hours of training
        self.keep_ckpt_every_n_hours = 1
        # How often, in training steps, we save model checkpoints
        self.ckpts_save_freq = 1000
        # How often, in training steps, we print training losses to bash
        self.training_print_freq = 10

        # Where to save tensorboard summaries
        self.summaries_dir = './summaries'
        if not os.path.exists(self.summaries_dir):
            os.makedirs(self.summaries_dir)
        # How often, in training steps, we save tensorboard summaries
        self.summaries_save_freq = 10
        # How often, in secs, we flush the pending tensorboard summaries to disk
        self.summary_flush_secs = 30

        # Get training dataset as lists of image paths
        self.train_in_data_list = get_filepaths_from_dir(train_in_data_path)
        self.train_gt_data_list = get_filepaths_from_dir(train_gt_data_path)
        if not self.train_in_data_list or not self.train_gt_data_list:
            raise ValueError(
                "No training data found in folders {} or {}".format(
                    train_in_data_path, train_gt_data_path))
        elif len(self.train_in_data_list) != len(self.train_gt_data_list):
            raise ValueError(
                "{} ({} data) and {} ({} data) should have the same number of input data"
                .format(train_in_data_path, len(self.train_in_data_list),
                        train_gt_data_path, len(self.train_gt_data_list)))
        elif len(self.train_in_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of training data = {})"
                .format(self.batch_size, len(self.train_in_data_list)))
        self.is_exr = is_exr(self.train_in_data_list[0])

        # Get validation dataset if provided
        self.has_val_data = True
        self.val_in_data_list = get_filepaths_from_dir(val_in_data_path)
        self.val_gt_data_list = get_filepaths_from_dir(val_gt_data_path)
        if not self.val_in_data_list or not self.val_gt_data_list:
            print("No validation data found in {} or {}".format(
                val_in_data_path, val_gt_data_path))
            self.has_val_data = False
        elif len(self.val_in_data_list) != len(self.val_gt_data_list):
            raise ValueError(
                "{} ({} data) and {} ({} data) should have the same number of input data"
                .format(val_in_data_path, len(self.val_in_data_list),
                        val_gt_data_path, len(self.val_gt_data_list)))
        elif len(self.val_in_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of validation data = {})"
                .format(self.batch_size, len(self.val_in_data_list)))
        else:
            val_is_exr = is_exr(self.val_in_data_list[0])
            if val_is_exr != self.is_exr:
                raise TypeError(
                    "Train and validation data should have the same file format"
                )
            print("Number of validation data: {}".format(
                len(self.val_in_data_list)))

        # Compute and print training hyperparameters
        batch_per_epoch = len(self.train_in_data_list) // self.batch_size
        self.max_steps = int(self.epoch * batch_per_epoch)
        print_(
            "Number of training data: {}\nNumber of batches per epoch: {} (batch size = {})\nNumber of training steps for {} epochs: {}\n"
            .format(len(self.train_in_data_list), batch_per_epoch,
                    self.batch_size, self.epoch, self.max_steps), 'm')
Example No. 3
    def __init__(self, args):
        # Training hyperparameters
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.epoch = args.epoch
        self.patch_size = 50
        self.channels = 3  # input / output channels
        self.output_param_number = 1
        self.no_resume = args.no_resume
        # A random seed (!=None) allows you to reproduce your training results
        self.seed = args.seed
        if self.seed is not None:
            # Set all seeds necessary for deterministic training
            enable_deterministic_training(self.seed, args.no_gpu_patch)
        # Training and validation dataset paths
        train_data_path = './data/train/'
        val_data_path = './data/validation/'

        # Where to save and load model weights (=checkpoints)
        self.ckpt_dir = './checkpoints'
        if not os.path.exists(self.ckpt_dir):
            os.makedirs(self.ckpt_dir)
        self.ckpt_save_name = args.ckpt_save_name

        # Where to save tensorboard summaries
        self.summaries_dir = './summaries/'
        if not os.path.exists(self.summaries_dir):
            os.makedirs(self.summaries_dir)

        # Get training dataset as list of image paths
        self.train_data_list = get_filepaths_from_dir(train_data_path)
        if not self.train_data_list:
            raise ValueError(
                "No training data found in folder {}".format(train_data_path))
        elif len(self.train_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of training data = {})"
                .format(self.batch_size, len(self.train_data_list)))
        self.is_exr = is_exr(self.train_data_list[0])

        # Compute and print training hyperparameters
        self.batch_per_epoch = len(self.train_data_list) // self.batch_size
        max_steps = int(self.epoch * self.batch_per_epoch)
        print_(
            "Number of training data: {}\nNumber of batches per epoch: {} (batch size = {})\nNumber of training steps for {} epochs: {}\n"
            .format(len(self.train_data_list), self.batch_per_epoch,
                    self.batch_size, self.epoch, max_steps), 'm')

        # Get validation dataset if provided
        self.has_val_data = True
        self.val_data_list = get_filepaths_from_dir(val_data_path)
        if not self.val_data_list:
            print("No validation data found in {}".format(val_data_path))
            self.has_val_data = False
        elif len(self.val_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of validation data = {})"
                .format(self.batch_size, len(self.val_data_list)))
        else:
            val_is_exr = is_exr(self.val_data_list[0])
            if val_is_exr != self.is_exr:
                raise TypeError(
                    "Train and validation data should have the same file format"
                )
            self.val_batch_per_epoch = len(
                self.val_data_list) // self.batch_size
            print(
                "Number of validation data: {}\nNumber of validation batches per epoch: {} (batch size = {})"
                .format(len(self.val_data_list), self.val_batch_per_epoch,
                        self.batch_size))
Example No. 4
    def __init__(self, args):
        # Training hyperparameters
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.epoch = args.epoch
        self.crop_size = 256
        self.n_levels = 3
        self.scale = 0.5
        self.channels = 3  # input / output channels
        # Training and validation dataset paths
        train_in_data_path = './data/train/input'
        train_gt_data_path = './data/train/groundtruth'
        val_in_data_path = './data/val/input'
        val_gt_data_path = './data/val/groundtruth'
        # Where to save and load model weights (=checkpoints)
        self.checkpoints_dir = './checkpoints'
        if not os.path.exists(self.checkpoints_dir):
            os.makedirs(self.checkpoints_dir)
        self.ckpt_save_name = 'trainingTemplateTF.model'
        # Where to save tensorboard summaries
        self.summaries_dir = './summaries'
        if not os.path.exists(self.summaries_dir):
            os.makedirs(self.summaries_dir)

        # Get training dataset as lists of image paths
        self.train_in_data_list = get_filepaths_from_dir(train_in_data_path)
        self.train_gt_data_list = get_filepaths_from_dir(train_gt_data_path)
        if not self.train_in_data_list or not self.train_gt_data_list:
            raise ValueError(
                "No training data found in folders {} or {}".format(
                    train_in_data_path, train_gt_data_path))
        elif len(self.train_in_data_list) != len(self.train_gt_data_list):
            raise ValueError(
                "{} ({} data) and {} ({} data) should have the same number of input data"
                .format(train_in_data_path, len(self.train_in_data_list),
                        train_gt_data_path, len(self.train_gt_data_list)))
        elif len(self.train_in_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of training data = {})"
                .format(self.batch_size, len(self.train_in_data_list)))
        self.is_exr = is_exr(self.train_in_data_list[0])

        # Get validation dataset if provided
        self.has_val_data = True
        self.val_in_data_list = get_filepaths_from_dir(val_in_data_path)
        self.val_gt_data_list = get_filepaths_from_dir(val_gt_data_path)
        if not self.val_in_data_list or not self.val_gt_data_list:
            print("No validation data found in {} or {}".format(
                val_in_data_path, val_gt_data_path))
            self.has_val_data = False
        elif len(self.val_in_data_list) != len(self.val_gt_data_list):
            raise ValueError(
                "{} ({} data) and {} ({} data) should have the same number of input data"
                .format(val_in_data_path, len(self.val_in_data_list),
                        val_gt_data_path, len(self.val_gt_data_list)))
        elif len(self.val_in_data_list) < self.batch_size:
            raise ValueError(
                "Batch size must be smaller than the dataset (batch size = {}, number of validation data = {})"
                .format(self.batch_size, len(self.val_in_data_list)))
        else:
            val_is_exr = is_exr(self.val_in_data_list[0])
            if val_is_exr != self.is_exr:
                raise TypeError(
                    "Train and validation data should have the same file format"
                )
            print("Number of validation data: {}".format(
                len(self.val_in_data_list)))

        # Compute and print training hyperparameters
        batch_per_epoch = len(self.train_in_data_list) // self.batch_size
        self.max_steps = int(self.epoch * batch_per_epoch)
        print_(
            "Number of training data: {}\nNumber of batches per epoch: {} (batch size = {})\nNumber of training steps for {} epochs: {}\n"
            .format(len(self.train_in_data_list), batch_per_epoch,
                    self.batch_size, self.epoch, self.max_steps), 'm')