Example #1
0
class Symbol_Builder():
    """Assemble the complete TensorFlow training symbol for an experiment.

    Build order is significant: the model graph is created first so that
    ``tf.trainable_variables()`` captures its variables; the loss is then
    built from the model outputs, and finally the TensorBoard summaries
    and the optimizer are built on top of that loss.
    """

    def __init__(self, cf, size, sess):
        # cf: experiment configuration object; sess: TF session the model is built in.
        self.cf = cf
        # NOTE(review): the `size` argument is ignored — self.size is always
        # taken from cf.size_image_train. Confirm this is intentional.
        self.size = self.cf.size_image_train
        self.build_model(sess)
        # Snapshot of the trainable variables created by build_model() above;
        # reused by both the loss (regularization) and the optimizer.
        self.trainable_var = tf.trainable_variables()
        self.__build_loss()
        self.__build_TB()
        self.__build_optimizer()

    def build_model(self, sess):
        """Create the network graph via Model_builder and build it in *sess*."""
        print('Generating model ...')
        self.model = Model_builder(self.cf, self.size)
        self.model.Build(sess)

    def __build_loss(self):
        """Build the loss tensor from the model logits and ground-truth placeholder."""
        print('Configuring metrics ...')
        self.loss = LossHandler(self.cf)
        self.loss_fun = self.loss.loss(self.model.logits, self.model.simb_gt,
                                       self.trainable_var)

    def __build_TB(self):
        """Create the TensorBoard summary builder for the model and loss."""
        print('Preparing TensorBoard ...')
        self.tensorBoard = TB_Builder(self.cf, self.model, self.loss_fun)

    def __build_optimizer(self):
        """Select the optimizer and create the gradients and the train op."""
        print('Selecting optimizer ...')
        self.optimizer = Optimizer_builder().build(self.cf)
        self.grads_and_vars, self.train_op = Optimizer_builder().minimize(
            self.optimizer, self.loss_fun, self.trainable_var)
Example #2
0
 def build_model(self, sess):
     """Instantiate the network via Model_builder and build it in *sess*."""
     print('Generating model ...')
     built_model = Model_builder(self.cf, self.size)
     self.model = built_model
     built_model.Build(sess)
Example #3
0
def main():
    """Entry point: load the experiment config, build the model, and run
    the enabled train / validation / test / prediction stages.

    Which stages run is controlled by boolean flags on the loaded config
    (``cf.train``, ``cf.validation``, ``cf.test``, ``cf.predict_test``);
    all progress and per-stage timings are written to ``cf.log_file_debug``.
    """
    start_time = time.time()
    # Prepare configuration
    print('Loading configuration ...')
    config = Configuration()
    cf = config.Load()
    # Enable log file
    logger_debug = Logger(cf.log_file_debug)

    logger_debug.write('\n ---------- Init experiment: ' + cf.exp_name +
                       ' ---------- \n')
    # Model building
    logger_debug.write('- Building model: ' + cf.model_name + ' <--- ')
    model = Model_builder(cf)
    model.build()

    # Problem type: dispatch to the task-specific manager.
    if cf.problem_type == 'segmentation':
        problem_manager = SemanticSegmentation_Manager(cf, model)
    elif cf.problem_type == 'classification':
        problem_manager = Classification_Manager(cf, model)
    elif cf.problem_type == 'detection':
        problem_manager = Detection_Manager(cf, model)
    else:
        raise ValueError('Unknown problem type')

    # Create dataloader builder
    dataloader = Dataloader_Builder(cf, model)

    if cf.train:
        model.net.train()  # enable dropout modules and others
        train_time = time.time()
        # Dataloaders
        logger_debug.write('\n- Reading Train dataset: ')
        dataloader.build_train()
        # Validate inside each epoch only when a validation split is fully
        # configured and a non-zero per-epoch sample count is requested.
        if cf.valid_images_txt is not None and cf.valid_gt_txt is not None and cf.valid_samples_epoch != 0:
            logger_debug.write('\n- Reading Validation dataset: ')
            dataloader.build_valid(cf.valid_samples_epoch, cf.valid_images_txt,
                                   cf.valid_gt_txt, cf.resize_image_valid,
                                   cf.valid_batch_size)
            # Pass the validation loaders too so the trainer can validate
            # within the epoch loop.
            problem_manager.trainer.start(dataloader.train_loader,
                                          dataloader.train_set,
                                          dataloader.loader_set,
                                          dataloader.loader)
        else:
            # Train without validation inside epoch
            problem_manager.trainer.start(dataloader.train_loader,
                                          dataloader.train_set)
        train_time = time.time() - train_time
        logger_debug.write('\t Train step finished: %ds ' % (train_time))

    if cf.validation:
        valid_time = time.time()
        model.net.eval()
        if not cf.train:
            logger_debug.write('- Reading Validation dataset: ')
            dataloader.build_valid(cf.valid_samples, cf.valid_images_txt,
                                   cf.valid_gt_txt, cf.resize_image_valid,
                                   cf.valid_batch_size)
        else:
            # If the Dataloader for validation was used on train, only update the total number of images to take
            dataloader.loader_set.update_indexes(
                cf.valid_samples,
                valid=True)  #valid=True avoids shuffle for validation
        logger_debug.write('\n- Starting validation <---')
        problem_manager.validator.start(dataloader.loader_set,
                                        dataloader.loader, 'Validation')
        valid_time = time.time() - valid_time
        logger_debug.write('\t Validation step finished: %ds ' % (valid_time))

    if cf.test:
        model.net.eval()
        test_time = time.time()
        logger_debug.write('\n- Reading Test dataset: ')
        # NOTE(review): the test split reuses build_valid with test-specific
        # config values — presumably the loaders are interchangeable; verify.
        dataloader.build_valid(cf.test_samples, cf.test_images_txt,
                               cf.test_gt_txt, cf.resize_image_test,
                               cf.test_batch_size)
        logger_debug.write('\n - Starting test <---')
        problem_manager.validator.start(dataloader.loader_set,
                                        dataloader.loader, 'Test')
        test_time = time.time() - test_time
        logger_debug.write('\t Test step finished: %ds ' % (test_time))

    if cf.predict_test:
        model.net.eval()
        pred_time = time.time()
        logger_debug.write('\n- Reading Prediction dataset: ')
        dataloader.build_predict()
        logger_debug.write('\n - Generating predictions <---')
        problem_manager.predictor.start(dataloader.predict_loader)
        pred_time = time.time() - pred_time
        logger_debug.write('\t Prediction step finished: %ds ' % (pred_time))

    total_time = time.time() - start_time
    logger_debug.write('\n- Experiment finished: %ds ' % (total_time))
    logger_debug.write('\n')
Example #4
0
def _parse_arguments():
    """Parse CLI options: config file path, experiment name and folder."""
    parser = argparse.ArgumentParser(description="TensorFlow framework for Semantic Segmentation")
    parser.add_argument("--config_file",
                        type=str,
                        default='config/configFile.py',
                        help="configuration file path")

    parser.add_argument("--exp_name",
                        type=str,
                        default='Sample',
                        help="Experiment name")

    parser.add_argument("--exp_folder",
                        type=str,
                        default='/home/jlgomez/Experiments/',
                        help="Experiment folder path")

    return parser.parse_args()


def _build_problem_manager(cf, model):
    """Select the task-specific manager from ``cf.problem_type``.

    Raises:
        ValueError: if ``cf.problem_type`` is not a supported task.
    """
    if cf.problem_type == 'segmentation':
        return SemanticSegmentation_Manager(cf, model)
    if cf.problem_type == 'classification':
        return Classification_Manager(cf, model)
    raise ValueError('Unknown problem type')


def _run_train(cf, logger_debug, model, problem_manager, dataloader,
               criterion, optimizer, scheduler):
    """Run the training stage, optionally validating inside each epoch."""
    model.net.train()  # enable dropout modules and others
    train_time = time.time()
    # Dataloaders
    logger_debug.write('\n- Reading Train dataset: ')
    dataloader.build_train()
    # Validate inside each epoch only when a validation split is fully
    # configured and a non-zero per-epoch sample count is requested.
    if cf.valid_images_txt is not None and cf.valid_gt_txt is not None and cf.valid_samples_epoch != 0:
        logger_debug.write('\n- Reading Validation dataset: ')
        dataloader.build_valid(cf.valid_samples_epoch, cf.valid_images_txt, cf.valid_gt_txt,
                               cf.resize_image_valid, cf.valid_batch_size)
        problem_manager.trainer.start(criterion, optimizer, dataloader.train_loader, dataloader.train_set,
                                      dataloader.loader_set, dataloader.loader, scheduler)
    else:
        # Train without validation inside epoch
        problem_manager.trainer.start(criterion, optimizer, dataloader.train_loader, dataloader.train_set,
                                      scheduler=scheduler)
    train_time = time.time() - train_time
    logger_debug.write('\t Train step finished: %ds ' % (train_time))


def _run_validation(cf, logger_debug, model, problem_manager, dataloader, criterion):
    """Run the validation stage, reusing the train-time loader when possible."""
    valid_time = time.time()
    model.net.eval()
    if not cf.train:
        logger_debug.write('- Reading Validation dataset: ')
        dataloader.build_valid(cf.valid_samples,cf.valid_images_txt, cf.valid_gt_txt,
                               cf.resize_image_valid, cf.valid_batch_size)
    else:
        # If the Dataloader for validation was used on train, only update the total number of images to take
        dataloader.loader_set.update_indexes(cf.valid_samples, valid=True) #valid=True avoids shuffle for validation
    logger_debug.write('\n- Starting validation <---')
    problem_manager.validator.start(criterion, dataloader.loader_set, dataloader.loader)
    valid_time = time.time() - valid_time
    logger_debug.write('\t Validation step finished: %ds ' % (valid_time))


def _run_test(cf, logger_debug, model, problem_manager, dataloader, criterion):
    """Run the test stage on the test split via the shared validator."""
    model.net.eval()
    test_time = time.time()
    logger_debug.write('\n- Reading Test dataset: ')
    dataloader.build_valid(cf.test_samples, cf.test_images_txt, cf.test_gt_txt,
                           cf.resize_image_test, cf.test_batch_size)
    logger_debug.write('\n - Starting test <---')
    problem_manager.validator.start(criterion, dataloader.loader_set, dataloader.loader)
    test_time = time.time() - test_time
    logger_debug.write('\t Test step finished: %ds ' % (test_time))


def _run_prediction(logger_debug, model, problem_manager, dataloader):
    """Generate predictions for the prediction split."""
    model.net.eval()
    pred_time = time.time()
    logger_debug.write('\n- Reading Prediction dataset: ')
    dataloader.build_predict()
    logger_debug.write('\n - Generating predictions <---')
    problem_manager.predictor.start(dataloader.predict_loader)
    pred_time = time.time() - pred_time
    logger_debug.write('\t Prediction step finished: %ds ' % (pred_time))


def main():
    """Entry point: build the experiment from CLI args and config, then run
    the enabled train / validation / test / prediction stages.

    Stage toggles come from the loaded config (``cf.train``, ``cf.validation``,
    ``cf.test``, ``cf.predict_test``); progress and per-stage timings are
    written to ``cf.log_file_debug``.
    """
    start_time = time.time()
    # Input arguments
    args = _parse_arguments()

    # Prepare configuration
    print ('Loading configuration ...')
    config = Configuration(args.config_file, args.exp_name, args.exp_folder)
    cf = config.Load()
    # Enable log file
    logger_debug = Logger(cf.log_file_debug)

    logger_debug.write('\n ---------- Init experiment: ' + cf.exp_name + ' ---------- \n')

    # Model building
    logger_debug.write('- Building model: ' + cf.model_name + ' <--- ')
    model = Model_builder(cf)
    model.build()

    # Problem type
    problem_manager = _build_problem_manager(cf, model)

    # Loss definition (moved to the GPU, as in the original pipeline)
    criterion = Loss_Builder(cf).build().cuda()

    # Optimizer definition
    optimizer = Optimizer_builder().build(cf, model.net)

    # Learning rate scheduler
    scheduler = scheduler_builder().build(cf, optimizer)

    # Create dataloader builder
    dataloader = Dataloader_Builder(cf)

    if cf.train:
        _run_train(cf, logger_debug, model, problem_manager, dataloader,
                   criterion, optimizer, scheduler)

    if cf.validation:
        _run_validation(cf, logger_debug, model, problem_manager, dataloader, criterion)

    if cf.test:
        _run_test(cf, logger_debug, model, problem_manager, dataloader, criterion)

    if cf.predict_test:
        _run_prediction(logger_debug, model, problem_manager, dataloader)

    total_time = time.time() - start_time
    logger_debug.write('\n- Experiment finished: %ds ' % (total_time))
    logger_debug.write('\n')