Example No. 1
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file",
        default="/media/bi/Data/Mycode/car_demo/ai_city/configs/submit.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    # Read meta information: re-instantiate the dataset to access its test tracks
    dataset = VeRi(cfg.DATASETS.ROOT_DIR)
    print("test_tracks", dataset.test_tracks)
    indices_np = inference(cfg, model, val_loader, num_query, dataset)
    #write_result(indices_np, os.path.dirname(cfg.TEST.WEIGHT), topk=100)

    write_result_with_track(indices_np, os.path.dirname(cfg.TEST.WEIGHT),
                            dataset.test_tracks)
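The --config_file / opts pattern above is the yacs-style override mechanism: argparse.REMAINDER collects the trailing KEY VALUE tokens, cfg.merge_from_list applies them over the values loaded from the file, and cfg.freeze() locks the result. A minimal, self-contained sketch of that pattern, assuming the yacs package; the config keys below are placeholders, not the project's real config:

import argparse

from yacs.config import CfgNode as CN

# Hypothetical config; the real project defines its own cfg elsewhere.
cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.DEVICE = "cuda"
cfg.OUTPUT_DIR = "./output"

parser = argparse.ArgumentParser()
parser.add_argument("opts", default=None, nargs=argparse.REMAINDER)
args = parser.parse_args(["MODEL.DEVICE", "cpu", "OUTPUT_DIR", "/tmp/run1"])

cfg.merge_from_list(args.opts)  # pairs of KEY VALUE tokens override the defaults
cfg.freeze()                    # cfg becomes read-only from here on
print(cfg.MODEL.DEVICE, cfg.OUTPUT_DIR)  # -> cpu /tmp/run1

On the command line this corresponds to invocations such as python test.py --config_file configs/submit.yml MODEL.DEVICE cpu (the script name here is an assumption).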
Example No. 2
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    optimizer = make_optimizer(cfg, model)

    # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
    #                               cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

    loss_func = make_loss(cfg, num_classes)  # modified by gu

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        last_epoch = -1
    elif cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        start_epoch = 0
        last_epoch = -1
        model.load_param(cfg.MODEL.PRETRAIN_PATH, skip_fc=False)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        checkpoint = torch.load(cfg.MODEL.PRETRAIN_PATH, map_location='cuda')
        start_epoch = checkpoint['epoch']
        last_epoch = start_epoch
        model.load_state_dict(checkpoint['state_dict'])
        model.cuda()
        #optimizer = make_optimizer(cfg, model)
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('resume from {}'.format(cfg.MODEL.PRETRAIN_PATH))
    else:
        start_epoch = 0
        last_epoch = -1
        print('Only support pretrain_choice of imagenet, finetune and resume, but got {}'.
              format(cfg.MODEL.PRETRAIN_CHOICE))

    scheduler = build_lr_scheduler(optimizer, cfg.SOLVER.LR_SCHEDULER, cfg,
                                   last_epoch)

    do_train(
        cfg,
        model,
        dataset,
        train_loader,
        val_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_func,
        num_query,
        start_epoch  # add for using self trained model
    )
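The 'resume' branch expects a checkpoint dictionary with 'epoch', 'state_dict' and 'optimizer' entries. The saving side inside do_train is not shown here, so the following is only a hedged sketch of how such a checkpoint could be written:

import torch


def save_resume_checkpoint(path, model, optimizer, epoch):
    # The keys mirror what the 'resume' branch above reads back.
    torch.save(
        {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, path)


# e.g. save_resume_checkpoint('./output/resume_epoch_120.pth', model, optimizer, 120)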
Example No. 3
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    imgs, feats = inference_to_get_feats(cfg, model, val_loader, num_query,
                                         dataset)

    import numpy as np
    np.save('feats_extract.npy', feats.cpu().detach().numpy())
    print(imgs[:50])
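Once the features are dumped to feats_extract.npy, a typical follow-up in a re-ID pipeline is to split query and gallery features and rank the gallery by distance. A small sketch, assuming (as the num_query convention in these scripts suggests) that the first num_query rows are the query features; the cosine distance and the top-10 cut are illustrative choices, not taken from the project:

import numpy as np

feats = np.load('feats_extract.npy')           # (N, D) feature matrix saved above
num_query = 100                                # placeholder; comes from make_data_loader

qf, gf = feats[:num_query], feats[num_query:]  # query / gallery split

# L2-normalise and rank the gallery by cosine distance for each query.
qf = qf / np.linalg.norm(qf, axis=1, keepdims=True)
gf = gf / np.linalg.norm(gf, axis=1, keepdims=True)
dist = 1.0 - qf @ gf.T                         # (num_query, num_gallery)

indices = np.argsort(dist, axis=1)             # nearest gallery images first
print(indices[:, :10])                         # top-10 matches per query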
Example No. 4
def test(cfg, model):
    torch.cuda.empty_cache()
    dataset_name = cfg.DATASET.TEST
    model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    output_folder = os.path.join(model_dir, "inference", dataset_name)
    os.makedirs(output_folder, exist_ok=True)
    data_loader_val = make_data_loader(cfg, is_train=False)
    inference(
        cfg,
        model,
        data_loader_val,
        device=cfg.MODEL.DEVICE,
        output_folder=output_folder,
    )


def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    results = []
    out_dir = 'AIC_crop'
    results += vis_actmap(model, cfg, train_loader, out_dir)
    results += vis_actmap(model, cfg, val_loader, out_dir)

    with open(os.path.join(out_dir, 'detection.json'), 'w') as f:
        json.dump(results, f)
Example No. 6
def train(cfg):
    getter = make_getter(cfg)
    model = build_matching_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model = model.to(device)
    model = torch.nn.DataParallel(model)

    optimizer = make_adam_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)

    # pass arguments to trainer, rather than cfg
    arguments = {}
    arguments["iteration"] = 0

    model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    checkpointer = Checkpointer(cfg, model, optimizer, scheduler, save_dir=model_dir)
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
    arguments.update(extra_checkpoint_data)

    tensorboard = TensorBoard(cfg) if cfg.TENSORBOARD.IS_ON else None

    data_loader = make_data_loader(
        cfg,
        is_train=True,
        start_iter=arguments["iteration"],
    )

    checkpoint_period = cfg.TRAIN.CHECKPOINT_PERIOD

    do_train(
        cfg,
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        tensorboard,
        device,
        checkpoint_period,
        arguments,
        getter
    )

    return model
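The trainer here receives a plain arguments dict instead of the cfg; whatever extra data the checkpointer stored (such as the last iteration) is merged back in, so the data loader can resume from start_iter. The real Checkpointer class is not shown, so the following only sketches the round trip with torch.save/torch.load:

import torch


def save_with_extras(path, model, iteration):
    # Persist the weights plus extra bookkeeping data in one file.
    torch.save({'model': model.state_dict(), 'iteration': iteration}, path)


def load_with_extras(path, model):
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt.pop('model'))
    return ckpt  # whatever is left over, e.g. {'iteration': 15000}


arguments = {'iteration': 0}
# extra_checkpoint_data = load_with_extras('model_0015000.pth', model)
# arguments.update(extra_checkpoint_data)  # data loader then resumes at that iteration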
Example No. 7
def main():
    parser = argparse.ArgumentParser(description="Dense Correspondence")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    model = build_matching_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    model = torch.nn.DataParallel(model)

    model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    checkpointer = Checkpointer(cfg, model, save_dir=model_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    dataset_name = cfg.DATASET.TEST
    output_folder = os.path.join(model_dir, "inference", dataset_name)
    os.makedirs(output_folder, exist_ok=True)
    data_loader_val = make_data_loader(cfg, is_train=False)
    inference(
        cfg,
        model,
        data_loader_val,
        device=cfg.MODEL.DEVICE,
        output_folder=output_folder,
    )
Example No. 8
def main(cfg, config_name):
    """
    Main training function: after preparing the data loaders, model, optimizer, and trainer,
    start with the training process.

    Args:
        cfg (dict): current configuration parameters
        config_name (str): path to the config file
    """

    # Create the output dir if it does not exist
    if not os.path.exists(cfg['misc']['log_dir']):
        os.makedirs(cfg['misc']['log_dir'])

    # Initialize the model
    model = config.get_model(cfg)
    model = model.cuda()

    # Get data loader
    train_loader = make_data_loader(cfg, phase='train')
    val_loader = make_data_loader(cfg, phase='val')

    # Log directory
    dataset_name = cfg["data"]["dataset"]

    now = datetime.now().strftime("%y_%m_%d-%H_%M_%S_%f")
    now += "__Method_" + str(cfg['method']['backbone'])
    now += "__Pretrained_" if cfg['network']['use_pretrained'] and cfg[
        'network']['pretrained_path'] else ''
    if cfg['method']['flow']: now += "__Flow_"
    if cfg['method']['ego_motion']: now += "__Ego_"
    if cfg['method']['semantic']: now += "__Sem_"
    now += "__Rem_Ground_" if cfg['data']['remove_ground'] else ''
    now += "__VoxSize_" + str(cfg['misc']["voxel_size"])
    now += "__Pts_" + str(cfg['misc']["num_points"])
    path2log = os.path.join(cfg['misc']['log_dir'], "logs_" + dataset_name,
                            now)

    logger, checkpoint_dir = prepare_logger(cfg, path2log)
    tboard_logger = SummaryWriter(path2log)

    # Output number of model parameters
    logger.info("Parameter Count: {:d}".format(n_model_parameters(model)))

    # Output torch and cuda version
    logger.info('Torch version: {}'.format(torch.__version__))
    logger.info('CUDA version: {}'.format(torch.version.cuda))

    # Save config file that was used for this experiment
    with open(os.path.join(path2log,
                           config_name.split(os.sep)[-1]), 'w') as outfile:
        yaml.dump(cfg, outfile, default_flow_style=False, allow_unicode=True)

    # Get optimizer and trainer
    optimizer = config.get_optimizer(cfg, model)
    scheduler = config.get_scheduler(cfg, optimizer)

    # Parameters determining the saving and validation interval (if positive denotes iteration if negative epoch)
    stat_interval = cfg['train']['stat_interval']
    stat_interval = stat_interval if stat_interval > 0 else abs(
        stat_interval * len(train_loader))

    chkpt_interval = cfg['train']['chkpt_interval']
    chkpt_interval = chkpt_interval if chkpt_interval > 0 else abs(
        chkpt_interval * len(train_loader))

    val_interval = cfg['train']['val_interval']
    val_interval = val_interval if val_interval > 0 else abs(val_interval *
                                                             len(train_loader))

    # If no pretrained model is loaded, the epoch and iteration counters start at -1
    metric_val_best = np.inf
    running_metrics = {}
    running_losses = {}
    epoch_it = -1
    total_it = -1

    # Load the pretrained weights
    if cfg['network']['use_pretrained'] and cfg['network']['pretrained_path']:
        model, optimizer, scheduler, epoch_it, total_it, metric_val_best = load_checkpoint(
            model,
            optimizer,
            scheduler,
            filename=cfg['network']['pretrained_path'])

        # Find previous tensorboard files and copy them
        tb_files = glob.glob(
            os.sep.join(cfg['network']['pretrained_path'].split(os.sep)[:-1]) +
            '/events.*')
        for tb_file in tb_files:
            shutil.copy(tb_file,
                        os.path.join(path2log,
                                     tb_file.split(os.sep)[-1]))

    # Initialize the trainer
    device = torch.device('cuda' if (
        torch.cuda.is_available() and cfg['misc']['use_gpu']) else 'cpu')
    trainer = config.get_trainer(cfg, model, device)
    acc_iter_size = cfg['train']['acc_iter_size']

    # Training loop
    while epoch_it < cfg['train']['max_epoch']:
        epoch_it += 1
        lr = scheduler.get_last_lr()
        logger.info('Training epoch: {}, LR: {} '.format(epoch_it, lr))
        gc.collect()

        train_loader_iter = iter(train_loader)
        start = time.time()
        tbar = tqdm(total=len(train_loader) // acc_iter_size, ncols=100)

        for it in range(len(train_loader) // acc_iter_size):
            optimizer.zero_grad()
            total_it += 1
            batch_metrics = {}
            batch_losses = {}

            for iter_idx in range(acc_iter_size):

                batch = next(train_loader_iter)

                dict_all_to_device(batch, device)
                losses, metrics, total_loss = trainer.train_step(batch)

                total_loss.backward()

                # Save the running metrics and losses
                if not batch_metrics:
                    batch_metrics = copy.deepcopy(metrics)
                else:
                    for key, value in metrics.items():
                        batch_metrics[key] += value

                if not batch_losses:
                    batch_losses = copy.deepcopy(losses)
                else:
                    for key, value in losses.items():
                        batch_losses[key] += value

            # Compute the mean value of the metrics and losses of the batch
            for key, value in batch_metrics.items():
                batch_metrics[key] = value / acc_iter_size

            for key, value in batch_losses.items():
                batch_losses[key] = value / acc_iter_size

            optimizer.step()
            torch.cuda.empty_cache()

            tbar.set_description('Loss: {:.3g}'.format(
                batch_losses['total_loss']))
            tbar.update(1)

            # Save the running metrics and losses
            if not running_metrics:
                running_metrics = copy.deepcopy(batch_metrics)
            else:
                for key, value in batch_metrics.items():
                    running_metrics[key] += value

            if not running_losses:
                running_losses = copy.deepcopy(batch_losses)
            else:
                for key, value in batch_losses.items():
                    running_losses[key] += value

            # Logs
            if total_it % stat_interval == stat_interval - 1:
                # Print / save logs
                logger.info("Epoch {0:d} - It. {1:d}: loss = {2:.3f}".format(
                    epoch_it, total_it,
                    running_losses['total_loss'] / stat_interval))

                for key, value in running_losses.items():
                    tboard_logger.add_scalar("Train/{}".format(key),
                                             value / stat_interval, total_it)
                    # Reinitialize the values
                    running_losses[key] = 0

                for key, value in running_metrics.items():
                    tboard_logger.add_scalar("Train/{}".format(key),
                                             value / stat_interval, total_it)
                    # Reinitialize the values
                    running_metrics[key] = 0

                start = time.time()

            # Run validation
            if total_it % val_interval == val_interval - 1:
                logger.info("Starting the validation")
                val_losses, val_metrics = trainer.validate(val_loader)

                for key, value in val_losses.items():
                    tboard_logger.add_scalar("Val/{}".format(key), value,
                                             total_it)

                for key, value in val_metrics.items():
                    tboard_logger.add_scalar("Val/{}".format(key), value,
                                             total_it)

                logger.info(
                    "VALIDATION -It. {0:d}: total loss: {1:.3f}.".format(
                        total_it, val_losses['total_loss']))

                if val_losses['total_loss'] < metric_val_best:
                    metric_val_best = val_losses['total_loss']
                    logger.info('New best model (loss: {:.4f})'.format(
                        metric_val_best))

                    save_checkpoint(os.path.join(path2log, 'model_best.pt'),
                                    epoch=epoch_it,
                                    it=total_it,
                                    model=model,
                                    optimizer=optimizer,
                                    scheduler=scheduler,
                                    config=cfg,
                                    best_val=metric_val_best)
                else:
                    save_checkpoint(os.path.join(
                        path2log, 'model_{}.pt'.format(total_it)),
                                    epoch=epoch_it,
                                    it=total_it,
                                    model=model,
                                    optimizer=optimizer,
                                    scheduler=scheduler,
                                    config=cfg,
                                    best_val=val_losses['total_loss'])

        # After the epoch is finished, update the scheduler
        scheduler.step()

    # Quit after the maximum number of epochs is reached
    logger.info(
        'Training completed after {} epochs ({} it) with best val loss = {:.4f}'
        .format(epoch_it, total_it, metric_val_best))
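The inner acc_iter_size loop is gradient accumulation: several mini-batches contribute gradients before a single optimizer step, emulating a larger effective batch. A stripped-down sketch of the same control flow; model, loader, optimizer and loss_fn are assumed PyTorch objects, and, as in the loop above, the sub-batch gradients are summed (dividing the loss by acc_iter_size would average them instead):

def train_epoch(model, loader, optimizer, loss_fn, acc_iter_size, device):
    data_iter = iter(loader)
    for _ in range(len(loader) // acc_iter_size):
        optimizer.zero_grad()
        for _ in range(acc_iter_size):
            inputs, targets = next(data_iter)
            inputs, targets = inputs.to(device), targets.to(device)
            loss = loss_fn(model(inputs), targets)
            loss.backward()  # gradients of the sub-batches are summed
        optimizer.step()     # one update per accumulated batch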
Example No. 9
def main(cfg, logger):
    """
    Main function of this software. After preparing the data loaders, model, optimizer, and trainer,
    start with the training and evaluation process.

    Args:
        cfg (dict): current configuration parameters
    """

    # Initialize parameters
    model_selection_metric = cfg['train']['model_selection_metric']

    if cfg['train']['model_selection_mode'] == 'maximize':
        model_selection_sign = 1
    elif cfg['train']['model_selection_mode'] == 'minimize':
        model_selection_sign = -1
    else:
        raise ValueError(
            'model_selection_mode must be either maximize or minimize.')

    # Get data loader
    train_loader = make_data_loader(cfg, phase='train')
    val_loader = make_data_loader(cfg, phase='val')

    # Set up tensorboard logger
    tboard_logger = SummaryWriter(os.path.join(cfg['misc']['log_dir'], 'logs'))

    # Get model
    model = config.get_model(cfg)

    # Get optimizer and trainer
    optimizer = getattr(optim, cfg['optimizer']['alg'])(
        model.parameters(),
        lr=cfg['optimizer']['learning_rate'],
        weight_decay=cfg['optimizer']['weight_decay'])

    trainer = config.get_trainer(cfg, model, optimizer, tboard_logger)

    # Load pre-trained model if existing
    kwargs = {
        'model': model,
        'optimizer': optimizer,
    }

    checkpoint_io = CheckpointIO(
        cfg['misc']['log_dir'],
        initialize_from=cfg['model']['init_from'],
        initialization_file_name=cfg['model']['init_file_name'],
        **kwargs)

    try:
        load_dict = checkpoint_io.load('model.pt')
    except FileExistsError:
        load_dict = dict()

    epoch_it = load_dict.get('epoch_it', -1)
    it = load_dict.get('it', -1)

    metric_val_best = load_dict.get('loss_val_best',
                                    -model_selection_sign * np.inf)

    if metric_val_best == np.inf or metric_val_best == -np.inf:
        metric_val_best = -model_selection_sign * np.inf

    logger.info('Current best validation metric ({}): {:.5f}'.format(
        model_selection_metric, metric_val_best))

    # Training parameters
    stat_interval = cfg['train']['stat_interval']
    stat_interval = stat_interval if stat_interval > 0 else abs(
        stat_interval * len(train_loader))

    chkpt_interval = cfg['train']['chkpt_interval']
    chkpt_interval = chkpt_interval if chkpt_interval > 0 else abs(
        chkpt_interval * len(train_loader))

    val_interval = cfg['train']['val_interval']
    val_interval = val_interval if val_interval > 0 else abs(val_interval *
                                                             len(train_loader))

    # Print model parameters and model graph
    nparameters = sum(p.numel() for p in model.parameters())
    #print(model)
    logger.info('Total number of parameters: {}'.format(nparameters))

    # Training loop
    while epoch_it < cfg['train']['max_epoch']:
        epoch_it += 1

        for batch in train_loader:
            it += 1
            loss = trainer.train_step(batch, it)
            tboard_logger.add_scalar('train/loss', loss, it)

            # Print output
            if stat_interval != 0 and (it % stat_interval) == 0 and it != 0:
                logger.info('[Epoch {}] it={}, loss={:.4f}'.format(
                    epoch_it, it, loss))

            # Save checkpoint
            if (chkpt_interval != 0 and
                (it % chkpt_interval) == 0) and it != 0:
                logger.info('Saving checkpoint')
                checkpoint_io.save('model.pt',
                                   epoch_it=epoch_it,
                                   it=it,
                                   loss_val_best=metric_val_best)

            # Run validation
            if val_interval != 0 and (it % val_interval) == 0 and it != 0:
                eval_dict = trainer.evaluate(val_loader, it)

                metric_val = eval_dict[model_selection_metric]
                logger.info('Validation metric ({}): {:.4f}'.format(
                    model_selection_metric, metric_val))

                for k, v in eval_dict.items():
                    tboard_logger.add_scalar('val/{}'.format(k), v, it)

                if model_selection_sign * (metric_val - metric_val_best) > 0:
                    metric_val_best = metric_val
                    logger.info(
                        'New best model (loss {:.4f})'.format(metric_val_best))
                    checkpoint_io.save('model_best.pt',
                                       epoch_it=epoch_it,
                                       it=it,
                                       loss_val_best=metric_val_best)

    # Quit after the maximum number of epochs is reached
    logger.info(
        'Training completed after {} Epochs ({} it) with best val metric ({})={}'
        .format(epoch_it, it, model_selection_metric, metric_val_best))
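model_selection_sign folds the maximize and minimize cases into a single comparison. A tiny worked sketch of why sign * (metric_val - metric_val_best) > 0, together with the -sign * inf initialization, covers both modes:

import numpy as np


def is_improvement(metric_val, metric_val_best, sign):
    # sign = 1 when maximizing, -1 when minimizing.
    return sign * (metric_val - metric_val_best) > 0


# Minimizing a loss: best starts at -(-1) * inf = +inf, so any finite value improves it.
print(is_improvement(0.8, -(-1) * np.inf, -1))  # True
print(is_improvement(0.9, 0.8, -1))             # False, 0.9 is a worse loss than 0.8

# Maximizing an accuracy: best starts at -(1) * inf = -inf.
print(is_improvement(0.75, -(1) * np.inf, 1))   # True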
Example No. 10
def main(cfg, logger):
    """
    Main function of this evaluation software. After preparing the data loaders and the model, start the evaluation process.
    Args:
        cfg (dict): current configuration parameters
    """

    # Create the output dir if it does not exist 
    if not os.path.exists(cfg['test']['results_dir']):
        os.makedirs(cfg['test']['results_dir'])

    # Get model
    model = config.get_model(cfg)
    device = torch.device('cuda' if (torch.cuda.is_available() and cfg['misc']['use_gpu']) else 'cpu') 

    # Get data loader
    eval_loader = make_data_loader(cfg, phase='test')

    # Log directory
    dataset_name = cfg["data"]["dataset"]

    path2log = os.path.join(cfg['test']['results_dir'], dataset_name, '{}_{}'.format(cfg['method']['backbone'], cfg['misc']['num_points']))

    logger, checkpoint_dir = prepare_logger(cfg, path2log)

    # Output torch and cuda version
    logger.info('Torch version: {}'.format(torch.__version__))
    logger.info('CUDA version: {}'.format(torch.version.cuda))
    logger.info('Starting evaluation of the method {} on {} dataset'.format(cfg['method']['backbone'], dataset_name))

    # Save config file that was used for this experiment
    with open(os.path.join(path2log, "config.yaml"),'w') as outfile:
        yaml.dump(cfg, outfile, default_flow_style=False, allow_unicode=True)


    logger.info("Parameter Count: {:d}".format(n_model_parameters(model)))
    
    # Load the pretrained weights
    if cfg['network']['use_pretrained'] and cfg['network']['pretrained_path']:
        model, optimizer, scheduler, epoch_it, total_it, metric_val_best = load_checkpoint(model, None, None, filename=cfg['network']['pretrained_path'])

    else:
        logger.warning('MODEL RUNS IN EVAL MODE, BUT NO PRETRAINED WEIGHTS WERE LOADED!!!!')


    # Initialize the trainer
    trainer = config.get_trainer(cfg, model,device)

    # Accumulate the per-scene evaluation metrics
    eval_metrics = defaultdict(list)
    start = time.time()
    
    for it, batch in enumerate(tqdm(eval_loader)):
        # Put all the tensors to the designated device
        dict_all_to_device(batch, device)
        

        metrics = trainer.eval_step(batch)
        
        for key in metrics:
            eval_metrics[key].append(metrics[key])


    stop = time.time()

    # Compute mean values of the evaluation statistics
    result_string = ''

    for key, value in eval_metrics.items():
        if key not in ['true_p', 'true_n', 'false_p', 'false_n']:
            result_string += '{}: {:.3f}; '.format(key, np.mean(value))
    
    if 'true_p' in eval_metrics:
        result_string += '{}: {:.3f}; '.format('dataset_precision_f', (np.sum(eval_metrics['true_p']) / (np.sum(eval_metrics['true_p'])  + np.sum(eval_metrics['false_p'])) ))
        result_string += '{}: {:.3f}; '.format('dataset_recall_f', (np.sum(eval_metrics['true_p']) / (np.sum(eval_metrics['true_p'])  + np.sum(eval_metrics['false_n']))))

        result_string += '{}: {:.3f}; '.format('dataset_precision_b', (np.sum(eval_metrics['true_n']) / (np.sum(eval_metrics['true_n'])  + np.sum(eval_metrics['false_n']))))
        result_string += '{}: {:.3f}; '.format('dataset_recall_b', (np.sum(eval_metrics['true_n']) / (np.sum(eval_metrics['true_n'])  + np.sum(eval_metrics['false_p']))))


    logger.info('Outputting the evaluation metrics for: {} {} {} '.format('Flow, ' if cfg['metrics']['flow'] else '', 'Ego-Motion, ' if cfg['metrics']['ego_motion'] else '', 'Bckg. Segmentation' if cfg['metrics']['semantic'] else ''))
    logger.info(result_string)
    logger.info('Evaluation completed in {}s [{}s per scene]'.format((stop - start), (stop - start)/len(eval_loader)))
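The dataset_precision_*/dataset_recall_* values are computed from true/false positive and negative counts summed over all scenes, rather than by averaging per-scene ratios, so every prediction contributes with equal weight. A small sketch of the same aggregation with hypothetical counts:

import numpy as np

# Hypothetical per-scene counts collected during evaluation.
eval_metrics = {
    'true_p': [90, 80, 95],
    'false_p': [10, 5, 5],
    'true_n': [400, 380, 410],
    'false_n': [20, 15, 10],
}

tp, fp = np.sum(eval_metrics['true_p']), np.sum(eval_metrics['false_p'])
tn, fn = np.sum(eval_metrics['true_n']), np.sum(eval_metrics['false_n'])

precision_f = tp / (tp + fp)  # as in dataset_precision_f above
recall_f = tp / (tp + fn)     # as in dataset_recall_f
precision_b = tn / (tn + fn)  # as in dataset_precision_b
recall_b = tn / (tn + fp)     # as in dataset_recall_b
print('precision_f={:.3f} recall_f={:.3f}'.format(precision_f, recall_f))
print('precision_b={:.3f} recall_b={:.3f}'.format(precision_b, recall_b))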