Example #1
def main():
    # experiment configurations
    cfgs = parse.parse_args()

    # logging
    logger, final_output_dir = liblogger.get_logger(cfgs)

    # Set GPU
    if cfgs['use_gpu'] and torch.cuda.is_available():
        GPUs = cfgs['gpu_id']
    else:
        # fall back to an empty GPU list so the checks below do not fail
        GPUs = []
        logger.info("GPU acceleration is disabled.")

    if len(GPUs) == 1:
        torch.cuda.set_device(GPUs[0])

    # cudnn related setting
    torch.backends.cudnn.benchmark = cfgs['cudnn']['benchmark']
    torch.backends.cudnn.deterministic = cfgs['cudnn']['deterministic']
    torch.backends.cudnn.enabled = cfgs['cudnn']['enabled']

    # model initialization
    model_settings = cfgs['heatmapModel']
    model_name = model_settings['name']
    method_str = 'models.heatmapModel' + '.' + model_name + '.get_pose_net'
    model = eval(method_str)(cfgs, is_train=cfgs['train'])

    if cfgs['train']:
        train(model, model_settings, GPUs, cfgs, logger, final_output_dir)
    elif cfgs['evaluate']:
        evaluate(model, model_settings, GPUs, cfgs, logger, final_output_dir)
Example #2
def main():
    # experiment configurations
    cfgs = parse.parse_args()

    # logging
    logger, final_output_dir = liblogger.get_logger(cfgs)
    
    # Set GPU
    if cfgs['use_gpu'] and torch.cuda.is_available():
        GPUs = cfgs['gpu_id']
    else:
        logger.info("GPU acceleration is disabled.")

    # load datasets
    train_dataset, eval_dataset = car_instance.prepare_data(cfgs, logger)
    logger.info("Finished preparing datasets...")
    
    # training
    record = None
    if cfgs['train']:
        record = trainer.train_cascade(train_dataset, eval_dataset, cfgs, logger)
        cascade = record['cascade']
        
    if cfgs['save'] and 'cascade' in locals():
        if 'save_name' in cfgs:
            # save the cascaded model
            save_path = os.path.join(cfgs['save_dir'], cfgs['save_name'])
        else:
            save_name = 'lifter'
            save_path = os.path.join(cfgs['dirs']['output'], save_name)
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        # save the model and the normalization statistics
        torch.save(cascade[0].cpu().state_dict(), 
                   os.path.join(save_path, 'final_state.th')
                   )
        np.save(os.path.join(save_path, 'stats.npy'), train_dataset.statistics)
        # save loss history
        #np.save(os.path.join(save_path, 'record.npy'), record['record'])
        
    if cfgs['visualize'] or cfgs['evaluate']:
        # visualize the predictions
        cascade = torch.load(cfgs['load_model_path'])        
        if cfgs['use_gpu']:
            cascade.cuda()
            
    if cfgs['evaluate']:   
        trainer.evaluate_cascade(cascade, eval_dataset, cfgs) 
        
    return record
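
The block above only writes final_state.th and stats.npy; a minimal sketch of reading them back (the network instance to restore into is passed in by the caller, since its class is not shown here):

import os
import numpy as np
import torch

def load_lifter(save_path, model):
    # restore the weights saved above and the normalization statistics
    state_dict = torch.load(os.path.join(save_path, 'final_state.th'),
                            map_location='cpu')
    model.load_state_dict(state_dict)
    # allow_pickle=True in case the statistics were stored as a Python object
    stats = np.load(os.path.join(save_path, 'stats.npy'), allow_pickle=True)
    return model, stats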
Example #3
def main():
    # experiment configurations
    cfgs = parse.parse_args()

    # logging
    logger, final_output_dir = liblogger.get_logger(cfgs)

    # save a copy of the experiment configuration
    save_cfg_path = os.path.join(final_output_dir, 'saved_config.yml')
    shutil.copyfile(cfgs['config_path'], save_cfg_path)

    # set GPU
    if cfgs['use_gpu'] and torch.cuda.is_available():
        logger.info('Using GPU:{}'.format(cfgs['gpu_id']))
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
            list(map(str, cfgs['gpu_id'])))
    else:
        raise ValueError('CPU-based inference is not maintained.')

    # cudnn related setting
    torch.backends.cudnn.benchmark = cfgs['cudnn']['benchmark']
    torch.backends.cudnn.deterministic = cfgs['cudnn']['deterministic']
    torch.backends.cudnn.enabled = cfgs['cudnn']['enabled']

    # configurations related to the KITTI dataset
    data_cfgs = cfgs['dataset']

    # which split to show
    split = data_cfgs['split']  # default: KITTI val split
    dataset_inf = libkitti.get_dataset(cfgs, logger, split)

    # set the dataset to inference mode
    dataset_inf.inference([True, False])

    # read annotations
    input_file_path = cfgs['dirs']['load_prediction_file']
    # the record for 2D and 3D predictions
    results = {}

    # flags: the user can choose which type of input bounding boxes to use
    # use_gt_box can be used to reproduce the experiments simulating perfect 2D detection
    results['flags'] = {}
    if cfgs['use_pred_box']:
        # read the predicted boxes as specified by the path
        results['pred'] = dataset_inf.read_predictions(input_file_path)

    # Initialize Ego-Net and load the pre-trained checkpoint
    model = EgoNet(cfgs, pre_trained=True)
    model = model.eval().cuda()

    # perform inference and save the (updated) predictions
    inference(dataset_inf, model, results, cfgs)
    if cfgs['visualize']:
        return

    evaluator = "./kitti-eval/evaluate_object_3d_offline"
    label_dir = os.path.join(cfgs['dataset']['root'], 'training', 'label_2')
    output_dir = os.path.join(cfgs['dirs']['output'], 'submission')

    # When generating submission files for the test split,
    # if no detections are produced for one image, generate an empty file
    if cfgs['dataset']['split'] == 'test':
        test_calib_dir = os.path.join(cfgs['dataset']['root'], 'testing',
                                      'calib')
        generate_empty_file(output_dir, test_calib_dir)
        return

    # run kitti-eval to produce official evaluation
    command = "{} {} {}".format(evaluator, label_dir, output_dir)
    output = subprocess.check_output(command, shell=True)
    print(output.decode())
    return output
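
The offline evaluator is invoked above through a shell string; a minimal sketch of the same call without shell=True, which sidesteps quoting issues if any of the paths contain spaces:

import subprocess

def run_kitti_eval(evaluator, label_dir, output_dir):
    # equivalent to "<evaluator> <label_dir> <output_dir>", but passed as an argument list
    output = subprocess.check_output([evaluator, label_dir, output_dir])
    return output.decode()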
Example #4
def main():
    # experiment configurations
    cfgs = parse.parse_args()
    
    # logging
    logger, final_output_dir = liblogger.get_logger(cfgs)   
    shutil.copyfile(cfgs['config_path'], os.path.join(final_output_dir, 'saved_config.yml'))
    # Set GPU
    if cfgs['use_gpu'] and torch.cuda.is_available():
        GPUs = cfgs['gpu_id']
    else:
        logger.info("GPU acceleration is disabled.")
        
    # cudnn related setting
    torch.backends.cudnn.benchmark = cfgs['cudnn']['benchmark']
    torch.backends.cudnn.deterministic = cfgs['cudnn']['deterministic']
    torch.backends.cudnn.enabled = cfgs['cudnn']['enabled']

    data_cfgs = cfgs['dataset']
    
    # which split to show
    split = 'valid'
    dataset_inf = eval('dataset.' + data_cfgs['name']
                       + '.car_instance').get_dataset(cfgs, logger, split)
    # set to inference mode but does not read image
    dataset_inf.inference([True, False])
    
    # some temporary testing
    # test_angle_conversion(dataset_inf, dataset_inf.instance_stats['ref_box3d'])
    
    # read annotations
    input_file_path = cfgs['dirs']['load_prediction_file']
    # the record for 2D and 3D predictions
    # key->value: name of the approach->dictionary storing the predictions
    results = {}
    confidence_thres = cfgs['conf_thres']
    
    # flags: use bounding box produced by SMOKE as well as the ground truth boxes
    # for comparison
    results['flags'] = {}
    results['flags']['pred'] = cfgs['use_pred_box']
    if results['flags']['pred']:
        results['pred'] = dataset_inf.read_predictions(input_file_path)
    results['flags']['gt'] = cfgs['use_gt_box']
    
    # load checkpoints
    model_dict = prepare_models(cfgs)
    
    # inference and update prediction
    inference(dataset_inf, model_dict, results, cfgs)       
    
    # then you can run kitti-eval for evaluation
    evaluator = cfgs['dirs']['kitti_evaluator']
    label_dir = cfgs['dirs']['kitti_label']
    output_dir = os.path.join(cfgs['dirs']['output'], 'submission')
    
    # if no detections are produced, generate an empty file
    #generate_empty_file(output_dir, label_dir)
    command = "{} {} {}".format(evaluator, label_dir, output_dir)
    # e.g.
    # ~/Documents/Github/SMOKE/smoke/data/datasets/evaluation/kitti/kitti_eval/evaluate_object_3d_offline /home/nicholas/Documents/Github/SMOKE/datasets/kitti/training/label_2 /media/nicholas/Database/experiments/3DLearning/0826
    # /media/nicholas/Database/Github/M3D-RPN/data/kitti_split1/devkit/cpp/evaluate_object /home/nicholas/Documents/Github/SMOKE/datasets/kitti/training/label_2 /media/nicholas/Database/Github/M3D-RPN/output/tmp_results
    return
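
Example #4 resolves the dataset module with eval and builds the kitti-eval command string without executing it. A minimal sketch of the same dataset lookup via importlib (assuming the dataset.<name>.car_instance layout used above), mirroring the pattern shown after Example #1:

import importlib

def resolve_car_instance(dataset_name):
    # import dataset.<dataset_name>.car_instance and return the module
    return importlib.import_module('dataset.' + dataset_name + '.car_instance')

# usage sketch, mirroring Example #4:
# dataset_inf = resolve_car_instance(data_cfgs['name']).get_dataset(cfgs, logger, split)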