Example #1
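These examples build on the AVOD 3D object detection codebase and omit their module headers. A plausible import block, assuming the standard kujason/avod layout (the BEV-only model classes and the Example #2 helpers such as create_split, estimate_ground_planes, save_kitti_predictions, and cfg come from a downstream project, so their paths are not shown here), might look like:

import tensorflow as tf

import avod.builders.config_builder_util as config_builder
from avod.builders.dataset_builder import DatasetBuilder
from avod.core.evaluator import Evaluator
from avod.core.models.avod_model import AvodModel
from avod.core.models.rpn_model import RpnModel
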
def inference(model_config, eval_config, dataset_config, data_split,
              ckpt_indices):

    # Convert to object to overwrite repeated fields
    dataset_config = config_builder.proto_to_obj(dataset_config)

    dataset_config.data_split = data_split
    dataset_config.data_split_dir = 'training'
    if data_split == 'test':
        dataset_config.data_split_dir = 'testing'

    eval_config.eval_mode = 'test'
    eval_config.evaluate_repeatedly = False

    dataset_config.has_labels = False
    # Enable GPU memory growth to see the actual memory being used
    eval_config.allow_gpu_mem_growth = True

    eval_config = config_builder.proto_to_obj(eval_config)
    # Grab the checkpoint indices to evaluate
    eval_config.ckpt_indices = ckpt_indices

    # Remove augmentation during evaluation in test mode
    dataset_config.aug_list = []

    # Build the dataset object
    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    # Setup the model
    model_name = model_config.model_name
    # Convert to object to overwrite repeated fields
    model_config = config_builder.proto_to_obj(model_config)
    # Turn path drop off during evaluation (keep all input paths active)
    model_config.path_drop_probabilities = [1.0, 1.0]

    with tf.Graph().as_default():
        if model_name == 'avod_model':
            model = AvodModel(model_config,
                              train_val_test=eval_config.eval_mode,
                              dataset=dataset)
        elif model_name == 'rpn_model':
            model = RpnModel(model_config,
                             train_val_test=eval_config.eval_mode,
                             dataset=dataset)
        elif model_name == 'bev_only_rpn_model':
            model = BevOnlyRpnModel(model_config,
                                    train_val_test=eval_config.eval_mode,
                                    dataset=dataset)
        elif model_name == 'bev_only_avod_model':
            model = BevOnlyAvodModel(model_config,
                                     train_val_test=eval_config.eval_mode,
                                     dataset=dataset)
        else:
            raise ValueError('Invalid model name {}'.format(model_name))

        model_evaluator = Evaluator(model, dataset_config, eval_config)
        model_evaluator.run_latest_checkpoints()
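
A minimal usage sketch for inference(), assuming AVOD's config_builder.get_configs_from_pipeline_file helper; the config path and checkpoint index below are hypothetical values:

# Load model/eval/dataset configs from a pipeline config (hypothetical path)
model_config, _, eval_config, dataset_config = \
    config_builder.get_configs_from_pipeline_file(
        'avod/configs/pyramid_cars_example.config', is_training=False)

# Run test-mode inference on the val split at one checkpoint index
inference(model_config, eval_config, dataset_config,
          data_split='val', ckpt_indices=[120])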
Example #2
def inferPerspective(model_config, eval_config, dataset_config,
                     additional_cls):
    model_name = model_config.model_name

    entity_perspect_dir = (dataset_config.dataset_dir +
                           dataset_config.data_split_dir + '/')

    logging.debug("Inferring perspective: %s\n %s\n %s",
                  dataset_config.data_split, entity_perspect_dir,
                  dataset_config.dataset_dir)

    files_in_range = create_split.create_split(dataset_config.dataset_dir,
                                               entity_perspect_dir,
                                               dataset_config.data_split)

    # If there are no files within the range cfg.MIN_IDX, cfg.MAX_IDX
    # then skip this perspective
    if not files_in_range:
        logging.debug(
            "No files within the range cfg.MIN_IDX, cfg.MAX_IDX, skipping perspective"
        )
        return

    if not additional_cls:
        estimate_ground_planes.estimate_ground_planes(entity_perspect_dir,
                                                      dataset_config, 0)

    # Build the dataset object
    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    # Switch the inference output directory
    model_config.paths_config.pred_dir = entity_perspect_dir + '/{}/'.format(
        cfg.AVOD_OUTPUT_DIR)
    logging.debug("Prediction directory: %s",
                  model_config.paths_config.pred_dir)

    with tf.Graph().as_default():
        if model_name == 'avod_model':
            model = AvodModel(model_config,
                              train_val_test=eval_config.eval_mode,
                              dataset=dataset)
        elif model_name == 'rpn_model':
            model = RpnModel(model_config,
                             train_val_test=eval_config.eval_mode,
                             dataset=dataset)
        else:
            raise ValueError('Invalid model name {}'.format(model_name))

        model_evaluator = Evaluator(model, dataset_config, eval_config)
        model_evaluator.run_latest_checkpoints()

    save_kitti_predictions.convertPredictionsToKitti(
        dataset, model_config.paths_config.pred_dir, additional_cls)
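
A hedged call sketch for inferPerspective(). It assumes dataset_config already points at a single perspective's dataset_dir and data_split_dir, since the function derives its working directory from those fields:

# First pass: additional_cls=False, so ground planes are estimated
inferPerspective(model_config, eval_config, dataset_config,
                 additional_cls=False)

# A later pass for additional classes skips ground plane estimation
inferPerspective(model_config, eval_config, dataset_config,
                 additional_cls=True)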
Example #3
def evaluate(model_config, eval_config, dataset_config):

    # Parse eval config
    eval_mode = eval_config.eval_mode
    if eval_mode not in ['val', 'test']:
        raise ValueError('Evaluation mode can only be set to `val` or `test`')
    evaluate_repeatedly = eval_config.evaluate_repeatedly

    # Parse dataset config
    data_split = dataset_config.data_split
    if data_split == 'train':
        dataset_config.data_split_dir = 'training'
        dataset_config.has_labels = True

    elif data_split.startswith('val'):
        dataset_config.data_split_dir = 'training'

        # Only load labels for the val split when evaluating in val mode
        if eval_mode == 'val':
            dataset_config.has_labels = True
        elif eval_mode == 'test':
            dataset_config.has_labels = False

    elif data_split == 'test':
        dataset_config.data_split_dir = 'testing'
        dataset_config.has_labels = False

    else:
        raise ValueError('Invalid data split {}'.format(data_split))

    # Convert to object to overwrite repeated fields
    dataset_config = config_builder.proto_to_obj(dataset_config)

    # Remove augmentation during evaluation
    dataset_config.aug_list = []

    # Build the dataset object
    dataset = DatasetBuilder.build_kitti_dataset(dataset_config,
                                                 use_defaults=False)

    # Setup the model
    model_name = model_config.model_name

    # Convert to object to overwrite repeated fields
    model_config = config_builder.proto_to_obj(model_config)

    # Turn path drop off during evaluation (keep all input paths active)
    model_config.path_drop_probabilities = [1.0, 1.0]

    with tf.Graph().as_default():
        if model_name == 'avod_model':
            model = AvodModel(model_config,
                              train_val_test=eval_mode,
                              dataset=dataset)
        elif model_name == 'rpn_model':
            model = RpnModel(model_config,
                             train_val_test=eval_mode,
                             dataset=dataset)
        else:
            raise ValueError('Invalid model name {}'.format(model_name))

        model_evaluator = Evaluator(model, dataset_config, eval_config)

        if evaluate_repeatedly:
            model_evaluator.repeated_checkpoint_run()
        else:
            model_evaluator.run_latest_checkpoints()
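
evaluate() mirrors AVOD's run_evaluation entry point. A caller sketch, reusing the hypothetical config loading from the earlier examples:

model_config, _, eval_config, dataset_config = \
    config_builder.get_configs_from_pipeline_file(
        'avod/configs/pyramid_cars_example.config', is_training=False)

eval_config.eval_mode = 'val'            # or 'test'
eval_config.evaluate_repeatedly = True   # keep polling for new checkpoints
dataset_config.data_split = 'val'

evaluate(model_config, eval_config, dataset_config)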