Example #1
import json
import os

import numpy as np
from tqdm import tqdm

# Imports below are assumed to come from the nuScenes devkit; exact module paths
# may differ across devkit versions.
from nuscenes import NuScenes
from nuscenes.eval.lidarseg.utils import ConfusionMatrix, LidarsegClassMapper, get_samples_in_eval_set
from nuscenes.utils.data_classes import LidarPointCloud


class LidarSegEval:
    def __init__(self,
                 nusc: NuScenes,
                 results_folder: str,
                 eval_set: str,
                 verbose: bool = False):
        """
        Initialize a LidarSegEval object.
        :param nusc: A NuScenes object.
        :param results_folder: Path to the folder which contains the results to be evaluated.
        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
        :param verbose: Whether to print messages during the evaluation.
        """
        # Check there are ground truth annotations.
        assert len(nusc.lidarseg) > 0, \
            'Error: No ground truth annotations found in {}.'.format(nusc.version)

        # Check results folder exists.
        self.results_bin_folder = os.path.join(results_folder, 'lidarseg',
                                               eval_set)
        assert os.path.exists(self.results_bin_folder), \
            'Error: The folder containing the .bin files ({}) does not exist.'.format(self.results_bin_folder)

        self.nusc = nusc
        self.results_folder = results_folder
        self.eval_set = eval_set
        self.verbose = verbose

        self.mapper = LidarsegClassMapper(self.nusc)
        self.ignore_idx = self.mapper.ignore_class['index']
        self.id2name = {
            idx: name
            for name, idx in
            self.mapper.coarse_name_2_coarse_idx_mapping.items()
        }
        self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)

        if self.verbose:
            print('There are {} classes.'.format(self.num_classes))

        self.global_cm = ConfusionMatrix(self.num_classes, self.ignore_idx)

        self.sample_tokens = get_samples_in_eval_set(self.nusc, self.eval_set)
        if self.verbose:
            print('There are {} samples.'.format(len(self.sample_tokens)))
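
# A minimal usage sketch (not part of the original example): the dataset root and
# results folder below are placeholder paths, and running the actual metric
# computation is assumed to be handled by a separate method of LidarSegEval.
#
#   nusc = NuScenes(version='v1.0-trainval', dataroot='/data/sets/nuscenes', verbose=True)
#   evaluator = LidarSegEval(nusc, results_folder='/data/results', eval_set='val', verbose=True)
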
def validate_submission(nusc: NuScenes,
                        results_folder: str,
                        eval_set: str,
                        verbose: bool = False) -> None:
    """
    Checks if a results folder is valid. The following checks are performed:
    - Check that the submission folder is laid out as described in
      https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/eval/lidarseg/README.md
    - Check that the submission.json is of the following structure:
        {"meta": {"use_camera": false,
                  "use_lidar": true,
                  "use_radar": false,
                  "use_map": false,
                  "use_external": false}}
    - Check that each lidar sample data in the evaluation set is present and valid.

    :param nusc: A NuScenes object.
    :param results_folder: Path to the folder which contains the results to be validated.
    :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
    :param verbose: Whether to print messages during the evaluation.
    """
    mapper = LidarsegClassMapper(nusc)
    num_classes = len(mapper.coarse_name_2_coarse_idx_mapping)

    if verbose:
        print('Checking if folder structure of {} is correct...'.format(
            results_folder))

    # Check that {results_folder}/{eval_set} exists.
    results_meta_folder = os.path.join(results_folder, eval_set)
    assert os.path.exists(results_meta_folder), \
        'Error: The folder containing the submission.json ({}) does not exist.'.format(results_meta_folder)

    # Check that {results_folder}/{eval_set}/submission.json exists.
    submission_json_path = os.path.join(results_meta_folder, 'submission.json')
    assert os.path.exists(submission_json_path), \
        'Error: submission.json ({}) does not exist.'.format(submission_json_path)

    # Check that {results_folder}/lidarseg/{eval_set} exists.
    results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
    assert os.path.exists(results_bin_folder), \
        'Error: The folder containing the .bin files ({}) does not exist.'.format(results_bin_folder)

    if verbose:
        print('\tPassed.')

    if verbose:
        print('Checking contents of {}...'.format(submission_json_path))

    with open(submission_json_path) as f:
        submission_meta = json.load(f)
        valid_meta = {
            "use_camera", "use_lidar", "use_radar", "use_map", "use_external"
        }
        assert valid_meta == set(submission_meta['meta'].keys()), \
            '{} must contain {}.'.format(submission_json_path, valid_meta)
        for meta_key in valid_meta:
            meta_key_type = type(submission_meta['meta'][meta_key])
            assert meta_key_type == bool, 'Error: Value for {} should be bool, not {}.'.format(
                meta_key, meta_key_type)

    if verbose:
        print('\tPassed.')

    if verbose:
        print(
            'Checking if all .bin files for {} exist and are valid...'.format(
                eval_set))
    sample_tokens = get_samples_in_eval_set(nusc, eval_set)
    for sample_token in tqdm(sample_tokens, disable=not verbose):
        sample = nusc.get('sample', sample_token)

        # Get the sample data token of the point cloud.
        sd_token = sample['data']['LIDAR_TOP']

        # Load the predictions for the point cloud.
        lidarseg_pred_filename = os.path.join(results_bin_folder,
                                              sd_token + '_lidarseg.bin')
        assert os.path.exists(lidarseg_pred_filename), \
            'Error: The prediction .bin file {} does not exist.'.format(lidarseg_pred_filename)
        lidarseg_pred = np.fromfile(lidarseg_pred_filename, dtype=np.uint8)

        # Check number of predictions for the point cloud.
        # If ground truth exists, compare the no. of predictions with that of the ground truth.
        if len(nusc.lidarseg) > 0:
            lidarseg_label_filename = os.path.join(
                nusc.dataroot,
                nusc.get('lidarseg', sd_token)['filename'])
            assert os.path.exists(lidarseg_label_filename), \
                'Error: The ground truth .bin file {} does not exist.'.format(lidarseg_label_filename)
            lidarseg_label = np.fromfile(lidarseg_label_filename,
                                         dtype=np.uint8)
            num_points = len(lidarseg_label)
        else:  # If no ground truth is available, compare the no. of predictions with that of points in a point cloud.
            pointsensor = nusc.get('sample_data', sd_token)
            pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
            pc = LidarPointCloud.from_file(pcl_path)
            points = pc.points
            num_points = points.shape[1]

        assert num_points == len(lidarseg_pred), \
            'Error: There are {} predictions for lidar sample data token {} ' \
            'but there are only {} points in the point cloud.'\
            .format(len(lidarseg_pred), sd_token, num_points)

        assert all((lidarseg_pred > 0) & (lidarseg_pred < num_classes)), \
            "Error: Array for predictions in {} must be between 1 and {} (inclusive)."\
            .format(lidarseg_pred_filename, num_classes - 1)

    if verbose:
        print('\tPassed.')

    if verbose:
        print(
            'Results folder {} successfully validated!'.format(results_folder))
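
# A minimal usage sketch (not part of the original example; paths and the sample data
# token are placeholders). validate_submission expects a submission.json at
# {results_folder}/{eval_set}/submission.json and one uint8 label per lidar point
# (values 1 to num_classes - 1) at
# {results_folder}/lidarseg/{eval_set}/{lidar_sample_data_token}_lidarseg.bin.
#
#   nusc = NuScenes(version='v1.0-test', dataroot='/data/sets/nuscenes', verbose=False)
#
#   meta = {'meta': {'use_camera': False, 'use_lidar': True, 'use_radar': False,
#                    'use_map': False, 'use_external': False}}
#   with open('/data/results/test/submission.json', 'w') as f:
#       json.dump(meta, f)
#
#   # Write one (dummy) prediction file: one label per point of the corresponding point cloud.
#   preds = np.ones(34720, dtype=np.uint8)
#   preds.tofile('/data/results/lidarseg/test/<sd_token>_lidarseg.bin')
#
#   validate_submission(nusc, results_folder='/data/results', eval_set='test', verbose=True)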