Example No. 1
    def __init__(self,
                 nusc: NuScenes,
                 results_folder: str,
                 eval_set: str,
                 verbose: bool = False):
        """
        Initialize a LidarSegEval object.
        :param nusc: A NuScenes object.
        :param results_folder: Path to the folder containing the results (the .bin prediction files).
        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
        :param verbose: Whether to print messages during the evaluation.
        """
        # Check there are ground truth annotations.
        assert len(nusc.lidarseg) > 0, \
            'Error: No ground truth annotations found in {}.'.format(nusc.version)

        # Check results folder exists.
        self.results_folder = results_folder
        self.results_bin_folder = os.path.join(results_folder, 'lidarseg',
                                               eval_set)
        assert os.path.exists(self.results_bin_folder), \
            'Error: The folder containing the .bin files ({}) does not exist.'.format(self.results_bin_folder)

        self.nusc = nusc
        self.results_folder = results_folder
        self.eval_set = eval_set
        self.verbose = verbose

        self.mapper = LidarsegClassMapper(self.nusc)
        self.ignore_idx = self.mapper.ignore_class['index']
        self.id2name = {
            idx: name
            for name, idx in
            self.mapper.coarse_name_2_coarse_idx_mapping.items()
        }
        self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)

        if self.verbose:
            print('There are {} classes.'.format(self.num_classes))

        self.global_cm = ConfusionMatrix(self.num_classes, self.ignore_idx)

        self.sample_tokens = get_samples_in_eval_set(self.nusc, self.eval_set)
        if self.verbose:
            print('There are {} samples.'.format(len(self.sample_tokens)))
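
Since the constructor above only needs a NuScenes object, a results folder and an eval split, a minimal usage sketch could look like the following. It assumes this __init__ belongs to the LidarSegEval class shown in full in Example No. 2; the dataroot, results path and split are placeholders, not values taken from the examples.

from nuscenes.nuscenes import NuScenes

# Placeholders: point these at your own nuScenes installation and prediction folder.
nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=True)
evaluator = LidarSegEval(nusc, results_folder='/data/results', eval_set='mini_val', verbose=True)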
Example No. 2
class LidarSegEval:
    """
    This is the official nuScenes-lidarseg evaluation code.
    Results are written to the provided output_dir.

    nuScenes-lidarseg uses the following metrics:
    - Mean Intersection-over-Union (mIOU): We use the well-known IOU metric, which is defined as TP / (TP + FP + FN).
                                           The IOU score is calculated separately for each class, and then the mean is
                                           computed across classes. Note that in the challenge, index 0 is ignored in
                                           the calculation.
    - Frequency-weighted IOU (FWIOU): Instead of taking the mean of the IOUs across all the classes, each IOU is
                                      weighted by the point-level frequency of its class. Note that in the challenge,
                                      index 0 is ignored in the calculation. FWIOU is not used for the challenge.

    We assume that:
    - For each pointcloud, the prediction for every point is present in a .bin file, in the same order as that of the
      points stored in the corresponding .bin file.
    - The naming convention of the .bin files containing the predictions for a single point cloud is:
        <lidar_sample_data_token>_lidarseg.bin
    - The predictions are between 1 and 16 (inclusive); 0 is the index of the ignored class.

    Please see https://www.nuscenes.org/lidar-segmentation for more details.
    """
    def __init__(self,
                 nusc: NuScenes,
                 results_folder: str,
                 eval_set: str,
                 verbose: bool = False):
        """
        Initialize a LidarSegEval object.
        :param nusc: A NuScenes object.
        :param results_folder: Path to the folder containing the results (the .bin prediction files).
        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
        :param verbose: Whether to print messages during the evaluation.
        """
        # Check there are ground truth annotations.
        assert len(nusc.lidarseg) > 0, 'Error: No ground truth annotations found in {}.'.format(nusc.version)

        # Check results folder exists.
        self.results_folder = results_folder
        self.results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
        assert os.path.exists(self.results_bin_folder), \
            'Error: The folder containing the .bin files ({}) does not exist.'.format(self.results_bin_folder)

        self.nusc = nusc
        self.results_folder = results_folder
        self.eval_set = eval_set
        self.verbose = verbose

        self.mapper = LidarsegClassMapper(self.nusc)
        self.ignore_idx = self.mapper.ignore_class['index']
        self.id2name = {idx: name for name, idx in self.mapper.coarse_name_2_coarse_idx_mapping.items()}
        self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)

        if self.verbose:
            print('There are {} classes.'.format(self.num_classes))

        self.global_cm = ConfusionMatrix(self.num_classes, self.ignore_idx)

        self.sample_tokens = get_samples_in_eval_set(self.nusc, self.eval_set)
        if self.verbose:
            print('There are {} samples.'.format(len(self.sample_tokens)))

    def evaluate(self) -> Dict:
        """
        Performs the actual evaluation.
        :return: A dictionary containing the evaluated metrics.
        """
        for sample_token in tqdm(self.sample_tokens, disable=not self.verbose):
            sample = self.nusc.get('sample', sample_token)

            # Get the sample data token of the point cloud.
            sd_token = sample['data']['LIDAR_TOP']

            # Load the ground truth labels for the point cloud.
            lidarseg_label_filename = os.path.join(self.nusc.dataroot,
                                                   self.nusc.get('lidarseg', sd_token)['filename'])
            lidarseg_label = self.load_bin_file(lidarseg_label_filename)

            lidarseg_label = self.mapper.convert_label(lidarseg_label)

            # Load the predictions for the point cloud.
            lidarseg_pred_filename = os.path.join(self.results_folder, 'lidarseg',
                                                  self.eval_set, sd_token + '_lidarseg.bin')
            lidarseg_pred = self.load_bin_file(lidarseg_pred_filename)

            # Get the confusion matrix between the ground truth and predictions.
            # Update the confusion matrix for the sample data into the confusion matrix for the eval set.
            self.global_cm.update(lidarseg_label, lidarseg_pred)

        iou_per_class = self.global_cm.get_per_class_iou()
        miou = self.global_cm.get_mean_iou()
        freqweighted_iou = self.global_cm.get_freqweighted_iou()

        # Put everything nicely into a dict.
        results = {'iou_per_class': {self.id2name[i]: class_iou for i, class_iou in enumerate(iou_per_class)},
                   'miou': miou,
                   'freq_weighted_iou': freqweighted_iou}

        # Print the results if desired.
        if self.verbose:
            print("======\nnuScenes-lidarseg evaluation for {}".format(self.eval_set))
            print(json.dumps(results, indent=4, sort_keys=False))
            print("======")

        return results

    @staticmethod
    def load_bin_file(bin_path: str) -> np.ndarray:
        """
        Loads a .bin file containing the labels.
        :param bin_path: Path to the .bin file.
        :return: An array containing the labels.
        """
        assert os.path.exists(bin_path), 'Error: Unable to find {}.'.format(bin_path)
        bin_content = np.fromfile(bin_path, dtype=np.uint8)
        assert len(bin_content) > 0, 'Error: {} is empty.'.format(bin_path)

        return bin_content
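
The per-class IOU, mIOU and FWIOU described in the class docstring can all be derived from a confusion matrix. The sketch below only illustrates those formulas with plain numpy; it is not the devkit's ConfusionMatrix implementation, and the function name is made up for this illustration.

import numpy as np

def iou_metrics_from_confusion_matrix(cm: np.ndarray, ignore_idx: int = 0):
    """Illustrative only: cm[i, j] counts points with ground truth class i predicted as class j."""
    tp = np.diag(cm).astype(float)
    fp = cm.sum(axis=0).astype(float) - tp
    fn = cm.sum(axis=1).astype(float) - tp
    with np.errstate(divide='ignore', invalid='ignore'):
        iou = tp / (tp + fp + fn)                # IOU = TP / (TP + FP + FN), computed per class.
    keep = np.arange(cm.shape[0]) != ignore_idx  # Index 0 (the ignored class) is dropped.
    miou = float(np.nanmean(iou[keep]))          # Mean IOU over the remaining classes.
    freq = cm.sum(axis=1)[keep] / cm.sum(axis=1)[keep].sum()  # Point-level class frequencies.
    fwiou = float(np.nansum(freq * iou[keep]))   # Frequency-weighted IOU.
    return iou, miou, fwiou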
Example No. 3
def visualize_semantic_differences_bev(nusc: NuScenes,
                                       sample_token: str,
                                       lidarseg_preds_folder: str = None,
                                       axes_limit: float = 40,
                                       dot_size: int = 5,
                                       out_path: str = None) -> None:
    """
    Visualize semantic difference of lidar segmentation results in bird's eye view.
    :param nusc: A NuScenes object.
    :param sample_token: Unique identifier of the sample to visualize.
    :param lidarseg_preds_folder: A path to the folder which contains the user's lidar segmentation predictions for
                                  the scene. Each .bin file in the folder should be named in this format:
                                  <lidar_sample_data_token>_lidarseg.bin.
    :param axes_limit: Axes limit for plot (measured in meters).
    :param dot_size: Scatter plot dot size.
    :param out_path: Path to save visualization to (e.g. /save/to/here/bev_diff.png).
    """
    mapper = LidarsegClassMapper(nusc)

    sample = nusc.get('sample', sample_token)

    # Get the sample data token of the point cloud.
    sd_token = sample['data']['LIDAR_TOP']
    pointsensor = nusc.get('sample_data', sd_token)
    pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])

    # Load the ground truth labels for the point cloud.
    gt_path = os.path.join(nusc.dataroot,
                           nusc.get('lidarseg', sd_token)['filename'])
    gt = LidarSegPointCloud(pcl_path, gt_path)
    gt.labels = mapper.convert_label(gt.labels)  # Map the labels as necessary.

    # Load the predictions for the point cloud.
    preds_path = os.path.join(lidarseg_preds_folder,
                              sd_token + '_lidarseg.bin')
    preds = LidarSegPointCloud(pcl_path, preds_path)

    # Filter out points belonging to the ignored class, so that they are not compared.
    valid_points_idxs = np.where(gt.labels != mapper.ignore_class['index'])[0]
    gt.labels = gt.labels[valid_points_idxs]
    gt.points = gt.points[valid_points_idxs]
    preds.labels = preds.labels[valid_points_idxs]
    preds.points = preds.points[valid_points_idxs]

    # Init axes.
    fig, axes = plt.subplots(1,
                             3,
                             figsize=(10 * 3, 10),
                             sharex='all',
                             sharey='all')

    # Render ground truth and predictions.
    gt.render(mapper.coarse_colormap,
              mapper.coarse_name_2_coarse_idx_mapping,
              ax=axes[0])
    preds.render(mapper.coarse_colormap,
                 mapper.coarse_name_2_coarse_idx_mapping,
                 ax=axes[1])

    # Render errors.
    id2color_for_diff_bev = {0: (191, 41, 0, 255),   # Red: wrong label.
                             1: (50, 168, 82, 255)}  # Green: correct label.
    colors_for_diff_bev = colormap_to_colors(id2color_for_diff_bev, {0: 0, 1: 1})
    mask = np.array(gt.labels == preds.labels).astype(int)  # Convert the array from bool to int.
    axes[2].scatter(gt.points[:, 0],
                    gt.points[:, 1],
                    c=colors_for_diff_bev[mask],
                    s=dot_size)
    axes[2].set_title('Errors (Correct: Green, Mislabeled: Red)')

    # Limit visible range for all subplots.
    plt.xlim(-axes_limit, axes_limit)
    plt.ylim(-axes_limit, axes_limit)

    plt.tight_layout()
    if out_path:
        plt.savefig(out_path, bbox_inches='tight', pad_inches=0)
    plt.show()
def validate_submission(nusc: NuScenes,
                        results_folder: str,
                        eval_set: str,
                        verbose: bool = False) -> None:
    """
    Checks if a results folder is valid. The following checks are performed:
    - Check that the submission folder is according to that described in
      https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/eval/lidarseg/README.md
    - Check that the submission.json is of the following structure:
        {"meta": {"use_camera": false,
                  "use_lidar": true,
                  "use_radar": false,
                  "use_map": false,
                  "use_external": false}}
    - Check that each lidar sample data in the evaluation set is present and valid.

    :param nusc: A NuScenes object.
    :param results_folder: Path to the folder containing the results (the .bin prediction files).
    :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
    :param verbose: Whether to print messages during the evaluation.
    """
    mapper = LidarsegClassMapper(nusc)
    num_classes = len(mapper.coarse_name_2_coarse_idx_mapping)

    if verbose:
        print('Checking if folder structure of {} is correct...'.format(
            results_folder))

    # Check that {results_folder}/{eval_set} exists.
    results_meta_folder = os.path.join(results_folder, eval_set)
    assert os.path.exists(results_meta_folder), \
        'Error: The folder containing the submission.json ({}) does not exist.'.format(results_meta_folder)

    # Check that {results_folder}/{eval_set}/submission.json exists.
    submission_json_path = os.path.join(results_meta_folder, 'submission.json')
    assert os.path.exists(submission_json_path), \
        'Error: submission.json ({}) does not exist.'.format(submission_json_path)

    # Check that {results_folder}/lidarseg/{eval_set} exists.
    results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
    assert os.path.exists(results_bin_folder), \
        'Error: The folder containing the .bin files ({}) does not exist.'.format(results_bin_folder)

    if verbose:
        print('\tPassed.')

    if verbose:
        print('Checking contents of {}...'.format(submission_json_path))

    with open(submission_json_path) as f:
        submission_meta = json.load(f)
        valid_meta = {
            "use_camera", "use_lidar", "use_radar", "use_map", "use_external"
        }
        assert valid_meta == set(submission_meta['meta'].keys()), \
            '{} must contain {}.'.format(submission_json_path, valid_meta)
        for meta_key in valid_meta:
            meta_key_type = type(submission_meta['meta'][meta_key])
            assert meta_key_type == bool, 'Error: Value for {} should be bool, not {}.'.format(
                meta_key, meta_key_type)

    if verbose:
        print('\tPassed.')

    if verbose:
        print(
            'Checking if all .bin files for {} exist and are valid...'.format(
                eval_set))
    sample_tokens = get_samples_in_eval_set(nusc, eval_set)
    for sample_token in tqdm(sample_tokens, disable=not verbose):
        sample = nusc.get('sample', sample_token)

        # Get the sample data token of the point cloud.
        sd_token = sample['data']['LIDAR_TOP']

        # Load the predictions for the point cloud.
        lidarseg_pred_filename = os.path.join(results_bin_folder,
                                              sd_token + '_lidarseg.bin')
        assert os.path.exists(lidarseg_pred_filename), \
            'Error: The prediction .bin file {} does not exist.'.format(lidarseg_pred_filename)
        lidarseg_pred = np.fromfile(lidarseg_pred_filename, dtype=np.uint8)

        # Check number of predictions for the point cloud.
        # If ground truth exists, compare the number of predictions with the number of ground truth labels.
        if len(nusc.lidarseg) > 0:
            lidarseg_label_filename = os.path.join(
                nusc.dataroot,
                nusc.get('lidarseg', sd_token)['filename'])
            assert os.path.exists(lidarseg_label_filename), \
                'Error: The ground truth .bin file {} does not exist.'.format(lidarseg_label_filename)
            lidarseg_label = np.fromfile(lidarseg_label_filename,
                                         dtype=np.uint8)
            num_points = len(lidarseg_label)
        else:  # If no ground truth is available, compare the no. of predictions with that of points in a point cloud.
            pointsensor = nusc.get('sample_data', sd_token)
            pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
            pc = LidarPointCloud.from_file(pcl_path)
            points = pc.points
            num_points = points.shape[1]

        assert num_points == len(lidarseg_pred), \
            'Error: There are {} predictions for lidar sample data token {} ' \
            'but there are only {} points in the point cloud.'\
            .format(len(lidarseg_pred), sd_token, num_points)

        assert all((lidarseg_pred > 0) & (lidarseg_pred < num_classes)), \
            "Error: Array for predictions in {} must be between 1 and {} (inclusive)."\
            .format(lidarseg_pred_filename, num_classes - 1)

    if verbose:
        print('\tPassed.')

    if verbose:
        print(
            'Results folder {} successfully validated!'.format(results_folder))
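
Putting the pieces together, a hedged end-to-end sketch could look like the following. The dataroot, results folder and eval split are placeholders, the predictions written here are dummies, and the import path for get_samples_in_eval_set is assumed to be the devkit's nuscenes.eval.lidarseg.utils module; LidarSegEval and validate_submission are the definitions shown above. Only the folder layout, file naming and dtype follow the conventions checked by the code above.

import os
import numpy as np
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud
from nuscenes.eval.lidarseg.utils import get_samples_in_eval_set

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)
results_folder, eval_set = '/data/results', 'mini_val'
bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
os.makedirs(bin_folder, exist_ok=True)

# Write one uint8 label in [1, 16] per point, saved as <lidar_sample_data_token>_lidarseg.bin.
for sample_token in get_samples_in_eval_set(nusc, eval_set):
    sd_token = nusc.get('sample', sample_token)['data']['LIDAR_TOP']
    pointsensor = nusc.get('sample_data', sd_token)
    pc = LidarPointCloud.from_file(os.path.join(nusc.dataroot, pointsensor['filename']))
    preds = np.ones(pc.points.shape[1], dtype=np.uint8)  # Dummy labels; replace with real predictions.
    preds.tofile(os.path.join(bin_folder, sd_token + '_lidarseg.bin'))

# validate_submission additionally expects {results_folder}/{eval_set}/submission.json to exist.
validate_submission(nusc, results_folder, eval_set, verbose=True)
print(LidarSegEval(nusc, results_folder, eval_set, verbose=True).evaluate())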