Code Example #1
    def test_delta(self):
        """
        This test runs the evaluation on an arbitrary random set of predictions.
        The resulting score is captured in this very test, so that if we change the eval code,
        this test will fail if the results change.
        """
        random.seed(42)
        np.random.seed(42)
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)

        with open(self.res_mockup, 'w') as f:
            json.dump(self._mock_submission(nusc, 'mini_val'), f, indent=2)

        cfg = config_factory('detection_cvpr_2019')
        nusc_eval = DetectionEval(nusc, cfg, self.res_mockup, eval_set='mini_val', output_dir=self.res_eval_folder,
                                  verbose=False)
        metrics, md_list = nusc_eval.evaluate()

        # 1. Score = 0.22082865720221012. Measured on the branch "release_v0.2" on March 7 2019.
        # 2. Score = 0.2199307290627096. Changed to measure center distance from the ego-vehicle.
        # 3. Score = 0.24954451673961747. Changed to 1.0-mini and cleaned up build script.
        # 4. Score = 0.20478832626986893. Updated treatment of cones, barriers, and other algo tunings.
        # 5. Score = 0.2043569666105005. AP calculation area is changed from >=min_recall to >min_recall.
        # 6. Score = 0.20636954644294506. After bike-rack filtering.
        # 7. Score = 0.20237925145690996. After TP reversion bug.
        # 8. Score = 0.24047129251302665. After bike racks bug.
        # 9. Score = 0.24104572227466886. After bug fix in calc_tp. Include the max recall and exclude the min recall.
        # 10. Score = 0.19449091580477748. Changed to use v1.0 mini_val split.
        self.assertAlmostEqual(metrics.nd_score, 0.19449091580477748)
Code Example #2
def quick_test(dataroot='/data/nuscenes', gpuid=0, nworkers=10):
    """Evaluate detections with PKL.
    """
    nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=True)
    nusc_maps = get_nusc_maps(dataroot)
    cfg = config_factory('detection_cvpr_2019')
    device = torch.device(f'cuda:{gpuid}') if gpuid >= 0 else torch.device('cpu')
    print(f'using device: {device}')

    get_example_submission()

    nusc_eval = DetectionEval(nusc,
                              config=cfg,
                              result_path='./example_submission.json',
                              eval_set='mini_train',
                              output_dir='./res',
                              verbose=True)
    info = calculate_pkl(nusc_eval.gt_boxes,
                         nusc_eval.pred_boxes,
                         nusc_eval.sample_tokens,
                         nusc_eval.nusc,
                         nusc_maps,
                         device,
                         nworkers,
                         bsz=128,
                         plot_kextremes=5,
                         verbose=True)
    print({k: v for k, v in info.items() if k != 'full'})
Code Example #3
def og_detection_eval(version,
                      eval_set,
                      result_path,
                      dataroot='/data/nuscenes'):
    """Evaluate according to NDS.
    """
    nusc = NuScenes(version='v1.0-{}'.format(version),
                    dataroot=dataroot,
                    verbose=True)
    cfg = config_factory('detection_cvpr_2019')
    nusc_eval = DetectionEval(nusc,
                              config=cfg,
                              result_path=result_path,
                              eval_set=eval_set,
                              output_dir='./res',
                              verbose=True)
    nusc_eval.main(plot_examples=0, render_curves=False)
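As a usage illustration, the helper above could be invoked directly; the version string, split, and result path below are placeholders, not taken from the source.

# Hypothetical invocation; assumes a v1.0-trainval install under /data/nuscenes
# and a detection submission JSON produced by a separate model run.
og_detection_eval('trainval', 'val', './results/detections.json')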
Code Example #4
def do_nuScenes_detection_evaluation(dataset, predictions, output_folder,
                                     logger):
    mock_meta = {
        'use_camera': True,
        'use_lidar': False,
        'use_radar': False,
        'use_map': False,
        'use_external': False,
    }
    mock_results = {}
    for image_token, prediction in predictions.items():
        sample_res = []
        for p in prediction:
            p = p.numpy()
            p = p.round(4)
            detection_name = ID_TYPE_CONVERSION[int(p[0])]
            sample_res.append({
                'sample_token': image_token,
                'translation': p[9:12].tolist(),
                'size': p[6:9].tolist(),
                'rotation': euler_to_quaternion(p[12], 0, 0),
                'velocity': [0, 0],
                'detection_name': detection_name,
                'attribute_name': random_attr(detection_name)
            })
        mock_results[image_token] = sample_res
    mock_submission = {'meta': mock_meta, 'results': mock_results}

    logger.info("Evaluate on nuScenes dataset")
    output_file = output_folder + ".json"
    with open(output_file, 'w') as f:
        json.dump(mock_submission, f, indent=2)

    cfg = config_factory('detection_cvpr_2019')
    nusc_eval = DetectionEval(dataset.nusc,
                              cfg,
                              output_file,
                              eval_set='val',
                              output_dir=output_folder,
                              verbose=False)
    metrics, md_list = nusc_eval.evaluate()
    print(md_list)
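For reference, each entry written into the results dictionary of a nuScenes detection submission follows the schema sketched below; note that the official format also expects a detection_score field, which the example above does not populate. All values here are placeholders, not taken from the example.

# Sketch of a single result entry in the nuScenes detection submission format.
# Every value below is a placeholder.
example_entry = {
    'sample_token': '<sample_token>',   # token of the sample the box belongs to
    'translation': [0.0, 0.0, 0.0],     # box center (x, y, z) in the global frame, meters
    'size': [1.9, 4.5, 1.6],            # box size (width, length, height), meters
    'rotation': [1.0, 0.0, 0.0, 0.0],   # orientation quaternion (w, x, y, z)
    'velocity': [0.0, 0.0],             # (vx, vy) in the global frame, m/s
    'detection_name': 'car',            # one of the ten nuScenes detection classes
    'detection_score': 0.85,            # confidence in [0, 1], required by the format
    'attribute_name': 'vehicle.moving', # attribute name, may be an empty string
}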
Code Example #5
    def evaluation(self, det_annos, class_names, **kwargs):

        eval_det_annos = copy.deepcopy(det_annos)

        # Create NuScenes JSON output file
        nusc_annos = {}
        for sample in eval_det_annos:
            try:
                sample_idx = sample['sample_idx'][0]
            except (KeyError, IndexError):
                # Skip detections that carry no sample index
                continue

            sample_results = []

            calib = self.get_calib(sample_idx)

            sample['boxes_lidar'] = np.array(sample['boxes_lidar'])
            positions = sample['boxes_lidar'][:, :3]
            dimensions = sample['boxes_lidar'][:, 3:6]
            rotations = sample['boxes_lidar'][:, 6]

            for center, dimension, yaw, label, score in zip(
                    positions, dimensions, rotations, sample['name'],
                    sample['score']):

                quaternion = Quaternion(axis=[0, 0, 1], radians=yaw)

                box = Box(center, dimension, quaternion)
                # Move box to ego vehicle coord system
                box.rotate(Quaternion(calib.lidar_calibrated['rotation']))
                box.translate(np.array(calib.lidar_calibrated['translation']))
                # Move box to global coord system
                box.rotate(Quaternion(calib.ego_pose['rotation']))
                box.translate(np.array(calib.ego_pose['translation']))

                # Clamp the confidence to [0, 1] and map 'Cyclist' to the nuScenes 'bicycle' class
                score = min(max(float(score), 0.0), 1.0)
                if label == 'Cyclist':
                    label = 'bicycle'
                sample_results.append({
                    "sample_token": sample_idx,
                    "translation": box.center.tolist(),
                    "size": box.wlh.tolist(),
                    "rotation": box.orientation.elements.tolist(),
                    "lidar_yaw": float(yaw),
                    "velocity": (0, 0),
                    "detection_name": label.lower(),
                    "detection_score": float(score),
                    "attribute_name": self.DefaultAttribute[label.lower()],
                })

            nusc_annos[sample_idx] = sample_results

        for sample_id in self.sample_id_list:
            if sample_id not in nusc_annos:
                nusc_annos[sample_id] = []

        nusc_submission = {
            "meta": {
                "use_camera": False,
                "use_lidar": True,
                "use_radar": False,
                "use_map": False,
                "use_external": False,
            },
            "results": nusc_annos,
        }
        eval_file = os.path.join(kwargs['output_dir'], 'nusc_results.json')
        with open(eval_file, "w") as f:
            json.dump(nusc_submission, f, indent=2)

        # Call NuScenes evaluation
        cfg = config_factory('detection_cvpr_2019')
        nusc_eval = DetectionEval(self.nusc,
                                  config=cfg,
                                  result_path=eval_file,
                                  eval_set=self.split,
                                  output_dir=kwargs['output_dir'],
                                  verbose=True)
        metric_summary = nusc_eval.main(plot_examples=10, render_curves=True)

        # Reformat the metrics summary a bit for the tensorboard logger
        err_name_mapping = {
            'trans_err': 'mATE',
            'scale_err': 'mASE',
            'orient_err': 'mAOE',
            'vel_err': 'mAVE',
            'attr_err': 'mAAE'
        }
        result = {}
        result['mean_ap'] = metric_summary['mean_ap']
        for tp_name, tp_val in metric_summary['tp_errors'].items():
            result[tp_name] = tp_val

        class_aps = metric_summary['mean_dist_aps']
        class_tps = metric_summary['label_tp_errors']
        for class_name in class_aps.keys():
            result['mAP_' + class_name] = class_aps[class_name]
            for key, val in err_name_mapping.items():
                result[val + '_' + class_name] = class_tps[class_name][key]

        return str(result), result
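As a follow-up sketch of how the returned dictionary could feed the tensorboard logger mentioned in the comment above; the writer, log directory, dataset instance, and global step are assumptions for illustration, not part of the source.

# Hypothetical logging of the flat metrics dict returned by evaluation().
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='./tb_logs')
result_str, result_dict = dataset.evaluation(det_annos, class_names, output_dir='./res')
for name, value in result_dict.items():
    writer.add_scalar('eval/' + name, value, global_step=0)
writer.close()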