Example #1
def add_center_dist(nusc: NuScenes, eval_boxes: EvalBoxes):
    """
    Adds the cylindrical (xy) center distance from ego vehicle to each box.
    :param nusc: The NuScenes instance.
    :param eval_boxes: A set of boxes, either GT or predictions.
    :return: eval_boxes augmented with center distances.
    """
    for sample_token in eval_boxes.sample_tokens:
        sample_rec = nusc.get('sample', sample_token)
        sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
        pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])

        for box in eval_boxes[sample_token]:
            # Both boxes and ego pose are given in global coord system, so distance can be calculated directly.
            # Note that the z component of the ego pose is 0.
            ego_translation = (box.translation[0] - pose_record['translation'][0],
                               box.translation[1] - pose_record['translation'][1],
                               box.translation[2] - pose_record['translation'][2])
            if isinstance(box, (DetectionBox, TrackingBox)):
                box.ego_translation = ego_translation
            else:
                raise NotImplementedError

    return eval_boxes
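
# Usage sketch (added for illustration): add_center_dist is run after loading boxes and
# before distance filtering, mirroring TrackingEval.__init__ and test_filter_eval_boxes
# further below. The split and config names match those used elsewhere in this section.
nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
gt_boxes = load_gt(nusc, 'mini_val', DetectionBox, verbose=True)
gt_boxes = add_center_dist(nusc, gt_boxes)  # Populates box.ego_translation.
# filter_eval_boxes compares box.ego_dist (derived from ego_translation) against the
# per-class distance thresholds of the evaluation config.
cfg = config_factory('detection_cvpr_2019')
gt_boxes = filter_eval_boxes(nusc, gt_boxes, cfg.class_range, verbose=True)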
Example #2
    def basic_test(self,
                   eval_set: str = 'mini_val',
                   add_errors: bool = False,
                   render_curves: bool = False) -> Dict[str, Any]:
        """
        Run the evaluation with fixed randomness on the specified subset, with or without introducing errors in the
        submission.
        :param eval_set: Which split to evaluate on.
        :param add_errors: Whether to use GT as submission or introduce additional errors.
        :param render_curves: Whether to render stats curves to disk.
        :return: The metrics returned by the evaluation.
        """
        random.seed(42)
        np.random.seed(42)
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        if eval_set.startswith('mini'):
            version = 'v1.0-mini'
        elif eval_set == 'test':
            version = 'v1.0-test'
        else:
            version = 'v1.0-trainval'
        nusc = NuScenes(version=version, dataroot=os.environ['NUSCENES'], verbose=False)

        with open(self.res_mockup, 'w') as f:
            mock = self._mock_submission(nusc, eval_set, add_errors=add_errors)
            json.dump(mock, f, indent=2)

        cfg = config_factory('tracking_nips_2019')
        nusc_eval = TrackingEval(cfg, self.res_mockup, eval_set=eval_set, output_dir=self.res_eval_folder,
                                 nusc_version=version, nusc_dataroot=os.environ['NUSCENES'], verbose=False)
        metrics = nusc_eval.main(render_curves=render_curves)

        return metrics
Example #3
    def test_delta(self):
        """
        This test runs the evaluation for an arbitrary random set of predictions.
        The resulting score is captured in this very test, so that if we change the eval code,
        this test will fail if the results change.
        """
        random.seed(42)
        np.random.seed(42)
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)

        with open(self.res_mockup, 'w') as f:
            json.dump(self._mock_submission(nusc, 'mini_val'), f, indent=2)

        cfg = config_factory('detection_cvpr_2019')
        nusc_eval = DetectionEval(nusc, cfg, self.res_mockup, eval_set='mini_val', output_dir=self.res_eval_folder,
                                  verbose=False)
        metrics, md_list = nusc_eval.evaluate()

        # 1. Score = 0.22082865720221012. Measured on the branch "release_v0.2" on March 7 2019.
        # 2. Score = 0.2199307290627096. Changed to measure center distance from the ego-vehicle.
        # 3. Score = 0.24954451673961747. Changed to 1.0-mini and cleaned up build script.
        # 4. Score = 0.20478832626986893. Updated treatment of cones, barriers, and other algo tunings.
        # 5. Score = 0.2043569666105005. AP calculation area is changed from >=min_recall to >min_recall.
        # 6. Score = 0.20636954644294506. After bike-rack filtering.
        # 7. Score = 0.20237925145690996. After TP reversion bug.
        # 8. Score = 0.24047129251302665. After bike racks bug.
        # 9. Score = 0.24104572227466886. After bug fix in calc_tp. Include the max recall and exclude the min recall.
        # 10. Score = 0.19449091580477748. Changed to use v1.0 mini_val split.
        self.assertAlmostEqual(metrics.nd_score, 0.19449091580477748)
Example #4
def export_ego_poses(nusc: NuScenes, out_dir: str):
    """ Script to render where ego vehicle drives on the maps """

    # Get the unique map locations covered by the logs.
    locations = np.unique([log['location'] for log in nusc.log])

    # Create output directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    for location in locations:
        print('Rendering map {}...'.format(location))
        nusc.render_egoposes_on_map(location)
        out_path = os.path.join(out_dir, 'egoposes-{}.png'.format(location))
        plt.tight_layout()
        plt.savefig(out_path)
Example #5
def export_videos(nusc: NuScenes, out_dir: str):
    """ Export videos of the images displayed in the images. """

    # Get the tokens of all scenes.
    scene_tokens = [s['token'] for s in nusc.scene]

    # Create output directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # Write videos to disk
    for scene_token in scene_tokens:
        scene = nusc.get('scene', scene_token)
        print('Writing scene %s' % scene['name'])
        out_path = os.path.join(out_dir, scene['name']) + '.avi'
        if not os.path.exists(out_path):
            nusc.render_scene(scene['token'], out_path=out_path)
Example #6
    def test_load(self):
        """
        Loads up NuScenes.
        This is intended to simply run the NuScenes class to check for import errors, typos, etc.
        """

        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        nusc = NuScenes(version='v1.0-mini',
                        dataroot=os.environ['NUSCENES'],
                        verbose=False)

        # Trivial assert statement
        self.assertEqual(nusc.table_root,
                         os.path.join(os.environ['NUSCENES'], 'v1.0-mini'))
Example #7
def _nuscenes(path: str) -> Dataset:  # noqa
    images: Dataset.Images = []
    classes: Dataset.Classes = DETECTION_NAMES
    annotations: Dataset.Annotations = {}

    nusc = NuScenes(version="v1.0-trainval", dataroot=path)

    for sample in nusc.sample:
        for cam in CAMERAS:
            cam_token = sample['data'][cam]

            # Returns the data path as well as all annotations related to that sample_data.
            # Note that the boxes are transformed into the current sensor's coordinate frame.
            data_path, boxes, camera_intrinsic = nusc.get_sample_data(
                cam_token, box_vis_level=BoxVisibility.ALL)
            images.append(data_path)
            annotations[data_path] = []

            for box in boxes:
                img_corners = view_points(box.corners(),
                                          camera_intrinsic,
                                          normalize=True)[:2, :]
                # Take the outer axis-aligned rectangle of the 3D box projection.
                xmin = img_corners[0].min()
                xmax = img_corners[0].max()
                ymin = img_corners[1].min()
                ymax = img_corners[1].max()

                bounds = Bounds2D(xmin, ymin, xmax - xmin, ymax - ymin)
                label = category_to_detection_name(box.name)
                if label is not None:
                    class_index = classes.index(label)
                    annotations[data_path].append(Object2D(
                        bounds, class_index))

    return Dataset(DATASET_NAME, images, classes, annotations)
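
# Worked illustration (synthetic data, added for clarity): the inner loop above reduces
# the eight projected 3D corners to an axis-aligned 2D rectangle. The intrinsic matrix
# below uses made-up example values.
import numpy as np

corners_3d = np.random.rand(3, 8) + np.array([[0.0], [0.0], [10.0]])  # 8 corners in front of the camera.
intrinsic = np.array([[1266.4, 0.0, 816.3],
                      [0.0, 1266.4, 491.5],
                      [0.0, 0.0, 1.0]])
img_pts = intrinsic @ corners_3d
img_pts = img_pts[:2] / img_pts[2:3]  # Equivalent to view_points(corners_3d, intrinsic, normalize=True)[:2, :].
xmin, ymin = img_pts.min(axis=1)
xmax, ymax = img_pts.max(axis=1)
bbox_xywh = (xmin, ymin, xmax - xmin, ymax - ymin)  # Same layout as Bounds2D(xmin, ymin, w, h).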
Example #8
    def _do_test(self, map_name, predictions, answer):
        with patch.object(PredictHelper,
                          'get_map_name_from_sample_token') as get_map_name:
            get_map_name.return_value = map_name
            nusc = NuScenes('v1.0-mini', dataroot=os.environ['NUSCENES'])
            helper = PredictHelper(nusc)

            off_road_rate = metrics.OffRoadRate(helper, [metrics.RowMean()])

            probabilities = np.array([1 / 3] * predictions.shape[0])
            prediction = Prediction('foo-instance', 'foo-sample', predictions,
                                    probabilities)

            # Two violations out of three trajectories
            np.testing.assert_allclose(off_road_rate(np.array([]), prediction),
                                       np.array([answer]))
Example #9
def main(version: str, data_root: str, split_name: str, output_dir: str, submission_name: str, config_name: str) \
        -> None:
    """
    Makes predictions for a submission to the nuScenes prediction challenge.
    :param version: NuScenes version.
    :param data_root: Directory storing NuScenes data.
    :param split_name: Data split to run inference on.
    :param output_dir: Directory to store the output file.
    :param submission_name: Name of the submission to use for the results file.
    :param config_name: Name of config file to use.
    """
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name)
    config = load_prediction_config(helper, config_name)

    predictions = do_inference_for_submission(helper, config, dataset)
    predictions = [prediction.serialize() for prediction in predictions]
    with open(os.path.join(output_dir, f"{submission_name}_inference.json"), "w") as f:
        json.dump(predictions, f)
Example #10
def main(version: str,
         data_root: str,
         submission_path: str,
         config_name: str = 'predict_2020_icra.json') -> None:
    """
    Computes metrics for a submission stored at submission_path, using the metrics specified by the config file
    config_name.
    :param version: nuScenes data set version.
    :param data_root: Directory storing NuScenes data.
    :param submission_path: Directory storing submission.
    :param config_name: Name of config file.
    """
    with open(submission_path, "r") as f:
        predictions = json.load(f)
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    config = load_prediction_config(helper, config_name)
    results = compute_metrics(predictions, helper, config)
    with open(submission_path.replace('.json', '_metrics.json'), "w") as f:
        json.dump(results, f, indent=2)
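
# Hypothetical direct invocation of the metrics entry point above (paths illustrative):
#     main(version='v1.0-mini', data_root='/data/sets/nuscenes',
#          submission_path='/path/to/submission.json', config_name='predict_2020_icra.json')
# This writes the results to '/path/to/submission_metrics.json' next to the submission.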
Example #11
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--out_dir',
                        type=str,
                        help='Directory where to save maps with ego poses.')
    parser.add_argument('--dataroot',
                        type=str,
                        default='/data/sets/nuscenes',
                        help='Path to the nuScenes data directory.')
    parser.add_argument('--version',
                        type=str,
                        default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--verbose',
                        type=int,
                        default=1,
                        help='Whether to print to stdout.')

    args = parser.parse_args()
    dataroot = args.dataroot
    version = args.version
    verbose = bool(args.verbose)

    # Init.
    nusc_ = NuScenes(version=version, verbose=verbose, dataroot=dataroot)

    # Export ego poses
    export_ego_poses(nusc_, args.out_dir)
Example #12
def visualize_sample(nusc: NuScenes,
                     sample_token: str,
                     gt_boxes: EvalBoxes,
                     pred_boxes: EvalBoxes,
                     nsweeps: int = 1,
                     conf_th: float = 0.15,
                     eval_range: float = 50,
                     verbose: bool = True,
                     savepath: str = None) -> None:
    """
    Visualizes a sample from BEV with annotations and detection results.
    :param nusc: NuScenes object.
    :param sample_token: The nuScenes sample token.
    :param gt_boxes: Ground truth boxes grouped by sample.
    :param pred_boxes: Prediction boxes grouped by sample.
    :param nsweeps: Number of sweeps used for lidar visualization.
    :param conf_th: The confidence threshold used to filter negatives.
    :param eval_range: Range in meters beyond which boxes are ignored.
    :param verbose: Whether to print to stdout.
    :param savepath: If given, saves the rendering here instead of displaying.
    """
    # Retrieve sensor & pose records.
    sample_rec = nusc.get('sample', sample_token)
    sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])

    # Get boxes.
    boxes_gt_global = gt_boxes[sample_token]
    boxes_est_global = pred_boxes[sample_token]

    # Map GT boxes to lidar.
    boxes_gt = boxes_to_sensor(boxes_gt_global, pose_record, cs_record)

    # Map EST boxes to lidar.
    boxes_est = boxes_to_sensor(boxes_est_global, pose_record, cs_record)

    # Add scores to EST boxes.
    for box_est, box_est_global in zip(boxes_est, boxes_est_global):
        box_est.score = box_est_global.detection_score

    # Get point cloud in lidar frame.
    pc, _ = LidarPointCloud.from_file_multisweep(nusc, sample_rec, 'LIDAR_TOP', 'LIDAR_TOP', nsweeps=nsweeps)

    # Init axes.
    _, ax = plt.subplots(1, 1, figsize=(9, 9))

    # Show point cloud.
    points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
    dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
    colors = np.minimum(1, dists / eval_range)
    ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)

    # Show ego vehicle.
    ax.plot(0, 0, 'x', color='black')

    # Show GT boxes.
    for box in boxes_gt:
        box.render(ax, view=np.eye(4), colors=('g', 'g', 'g'), linewidth=2)

    # Show EST boxes.
    for box in boxes_est:
        # Show only predictions with a high score.
        assert not np.isnan(box.score), 'Error: Box score cannot be NaN!'
        if box.score >= conf_th:
            box.render(ax, view=np.eye(4), colors=('b', 'b', 'b'), linewidth=1)

    # Limit visible range.
    axes_limit = eval_range + 3  # Slightly bigger to include boxes that extend beyond the range.
    ax.set_xlim(-axes_limit, axes_limit)
    ax.set_ylim(-axes_limit, axes_limit)

    # Show / save plot.
    if verbose:
        print('Rendering sample token %s' % sample_token)
    plt.title(sample_token)
    if savepath is not None:
        plt.savefig(savepath)
        plt.close()
    else:
        plt.show()
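
# Usage sketch (added for illustration): render one sample, assuming gt_boxes and
# pred_boxes were produced by load_gt/load_prediction and passed through
# add_center_dist as in the evaluation classes in this section.
sample_token = gt_boxes.sample_tokens[0]
visualize_sample(nusc, sample_token, gt_boxes, pred_boxes,
                 conf_th=0.15, eval_range=50, savepath='/tmp/%s.png' % sample_token)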
Example #13
    parser.add_argument('--verbose',
                        type=int,
                        default=1,
                        help='Whether to print to stdout.')
    args = parser.parse_args()

    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_path = args.config_path
    plot_examples_ = args.plot_examples
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)

    if config_path == '':
        cfg_ = config_factory('detection_cvpr_2019')
    else:
        with open(config_path, 'r') as _f:
            cfg_ = DetectionConfig.deserialize(json.load(_f))

    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
    nusc_eval = DetectionEval(nusc_,
                              config=cfg_,
                              result_path=result_path_,
                              eval_set=eval_set_,
                              output_dir=output_dir_,
                              verbose=verbose_)
    nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
Example #14
    def _mock_submission(nusc: NuScenes,
                         split: str,
                         add_errors: bool = False) -> Dict[str, dict]:
        """
        Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT
        prediction per sample. Predictions will be permuted randomly along all axes.
        :param nusc: NuScenes instance.
        :param split: Dataset split to use.
        :param add_errors: Whether to use GT or add errors to it.
        """

        def random_class(category_name: str, _add_errors: bool = False) -> Optional[str]:
            # Alter 10% of the valid labels.
            class_names = sorted(TRACKING_NAMES)
            tmp = category_to_tracking_name(category_name)

            if tmp is None:
                return None
            else:
                if not _add_errors or np.random.rand() < .9:
                    return tmp
                else:
                    return class_names[np.random.randint(0, len(class_names) - 1)]

        def random_id(instance_token: str, _add_errors: bool = False) -> str:
            # Alter 10% of the valid ids to be a random string, which hopefully corresponds to a new track.
            if not _add_errors or np.random.rand() < .9:
                _tracking_id = instance_token + '_pred'
            else:
                _tracking_id = str(np.random.randint(0, sys.maxsize))

            return _tracking_id

        mock_meta = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }
        mock_results = {}

        # Get all samples in the current evaluation split.
        splits = create_splits_scenes()
        val_samples = []
        for sample in nusc.sample:
            if nusc.get('scene', sample['scene_token'])['name'] in splits[split]:
                val_samples.append(sample)

        # Prepare results.
        instance_to_score = dict()
        for sample in tqdm(val_samples, leave=False):
            sample_res = []
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                translation = np.array(ann['translation'])
                size = np.array(ann['size'])
                rotation = np.array(ann['rotation'])
                velocity = nusc.box_velocity(ann_token)[:2]
                tracking_id = random_id(ann['instance_token'], _add_errors=add_errors)
                tracking_name = random_class(ann['category_name'], _add_errors=add_errors)

                # Skip annotations for classes not part of the tracking challenge.
                if tracking_name is None:
                    continue

                # Skip annotations with 0 lidar/radar points.
                num_pts = ann['num_lidar_pts'] + ann['num_radar_pts']
                if num_pts == 0:
                    continue

                # If we randomly assigned a score in [0, 1] to each box and later averaged over the boxes in a
                # track, every track's average score would be around 0.5 and the scores would not discriminate
                # between tracks. Therefore we assign the same score to each box in a track.
                if ann['instance_token'] not in instance_to_score:
                    instance_to_score[ann['instance_token']] = random.random()
                tracking_score = instance_to_score[ann['instance_token']]
                tracking_score = np.clip(tracking_score + random.random() * 0.3, 0, 1)

                if add_errors:
                    translation += 4 * (np.random.rand(3) - 0.5)
                    size *= (np.random.rand(3) + 0.5)
                    rotation += (np.random.rand(4) - 0.5) * .1
                    velocity *= np.random.rand(3)[:2] + 0.5

                sample_res.append({
                        'sample_token': sample['token'],
                        'translation': list(translation),
                        'size': list(size),
                        'rotation': list(rotation),
                        'velocity': list(velocity),
                        'tracking_id': tracking_id,
                        'tracking_name': tracking_name,
                        'tracking_score': tracking_score
                    })
            mock_results[sample['token']] = sample_res
        mock_submission = {
            'meta': mock_meta,
            'results': mock_results
        }
        return mock_submission
Example #15
        '42641eb6adcb4f8f8def8ef129d9e843_ca9a282c9e77460f8360f564131a8af5',
        '4080c30aa7104d91ad005a50b18f6108_ca9a282c9e77460f8360f564131a8af5',
        'c1958768d48640948f6053d04cffd35b_ca9a282c9e77460f8360f564131a8af5',
        '4005437c730645c2b628dc1da999e06a_39586f9d59004284a7114a68825e8eec',
        'a017fe4e9c3d445784aae034b1322006_356d81f38dd9473ba590f39e266f54e5',
        'a0049f95375044b8987fbcca8fda1e2b_c923fe08b2ff4e27975d2bf30934383b',
        '61dd7d03d7ad466d89f901ed64e2c0dd_e0845f5322254dafadbbed75aaa07969',
        '86ed8530809d4b1b8fbc53808f599339_39586f9d59004284a7114a68825e8eec',
        '2a80b29c0281435ca4893e158a281ce0_2afb9d32310e4546a71cbe432911eca2',
        '8ce4fe54af77467d90c840465f69677f_de7593d76648450e947ba0c203dee1b0',
        'f4af7fd215ee47aa8b64bac0443d7be8_9ee4020153674b9e9943d395ff8cfdf3'
    ]

    tokens = tokens * 32

    nusc = NuScenes('v1.0-trainval', dataroot=args.data_root)
    helper = PredictHelper(nusc)

    dataset = TestDataset(tokens, helper)
    dataloader = DataLoader(dataset, batch_size=16, num_workers=16)

    backbone = ResNetBackbone('resnet18')
    model = MTP(backbone, NUM_MODES)
    model = model.to(device)

    loss_function = MTPLoss(NUM_MODES, 1, 5)

    current_loss = 10000

    optimizer = optim.SGD(model.parameters(), lr=0.1)
Example #16
    def _mock_submission(nusc: NuScenes, split: str) -> Dict[str, dict]:
        """
        Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT
        prediction per sample. Predictions will be permuted randomly along all axes.
        """

        def random_class(category_name: str) -> str:
            # Alter 10% of the valid labels.
            class_names = sorted(DETECTION_NAMES)
            tmp = category_to_detection_name(category_name)
            if tmp is not None and np.random.rand() < .9:
                return tmp
            else:
                return class_names[np.random.randint(0, len(class_names) - 1)]

        def random_attr(name: str) -> str:
            """
            This is the most straightforward way to generate a random attribute.
            Not currently used because we want the test fixture to be backwards compatible.
            """
            # Get relevant attributes.
            rel_attributes = detection_name_to_rel_attributes(name)

            if len(rel_attributes) == 0:
                # Empty string for classes without attributes.
                return ''
            else:
                # Pick a random attribute otherwise.
                return rel_attributes[np.random.randint(0, len(rel_attributes))]

        mock_meta = {
            'use_camera': False,
            'use_lidar': True,
            'use_radar': False,
            'use_map': False,
            'use_external': False,
        }
        mock_results = {}
        splits = create_splits_scenes()
        val_samples = []
        for sample in nusc.sample:
            if nusc.get('scene', sample['scene_token'])['name'] in splits[split]:
                val_samples.append(sample)

        for sample in tqdm(val_samples):
            sample_res = []
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                detection_name = random_class(ann['category_name'])
                sample_res.append(
                    {
                        'sample_token': sample['token'],
                        'translation': list(np.array(ann['translation']) + 5 * (np.random.rand(3) - 0.5)),
                        'size': list(np.array(ann['size']) * 2 * (np.random.rand(3) + 0.5)),
                        'rotation': list(np.array(ann['rotation']) + ((np.random.rand(4) - 0.5) * .1)),
                        'velocity': list(nusc.box_velocity(ann_token)[:2] * (np.random.rand(3)[:2] + 0.5)),
                        'detection_name': detection_name,
                        'detection_score': random.random(),
                        'attribute_name': random_attr(detection_name)
                    })
            mock_results[sample['token']] = sample_res
        mock_submission = {
            'meta': mock_meta,
            'results': mock_results
        }
        return mock_submission
Example #17
    def __init__(self,
                 config: TrackingConfig,
                 result_path: str,
                 eval_set: str,
                 output_dir: str,
                 nusc_version: str,
                 nusc_dataroot: str,
                 verbose: bool = True,
                 render_classes: List[str] = None):
        """
        Initialize a TrackingEval object.
        :param config: A TrackingConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
        :param output_dir: Folder to save plots and results to.
        :param nusc_version: The version of the NuScenes dataset.
        :param nusc_dataroot: Path of the nuScenes dataset on disk.
        :param verbose: Whether to print to stdout.
        :param render_classes: Classes to render to disk or None.
        """
        self.cfg = config
        self.result_path = result_path
        self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.render_classes = render_classes

        # Check result file exists.
        assert os.path.exists(result_path), 'Error: The result file does not exist!'

        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)

        # Initialize NuScenes object.
        # We do not store it in self to let garbage collection take care of it and save memory.
        nusc = NuScenes(version=nusc_version, verbose=verbose, dataroot=nusc_dataroot)

        # Load data.
        if verbose:
            print('Initializing nuScenes tracking evaluation')
        pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, TrackingBox,
                                                verbose=verbose)
        gt_boxes = load_gt(nusc, self.eval_set, TrackingBox, verbose=verbose)

        assert set(pred_boxes.sample_tokens) == set(gt_boxes.sample_tokens), \
            "Samples in split don't match samples in predicted tracks."

        # Add center distances.
        pred_boxes = add_center_dist(nusc, pred_boxes)
        gt_boxes = add_center_dist(nusc, gt_boxes)

        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering tracks')
        pred_boxes = filter_eval_boxes(nusc, pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth tracks')
        gt_boxes = filter_eval_boxes(nusc, gt_boxes, self.cfg.class_range, verbose=verbose)

        self.sample_tokens = gt_boxes.sample_tokens

        # Convert boxes to tracks format.
        self.tracks_gt = create_tracks(gt_boxes, nusc, self.eval_set, gt=True)
        self.tracks_pred = create_tracks(pred_boxes, nusc, self.eval_set, gt=False)
Example #18
                        default=0,
                        type=int,
                        help='Whether to print outputs to stdout')

    args = parser.parse_args()
    out_dir = os.path.expanduser(args.out_dir)
    scene_name = args.scene
    verbose = bool(args.verbose)

    out_path = osp.join(out_dir, '%s.obj' % scene_name)
    if osp.exists(out_path):
        print('=> File {} already exists. Aborting.'.format(out_path))
        exit()
    else:
        print('=> Extracting scene {} to {}'.format(scene_name, out_path))

    # Create output folder
    if out_dir != '' and not osp.isdir(out_dir):
        os.makedirs(out_dir)

    # Extract point-cloud for the specified scene
    nusc = NuScenes()
    scene_tokens = [s['token'] for s in nusc.scene if s['name'] == scene_name]
    assert len(scene_tokens) == 1, 'Error: Invalid scene %s' % scene_name

    export_scene_pointcloud(nusc,
                            out_path,
                            scene_tokens[0],
                            channel='LIDAR_TOP',
                            verbose=verbose)
Example #19
def filter_eval_boxes(nusc: NuScenes,
                      eval_boxes: EvalBoxes,
                      max_dist: Dict[str, float],
                      verbose: bool = False) -> EvalBoxes:
    """
    Applies filtering to boxes: distance, bike racks and points per box.
    :param nusc: An instance of the NuScenes class.
    :param eval_boxes: An instance of the EvalBoxes class.
    :param max_dist: Maps the detection name to the eval distance threshold for that class.
    :param verbose: Whether to print to stdout.
    """
    # Retrieve box type for detection/tracking boxes.
    class_field = _get_box_class_field(eval_boxes)

    # Accumulators for number of filtered boxes.
    total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0
    for ind, sample_token in enumerate(eval_boxes.sample_tokens):

        # Filter on distance first.
        total += len(eval_boxes[sample_token])
        eval_boxes.boxes[sample_token] = [
            box for box in eval_boxes[sample_token]
            if box.ego_dist < max_dist[getattr(box, class_field)]
        ]
        dist_filter += len(eval_boxes[sample_token])

        # Then remove boxes with zero points in them. Eval boxes have -1 points by default.
        eval_boxes.boxes[sample_token] = [
            box for box in eval_boxes[sample_token] if box.num_pts != 0
        ]
        point_filter += len(eval_boxes[sample_token])

        # Perform bike-rack filtering.
        sample_anns = nusc.get('sample', sample_token)['anns']
        bikerack_recs = [
            nusc.get('sample_annotation', ann) for ann in sample_anns
            if nusc.get('sample_annotation', ann)['category_name'] ==
            'static_object.bicycle_rack'
        ]
        bikerack_boxes = [
            Box(rec['translation'], rec['size'], Quaternion(rec['rotation']))
            for rec in bikerack_recs
        ]
        filtered_boxes = []
        for box in eval_boxes[sample_token]:
            if getattr(box, class_field) in ['bicycle', 'motorcycle']:
                in_a_bikerack = False
                for bikerack_box in bikerack_boxes:
                    if np.sum(points_in_box(
                            bikerack_box,
                            np.expand_dims(np.array(box.translation),
                                           axis=1))) > 0:
                        in_a_bikerack = True
                        break
                if not in_a_bikerack:
                    filtered_boxes.append(box)
            else:
                filtered_boxes.append(box)

        eval_boxes.boxes[sample_token] = filtered_boxes
        bike_rack_filter += len(eval_boxes.boxes[sample_token])

    if verbose:
        print("=> Original number of boxes: %d" % total)
        print("=> After distance based filtering: %d" % dist_filter)
        print("=> After LIDAR points based filtering: %d" % point_filter)
        print("=> After bike rack filtering: %d" % bike_rack_filter)

    return eval_boxes
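
# Toy check of the bike-rack test above (synthetic boxes, added for illustration):
# a bicycle whose center falls inside a bike-rack box is exactly the case that gets dropped.
import numpy as np
from pyquaternion import Quaternion
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import points_in_box

rack = Box(center=[0.0, 0.0, 0.0], size=[2.0, 2.0, 2.0],
           orientation=Quaternion(axis=[0, 0, 1], angle=0.0))
bike_center = np.array([[0.1], [0.0], [0.0]])  # 3 x 1, like np.expand_dims(box.translation, axis=1).
assert points_in_box(rack, bike_center).sum() > 0  # Inside the rack, so the bike would be filtered.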
Example #20
def pointcloud_color_from_image(
        nusc: NuScenes, pointsensor_token: str,
        camera_token: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Given a point sensor (lidar/radar) token and camera sample_data token, load point-cloud and map it to the image
    plane, then retrieve the colors of the closest image pixels.
    :param nusc: NuScenes instance.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param camera_token: Camera sample data token.
    :return (coloring <np.float: 3, n>, mask <np.bool: m>). Returns the colors for n points that reproject into the
        image out of m total points. The mask indicates which points are selected.
    """

    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)

    pc = LidarPointCloud.from_file(
        osp.join(nusc.dataroot, pointsensor['filename']))
    im = Image.open(osp.join(nusc.dataroot, cam['filename']))

    # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor',
                         pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :],
                         np.array(cs_record['camera_intrinsic']),
                         normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    mask = np.ones(depths.shape[0], dtype=bool)
    mask = np.logical_and(mask, depths > 0)
    mask = np.logical_and(mask, points[0, :] > 1)
    mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    mask = np.logical_and(mask, points[1, :] > 1)
    mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    points = points[:, mask]

    # Pick the colors of the points
    im_data = np.array(im)
    coloring = np.zeros(points.shape)
    for i, p in enumerate(points.transpose()):
        point = p[:2].round().astype(np.int32)
        coloring[:, i] = im_data[point[1], point[0], :]

    return coloring, mask
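
# Usage sketch (added for illustration): color the top lidar of the first sample using
# the front camera; channel names are the standard nuScenes ones.
sample = nusc.sample[0]
coloring, mask = pointcloud_color_from_image(nusc,
                                             sample['data']['LIDAR_TOP'],
                                             sample['data']['CAM_FRONT'])
print('%d of %d points project into CAM_FRONT' % (mask.sum(), mask.shape[0]))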
Example #21
def export_scene_pointcloud(nusc: NuScenes,
                            out_path: str,
                            scene_token: str,
                            channel: str = 'LIDAR_TOP',
                            min_dist: float = 3.0,
                            max_dist: float = 30.0,
                            verbose: bool = True) -> None:
    """
    Export fused point clouds of a scene to a Wavefront OBJ file.
    This point-cloud can be viewed in your favorite 3D rendering tool, e.g. Meshlab or Maya.
    :param nusc: NuScenes instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    """

    # Check inputs.
    valid_channels = [
        'LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT',
        'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT'
    ]
    camera_channels = [
        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',
        'CAM_BACK', 'CAM_BACK_RIGHT'
    ]
    assert channel in valid_channels, 'Input channel {} not valid.'.format(
        channel)

    # Get records from DB.
    scene_rec = nusc.get('scene', scene_token)
    start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])

    # Make an ordered list of all sample_data frames in this scene, including the first one.
    cur_sd_rec = sd_rec
    sd_tokens = [cur_sd_rec['token']]
    while cur_sd_rec['next'] != '':
        cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    # Write point-cloud.
    with open(out_path, 'w') as f:
        f.write("OBJ File:\n")

        for sd_token in tqdm(sd_tokens):
            lidar_rec = nusc.get('sample_data', sd_token)
            if verbose:
                print('Processing {}'.format(lidar_rec['filename']))
            sample_rec = nusc.get('sample', lidar_rec['sample_token'])
            pc = LidarPointCloud.from_file(
                osp.join(nusc.dataroot, lidar_rec['filename']))

            # Get point cloud colors.
            coloring = np.ones((3, pc.points.shape[1])) * -1
            for cam_channel in camera_channels:
                camera_token = sample_rec['data'][cam_channel]
                cam_coloring, cam_mask = pointcloud_color_from_image(
                    nusc, sd_token, camera_token)
                coloring[:, cam_mask] = cam_coloring

            # Points live in the sensor frame. So they need to be transformed into the global frame.
            # First step: transform the point cloud to the ego vehicle frame for the timestamp of the sweep.
            cs_record = nusc.get('calibrated_sensor',
                                 lidar_rec['calibrated_sensor_token'])
            pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
            pc.translate(np.array(cs_record['translation']))

            # Optional Filter by distance to remove the ego vehicle.
            dists_origin = np.sqrt(np.sum(pc.points[:3, :]**2, axis=0))
            keep = np.logical_and(min_dist <= dists_origin,
                                  dists_origin <= max_dist)
            pc.points = pc.points[:, keep]
            coloring = coloring[:, keep]
            if verbose:
                print('Distance filter: Keeping %d of %d points...' %
                      (keep.sum(), len(keep)))

            # Second step: transform to the global frame.
            poserecord = nusc.get('ego_pose', lidar_rec['ego_pose_token'])
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

            # Write points to file, skipping points without a color.
            for (v, c) in zip(pc.points.transpose(), coloring.transpose()):
                if (c == -1).any():
                    # Ignore points without a color.
                    continue
                f.write(
                    "v {v[0]:.8f} {v[1]:.8f} {v[2]:.8f} {c[0]:.4f} {c[1]:.4f} {c[2]:.4f}\n"
                    .format(v=v, c=c / 255.0))

Example #22
def load_gt(nusc: NuScenes,
            eval_split: str,
            box_cls,
            verbose: bool = False) -> EvalBoxes:
    """
    Loads ground truth boxes from DB.
    :param nusc: A NuScenes instance.
    :param eval_split: The evaluation split for which we load GT boxes.
    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
    :param verbose: Whether to print messages to stdout.
    :return: The GT boxes.
    """
    # Init.
    if box_cls == DetectionBox:
        attribute_map = {a['token']: a['name'] for a in nusc.attribute}

    if verbose:
        print('Loading annotations for {} split from nuScenes version: {}'.
              format(eval_split, nusc.version))
    # Read out all sample_tokens in DB.
    sample_tokens_all = [s['token'] for s in nusc.sample]
    assert len(sample_tokens_all) > 0, "Error: Database has no samples!"

    # Only keep samples from this split.
    splits = create_splits_scenes()

    # Check compatibility of split with nusc_version.
    version = nusc.version
    if eval_split in {'train', 'val', 'train_detect', 'train_track'}:
        assert version.endswith('trainval'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    elif eval_split in {'mini_train', 'mini_val'}:
        assert version.endswith('mini'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    elif eval_split == 'test':
        assert version.endswith('test'), \
            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
    else:
        raise ValueError(
            'Error: Requested split {} which this function cannot map to the correct NuScenes version.'
            .format(eval_split))

    if eval_split == 'test':
        # Check that you aren't trying to cheat :).
        assert len(nusc.sample_annotation) > 0, \
            'Error: You are trying to evaluate on the test set but you do not have the annotations!'

    sample_tokens = []
    for sample_token in sample_tokens_all:
        scene_token = nusc.get('sample', sample_token)['scene_token']
        scene_record = nusc.get('scene', scene_token)
        if scene_record['name'] in splits[eval_split]:
            sample_tokens.append(sample_token)

    all_annotations = EvalBoxes()

    # Load annotations and filter predictions and annotations.
    tracking_id_set = set()
    for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):

        sample = nusc.get('sample', sample_token)
        sample_annotation_tokens = sample['anns']

        sample_boxes = []
        for sample_annotation_token in sample_annotation_tokens:

            sample_annotation = nusc.get('sample_annotation',
                                         sample_annotation_token)
            if box_cls == DetectionBox:
                # Get label name in detection task and filter unused labels.
                detection_name = category_to_detection_name(
                    sample_annotation['category_name'])
                if detection_name is None:
                    continue

                # Get attribute_name.
                attr_tokens = sample_annotation['attribute_tokens']
                attr_count = len(attr_tokens)
                if attr_count == 0:
                    attribute_name = ''
                elif attr_count == 1:
                    attribute_name = attribute_map[attr_tokens[0]]
                else:
                    raise Exception(
                        'Error: GT annotations must not have more than one attribute!'
                    )

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(
                            sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] +
                        sample_annotation['num_radar_pts'],
                        detection_name=detection_name,
                        detection_score=-1.0,  # GT samples do not have a score.
                        attribute_name=attribute_name))
            elif box_cls == TrackingBox:
                # Use nuScenes token as tracking id.
                tracking_id = sample_annotation['instance_token']
                tracking_id_set.add(tracking_id)

                # Get label name in detection task and filter unused labels.
                tracking_name = category_to_tracking_name(
                    sample_annotation['category_name'])
                if tracking_name is None:
                    continue

                sample_boxes.append(
                    box_cls(
                        sample_token=sample_token,
                        translation=sample_annotation['translation'],
                        size=sample_annotation['size'],
                        rotation=sample_annotation['rotation'],
                        velocity=nusc.box_velocity(
                            sample_annotation['token'])[:2],
                        num_pts=sample_annotation['num_lidar_pts'] +
                        sample_annotation['num_radar_pts'],
                        tracking_id=tracking_id,
                        tracking_name=tracking_name,
                        tracking_score=-1.0  # GT samples do not have a score.
                    ))
            else:
                raise NotImplementedError('Error: Invalid box_cls %s!' %
                                          box_cls)

        all_annotations.add_boxes(sample_token, sample_boxes)

    if verbose:
        print("Loaded ground truth annotations for {} samples.".format(
            len(all_annotations.sample_tokens)))

    return all_annotations
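
# Usage sketch (added for illustration): load detection GT for the mini_val split and
# count the boxes. EvalBoxes supports indexing by sample token, as used throughout.
nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
gt = load_gt(nusc, 'mini_val', DetectionBox, verbose=True)
print('%d samples, %d boxes' % (len(gt.sample_tokens),
                                sum(len(gt[t]) for t in gt.sample_tokens)))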
Example #23
    def test_filter_eval_boxes(self):
        """
        Tests filter_eval_boxes by placing boxes at known positions (e.g. on top of a bike rack)
        and checking that the distance, num_pts and bike-rack filters keep exactly the expected boxes.
        """
        # Get the maximum distance from the config
        cfg = config_factory('detection_cvpr_2019')
        max_dist = cfg.class_range

        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        nusc = NuScenes(version='v1.0-mini',
                        dataroot=os.environ['NUSCENES'],
                        verbose=False)

        sample_token = '0af0feb5b1394b928dd13d648de898f5'
        # This sample has a bike rack instance 'bfe685042aa34ab7b2b2f24ee0f1645f' with these parameters
        # 'translation': [683.681, 1592.002, 0.809],
        # 'size': [1.641, 14.465, 1.4],
        # 'rotation': [0.3473693995546558, 0.0, 0.0, 0.9377283723195315]

        # Test bicycle filtering by creating a box at the same position as the bike rack.
        box1 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         0)  # box1 should be filtered.

        # Test motorcycle filtering by creating a box at the same position as the bike rack.
        box2 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='motorcycle')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         0)  # both box1 and box2 should be filtered.

        # Now create a car at the same position as the bike rack.
        box3 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='car')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2, box3])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         1)  # box1 and box2 to be filtered. box3 to stay.
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')

        # Now add a bike outside the bike rack.
        box4 = DetectionBox(sample_token=sample_token,
                            translation=(68.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle')

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)

        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         2)  # box1, box2 to be filtered. box3, box4 to stay.
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name,
                         'bicycle')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0],
                         68.681)

        # Add another bike on the bike rack center,
        # but set the ego_dist (derived from ego_translation) higher than what's defined in max_dist
        box5 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle',
                            ego_translation=(100.0, 0.0, 0.0))

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4, box5])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         2)  # box1, box2, box5 filtered. box3, box4 to stay.
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name,
                         'bicycle')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0],
                         68.681)

        # Add another bike on the bike rack center but set the num_pts to be zero so that it gets filtered.
        box6 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle',
                            num_pts=0)

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token,
                             [box1, box2, box3, box4, box5, box6])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         2)  # box1, box2, box5, box6 filtered. box3, box4 stay
        self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name,
                         'car')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name,
                         'bicycle')
        self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0],
                         68.681)

        # Check for a sample where there are no bike racks. Everything should be filtered correctly.
        sample_token = 'ca9a282c9e77460f8360f564131a8af5'  # This sample has no bike-racks.

        box1 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle',
                            ego_translation=(25.0, 0.0, 0.0))

        box2 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='motorcycle',
                            ego_translation=(45.0, 0.0, 0.0))

        box3 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='car',
                            ego_translation=(45.0, 0.0, 0.0))

        box4 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='car',
                            ego_translation=(55.0, 0.0, 0.0))

        box5 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle',
                            num_pts=1)

        box6 = DetectionBox(sample_token=sample_token,
                            translation=(683.681, 1592.002, 0.809),
                            size=(1, 1, 1),
                            detection_name='bicycle',
                            num_pts=0)

        eval_boxes = EvalBoxes()
        eval_boxes.add_boxes(sample_token,
                             [box1, box2, box3, box4, box5, box6])

        filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
        self.assertEqual(len(filtered_boxes.boxes[sample_token]),
                         3)  # box2, box4, box6 filtered. box1, box3, box5 stay
        self.assertEqual(filtered_boxes.boxes[sample_token][0].ego_dist, 25.0)
        self.assertEqual(filtered_boxes.boxes[sample_token][1].ego_dist, 45.0)
        self.assertEqual(filtered_boxes.boxes[sample_token][2].num_pts, 1)