Example #1
    def test_load_pointclouds(self):
        """
        Loads up lidar and radar pointclouds.
        """
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
        dataroot = os.environ['NUSCENES']
        nusc = NuScenes(version='v1.0-mini', dataroot=dataroot, verbose=False)
        sample_rec = nusc.sample[0]
        lidar_name = nusc.get('sample_data',
                              sample_rec['data']['LIDAR_TOP'])['filename']
        radar_name = nusc.get('sample_data',
                              sample_rec['data']['RADAR_FRONT'])['filename']
        lidar_path = os.path.join(dataroot, lidar_name)
        radar_path = os.path.join(dataroot, radar_name)
        pc1 = LidarPointCloud.from_file(lidar_path)
        pc2 = RadarPointCloud.from_file(radar_path)
        pc3, _ = LidarPointCloud.from_file_multisweep(nusc,
                                                      sample_rec,
                                                      'LIDAR_TOP',
                                                      'LIDAR_TOP',
                                                      nsweeps=2)
        pc4, _ = RadarPointCloud.from_file_multisweep(nusc,
                                                      sample_rec,
                                                      'RADAR_FRONT',
                                                      'RADAR_FRONT',
                                                      nsweeps=2)

        # Check for valid dimensions.
        assert pc1.points.shape[0] == pc3.points.shape[0] == 4, \
            'Error: Invalid dimension for lidar pointcloud!'
        assert pc2.points.shape[0] == pc4.points.shape[0] == 18, \
            'Error: Invalid dimension for radar pointcloud!'
        assert pc1.points.dtype == pc3.points.dtype, 'Error: Invalid dtype for lidar pointcloud!'
        assert pc2.points.dtype == pc4.points.dtype, 'Error: Invalid dtype for radar pointcloud!'
Example #2
    def __init__(self, root='/datasets/nuscene/v1.0-mini', sampling_time=3, agent_time=0, layer_names=None,
                 colors=None, resolution: float = 0.1,  # meters / pixel
                 meters_ahead: float = 25, meters_behind: float = 25,
                 meters_left: float = 25, meters_right: float = 25, version='v1.0-mini'):
        if layer_names is None:
            layer_names = ['drivable_area', 'road_segment', 'road_block',
                           'lane', 'ped_crossing', 'walkway', 'stop_line',
                           'carpark_area', 'road_divider', 'lane_divider']
        if colors is None:
            colors = [(255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), ]
        self.root = root
        self.nus = NuScenes(version, dataroot=self.root)
        self.scenes = self.nus.scene
        self.samples = self.nus.sample

        self.layer_names = layer_names
        self.colors = colors

        self.helper = PredictHelper(self.nus)

        self.seconds = sampling_time
        self.agent_seconds = agent_time

        self.static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors,
                                                  resolution=resolution, meters_ahead=meters_ahead,
                                                  meters_behind=meters_behind,
                                                  meters_left=meters_left, meters_right=meters_right)
        self.agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.agent_seconds,
                                                      resolution=resolution, meters_ahead=meters_ahead,
                                                      meters_behind=meters_behind,
                                                      meters_left=meters_left, meters_right=meters_right)
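A hedged follow-up sketch (devkit prediction API; instance_token and sample_token are placeholders): the two rasterizers built above are typically combined into a single model input image with the devkit's Rasterizer combinator:

from nuscenes.prediction.input_representation.combinators import Rasterizer
from nuscenes.prediction.input_representation.interface import InputRepresentation

# static_layer / agent_layer as constructed in the __init__ above.
input_repr = InputRepresentation(static_layer, agent_layer, Rasterizer())
image = input_repr.make_input_representation(instance_token, sample_token)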
Example #3
def get_sample_ground_plane(root_path, version):
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    rets = {}

    for sample in tqdm(nusc.sample):
        chan = "LIDAR_TOP"
        sd_token = sample["data"][chan]
        sd_rec = nusc.get("sample_data", sd_token)

        lidar_path, _, _ = get_sample_data(nusc, sd_token)
        points = read_file(lidar_path)
        points = np.concatenate((points[:, :3], np.ones((points.shape[0], 1))),
                                axis=1)

        plane, inliers, outliers = fit_plane_LSE_RANSAC(
            points, return_outlier_list=True)

        xx = points[:, 0]
        yy = points[:, 1]
        zz = (-plane[0] * xx - plane[1] * yy - plane[3]) / plane[2]

        rets.update({sd_token: {
            "plane": plane,
            "height": zz,
        }})

    with open(nusc.root_path / "infos_trainval_ground_plane.pkl", "wb") as f:
        pickle.dump(rets, f)
Example #4
def add_center_dist(nusc: NuScenes, eval_boxes: EvalBoxes):
    """
    Adds the cylindrical (xy) center distance from ego vehicle to each box.
    :param nusc: The NuScenes instance.
    :param eval_boxes: A set of boxes, either GT or predictions.
    :return: eval_boxes augmented with center distances.
    """
    for sample_token in eval_boxes.sample_tokens:
        sample_rec = nusc.get('sample', sample_token)
        sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
        pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])

        for box in eval_boxes[sample_token]:
            # Both boxes and ego pose are given in global coord system, so distance can be calculated directly.
            # Note that the z component of the ego pose is 0.
            ego_translation = (box.translation[0] -
                               pose_record['translation'][0],
                               box.translation[1] -
                               pose_record['translation'][1],
                               box.translation[2] -
                               pose_record['translation'][2])
            if isinstance(box, DetectionBox) or isinstance(box, TrackingBox):
                box.ego_translation = ego_translation
            else:
                raise NotImplementedError

    return eval_boxes
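For reference, a hedged sketch of how this ego_translation feeds the xy center distance used for the distance filtering further down this page (the helper name is an assumption; the devkit exposes this value as the box's ego_dist):

import numpy as np

def xy_center_dist(ego_translation):
    # Cylindrical (xy) distance from the ego vehicle; the z component is ignored.
    return float(np.sqrt(ego_translation[0] ** 2 + ego_translation[1] ** 2))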
Example #5
def parse_frame(
    data: NuScenes,
    scene_name: str,
    frame_index: int,
    cam_token: str,
    boxes: Optional[List[Box]] = None,
) -> Tuple[Frame, Optional[str]]:
    """Parse a single camera frame."""
    cam_data = data.get("sample_data", cam_token)
    ego_pose_cam = data.get("ego_pose", cam_data["ego_pose_token"])
    cam_filepath = cam_data["filename"]
    img_wh = (cam_data["width"], cam_data["height"])
    calibration_cam = data.get("calibrated_sensor",
                               cam_data["calibrated_sensor_token"])
    labels: Optional[List[Label]] = None
    if boxes is not None:
        labels = parse_labels(data, boxes, ego_pose_cam, calibration_cam,
                              img_wh)

    frame = Frame(
        name=os.path.basename(cam_filepath),
        videoName=scene_name,
        frameIndex=frame_index,
        url=cam_filepath,
        timestamp=cam_data["timestamp"],
        extrinsics=get_extrinsics(ego_pose_cam, calibration_cam),
        intrinsics=calibration_to_intrinsics(calibration_cam),
        size=ImageSize(width=img_wh[0], height=img_wh[1]),
        labels=labels,
    )
    next_token: Optional[str] = None
    if (cam_data["next"] != ""
            and not data.get("sample_data", cam_data["next"])["is_key_frame"]):
        next_token = cam_data["next"]
    return frame, next_token
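A hedged usage sketch (the frame indexing is an assumption): the returned next_token can drive a loop that also parses the intermediate, non-keyframe sweeps of the same camera:

frames = []
token, index = cam_token, 0
while token is not None:
    frame, token = parse_frame(data, scene_name, index, token)
    frames.append(frame)
    index += 1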
Example #6
 def __init__(self,
              DATAROOT='./data/sets/nuscenes',
              dataset_version='v1.0-mini'):
     self.DATAROOT = DATAROOT
     self.dataset_version = dataset_version
     self.nuscenes = NuScenes(dataset_version, dataroot=self.DATAROOT)
     self.helper = PredictHelper(self.nuscenes)
Example #7
def get_camera_data(nusc: NuScenes,
                    annotation_token: str,
                    box_vis_level: BoxVisibility = BoxVisibility.ANY):
    """
    Given an annotation token (a 3D detection in the world coordinate frame), this
    method returns the camera in which the annotation is located. If the box is split
    between two cameras, the first one found is returned.
    :param nusc: NuScenes instance.
    :param annotation_token: Annotation token.
    :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
    :return camera channel.
    """
    # Get the sample annotation record
    ann_record = nusc.get('sample_annotation', annotation_token)

    sample_record = nusc.get('sample', ann_record['sample_token'])

    boxes, cam = [], []

    # Collect every camera channel in the sample
    cams = [key for key in sample_record['data'].keys() if 'CAM' in key]

    # Try each camera until one contains the annotation
    for cam in cams:
        _, boxes, _ = nusc.get_sample_data(
            sample_record['data'][cam],
            box_vis_level=box_vis_level,
            selected_anntokens=[annotation_token])
        if len(boxes) > 0:
            break  # Stop at the first camera whose image contains the annotation
    assert len(boxes) < 2, "Found multiple annotations. Something is wrong!"

    return cam
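A hedged usage sketch (the annotation token is simply the first annotation of the first sample):

ann_token = nusc.sample[0]['anns'][0]
channel = get_camera_data(nusc, ann_token, box_vis_level=BoxVisibility.ANY)
print(channel)  # e.g. 'CAM_FRONT'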
Example #8
def load_data(filepath: str, version: str) -> Tuple[NuScenes, pd.DataFrame]:
    """Load nuscenes data and extract meta-information into dataframe."""
    data = NuScenes(version=version, dataroot=filepath, verbose=True)
    records = [(data.get("sample",
                         record["first_sample_token"])["timestamp"], record)
               for record in data.scene]
    entries = []

    for start_time, record in sorted(records):
        start_time = (
            data.get("sample", record["first_sample_token"])["timestamp"] /
            1000000)
        token = record["token"]
        name = record["name"]
        date = datetime.utcfromtimestamp(start_time)
        host = "-".join(record["name"].split("-")[:2])
        first_sample_token = record["first_sample_token"]

        entries.append((host, name, date, token, first_sample_token))

    dataframe = pd.DataFrame(
        entries,
        columns=[
            "host",
            "scene_name",
            "date",
            "scene_token",
            "first_sample_token",
        ],
    )
    return data, dataframe
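A hedged usage sketch showing how the returned dataframe might be queried (the dataroot is a placeholder):

data, df = load_data('/data/sets/nuscenes', 'v1.0-mini')
print(df.groupby('host').size())      # scenes per vehicle
print(df.sort_values('date').head())  # earliest scenes first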
Example #9
    def __init__(
            self,
            root_path=f'/media/starlet/LdTho/data/sets/nuscenes/v1.0-{VERSION}',
            info_path=None,
            class_names=["traffic_cone"],
            prep_func=None,
            num_point_features=None):
        self.NumPointFeatures = 4
        self.class_names = class_names
        self.nusc = NuScenes(dataroot=root_path, version=f'v1.0-{VERSION}')
        self._prep_func = prep_func
        self.filtered_sample_tokens = []
        for sample in self.nusc.sample:
            sample_token = sample['token']
            sample_lidar_token = sample['data']['LIDAR_TOP']
            boxes = self.nusc.get_boxes(sample_lidar_token)
            box_names = [
                NameMapping[b.name] for b in boxes
                if b.name in NameMapping.keys()
            ]
            for box in boxes:
                if box.name not in NameMapping.keys():
                    continue
                # if NameMapping[box.name] in self.class_names:
                if (NameMapping[box.name] in ["traffic_cone"]
                        and box_names.count('traffic_cone') > MIN_CONES_PER_SAMPLE):
                    self.filtered_sample_tokens.append(sample_token)
                    break
        self.filtered_sample_tokens = self.filtered_sample_tokens[:round(
            len(self.filtered_sample_tokens) * TRAINVAL_SPLIT_PERCENTAGE)]

        self.split = np.arange(len(self.filtered_sample_tokens))
Example #10
def pred_to_world(nusc: NuScenes,
                  pointsensor_token: str,
                  bbox_3d,
                  pointsensor_channel: str = 'LIDAR_TOP'):
    """
    Given a predicted box in the point-sensor (lidar/radar) coordinate frame and the
    corresponding pointsensor sample_data token, transform the box from the sensor
    frame to the world coordinate frame.
    :param nusc: NuScenes instance.
    :param pointsensor_token: Lidar/radar sample_data token.
    :param bbox_3d: Box object with the predicted 3D bbox info.
    :param pointsensor_channel: Laser channel name, e.g. 'LIDAR_TOP'.
    :return: Box mapped into the world coordinate frame.
    """

    # Point LiDAR sample
    point_data = nusc.get('sample_data',
                          pointsensor_token)  # Sample LiDAR info

    # From LiDAR to ego
    cs_rec = nusc.get('calibrated_sensor',
                      point_data['calibrated_sensor_token'])
    # Transformation metadata from ego to world coordinate frame
    pose_rec = nusc.get('ego_pose', point_data['ego_pose_token'])

    # Map from sensor to ego-vehicle coordinate frame
    bbox_3d.rotate(Quaternion(cs_rec['rotation']))
    bbox_3d.translate(np.array(cs_rec['translation']))

    # Map from ego-vehicle to world coordinate frame
    bbox_3d.rotate(Quaternion(pose_rec['rotation']))
    bbox_3d.translate(np.array(pose_rec['translation']))

    return bbox_3d
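For the opposite direction (world frame back to the sensor frame), a hedged sketch that applies the inverse transforms in reverse order (the function name is an assumption):

def world_to_sensor(nusc, pointsensor_token, bbox_3d):
    point_data = nusc.get('sample_data', pointsensor_token)
    cs_rec = nusc.get('calibrated_sensor', point_data['calibrated_sensor_token'])
    pose_rec = nusc.get('ego_pose', point_data['ego_pose_token'])

    # World -> ego-vehicle coordinate frame
    bbox_3d.translate(-np.array(pose_rec['translation']))
    bbox_3d.rotate(Quaternion(pose_rec['rotation']).inverse)

    # Ego-vehicle -> sensor coordinate frame
    bbox_3d.translate(-np.array(cs_rec['translation']))
    bbox_3d.rotate(Quaternion(cs_rec['rotation']).inverse)

    return bbox_3d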
Example #11
def seg_concat():
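    # NOTE: file_path (root of the raw lidar files) and save_path (output root)
    # are not defined in this snippet; set them before calling this function.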
    nusc = NuScenes(version='v1.0-trainval',
                    dataroot='/mrtstorage/users/kpeng/nuscene_pcdet/data/nuscenes/v1.0-trainval/',
                    verbose=True)
    for my_sample in nusc.sample:
        sample_data_token = my_sample['data']['LIDAR_TOP']
        ori_filename = nusc.get('sample_data', sample_data_token)['filename']
        #print(ori_filename)
        anno_data = torch.from_numpy(
            np.float32(
                np.fromfile(
                    "/mrtstorage/users/kpeng/nu_lidar_seg/processed_anno/" +
                    sample_data_token + "_lidarseg.bin",
                    dtype=np.uint8,
                    count=-1)))
        ori_data = np.fromfile(file_path + ori_filename,
                               dtype=np.float32,
                               count=-1)
        ori_data = torch.from_numpy(ori_data.reshape(-1, 5))
        des_data = torch.cat([ori_data, anno_data.unsqueeze(1)],
                             dim=-1).flatten()

        des_data = des_data.numpy().tobytes()

        with open(save_path + ori_filename, 'wb+') as binfile:
            binfile.write(des_data)
    """
Example #12
def filter_eval_boxes(nusc: NuScenes,
                      eval_boxes: EvalBoxes,
                      max_dist: Dict[str, float],
                      verbose: bool = False) -> EvalBoxes:
    """
    Applies filtering to boxes. Distance, bike-racks and points per box.
    :param nusc: An instance of the NuScenes class.
    :param eval_boxes: An instance of the EvalBoxes class.
    :param max_dist: Maps the detection name to the eval distance threshold for that class.
    :param verbose: Whether to print to stdout.
    """
    # Accumulators for number of filtered boxes.
    total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0
    for ind, sample_token in enumerate(eval_boxes.sample_tokens):

        # Filter on distance first
        total += len(eval_boxes[sample_token])
        eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if
                                          box.ego_dist < max_dist[box.detection_name]]
        dist_filter += len(eval_boxes[sample_token])

        # Then remove boxes with zero points in them. Eval boxes have -1 points by default.
        eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if not box.num_pts == 0]
        point_filter += len(eval_boxes[sample_token])

        # Perform bike-rack filtering
        sample_anns = nusc.get('sample', sample_token)['anns']
        bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if
                         nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack']
        bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]

        filtered_boxes = []
        for box in eval_boxes[sample_token]:
            if box.detection_name in ['bicycle', 'motorcycle']:
                in_a_bikerack = False
                for bikerack_box in bikerack_boxes:
                    if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0:
                        in_a_bikerack = True
                if not in_a_bikerack:
                    filtered_boxes.append(box)
            else:
                filtered_boxes.append(box)

        eval_boxes.boxes[sample_token] = filtered_boxes
        bike_rack_filter += len(eval_boxes.boxes[sample_token])
    if verbose:
        print("=> Original number of boxes: %d" % total)
        print("=> After distance based filtering: %d" % dist_filter)
        print("=> After LIDAR points based filtering: %d" % point_filter)
        print("=> After bike rack filtering: %d" % bike_rack_filter)

    return eval_boxes
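A hedged usage sketch: max_dist is typically the per-class range mapping from the detection config (pred_boxes is a placeholder EvalBoxes instance):

cfg = config_factory('detection_cvpr_2019')
pred_boxes = filter_eval_boxes(nusc, pred_boxes, cfg.class_range, verbose=True)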
Example #13
 def __init__(self,
              DATAROOT='./data/sets/nuscenes',
              dataset_version='v1.0-mini'):
     self.DATAROOT = DATAROOT
     self.dataset_version = dataset_version
     self.nuscenes = NuScenes(dataset_version, dataroot=self.DATAROOT)
     self.helper = PredictHelper(self.nuscenes)
     # ['vehicle.car', 'vehicle.truck', 'vehicle.bus.rigid', 'vehicle.bus.bendy', 'vehicle.construction']
     self.category_token_to_id = {
         "fd69059b62a3469fbaef25340c0eab7f": 1,  # 'vehicle.car'
         "6021b5187b924d64be64a702e5570edf": 1,  # 'vehicle.truck'
         "fedb11688db84088883945752e480c2c": 2,  # 'vehicle.bus.rigid'
         "003edbfb9ca849ee8a7496e9af3025d4": 2,  # 'vehicle.bus.bendy'
         "5b3cd6f2bca64b83aa3d0008df87d0e4": 3,  # 'vehicle.construction'
         "7b2ff083a64e4d53809ae5d9be563504": 1
     }  # vehicle.emergency.police
Example #14
def build(dataset_config,
          train_dataloader_config,
          val_dataloader_config,
          grid_size=[480, 360, 32]):
    data_path = train_dataloader_config["data_path"]
    train_imageset = train_dataloader_config["imageset"]
    val_imageset = val_dataloader_config["imageset"]
    train_ref = train_dataloader_config["return_ref"]
    val_ref = val_dataloader_config["return_ref"]
    label_mapping = dataset_config["label_mapping"]

    SemKITTI = get_pc_model_class(dataset_config['pc_dataset_type'])

    nusc = None
    if "nusc" in dataset_config['pc_dataset_type']:
        from nuscenes import NuScenes
        nusc = NuScenes(version='v1.0-trainval', dataroot=data_path, verbose=True)

    train_pt_dataset = SemKITTI(data_path, imageset=train_imageset,
                                return_ref=train_ref, label_mapping=label_mapping, nusc=nusc)
    val_pt_dataset = SemKITTI(data_path, imageset=val_imageset,
                              return_ref=val_ref, label_mapping=label_mapping, nusc=nusc)

    train_dataset = get_model_class(dataset_config['dataset_type'])(
        train_pt_dataset,
        grid_size=grid_size,
        flip_aug=True,
        fixed_volume_space=dataset_config['fixed_volume_space'],
        max_volume_space=dataset_config['max_volume_space'],
        min_volume_space=dataset_config['min_volume_space'],
        ignore_label=dataset_config["ignore_label"],
        rotate_aug=True,
        scale_aug=True,
        transform_aug=True
    )

    val_dataset = get_model_class(dataset_config['dataset_type'])(
        val_pt_dataset,
        grid_size=grid_size,
        fixed_volume_space=dataset_config['fixed_volume_space'],
        max_volume_space=dataset_config['max_volume_space'],
        min_volume_space=dataset_config['min_volume_space'],
        ignore_label=dataset_config["ignore_label"],
    )

    train_dataset_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                       batch_size=train_dataloader_config["batch_size"],
                                                       collate_fn=collate_fn_BEV,
                                                       shuffle=train_dataloader_config["shuffle"],
                                                       num_workers=train_dataloader_config["num_workers"])
    val_dataset_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                     batch_size=val_dataloader_config["batch_size"],
                                                     collate_fn=collate_fn_BEV,
                                                     shuffle=val_dataloader_config["shuffle"],
                                                     num_workers=val_dataloader_config["num_workers"])

    return train_dataset_loader, val_dataset_loader
Example #15
def create_nuscenes_infos(root_path, version="v1.0-trainval", nsweeps=10, filter_zero=True):
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
    assert version in available_vers
    if version == "v1.0-trainval":
        train_scenes = splits.train
        # random.shuffle(train_scenes)
        # train_scenes = train_scenes[:int(len(train_scenes)*0.2)]
        val_scenes = splits.val
    elif version == "v1.0-test":
        train_scenes = splits.test
        val_scenes = []
    elif version == "v1.0-mini":
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise ValueError("unknown")
    test = "test" in version
    root_path = Path(root_path)
    # filter exist scenes. you may only download part of dataset.
    available_scenes = _get_available_scenes(nusc)
    available_scene_names = [s["name"] for s in available_scenes]
    train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set(
        [
            available_scenes[available_scene_names.index(s)]["token"]
            for s in train_scenes
        ]
    )
    val_scenes = set(
        [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes]
    )
    if test:
        print(f"test scene: {len(train_scenes)}")
    else:
        print(f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")

    train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
        nusc, train_scenes, val_scenes, test, nsweeps=nsweeps, filter_zero=filter_zero
    )

    if test:
        print(f"test sample: {len(train_nusc_infos)}")
        with open(
            root_path / "infos_test_{:02d}sweeps_withvelo.pkl".format(nsweeps), "wb"
        ) as f:
            pickle.dump(train_nusc_infos, f)
    else:
        print(
            f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
        )
        with open(
            root_path / "infos_train_{:02d}sweeps_withvelo_filter_{}.pkl".format(nsweeps, filter_zero), "wb"
        ) as f:
            pickle.dump(train_nusc_infos, f)
        with open(
            root_path / "infos_val_{:02d}sweeps_withvelo_filter_{}.pkl".format(nsweeps, filter_zero), "wb"
        ) as f:
            pickle.dump(val_nusc_infos, f)
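A hedged usage sketch (the dataroot is a placeholder); the pickled info files are written into root_path:

create_nuscenes_infos('/data/sets/nuscenes', version='v1.0-mini', nsweeps=10)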
Example #16
def main(args):
    print("Running with args:")
    print(vars(args))

    print("Device:")
    print(device)

    # load data
    nusc = NuScenes(version=args.version, dataroot=args.data_root)
    helper = PredictHelper(nusc)
    data_tokens = get_prediction_challenge_split(args.split_name, dataroot=args.data_root)

    if args.key == "covernet":
        dataset = CoverNetDataset(data_tokens, helper)
    elif args.key == "mtp":
        dataset = MTPDataset(data_tokens, helper)
    else:
        raise ValueError(f"Unknown model key: {args.key}")
    dataloader = DataLoader(dataset, batch_size=16, num_workers=0, shuffle=False)
    print(f"Loaded split {args.split_name}, length {len(dataset)}, in {len(dataloader)} batches.")

    # prepare model
    model = get_model(args)
    model.load_state_dict(
        torch.load(os.path.join(args.experiment_dir, 'weights', args.weights)))

    model.eval()

    predictions = get_predictions(args, dataloader, model)
    json.dump(predictions,
              open(os.path.join(args.experiment_dir, f'{args.key}_preds_{datetime.datetime.now():%Y-%m-%d %Hh%Mm%Ss}_{args.suffix}.json'), "w"))
Example #17
    def __init__(self):
        DATAROOT = '/home/patrick/datasets/nuscenes'  # This is the path where you stored your copy of the nuScenes dataset.
        self.nuscenes = NuScenes('v1.0-mini', dataroot=DATAROOT)
        self.mini_train = get_prediction_challenge_split("mini_train",
                                                         dataroot=DATAROOT)

        self.helper = PredictHelper(self.nuscenes)
        self.physics_oracle = PhysicsOracle(sec_from_now=6, helper=self.helper)

        self.map_rasterizer = StaticLayerRasterizer(self.helper,
                                                    meters_ahead=60,
                                                    meters_behind=10,
                                                    meters_left=35,
                                                    meters_right=35)
        self.agent_rasterizer = FutureAgentBoxesWithFadedHistory(
            self.helper,
            meters_ahead=60,
            meters_behind=10,
            meters_left=35,
            meters_right=35)

        self.json_path = 'manual_results.json'
        self.annotations = []
        if os.path.exists(self.json_path):
            with open(self.json_path) as json_file:
                self.annotations = json.load(
                    json_file)  # Load existing JSON file
Example #18
    def test_delta(self):
        """
        This tests runs the evaluation for an arbitrary random set of predictions.
        This score is then captured in this very test such that if we change the eval code,
        this test will trigger if the results changed.
        """
        random.seed(42)
        np.random.seed(42)
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)

        with open(self.res_mockup, 'w') as f:
            json.dump(self._mock_submission(nusc, 'mini_val'), f, indent=2)

        cfg = config_factory('detection_cvpr_2019')
        nusc_eval = DetectionEval(nusc, cfg, self.res_mockup, eval_set='mini_val', output_dir=self.res_eval_folder,
                                  verbose=False)
        metrics, md_list = nusc_eval.evaluate()

        # 1. Score = 0.22082865720221012. Measured on the branch "release_v0.2" on March 7 2019.
        # 2. Score = 0.2199307290627096. Changed to measure center distance from the ego-vehicle.
        # 3. Score = 0.24954451673961747. Changed to 1.0-mini and cleaned up build script.
        # 4. Score = 0.20478832626986893. Updated treatment of cones, barriers, and other algo tunings.
        # 5. Score = 0.2043569666105005. AP calculation area is changed from >=min_recall to >min_recall.
        # 6. Score = 0.20636954644294506. After bike-rack filtering.
        # 7. Score = 0.20237925145690996. After TP reversion bug.
        # 8. Score = 0.24047129251302665. After bike racks bug.
        # 9. Score = 0.24104572227466886. After bug fix in calc_tp. Include the max recall and exclude the min recall.
        # 10. Score = 0.19449091580477748. Changed to use v1.0 mini_val split.
        self.assertAlmostEqual(metrics.nd_score, 0.19449091580477748)
Example #19
def main(result_path, output_dir, eval_set, dataroot, version, verbose,
         config_name, plot_examples):

    # Init.
    cfg = config_factory(config_name)
    nusc_ = NuScenes(version=version, verbose=verbose, dataroot=dataroot)
    nusc_eval = NuScenesEval(nusc_,
                             config=cfg,
                             result_path=result_path,
                             eval_set=eval_set,
                             output_dir=output_dir,
                             verbose=verbose)

    # Visualize samples.
    random.seed(43)
    if plot_examples:
        sample_tokens_ = list(nusc_eval.sample_tokens)
        random.shuffle(sample_tokens_)
        for sample_token_ in sample_tokens_:
            visualize_sample(
                nusc_,
                sample_token_,
                nusc_eval.gt_boxes,
                nusc_eval.pred_boxes,
                eval_range=max(nusc_eval.cfg.class_range.values()),
                savepath=os.path.join(output_dir,
                                      '{}.png'.format(sample_token_)))

    # Run evaluation.
    metrics, md_list = nusc_eval.run()
    nusc_eval.render(md_list, metrics)
Example #20
def export_ego_poses(nusc: NuScenes, out_dir: str):
    """ Script to render where ego vehicle drives on the maps """

    # Load NuScenes locations
    locations = np.unique([l['location'] for l in nusc.log])

    # Create output directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    for location in locations:
        print('Rendering map {}...'.format(location))
        nusc.render_egoposes_on_map(location)
        out_path = os.path.join(out_dir, 'egoposes-{}.png'.format(location))
        plt.tight_layout()
        plt.savefig(out_path)
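A hedged usage sketch (paths are placeholders):

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes')
export_ego_poses(nusc, out_dir='./ego_poses')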
Example #21
    def basic_test(self,
                   eval_set: str = 'mini_val',
                   add_errors: bool = False,
                   render_curves: bool = False) -> Dict[str, Any]:
        """
        Run the evaluation with fixed randomness on the specified subset, with or without introducing errors in the
        submission.
        :param eval_set: Which split to evaluate on.
        :param add_errors: Whether to use GT as submission or introduce additional errors.
        :param render_curves: Whether to render stats curves to disk.
        :return: The metrics returned by the evaluation.
        """
        random.seed(42)
        np.random.seed(42)
        assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'

        if eval_set.startswith('mini'):
            version = 'v1.0-mini'
        elif eval_set == 'test':
            version = 'v1.0-test'
        else:
            version = 'v1.0-trainval'
        nusc = NuScenes(version=version, dataroot=os.environ['NUSCENES'], verbose=False)

        with open(self.res_mockup, 'w') as f:
            mock = self._mock_submission(nusc, eval_set, add_errors=add_errors)
            json.dump(mock, f, indent=2)

        cfg = config_factory('tracking_nips_2019')
        nusc_eval = TrackingEval(cfg, self.res_mockup, eval_set=eval_set, output_dir=self.res_eval_folder,
                                 nusc_version=version, nusc_dataroot=os.environ['NUSCENES'], verbose=False)
        metrics = nusc_eval.main(render_curves=render_curves)

        return metrics
Example #22
def main(version: str,
         data_root: str,
         split_name: str,
         output_dir: str,
         config_name: str = 'predict_2020_icra.json') -> None:
    """
    Performs inference for all of the baseline models defined in the physics model module.
    :param version: nuScenes data set version.
    :param data_root: Directory where the NuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    config = load_prediction_config(helper, config_name)
    oracle = PhysicsOracle(config.seconds, helper)
    cv_heading = ConstantVelocityHeading(config.seconds, helper)

    cv_preds = []
    oracle_preds = []
    for token in dataset:
        cv_preds.append(cv_heading(token).serialize())
        oracle_preds.append(oracle(token).serialize())

    json.dump(cv_preds, open(os.path.join(output_dir, "cv_preds.json"), "w"))
    json.dump(oracle_preds,
              open(os.path.join(output_dir, "oracle_preds.json"), "w"))
Example #23
  def __init__(self, batch_size, num_classes, training=True, normalize=None):
    self._num_classes = num_classes
    self.training = training
    self.normalize = normalize
    self.batch_size = batch_size
    self.data_path = "/home/fengjia/data/sets/nuscenes"
    self.nusc = NuScenes(version='v1.0-trainval', dataroot=self.data_path, verbose=True)
    self.explorer = NuScenesExplorer(self.nusc)
    self.classes = ('__background__',
                    'pedestrian', 'barrier', 'trafficcone', 'bicycle', 'bus',
                    'car', 'construction', 'motorcycle', 'trailer', 'truck')

    # PATH = self.data_path + '/annotations_list.txt'
    PATH = self.data_path + '/car_pedestrian_annotations_list.txt'

    with open(PATH) as f:
        self.token = [x.strip() for x in f.readlines()]
    self.token = self.token[:400]
Example #24
def export_videos(nusc: NuScenes, out_dir: str):
    """ Export videos of the images displayed in the images. """

    # Collect all scene tokens
    scene_tokens = [s['token'] for s in nusc.scene]

    # Create output directory
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    # Write videos to disk
    for scene_token in scene_tokens:
        scene = nusc.get('scene', scene_token)
        print('Writing scene %s' % scene['name'])
        out_path = os.path.join(out_dir, scene['name']) + '.avi'
        if not os.path.exists(out_path):
            nusc.render_scene(scene['token'], out_path=out_path)
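A hedged usage sketch (paths are placeholders); scenes whose video already exists are skipped, so the export can be resumed:

nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes')
export_videos(nusc, out_dir='./scene_videos')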
Example #25
def get_pcl():
    nusc = NuScenes(version='v1.0-trainval',
                    dataroot='/home/fengjia/data/sets/nuscenes',
                    verbose=True)
    f = open(r'annotations_list.txt', 'w')

    count = 0
    for scene in nusc.scene:
        sample_token = scene['first_sample_token']
        my_sample = nusc.get('sample', sample_token)
        while sample_token != '':
            my_sample = nusc.get('sample', sample_token)
            for i in range(len(my_sample['anns'])):
                my_annotation_token = my_sample['anns'][i]
                my_annotation_metadata = nusc.get('sample_annotation',
                                                  my_annotation_token)
                my_sample_token = my_annotation_metadata['sample_token']
                my_sample_temp = nusc.get('sample', my_sample_token)
                sample_data_cam = nusc.get('sample_data',
                                           my_sample_temp['data']['CAM_FRONT'])
                s = sample_data_cam['token']
                s += '_'
                s += my_annotation_metadata['token']
                s += '\n'
                f.write(s)
                count += 1
            sample_token = my_sample['next']
    f.close()
    print(count)
Example #26
def eval_main_old(root_path, version, eval_version, res_path, eval_set, output_dir):
    nusc = NuScenes(version=version, dataroot=str(root_path), verbose=False)

    cfg = config_factory(eval_version)
    nusc_eval = NuScenesEval(nusc, config=cfg, result_path=res_path, eval_set=eval_set,
                             output_dir=output_dir,
                             verbose=False)
    nusc_eval.main(render_curves=False)
Example #27
    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric='bbox',
                         result_name='img_bbox'):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'img_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(version=self.version,
                        dataroot=self.data_root,
                        verbose=False)
        eval_set_map = {
            'v1.0-mini': 'mini_val',
            'v1.0-trainval': 'val',
        }
        nusc_eval = NuScenesEval(nusc,
                                 config=self.eval_detection_configs,
                                 result_path=result_path,
                                 eval_set=eval_set_map[self.version],
                                 output_dir=output_dir,
                                 verbose=False)
        nusc_eval.main(render_curves=True)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
        detail = dict()
        metric_prefix = f'{result_name}_NuScenes'
        for name in self.CLASSES:
            for k, v in metrics['label_aps'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
            for k, v in metrics['label_tp_errors'][name].items():
                val = float('{:.4f}'.format(v))
                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
        for k, v in metrics['tp_errors'].items():
            val = float('{:.4f}'.format(v))
            detail['{}/{}'.format(metric_prefix,
                                  self.ErrNameMapping[k])] = val

        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
        return detail
Example #28
def main(args):
    print("Args:")
    print(vars(args))

    print("Device:")
    print(device)

    # prepare output directories
    if not os.path.exists(args.experiment_dir):
        os.mkdir(args.experiment_dir)

    if not os.path.exists(os.path.join(args.experiment_dir, 'weights')):
        os.mkdir(os.path.join(args.experiment_dir, 'weights'))

    # store the arguments for reference
    config_fname = f'config_for_runtime_{RUN_TIME:%Y-%m-%d %Hh%Mm%Ss}.json'
    with open(os.path.join(args.experiment_dir, config_fname),
              'w') as json_file:
        json.dump(vars(args), json_file)

    # load data
    nusc = NuScenes(version=args.version, dataroot=args.data_root)
    helper = PredictHelper(nusc)
    train_tokens = get_prediction_challenge_split(args.train_split_name,
                                                  dataroot=args.data_root)
    val_tokens = get_prediction_challenge_split(args.val_split_name,
                                                dataroot=args.data_root)

    # apply downsampling
    train_tokens = np.random.choice(
        train_tokens,
        int(len(train_tokens) / args.train_downsample_factor),
        replace=False)
    val_tokens = np.random.choice(
        val_tokens,
        int(len(val_tokens) / args.val_downsample_factor),
        replace=False)

    # create data loaders
    train_dataset = get_dataset(train_tokens, helper, args)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True)

    val_dataset = get_dataset(val_tokens, helper, args)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers,
                                shuffle=False)

    # run training
    train_epochs(train_dataloader=train_dataloader,
                 val_dataloader=val_dataloader,
                 args=args)
Example #29
    def _evaluate_single(self,
                         result_path,
                         logger=None,
                         metric="bbox",
                         result_name="pts_bbox"):
        """Evaluation for a single model in nuScenes protocol.

        Args:
            result_path (str): Path of the result file.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            metric (str): Metric name used for evaluation. Default: 'bbox'.
            result_name (str): Result name in the metric prefix.
                Default: 'pts_bbox'.

        Returns:
            dict: Dictionary of evaluation details.
        """
        from nuscenes import NuScenes
        from nuscenes.eval.detection.evaluate import NuScenesEval

        output_dir = osp.join(*osp.split(result_path)[:-1])
        nusc = NuScenes(version=self.version,
                        dataroot=self.data_root,
                        verbose=False)
        eval_set_map = {
            "v1.0-mini": "mini_val",
            "v1.0-trainval": "val",
        }
        nusc_eval = NuScenesEval(
            nusc,
            config=self.eval_detection_configs,
            result_path=result_path,
            eval_set=eval_set_map[self.version],
            output_dir=output_dir,
            verbose=False,
        )
        nusc_eval.main(render_curves=False)

        # record metrics
        metrics = mmcv.load(osp.join(output_dir, "metrics_summary.json"))
        detail = dict()
        metric_prefix = f"{result_name}_NuScenes"
        for name in self.CLASSES:
            for k, v in metrics["label_aps"][name].items():
                val = float("{:.4f}".format(v))
                detail["{}/{}_AP_dist_{}".format(metric_prefix, name, k)] = val
            for k, v in metrics["label_tp_errors"][name].items():
                val = float("{:.4f}".format(v))
                detail["{}/{}_{}".format(metric_prefix, name, k)] = val

        detail["{}/NDS".format(metric_prefix)] = metrics["nd_score"]
        detail["{}/mAP".format(metric_prefix)] = metrics["mean_ap"]
        return detail
Example #30
    def __init__(self, root='/datasets/nuscene/v1.0-mini', sampling_time=3, layer_names=None, colors=None):
        if layer_names is None:
            layer_names = ['drivable_area', 'road_segment', 'road_block',
                           'lane', 'ped_crossing', 'walkway', 'stop_line',
                           'carpark_area', 'road_divider', 'lane_divider']
        if colors is None:
            colors = [(255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255),]
        self.root = root
        self.nus = NuScenes('v1.0-mini', dataroot=self.root)
        self.scenes = self.nus.scene
        self.samples = self.nus.sample

        self.layer_names = layer_names
        self.colors = colors

        self.helper = PredictHelper(self.nus)

        self.seconds = sampling_time