Example #1
from nuscenes.map_expansion.map_api import NuScenesMap


def scenario_filter(scene_df, keep_scenarios=()):
    # NOTE: `full_path` (the nuScenes dataroot) is assumed to be defined in
    # the enclosing scope, as in the original snippet.
    nusc_map = NuScenesMap(dataroot=full_path,
                           map_name=scene_df.iloc[0]['scene_location'])

    keep_instance_token_list = []
    processed_instance_token_list = []
    for i, r in scene_df.iterrows():
        if r.instance_token in processed_instance_token_list:
            continue
        processed_instance_token_list.append(r.instance_token)
        instance_road_object_traj = r.past_instance_on_road_objects + [
            r.current_instance_on_road_objects
        ] + r.future_instance_on_road_objects
        for road_objects in instance_road_object_traj:
            # Keep agents (ados) whose trajectory passes through an intersection.
            if 'intersection' in keep_scenarios:
                if road_objects['road_segment'] != "":
                    ro = nusc_map.get('road_segment',
                                      road_objects['road_segment'])
                    if ro['is_intersection']:
                        keep_instance_token_list.append(r.instance_token)
                        break  # already kept; no need to scan the rest of the trajectory

    # Exact token membership via `isin`; a regex `str.contains` would match
    # every row whenever the keep list is empty.
    filtered_df = scene_df[scene_df.instance_token.isin(
        keep_instance_token_list)].reset_index(drop=True)
    return filtered_df
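
A minimal usage sketch (hedged: `full_path` and the loader below are hypothetical; `scene_df` must carry the columns the function reads, i.e. `instance_token`, `scene_location`, and the `*_instance_on_road_objects` trajectory columns):

full_path = '/data/sets/nuscenes'  # hypothetical dataroot
scene_df = load_scene_dataframe('scene-0001')  # hypothetical loader
filtered_df = scenario_filter(scene_df, keep_scenarios=('intersection',))
print(f'{len(scene_df)} rows -> {len(filtered_df)} rows after intersection filtering')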
Example #2
from typing import List, Tuple

import torch
from nuscenes.map_expansion.map_api import NuScenesMap


def nuscenes_map_to_line_representation(
        nusc_map: NuScenesMap,
        patch: List[float],
        realign: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
    record = nusc_map.get_records_in_patch(patch, ["drivable_area"])
    pt1, pt2 = [], []
    for da_token in record["drivable_area"]:
        da = nusc_map.get("drivable_area", da_token)
        for poly in map(nusc_map.extract_polygon, da["polygon_tokens"]):
            p1, p2 = get_edges_of_polygon_in_patch(poly, patch)
            if len(p1) > 0 and len(p2) > 0:
                p1, p2 = preprocess_map_edges(
                    torch.as_tensor(p1),
                    torch.as_tensor(p2),
                    passes=10,
                    tol=0.1,
                )
                pt1.append(p1)
                pt2.append(p2)
    pt1, pt2 = torch.cat(pt1), torch.cat(pt2)
    if realign:
        pt1, pt2 = realign_map_edges(pt1, pt2, 0.0)

    # Pairwise distances between segment midpoints; the diagonal is masked so
    # a point never counts as close to itself.
    centers = (pt1 + pt2) / 2
    dist = torch.cdist(centers, centers)
    dist.fill_diagonal_(1e12)
    very_close = (dist < 0.01).any(dim=-1)

    # Drop every segment whose midpoint (nearly) coincides with another one.
    keep = ~very_close
    pt1, pt2 = pt1[keep], pt2[keep]

    return pt1, pt2
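
A minimal usage sketch, assuming a local nuScenes dataroot and an arbitrary query window (both values are made up for illustration; the patch format is (x_min, y_min, x_max, y_max), as expected by get_records_in_patch):

nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes',  # hypothetical path
                       map_name='singapore-onenorth')
patch = [300.0, 1000.0, 400.0, 1100.0]  # arbitrary 100 m x 100 m window
pt1, pt2 = nuscenes_map_to_line_representation(nusc_map, patch)
print(pt1.shape, pt2.shape)  # paired edge-endpoint tensors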
Example #3

# Fragment: the opening lines of this snippet were lost. The first statement
# is reconstructed assuming the usual symmetric patch around the ego pose;
# `ego_pose`, `patch_radius`, `layer_names`, `nusc_map`, and `poserecord`
# come from the missing context.
import numpy as np

box_coords = (
    ego_pose[0] - patch_radius,
    ego_pose[1] - patch_radius,
    ego_pose[0] + patch_radius,
    ego_pose[1] + patch_radius,
)
records_in_patch = nusc_map.get_records_in_patch(
    box_coords,
    layer_names,
    'intersect',
)

near_plane = 1e-8
# Retrieve and render each record.
taken_already = []
index = -1
for layer_name in layer_names:
    for token in records_in_patch[layer_name]:
        record = nusc_map.get(layer_name, token)

        line = nusc_map.extract_line(record['line_token'])
        if line.is_empty:  # Skip lines without nodes.
            continue
        xs, ys = line.xy
        # Record pose as a 3x1 column vector (x, y, z).
        points = np.array([
            [record['pose']['tx']],
            [record['pose']['ty']],
            [record['pose']['tz']],
        ])
        # Transform into the ego vehicle frame for the timestamp of the image.
        # (The original line was truncated; `.reshape((-1, 1))` follows the
        # standard devkit pattern.)
        points = points - \
            np.array(poserecord['translation']).reshape((-1, 1))
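
In the nuScenes devkit this translate-then-rotate pattern (cf. NuScenesExplorer.map_pointcloud_to_image) typically continues by rotating into the ego frame and then projecting into the image; the sketch below is only an assumption about the fragment's missing tail, with `cam_intrinsic` a hypothetical 3x3 calibration matrix:

from pyquaternion import Quaternion
from nuscenes.utils.geometry_utils import view_points

# Finish the global->ego transform: rotate by the inverse ego rotation.
points = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, points)
# An ego->camera transform (via the calibrated_sensor record) would normally
# precede the projection; `cam_intrinsic` is hypothetical here.
# points = view_points(points, cam_intrinsic, normalize=True)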