Example No. 1
def scenario_filter(scene_df, keep_scenarios=None):
    if keep_scenarios is None:  # avoid a mutable default argument
        keep_scenarios = []
    nusc_map = NuScenesMap(dataroot=full_path,
                           map_name=scene_df.iloc[0]['scene_location'])

    keep_instance_token_list = []
    processed_instance_token_list = []
    for i, r in scene_df.iterrows():
        if r.instance_token in processed_instance_token_list:
            continue
        processed_instance_token_list.append(r.instance_token)
        instance_road_object_traj = r.past_instance_on_road_objects + [
            r.current_instance_on_road_objects
        ] + r.future_instance_on_road_objects
        for road_objects in instance_road_object_traj:
            # keep ados that pass through intersections #
            if 'intersection' in keep_scenarios:
                if road_objects['road_segment'] != "":
                    ro = nusc_map.get('road_segment',
                                      road_objects['road_segment'])
                    if ro['is_intersection']:
                        keep_instance_token_list.append(r.instance_token)
                        break  # token already kept; skip this agent's remaining poses

    # isin() does exact token matching and handles an empty keep list correctly,
    # unlike str.contains('|'.join(...)), which matches everything when the list is empty.
    filtered_df = scene_df[scene_df.instance_token.isin(
        keep_instance_token_list)]
    return filtered_df
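
A hypothetical call, for illustration only: it assumes a per-scene DataFrame with the instance and road-object columns used above, and that full_path already points at the nuScenes dataroot.

filtered = scenario_filter(scene_df, keep_scenarios=['intersection'])
print(len(filtered), 'rows kept')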
Example No. 2
def load_map_data(dataroot, location):

    # Load the NuScenes map object
    nusc_map = NuScenesMap(dataroot, location)

    map_data = OrderedDict()
    for layer in nusc_utils.STATIC_CLASSES:

        # Retrieve all data associated with the current layer
        records = getattr(nusc_map, layer)
        polygons = list()

        # Drivable area records can contain multiple polygons
        if layer == 'drivable_area':
            for record in records:

                # Convert each entry in the record into a shapely object
                for token in record['polygon_tokens']:
                    poly = nusc_map.extract_polygon(token)
                    if poly.is_valid:
                        polygons.append(poly)
        else:
            for record in records:

                # Convert each entry in the record into a shapely object
                poly = nusc_map.extract_polygon(record['polygon_token'])
                if poly.is_valid:
                    polygons.append(poly)

        # Store as an R-Tree for fast intersection queries
        map_data[layer] = STRtree(polygons)

    return map_data
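
A minimal usage sketch of the returned R-trees. It assumes Shapely 2.x, where STRtree.query returns integer indices of candidate geometries whose bounds intersect the query (Shapely 1.x returned the geometries themselves); the dataroot path is a placeholder.

from shapely.geometry import Point

map_data = load_map_data('/data/sets/nuscenes', 'boston-seaport')
drivable = map_data['drivable_area']
hits = drivable.query(Point(600.0, 1600.0).buffer(5.0))
print(len(hits), 'candidate drivable-area polygons near the query point')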
Example No. 3
 def render_scene(self, idx):
     sample = self.samples[idx]
     sample_token = sample['token']
     scene = self.nus.get('scene', sample['scene_token'])
     log = self.nus.get('log', scene['log_token'])
     location = log['location']
     nus_map = NuScenesMap(dataroot=self.root, map_name=location)
     layer_names = ['road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line', 'carpark_area']
     camera_channel = 'CAM_FRONT'
     nus_map.render_map_in_image(self.nus, sample_token, layer_names=layer_names, camera_channel=camera_channel)
Example No. 4
    def get_map_info(self, ego_pos, ego_yaw_deg, nusc_map: NuScenesMap,
                     sensing_patch):

        sb = sensing_patch.bounds
        sensing_patch_center = [(sb[0] + sb[2]) / 2, (sb[1] + sb[3]) / 2]
        sensing_patch_length = sb[3] - sb[1]
        sensing_patch_width = sb[2] - sb[0]

        map_info = {}

        #### Get layer information ####

        #### Intersections ####
        road_segment_record_list = nusc_map.get_layer_record_in_patch(
            patch_box=(sensing_patch_center[0], sensing_patch_center[1],
                       sensing_patch_length, sensing_patch_width),
            patch_angle=-ego_yaw_deg,
            layer_name='road_segment',
            rot_center=(ego_pos[0], ego_pos[1]))

        intersection_data = []
        for r in road_segment_record_list:
            if r['is_intersection']:
                bd = nusc_map.get_bounds('road_segment', r['token'])

                intersection_data.append({'record': r, 'bounding_box': bd})

        map_info['intersection'] = intersection_data

        #### STOP areas ####
        stop_line_record_list = nusc_map.get_layer_record_in_patch(
            patch_box=(sensing_patch_center[0], sensing_patch_center[1],
                       sensing_patch_length, sensing_patch_width),
            patch_angle=-ego_yaw_deg,
            layer_name='stop_line',
            rot_center=(ego_pos[0], ego_pos[1]))

        stop_line_data = []
        for r in stop_line_record_list:
            bd = nusc_map.get_bounds('stop_line', r['token'])

            stop_line_data.append({'record': r, 'bounding_box': bd})

        map_info['stop_line'] = stop_line_data

        #### Get center lane information ####

        closest_lane_data, incoming_lane_data, outgoing_lane_data = self.get_closest_lane(
            ego_pos, nusc_map)

        map_info['closest_lane'] = closest_lane_data
        map_info['incoming_lanes'] = incoming_lane_data
        map_info['outgoing_lanes'] = outgoing_lane_data

        return map_info
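
A hypothetical read of the returned structure, from inside the same class. It assumes the devkit's get_bounds returns an (xmin, ymin, xmax, ymax) tuple, as the bounding boxes stored above suggest.

map_info = self.get_map_info(ego_pos, ego_yaw_deg, nusc_map, sensing_patch)
for item in map_info['intersection']:
    xmin, ymin, xmax, ymax = item['bounding_box']
    print(item['record']['token'], (xmax - xmin) * (ymax - ymin))  # token and box area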
Example No. 5
def drop_disconnected_lanes(nusc_map: NuScenesMap) -> NuScenesMap:
    """
    Remove any disconnected lanes.
    Note: This function is currently not used and we do not recommend using it. Some lanes that we do not drive on are
    disconnected from the other lanes. Removing them would create a single connected graph. It also removes
    meaningful information, which is why we do not drop these.
    :param nusc_map: The NuScenesMap instance of a particular map location.
    :return: The cleaned NuScenesMap instance.
    """

    # Get disconnected lanes.
    disconnected = get_disconnected_lanes(nusc_map)

    # Remove lane.
    nusc_map.lane = [
        lane for lane in nusc_map.lane if lane['token'] not in disconnected
    ]

    # Remove lane_connector.
    nusc_map.lane_connector = [
        lane for lane in nusc_map.lane_connector
        if lane['token'] not in disconnected
    ]

    # Remove connectivity entries.
    for lane_token in disconnected:
        if lane_token in nusc_map.connectivity:
            del nusc_map.connectivity[lane_token]

    # Remove arcline_path_3.
    for lane_token in disconnected:
        if lane_token in nusc_map.arcline_path_3:
            del nusc_map.arcline_path_3[lane_token]

    # Remove connectivity references.
    empty_connectivity = []
    for lane_token, connectivity in nusc_map.connectivity.items():
        connectivity['incoming'] = [
            i for i in connectivity['incoming'] if i not in disconnected
        ]
        connectivity['outgoing'] = [
            o for o in connectivity['outgoing'] if o not in disconnected
        ]
        if len(connectivity['incoming']) + len(connectivity['outgoing']) == 0:
            empty_connectivity.append(lane_token)
    for lane_token in empty_connectivity:
        del nusc_map.connectivity[lane_token]

    # To fix the map class, we need to update some indices.
    nusc_map._make_token2ind()

    return nusc_map
Example No. 6
    def setUp(self):
        """ Initialize the map for each location. """

        self.nusc_maps = dict()
        for map_name in locations:
            # Load map.
            nusc_map = NuScenesMap(map_name=map_name,
                                   dataroot=os.environ['NUSCENES'])

            # Render for debugging.
            if self.render:
                nusc_map.render_layers(['lane'], figsize=1)
                plt.show()

            self.nusc_maps[map_name] = nusc_map
Example No. 7
def populate_lane_dict(
        nusc_map: NuScenesMap, root: ET.Element,
        data: Dict[str, Iterable[Any]]) -> Dict[str, Iterable[int]]:
    """
    Return a map that links a nuscenes 'way' token to all the nodes that are a part of that way.
    Also adds all these nodes to the xml map.
    """
    lane_dict = defaultdict(list)

    # Map x,y value of node to the new(argo) node id in the xml
    present_nodes = {}  # k:tuple, v: node_id
    global_id = 0  # New id for nodes in the xml

    # Loop over all lanes in the json to populate the lane to node dictionary
    for way in data["lane"] + data["lane_connector"]:
        lane_record = nusc_map.get_arcline_path(
            way["token"])  # get arcline associated with lane
        poses = arcline_path_utils.discretize_lane(
            lane_record, LANE_DISCRETIZATION_RESOLUTION_M
        )  # discretize the lane to given resolution
        for pose in poses:
            currNode = (pose[0], pose[1])
            if currNode not in present_nodes:
                node = ET.SubElement(root, "node")
                node.set("id", str(global_id))
                present_nodes[currNode] = global_id
                global_id += 1
                node.set("x", str(pose[0]))
                node.set("y", str(pose[1]))
            lane_dict[way["token"]].append(present_nodes[currNode])
    return lane_dict
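
The two devkit calls at the heart of this loop can be exercised in isolation; a minimal sketch (the dataroot is a placeholder):

from nuscenes.map_expansion import arcline_path_utils
from nuscenes.map_expansion.map_api import NuScenesMap

nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='singapore-onenorth')
lane_record = nusc_map.get_arcline_path(nusc_map.lane[0]['token'])
poses = arcline_path_utils.discretize_lane(lane_record, resolution_meters=0.5)
print(poses[0])  # an (x, y, yaw) pose on the lane centerline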
Example No. 8
def convert_map(args: argparse.Namespace) -> None:
    """
    Core function of this script. Loads each json file, calls the helper functions to extract relevant information
    from the json files, and finally creates xml files in the argoverse format.
    """

    # Load json file. Loop over all the json files in directory
    for filename in [
            "boston-seaport",
            "singapore-hollandvillage",
            "singapore-onenorth",
            "singapore-queenstown",
    ]:
        print(filename)
        with open(f"/{args.nuscenes_dir}/maps/expansion/{filename}.json") as f:
            data = json.load(f)
        nusc_map = NuScenesMap(dataroot=f"/{args.nuscenes_dir}",
                               map_name=filename)

        # Create new xml ETree
        root = ET.Element("NuScenesMap")

        # Map lane token of nuscenes to node_tokens that form the centerline
        lane_dict = populate_lane_dict(nusc_map, root,
                                       data)  # k: token, v: list of node_token

        poly_dict = populate_polys(data)

        create_lanes_xml(nusc_map, root, data, filename, args.argo_dir,
                         lane_dict, poly_dict)
Example No. 9
def get_drivable_area_mask(nusc_map: NuScenesMap, scale_h: int = 2, scale_w: int = 2) -> np.ndarray:
    if nusc_map.map_name == 'singapore-onenorth':
        map_dims = [1586, 2026]
    elif nusc_map.map_name == 'singapore-hollandvillage':
        map_dims = [2810, 3000]
    elif nusc_map.map_name == 'singapore-queenstown':
        map_dims = [3230, 3688]
    elif nusc_map.map_name == 'boston-seaport':
        map_dims = [2980, 2120]
    else:
        raise ValueError(f'Invalid map: {nusc_map.map_name}')

    patch_box = [map_dims[0] / 2, map_dims[1] / 2, map_dims[1], map_dims[0]]
    
    patch = get_patch(patch_box, 0.0)
    
    map_scale = 2
    
    canvas_size = (patch_box[2]*scale_h, patch_box[3]*scale_w)

    map_mask = np.zeros(canvas_size, np.uint8)
    records = nusc_map.drivable_area
    for record in records:
        polygons = [nusc_map.extract_polygon(polygon_token) for polygon_token in record['polygon_tokens']]
        
        for polygon in polygons:
            new_polygon = polygon.intersection(patch)
            new_polygon = affinity.scale(new_polygon, xfact=map_scale, yfact=map_scale, origin=(0, 0))
            if not new_polygon.is_empty:
                if new_polygon.geom_type == 'Polygon':
                    new_polygon = MultiPolygon([new_polygon])
                
                map_mask = mask_for_polygons(new_polygon, map_mask)
    
    return map_mask
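
Hypothetical usage, visualizing the mask (assumes matplotlib and a local dataroot):

import matplotlib.pyplot as plt
from nuscenes.map_expansion.map_api import NuScenesMap

nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='boston-seaport')
mask = get_drivable_area_mask(nusc_map, scale_h=2, scale_w=2)
plt.imshow(mask, origin='lower', cmap='gray')
plt.show()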
Example No. 10
def get_egoposes_on_drivable_ratio(nusc: NuScenes, nusc_map: NuScenesMap,
                                   scene_token: str) -> float:
    """
    Get the ratio of ego poses on the drivable area.
    :param nusc: A NuScenes instance.
    :param nusc_map: The NuScenesMap instance of a particular map location.
    :param scene_token: The token of the current scene.
    :return: The ratio of poses that fall on the drivable area.
    """

    # Go through each sample in the scene.
    sample_tokens = nusc.field2token('sample', 'scene_token', scene_token)
    poses_all = 0
    poses_valid = 0
    for sample_token in sample_tokens:

        # Poses are associated with the sample_data. Here we use the lidar sample_data.
        sample_record = nusc.get('sample', sample_token)
        sample_data_record = nusc.get('sample_data',
                                      sample_record['data']['LIDAR_TOP'])
        pose_record = nusc.get('ego_pose',
                               sample_data_record['ego_pose_token'])

        # Check if the ego pose is on the drivable area.
        ego_pose = pose_record['translation'][:2]
        record = nusc_map.record_on_point(ego_pose[0], ego_pose[1],
                                          'drivable_area')
        if len(record) > 0:
            poses_valid += 1
        poses_all += 1
    ratio_valid = poses_valid / poses_all

    return ratio_valid
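
The underlying point query can be sketched on its own: record_on_point returns the token of the layer record at that point, or an empty string when there is none, which is why the length check above suffices.

record = nusc_map.record_on_point(600.0, 1600.0, 'drivable_area')
print('on drivable area:', len(record) > 0)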
Example No. 11
def nuscenes_map_to_line_representation(
        nusc_map: NuScenesMap,
        patch: List[float],
        realign: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
    record = nusc_map.get_records_in_patch(patch, ["drivable_area"])
    pt1, pt2 = [], []
    for da_token in record["drivable_area"]:
        da = nusc_map.get("drivable_area", da_token)
        for poly in map(nusc_map.extract_polygon, da["polygon_tokens"]):
            p1, p2 = get_edges_of_polygon_in_patch(poly, patch)
            if len(p1) > 0 and len(p2) > 0:
                p1, p2 = preprocess_map_edges(
                    torch.as_tensor(p1),
                    torch.as_tensor(p2),
                    passes=10,
                    tol=0.1,
                )
                pt1.append(p1)
                pt2.append(p2)
    pt1, pt2 = torch.cat(pt1), torch.cat(pt2)
    if realign:
        pt1, pt2 = realign_map_edges(pt1, pt2, 0.0)

    centers = (pt1 + pt2) / 2
    centers1 = centers.unsqueeze(1)
    centers2 = centers.unsqueeze(0)
    dist = (centers2 - centers1).pow(2).sum(dim=-1).sqrt()
    for i in range(centers.size(0)):
        dist[i, i] = 1e12
    very_close = (dist < 0.01).any(dim=-1)

    to_remove = []
    for i, c in enumerate(very_close):
        if c:
            to_remove.append(i)

    for i, rem in enumerate(to_remove):
        rem = rem - i
        pt1 = torch.cat([pt1[:rem], pt1[rem + 1:]])
        pt2 = torch.cat([pt2[:rem], pt2[rem + 1:]])

    return pt1, pt2
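
The index-shifting removal loop above can be written more directly with boolean masking; a behavior-equivalent sketch:

keep = ~very_close
pt1, pt2 = pt1[keep], pt2[keep]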
Example No. 12
def get_lanes_in_radius(x: float, y: float, radius: float,
                        discretization_meters: float,
                        map_api: NuScenesMap) -> Dict[str, List[Tuple[float, float, float]]]:
    """
    Retrieves all the lanes and lane connectors in a radius of the query point.
    :param x: x-coordinate of point in global coordinates.
    :param y: y-coordinate of point in global coordinates.
    :param radius: Any lanes within radius meters of the (x, y) point will be returned.
    :param discretization_meters: How finely to discretize the lane. If 1 is given, for example,
        the lane will be discretized into a list of points such that the distances between points
        is approximately 1 meter.
    :param map_api: The NuScenesMap instance to query.
    :return: Mapping from lane id to list of coordinate tuples in global coordinate system.
    """

    lanes = map_api.get_records_in_radius(x, y, radius, ['lane', 'lane_connector'])
    lanes = lanes['lane'] + lanes['lane_connector']
    lanes = map_api.discretize_lanes(lanes, discretization_meters)

    return lanes
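
A hypothetical query; the coordinates and dataroot are placeholders:

nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='boston-seaport')
lanes = get_lanes_in_radius(x=600.0, y=1600.0, radius=50.0,
                            discretization_meters=1.0, map_api=nusc_map)
for lane_id, poses in lanes.items():
    print(lane_id, len(poses))  # each pose is an (x, y, yaw) tuple in global coordinates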
Example No. 13
def get_nusc_maps(dataroot):
    nusc_maps = {
        map_name: NuScenesMap(dataroot=dataroot, map_name=map_name)
        for map_name in [
            "singapore-hollandvillage",
            "singapore-queenstown",
            "boston-seaport",
            "singapore-onenorth",
        ]
    }
    return nusc_maps
Example No. 14
    def render_map(self, idx, combined=True):
        sample = self.samples[idx]
        sample_token = sample['token']

        sample_data_lidar = self.nus.get('sample_data',
                                         sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose',
                                sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper,
                                             layer_names=self.layer_names,
                                             colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=self.seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(
            ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation,
                                               sample_token)

        if combined:
            plt.subplot(1, 2, 1)
            plt.title("combined map")
            plt.imshow(map_img)
            plt.subplot(1, 2, 2)
            plt.title("agent")
            plt.imshow(agent_mask)
            plt.show()
        else:
            num_labels = len(self.layer_names)
            num_rows = num_labels // 3
            fig, ax = plt.subplots(num_rows, 3, figsize=(10, 3 * num_rows))
            for row in range(num_rows):
                for col in range(3):
                    num = 3 * row + col
                    if num == num_labels - 1:
                        break
                    ax[row][col].set_title(self.layer_names[num])
                    ax[row][col].imshow(map_masks[num])
            plt.show()
Example No. 15
def load_all_maps(helper: PredictHelper,
                  verbose: bool = False) -> Dict[str, NuScenesMap]:
    """
    Loads all NuScenesMap instances for all available maps.
    :param helper: Instance of PredictHelper.
    :param verbose: Whether to print to stdout.
    :return: Mapping from map-name to the NuScenesMap api instance.
    """
    dataroot = helper.data.dataroot
    maps = {}

    for map_name in locations:
        if verbose:
            print(f'static_layers.py - Loading Map: {map_name}')

        maps[map_name] = NuScenesMap(dataroot, map_name=map_name)

    return maps
Example No. 16
def load_all_maps(helper: PredictHelper) -> Dict[str, NuScenesMap]:
    """
    Loads all NuScenesMap instances for all available maps.
    :param helper: Instance of PredictHelper.
    :return: Mapping from map-name to the NuScenesMap api instance.
    """
    dataroot = helper.data.dataroot

    json_files = filter(lambda f: "json" in f and "prediction_scenes" not in f,
                        os.listdir(os.path.join(dataroot, "maps")))
    maps = {}

    for map_file in json_files:

        map_name = str(map_file.split(".")[0])

        print(f'static_layers.py - Loading Map: {map_name}')

        maps[map_name] = NuScenesMap(dataroot, map_name=map_name)

    return maps
Example No. 17
    def __init__(self,
                 config: dict = None,
                 helper: PredictHelper = None,
                 py_logger=None,
                 tb_logger=None):
        config = config or {}  # avoid a mutable default argument
        self.na_config = Configuration({
            'debug': False,
            'pred_horizon': 6,
            'obs_horizon': 2,
            'freq': 2,
            'load_dataset': False,
            'version': 'v1.0-mini',
            'py_logging_path': None,
            'tb_logging_path': None
        })

        self.na_config.update(config)
        self.name = None

        self.py_logger = py_logger
        self.tb_logger = tb_logger

        self.dataroot = None
        if 'mini' in self.na_config['version']:
            self.dataroot = mini_path
        else:
            self.dataroot = full_path
        if self.py_logger is None and self.na_config[
                'py_logging_path'] is not None:
            print(f"py logging path: {self.na_config['py_logging_path']}")
            self.py_logger = logger
            self.py_logger.add(self.na_config['py_logging_path'] + "/log.txt")

        #     self.py_logger = logging.getLogger(self.name)
        #     print(f"py logging path: {self.na_config['py_logging_path']}")
        #     self.py_logger.addHandler(logging.FileHandler(os.path.join(self.na_config['py_logging_path'], 'log.txt'),  mode='w'))
        # if self.py_logger is not None:
        #     self.py_logger.propagate = False

        if self.tb_logger is None and self.na_config[
                'tb_logging_path'] is not None:
            self.tb_logger = SummaryWriter(
                log_dir=os.path.join(self.na_config['tb_logging_path']))

        self.helper = helper
        self.nusc = None
        if self.helper is not None:
            self.nusc = self.helper.data
        else:
            if self.dataroot is not None and self.na_config[
                    'version'] is not None and self.na_config[
                        'load_dataset'] and self.helper is None:
                self.nusc = NuScenes(dataroot=self.dataroot,
                                     version=self.na_config['version'],
                                     verbose=True)
                self.helper = PredictHelper(self.nusc)

        #### Initialize Map ####
        self.nusc_map_dict = {
            'boston-seaport':
            NuScenesMap(dataroot=self.dataroot, map_name='boston-seaport'),
            'singapore-hollandvillage':
            NuScenesMap(dataroot=self.dataroot,
                        map_name='singapore-hollandvillage'),
            'singapore-onenorth':
            NuScenesMap(dataroot=self.dataroot, map_name='singapore-onenorth'),
            'singapore-queenstown':
            NuScenesMap(dataroot=self.dataroot,
                        map_name='singapore-queenstown'),
        }

        #### Initialize CAN ####
        self.nusc_can = NuScenesCanBus(dataroot=self.dataroot)

        ####
        self.all_info = {'config': self.na_config}
Example No. 18
    def get_info(self,
                 sample_token: str,
                 ego_pos: np.ndarray = None,
                 ego_quat: np.ndarray = None,
                 instance_based=False):

        sample = self.nusc.get('sample', sample_token)
        scene = self.nusc.get('scene', sample['scene_token'])
        scene_log = self.nusc.get('log', scene['log_token'])
        nusc_map = NuScenesMap(dataroot=self.dataroot,
                               map_name=scene_log['location'])
        sample_data = self.nusc.get('sample_data', sample['data']['CAM_FRONT'])

        if ego_pos is None:
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            ego_pos = np.array(ego_pose['translation'])
        if ego_quat is None:
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            ego_quat = np.array(ego_pose['rotation'])

        ego_yaw = Quaternion(ego_quat)
        ego_yaw = quaternion_yaw(ego_yaw)
        ego_yaw_rad = angle_of_rotation(ego_yaw)
        ego_yaw_degrees = np.rad2deg(ego_yaw_rad)

        ego_on_road_objects = None
        if self.agent_road_objects:
            ego_on_road_objects = self.get_road_objects(ego_pos, nusc_map)

        ego_info = {
            'translation': ego_pos,
            'rotation_deg': ego_yaw_degrees,
            'rotation_quat': ego_quat,
            'velocity': 0,
            'acceleration': 0,
            'heading_change_rate': 0,
            'road_objects': ego_on_road_objects
        }

        #### define patch ####
        sensing_patch_width = self.config['sensing_patch_size'][0]
        sensing_patch_length = self.config['sensing_patch_size'][1]

        # patch_center_before_rotation = np.array([ego_pos[0],
        #                                          ego_pos[1] + sensing_patch_length/2])

        # lower_left_before_rotation = np.array([ego_pos[0] - sensing_patch_width/2,
        #                                        ego_pos[1] - sensing_patch_length/2])

        # sensing_patch = self.get_patch_coord(patch_box=(patch_center_before_rotation[0],
        #                                                 patch_center_before_rotation[1],
        #                                                 sensing_patch_length,
        #                                                 sensing_patch_width),
        #                                      rotate_center=(ego_pos[0], ego_pos[1]),
        #                                      patch_angle=-ego_yaw_degrees)

        patch_center_before_rotation = np.array([ego_pos[0], ego_pos[1]])

        lower_left_before_rotation = np.array([
            ego_pos[0] - sensing_patch_width / 2,
            ego_pos[1] - sensing_patch_length / 2
        ])

        sensing_patch_coord_before_rotation = [
            ego_pos[0] - sensing_patch_width / 2,
            ego_pos[1] - sensing_patch_length / 2,
            ego_pos[0] + sensing_patch_width / 2,
            ego_pos[1] + sensing_patch_length / 2
        ]

        ## generate sensing patch mesh
        x = np.arange(sensing_patch_coord_before_rotation[0],
                      sensing_patch_coord_before_rotation[2], 0.2)
        y = np.arange(sensing_patch_coord_before_rotation[1],
                      sensing_patch_coord_before_rotation[3], 0.2)

        X, Y = np.meshgrid(x, y)
        ### apply rotation
        X, Y = rotate_mesh2D(pos=ego_pos,
                             rot_rad=ego_yaw_rad,
                             X=X,
                             Y=Y,
                             frame='current')

        ## generate sensing patch shapely polygon
        sensing_patch = self.get_patch_coord(
            patch_box=(patch_center_before_rotation[0],
                       patch_center_before_rotation[1], sensing_patch_length,
                       sensing_patch_width),
            rotate_center=(ego_pos[0], ego_pos[1]),
            patch_angle=-ego_yaw_degrees)

        sensing_patch_info = {'mesh': [X, Y], 'polygon': sensing_patch}
        # get agent_info in patch
        agent_info = self.get_agent_info(sample, sensing_patch, nusc_map)

        # get map info in patch
        map_info = self.get_map_info(ego_pos, ego_yaw_degrees, nusc_map,
                                     sensing_patch)

        # get canbus info
        can_info = None
        if not instance_based:
            can_info = self.get_can_info(scene['name'])

        sensing_info = {
            'sample_token': sample_token,
            'sensing_patch': sensing_patch_info,
            'ego_info': ego_info,
            'agent_info': agent_info,
            'map_info': map_info,
            'can_info': can_info
        }

        return sensing_info
Example No. 19
    def __getitem__(self, idx):
        # 1. past_agents_traj : (Num obv agents in batch_i X 20 X 2)
        # 2. past_agents_traj_len : (Num obv agents in batch_i, )
        # 3. future_agents_traj : (Num pred agents in batch_i X 20 X 2)
        # 4. future_agents_traj_len : (Num pred agents in batch_i, )
        # 5. future_agent_masks : (Num obv agents in batch_i)
        # 6. decode_rel_pos: (Num pred agents in batch_i X 2)
        # 7. decode_start_pos: (Num pred agents in batch_i X 2)
        # 8. map_image : (3 X 224 X 224)
        # 9. scene ID: (string)

        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data', sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose', sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        # timestamp
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)

        # 3. Generate Agent Trajectory
        annotation_tokens = sample['anns']
        num_agent = len(annotation_tokens)
        agents = []
        for ans_token in annotation_tokens:
            agent_states = []
            agent = self.nus.get('sample_annotation', ans_token)
            instance_token = agent['instance_token']

            # agent trajectory (global frame)
            xy_global = agent['translation']
            past_xy_global = self.helper.get_past_for_agent(
                instance_token, sample_token, seconds=self.seconds, in_agent_frame=False)
            future_xy_global = self.helper.get_future_for_agent(
                instance_token, sample_token, seconds=self.seconds, in_agent_frame=False)

            # trajectory in the local (ego) frame
            xy_local = convert_global_coords_to_local(np.array([xy_global]), ego_pose_xy, ego_pose_rotation)
            past_xy_local = convert_global_coords_to_local(past_xy_global, ego_pose_xy, ego_pose_rotation)
            future_xy_local = convert_global_coords_to_local(future_xy_global, ego_pose_xy, ego_pose_rotation)

            # agent motion state
            rot = agent['rotation']
            vel = self.helper.get_velocity_for_agent(instance_token, sample_token)
            accel = self.helper.get_acceleration_for_agent(instance_token, sample_token)
            yaw_rate = self.helper.get_heading_change_rate_for_agent(instance_token, sample_token)

            agent_states = {'present_pos': xy_global, 'past_pos': past_xy_global, 'future_pos': future_xy_global,
                            'rot': rot, 'vel': vel, 'accel': accel, 'yaw_rate': yaw_rate,
                            'present_local_xy': xy_local, 'past_local_xy': past_xy_local,
                            'future_local_xy': future_xy_local}

            agents.append(agent_states)

        return map_masks, agents
Example No. 20
    def __getitem__(self, idx):
        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data',
                                         sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose',
                                sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        # timestamp
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper,
                                             layer_names=self.layer_names,
                                             colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=self.agent_seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(
            ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation,
                                               sample_token)

        lane_tokens = list(lanes.keys())
        num_lanes = len(lane_tokens)
        lanes_disc = [np.array(lanes[token])[:, :2] for token in lane_tokens]
        lanes_arc = []
        for seq in lanes_disc:
            seq_len = len(seq)
            lane_middle = seq[seq_len // 2]
            opt_middle = (seq[0] + seq[-1]) / 2
            lane_h = np.linalg.norm(lane_middle - opt_middle)
            lane_w = np.linalg.norm(seq[-1] - seq[0])
            if lane_h == 0:
                lane_h = 0.001  # guard against division by zero on straight lanes
            curve = lane_h / 2 + lane_w**2 / (8 * lane_h)
            lanes_arc.append(curve)
        lanes_arc = np.array(lanes_arc)

        # 3. Generate Agent Trajectory
        annotation_tokens = sample['anns']
        num_agent = len(annotation_tokens)
        agents = []
        for ans_token in annotation_tokens:
            agent_states = []
            agent = self.nus.get('sample_annotation', ans_token)
            instance_token = agent['instance_token']

            # agent trajectory (global frame)
            xy_global = agent['translation']
            past_xy_global = self.helper.get_past_for_agent(
                instance_token,
                sample_token,
                seconds=self.seconds,
                in_agent_frame=False)
            future_xy_global = self.helper.get_future_for_agent(
                instance_token,
                sample_token,
                seconds=self.seconds,
                in_agent_frame=False)

            # path curvature
            agents_arc = []
            for seq in [past_xy_global, future_xy_global]:
                seq_len = len(seq)
                if seq_len < 2:
                    continue
                path_middle = seq[seq_len // 2]
                opt_middle = (seq[0] + seq[-1]) / 2
                path_h = np.linalg.norm(path_middle - opt_middle)
                path_w = np.linalg.norm(seq[-1] - seq[0])
                if path_h == 0:
                    path_h = 0.001
                if path_w == 0:
                    path_w = 0.001
                curve = path_h / 2 + (path_w * path_w) / (8 * path_h)
                agents_arc.append(curve)
            agents_arc = np.array(agents_arc)

            # trajectory in the local (ego) frame
            xy_local = convert_global_coords_to_local(
                np.array([xy_global[:2]]), ego_pose_xy, ego_pose_rotation)
            if len(past_xy_global) < 1:
                past_xy_global = np.append(past_xy_global, [0., 0.])
            if len(future_xy_global) < 1:
                future_xy_global = np.append(future_xy_global, [0., 0.])
            past_xy_local = convert_global_coords_to_local(
                past_xy_global, ego_pose_xy, ego_pose_rotation)
            future_xy_local = convert_global_coords_to_local(
                future_xy_global, ego_pose_xy, ego_pose_rotation)

            # agent motion state
            rot = agent['rotation']
            vel = self.helper.get_velocity_for_agent(instance_token,
                                                     sample_token)
            accel = self.helper.get_acceleration_for_agent(
                instance_token, sample_token)
            yaw_rate = self.helper.get_heading_change_rate_for_agent(
                instance_token, sample_token)

            agent_states = {
                'present_pos': xy_global,
                'past_pos': past_xy_global,
                'future_pos': future_xy_global,
                'rot': rot,
                'vel': vel,
                'accel': accel,
                'yaw_rate': yaw_rate,
                'present_local_xy': xy_local,
                'past_local_xy': past_xy_local,
                'future_local_xy': future_xy_local,
                'curvature': agents_arc
            }

            agents.append(agent_states)

        return map_masks, agent_mask, agents, idx
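
The curve expression used above is the chord-sagitta relation R = h/2 + w^2/(8h): given a path's chord length w and arc height h, it recovers the radius of the circular arc through the endpoints and midpoint (larger values mean straighter paths). A quick sanity check on a unit circle, separate from the dataset code:

import numpy as np

theta = 1.0                   # half-angle subtended by the arc
w = 2 * np.sin(theta)         # chord length on a unit circle
h = 1 - np.cos(theta)         # sagitta (arc height)
R = h / 2 + w**2 / (8 * h)
print(R)                      # recovers the unit radius, 1.0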
Example No. 21
 cam_path, boxes, camera_intrinsic = nusc.get_sample_data(
     sd_rec['token'], )
 im = Image.open(cam_path)
 im.save(
     data_directory + spl + str(scene_id) + camera_channel +
     '/img1/' + str(count_frames) + '.jpg',
     'JPEG',
 )
 TL_found = False
 bb = []
 patch_radius = 700
 sample_record = nusc.get('sample', sample_token)
 log_record = nusc.get('log', scene_record['log_token'])
 log_location = log_record['location']
 nusc_map = NuScenesMap(
     dataroot=opt.nuscenes_root,
     map_name=log_location,
 )
 img = transforms.ToTensor()(Image.open(cam_path))
 # Pad to square resolution
 img, _ = pad_to_square(img, 0)
 # Resize
 img = resize(img, 416)
 input_imgs = Variable(img.type(Tensor)).unsqueeze(0)
 cs_record = nusc.get(
     'calibrated_sensor',
     cam_record['calibrated_sensor_token'],
 )
 poserecord = nusc.get('ego_pose', cam_record['ego_pose_token'])
 ego_pose = poserecord['translation']
 if first_time:
     first_rec = cs_record.copy()
Example No. 22
def main():
  input_root, output_root, do_trajectories, do_maps = get_command_line_args()
  print("Preprocessing Script for nuScenes Dataset.")
  print("Trajectories: {}, Maps: {}".format("Y" if do_trajectories else "N", "Y" if do_maps else "N"))

  if do_trajectories:
    nusc = NuScenes(version='v1.0-trainval', dataroot=input_root)
    
    name2ind = {} # Maps "scene-name" to nusc.scene(list) index.
    for ind, member in enumerate(nusc.scene):
      name2ind[member['name']] = ind

    token2attr = {} # Maps attribute_token to attribute string.
    for attribute in nusc.attribute:
      token2attr[attribute['token']] = attribute['name']
    
    splits = create_splits_scenes()

  if do_maps:
    from nuscenes.map_expansion.map_api import NuScenesMap

    city_list = ['singapore-onenorth', 'singapore-hollandvillage', 'singapore-queenstown', 'boston-seaport']
    for city_name in city_list:
      scale_h = scale_w = 2
      print("Generating maps for {:s}.".format(city_name))

      mask_path = P(output_root).joinpath('raw_map', '{:s}_mask.pkl'.format(city_name))
      dt_path = P(output_root).joinpath('raw_map', '{:s}_dt.pkl'.format(city_name))
      mask_vis_path = P(output_root).joinpath('raw_map_visualization', '{:s}_mask_vis.png'.format(city_name))
      dt_vis_path = P(output_root).joinpath('raw_map_visualization', '{:s}_dt_vis.png'.format(city_name))
      mask_vis_path.parent.mkdir(parents=True, exist_ok=True)
      mask_path.parent.mkdir(parents=True, exist_ok=True)

      nusc_map = NuScenesMap(input_root, city_name)

      print("Calculating a map mask with scale_h: {:d}, scale_w: {:d}... ".format(scale_h, scale_w), end="", flush=True)
      map_mask = get_drivable_area_mask(nusc_map, scale_h=2, scale_w=2)
      print("Done.")

      print("Calculating Signed Distance Transform... ", end="", flush=True)
      image = map_mask.astype(np.int32)
      invert_image = 1-image
      dt = np.where(invert_image, -distance_transform_edt(invert_image), distance_transform_edt(image))
      print("Done.")

      print("Saving Results... ", end="", flush=True)
      dump({'map': map_mask, 'scale_h': 2, 'scale_w': 2}, mask_path)
      dump({'map': dt, 'scale_h': 2, 'scale_w': 2}, dt_path)

      mask_vis = (map_mask*255).astype(np.uint8)

      dt_max = dt.max()
      dt_min = dt.min()
      dt_vis = ((dt - dt_min)/(dt_max - dt_min)*255).astype(np.uint8)

      cv2.imwrite(str(mask_vis_path), mask_vis)
      cv2.imwrite(str(dt_vis_path), dt_vis)
      print("Done. Saved {:s}, {:s}, {:s}, and {:s}.".format(str(mask_path), str(mask_vis_path), str(dt_path), str(dt_vis_path)))

  if do_trajectories:
    for partition in ['train', 'train_val', 'val']:
      print("Generating Trajectories for {:s} set.".format(partition))
      
      if 'train' in partition:
        scene_list = splits['train']
        if partition == "train":
          scene_list = scene_list[NUM_IN_TRAIN_VAL:]
          
        if partition == "train_val":
          scene_list = scene_list[:NUM_IN_TRAIN_VAL]

      else:
        scene_list = splits['val']

      pool = Pool(cpu_count())
      callback = Counting_Callback(task_name="Trajectory Imputation & Smoothing", num_data=len(scene_list))
      for name in scene_list:  
        """
        Generate a raw DataFrame object for each scene_name.
        Filter object categories other than "human" and "vehicle".
        Perform Kalman Smoothing and/or rule-based Imputation.
        """
        
        ind = name2ind[name]

        scene = nusc.scene[ind]

        log = nusc.get('log', scene['log_token'])
        location = log['location']

        data_dict = annotation_dict()

        sample_token = scene['first_sample_token']
        last_sample_token = scene['last_sample_token']
        frame = 0
        passed_last = False
        while not passed_last:
          sample_data = nusc.get("sample", sample_token)
          timestamp = sample_data["timestamp"]

          # Gather pose token from LiDAR since it has timestamp synced with sample_data.
          lidar_data = nusc.get("sample_data", sample_data["data"]["LIDAR_TOP"])
          ego_pose_token = lidar_data['ego_pose_token']

          ego_pose_data = nusc.get("ego_pose", ego_pose_token)
          
          ego_translation = ego_pose_data["translation"]
          ego_rotation = ego_pose_data["rotation"]
          
          # Append Ego Motion Data
          data_dict.append(location,
                            sample_token,
                            '00000000000000000000000000000000',
                            'vehicle.ego',
                            'ego',
                            timestamp,
                            frame,
                            ego_translation,
                            ego_rotation)

          for anns_token in sample_data['anns']:
            anns_data = nusc.get("sample_annotation", anns_token)

            instance_token = anns_data['instance_token']
            instance_category = anns_data['category_name']
            
            instance_attributes = [token2attr[token] for token in anns_data['attribute_tokens']]
            instance_attributes = ", ".join(instance_attributes)

            instance_translation = anns_data["translation"]
            instance_rotation = anns_data["rotation"]

            # Append Instance Motion Data
            data_dict.append(location,
                              sample_token,
                              instance_token,
                              instance_category,
                              instance_attributes,
                              timestamp,
                              frame,
                              instance_translation,
                              instance_rotation)
          # goto next sample
          if sample_token == last_sample_token or len(sample_data['next']) == 0:
            passed_last = True
          
          else:
            sample_token = sample_data['next']
            frame += 1

        df = pd.DataFrame.from_dict(data_dict) # Generate a DataFrame
        pool.apply_async(kalman_smoother,
                        (df.copy(), name),
                        callback=callback) # Perform Kalman Smoothing
        
      pool.close()
      pool.join()

      # Get Kalman Smoothed results and sort w.r.t. scene_name.
      smoothed_trajectories = callback.get_results()
      smoothed_trajectories.sort(key=lambda x: x[1])

      pool = Pool(cpu_count())
      callback = Counting_Callback(task_name="Trajectory Chopping & Sample Generation", num_data=float('inf'))
      num_data = 0
      for df, scene_name in smoothed_trajectories:
        """
        Chop a smoothed DataFrame into multiple samples (~33 samples per scene)
        such that each sample spans 5 seconds, with the reference frame set at the 2-second mark.

        Then, split the sample to obsv (0~2 seconds) and pred (2~5 seconds) files.
        """
        scn_code = int(scene_name.split('-')[-1])
        
        frames = df.FRAME.to_list()
        initial_frame = frames[0]
        last_frame = frames[-1]

        for ref_frame in range(initial_frame+3, last_frame-5):
          obsv_path = P(output_root).joinpath(partition, 'observation', '{:04d}-{:03d}.pkl'.format(scn_code, ref_frame))
          obsv_path.parent.mkdir(parents=True, exist_ok=True)

          pred_path = P(output_root).joinpath(partition, 'prediction', '{:04d}-{:03d}.pkl'.format(scn_code, ref_frame))
          pred_path.parent.mkdir(parents=True, exist_ok=True)
          
          pool.apply_async(generate_trajectories, (df.copy(), ref_frame, obsv_path, pred_path), callback=callback)
          # generate_trajectories(df.copy(), ref_frame, obsv_path, pred_path)
          num_data += 1
      
      callback.set_num_data(num_data)
      pool.close()
      pool.join()

      print("Saved {:d} {:s} samples at {:s}.".format(num_data, partition, str(P(output_root).joinpath(partition))))
    
    # Create train_all set using symbolic link.
    print("Making symlinks to form train_all split... ", end="", flush=True)
    trainall_dirname = 'train_all'
    trainall_obsv_path = P(output_root).joinpath('{:s}/observation'.format(trainall_dirname))
    trainall_obsv_path.mkdir(parents=True, exist_ok=True)
    trainall_pred_path = P(output_root).joinpath('{:s}/prediction'.format(trainall_dirname))
    trainall_pred_path.mkdir(parents=True, exist_ok=True)

    train_path = P(output_root).joinpath('train')
    train_obsv_pkl = list(train_path.glob('observation/*.pkl'))
    train_pred_pkl = list(train_path.glob('prediction/*.pkl'))

    trainval_path = P(output_root).joinpath('train_val')
    trainval_obsv_pkl = list(trainval_path.glob('observation/*.pkl'))
    trainval_pred_pkl = list(trainval_path.glob('prediction/*.pkl'))

    obsv_pkl_list = train_obsv_pkl + trainval_obsv_pkl
    pred_pkl_list = train_pred_pkl + trainval_pred_pkl
    for obsv_pkl, pred_pkl in zip(obsv_pkl_list, pred_pkl_list):
      obsv_filename, obsv_split = obsv_pkl.name, obsv_pkl.parent.parent.stem
      pred_filename, pred_split = pred_pkl.name, pred_pkl.parent.parent.stem
      
      obsv_relpath = P('../../{:s}/observation/'.format(obsv_split)).joinpath(obsv_filename)
      obsv_link = trainall_obsv_path.joinpath(obsv_filename)
      obsv_link.symlink_to(obsv_relpath)
      
      pred_relpath = P('../../{:s}/prediction/'.format(pred_split)).joinpath(pred_filename)
      pred_link = trainall_pred_path.joinpath(pred_filename)
      pred_link.symlink_to(pred_relpath)
    print(" Done.")
Example No. 23
    def convert_scene(self, scene_name, utils, dataroot, up_to_step, *args, **kwargs):
        experiment_result_dict = None
        if 'experiment_result_dict' in kwargs.keys():
            experiment_result_dict = kwargs['experiment_result_dict']
        
        scene = None
        for scene_i in self.nusc.scene:
            if scene_i['name'] == scene_name:
                scene = scene_i
                break
        assert scene is not None, f'Scene {scene_name} not found.'
            
        scene_name = scene['name']
        log = self.nusc.get('log', scene['log_token'])
        location = log['location']
        print(f'Loading map "{location}"')
        self.nusc_map = NuScenesMap(dataroot=dataroot, map_name=location)
        print(f'Loading bitmap "{self.nusc_map.map_name}"')
        bitmap = BitMap(self.nusc_map.dataroot, self.nusc_map.map_name, 'basemap')
        print(f'Loaded {bitmap.image.shape} bitmap')

        cur_sample = self.nusc.get('sample', scene['first_sample_token'])

        can_parsers = [
            [self.nusc_can.get_messages(scene_name, 'ms_imu'), 0, utils.get_imu_msg],
            [self.nusc_can.get_messages(scene_name, 'pose'), 0, utils.get_odom_msg],
            [self.nusc_can.get_messages(scene_name, 'steeranglefeedback'), 0, lambda x: utils.get_basic_can_msg('Steering Angle', x)],
            [self.nusc_can.get_messages(scene_name, 'vehicle_monitor'), 0, lambda x: utils.get_basic_can_msg('Vehicle Monitor', x)],
            [self.nusc_can.get_messages(scene_name, 'zoesensors'), 0, lambda x: utils.get_basic_can_msg('Zoe Sensors', x)],
            [self.nusc_can.get_messages(scene_name, 'zoe_veh_info'), 0, lambda x: utils.get_basic_can_msg('Zoe Vehicle Info', x)],
        ]

        bag_name = f"{self.model_name}_{self.ckpt_name}_{self.config['scene_name']}.bag"
        bag_path = os.path.join(self.scene_data_p, bag_name)
        print(f'Writing to {bag_path}')
        bag = rosbag.Bag(bag_path, 'w', compression='lz4')

        stamp = utils.get_time(self.nusc.get('ego_pose', self.nusc.get('sample_data', cur_sample['data']['LIDAR_TOP'])['ego_pose_token']))
        map_msg = utils.get_scene_map(scene, self.nusc_map, bitmap, stamp)
        centerlines_msg = utils.get_centerline_markers(scene, self.nusc_map, stamp)
        bag.write('/map', map_msg, stamp)
        bag.write('/semantic_map', centerlines_msg, stamp)
        last_map_stamp = stamp

        #idx = list(experiment_result_dict['sim_ego_pos'].keys())[0]
        idx = 0
        while cur_sample is not None:
            if up_to_step is not None and idx > up_to_step:
                break
            sample_lidar = self.nusc.get('sample_data', cur_sample['data']['LIDAR_TOP'])
            ego_pose = self.nusc.get('ego_pose', sample_lidar['ego_pose_token'])
            stamp = utils.get_time(ego_pose)
            
            #### write experiment_result to bag ####
            ego_marker = None
            if experiment_result_dict is not None:
                # figures #
                for k,v in experiment_result_dict['figures'].items():
                    if idx in v.keys():
                        png = v[idx]
                        msg = CompressedImage()
                        msg.header.frame_id = k
                        msg.header.stamp = stamp
                        msg.format = 'png'
                        msg.data = png
                        bag.write("/"+k+'/image_rect_compressed', msg, stamp)
                #print(idx)
                #print(experiment_result_dict['sim_ego_pos'].keys())
                if idx in experiment_result_dict['sim_ego_pos'].keys():
                    ego_pose = {
                        'translation': experiment_result_dict['sim_ego_pos'][idx],
                        'rotation': experiment_result_dict['sim_ego_quat'][idx],
                        'token': ego_pose['token'],
                        'timestamp': ego_pose['timestamp']
                    }
                    
                    if idx == list(experiment_result_dict['sim_ego_pos'].keys())[-1]:
                        next_idx = idx
                        break
                    else:
                        next_idx = idx+1
                    next_sample = utils.nusc.get('sample',cur_sample['next'])
                    sample_lidar = utils.nusc.get('sample_data', next_sample['data']['LIDAR_TOP'])
                    real_next_ego_pose = utils.nusc.get('ego_pose', sample_lidar['ego_pose_token'])

                    next_ego_pose = {
                        'translation': experiment_result_dict['sim_ego_pos'][next_idx],
                        'rotation': experiment_result_dict['sim_ego_quat'][next_idx],
                        'token': real_next_ego_pose['token'],
                        'timestamp': real_next_ego_pose['timestamp']
                    }
                    # publish /tf
                    tf_array = utils.get_tfmessage(cur_sample, current_pose=ego_pose, next_pose=next_ego_pose)
                    bag.write('/tf', tf_array, stamp)

                else:
                    # publish /tf
                    tf_array = utils.get_tfmessage(cur_sample)
                    bag.write('/tf', tf_array, stamp)
            else:
                # publish /tf
                tf_array = utils.get_tfmessage(cur_sample)
                bag.write('/tf', tf_array, stamp)
                
            
            # write map topics every two seconds
            if stamp - rospy.Duration(2.0) >= last_map_stamp:
                map_msg.header.stamp = stamp
                for marker in centerlines_msg.markers:
                    marker.header.stamp = stamp
                bag.write('/map', map_msg, stamp)
                bag.write('/semantic_map', centerlines_msg, stamp)
                last_map_stamp = stamp

            # write CAN messages to /pose, /odom, and /diagnostics
            can_msg_events = []
            for i in range(len(can_parsers)):
                (can_msgs, index, msg_func) = can_parsers[i]
                while index < len(can_msgs) and utils.get_utime(can_msgs[index]) < stamp:
                    can_msg_events.append(msg_func(can_msgs[index]))
                    index += 1
                    can_parsers[i][1] = index
            can_msg_events.sort(key=lambda x: x[0])
            for (msg_stamp, topic, msg) in can_msg_events:
                bag.write(topic, msg, stamp)

            
            # /driveable_area occupancy grid
            utils.write_occupancy_grid(bag, self.nusc_map, ego_pose, stamp)
                    
            # iterate sensors
            for (sensor_id, sample_token) in cur_sample['data'].items():
                sample_data = self.nusc.get('sample_data', sample_token)
                topic = '/' + sensor_id

                # write the sensor data
                # if sample_data['sensor_modality'] == 'radar':
                #     msg = utils.get_radar(sample_data, sensor_id)
                #     bag.write(topic, msg, stamp)
                # elif sample_data['sensor_modality'] == 'lidar':
                #     msg = utils.get_lidar(sample_data, sensor_id)
                #     bag.write(topic, msg, stamp)
                
                # if sample_data['sensor_modality'] == 'camera':
                #     msg = utils.get_camera(sample_data, sensor_id, dataroot)
                #     bag.write(topic + '/image_rect_compressed', msg, stamp)
                #     msg = utils.get_camera_info(sample_data, sensor_id)
                #     bag.write(topic + '/camera_info', msg, stamp)

                # if sample_data['sensor_modality'] == 'camera':
                #     msg = utils.get_lidar_imagemarkers(sample_lidar, sample_data, sensor_id)
                #     bag.write(topic + '/image_markers_lidar', msg, stamp)
                #     utils.write_boxes_imagemarkers(bag, cur_sample['anns'], sample_data, sensor_id, topic, stamp)

            # publish /markers/annotations
            marker_array = MarkerArray()
            
            ego_marker = Marker()
            ego_marker.header.frame_id = 'base_link'
            ego_marker.header.stamp = stamp
            ego_marker.id = 10000
            ego_marker.text = 'ego'
            ego_marker.type = Marker.CUBE
            #ego_marker.pose = utils.get_pose(next_ego_pose)
            ego_marker.frame_locked = True
            ego_marker.scale.x = 4
            ego_marker.scale.y = 1
            ego_marker.scale.z = 2
            ego_marker.color = utils.make_color((255,61,99), 0.5)
            marker_array.markers.append(ego_marker)


            # publish /pose
            pose_stamped = PoseStamped()
            pose_stamped.header.frame_id = 'base_link'
            pose_stamped.header.stamp = stamp
            pose_stamped.pose.orientation.w = 1
            bag.write('/pose', pose_stamped, stamp)

            # publish /gps
            coordinates = utils.derive_latlon(location, ego_pose)
            gps = NavSatFix()
            gps.header.frame_id = 'base_link'
            gps.header.stamp = stamp
            gps.status.status = 1
            gps.status.service = 1
            gps.latitude = coordinates['latitude']
            gps.longitude = coordinates['longitude']
            gps.altitude = utils.get_transform(ego_pose).translation.z
            bag.write('/gps', gps, stamp)

            for annotation_id in cur_sample['anns']:
                ann = self.nusc.get('sample_annotation', annotation_id)
                marker_id = int(ann['instance_token'][:4], 16)
                c = np.array(self.nusc.explorer.get_color(ann['category_name'])) / 255.0

                marker = Marker()
                marker.header.frame_id = 'map'
                marker.header.stamp = stamp
                marker.id = marker_id
                marker.text = ann['instance_token'][:4]
                marker.type = Marker.CUBE
                marker.pose = utils.get_pose(ann)
                marker.frame_locked = True
                marker.scale.x = ann['size'][1]
                marker.scale.y = ann['size'][0]
                marker.scale.z = ann['size'][2]
                marker.color = utils.make_color(c, 0.5)
                marker_array.markers.append(marker)

            # ego marker #
            # marker = Marker()
            # marker.header.frame_id = 'map'
            # marker.header.stamp = stamp
            # marker.id = marker_id
            # marker.text = 'ego'
            # marker.type = Marker.CUBE
            # marker.pose = utils.get_pose(ego_pose)
            # marker.frame_locked = True
            # marker.scale.x = 4.3
            # marker.scale.y = 1.8
            # marker.scale.z = 1.6
            # c = [0.1,0.1,0.6]
            # marker.color = utils.make_color(c, 0.9)
            # marker_array.markers.append(marker)
                    
            bag.write('/markers/annotations', marker_array, stamp)

            # collect all sensor frames after this sample but before the next sample
            non_keyframe_sensor_msgs = []
            for (sensor_id, sample_token) in cur_sample['data'].items():
                topic = '/' + sensor_id

                next_sample_token = self.nusc.get('sample_data', sample_token)['next']
                while next_sample_token != '':
                    next_sample_data = self.nusc.get('sample_data', next_sample_token)
                    # if next_sample_data['is_key_frame'] or get_time(next_sample_data).to_nsec() > next_stamp.to_nsec():
                    #     break
                    if next_sample_data['is_key_frame']:
                        break

                    # if next_sample_data['sensor_modality'] == 'radar':
                    #     msg = utils.get_radar(next_sample_data, sensor_id)
                    #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic, msg))
                    # elif next_sample_data['sensor_modality'] == 'lidar':
                    #     msg = utils.get_lidar(next_sample_data, sensor_id)
                    #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic, msg))
                    
                    # if next_sample_data['sensor_modality'] == 'camera':
                    #     msg = utils.get_camera(next_sample_data, sensor_id, dataroot)
                    #     camera_stamp_nsec = msg.header.stamp.to_nsec()
                    #     non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/image_rect_compressed', msg))

                    #     msg = utils.get_camera_info(next_sample_data, sensor_id)
                    #     non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/camera_info', msg))

                        # closest_lidar = utils.find_closest_lidar(cur_sample['data']['LIDAR_TOP'], camera_stamp_nsec)
                        # if closest_lidar is not None:
                        #     msg = utils.get_lidar_imagemarkers(closest_lidar, next_sample_data, sensor_id)
                        #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic + '/image_markers_lidar', msg))
                        # else:
                        #     msg = utils.get_remove_imagemarkers(sensor_id, 'LIDAR_TOP', msg.header.stamp)
                        #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic + '/image_markers_lidar', msg))

                        # Delete all image markers on non-keyframe camera images
                        # msg = get_remove_imagemarkers(sensor_id, 'LIDAR_TOP', msg.header.stamp)
                        # non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/image_markers_lidar', msg))
                        # msg = get_remove_imagemarkers(sensor_id, 'annotations', msg.header.stamp)
                        # non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/image_markers_annotations', msg))

                    next_sample_token = next_sample_data['next']
            
                    
            # sort and publish the non-keyframe sensor msgs
            non_keyframe_sensor_msgs.sort(key=lambda x: x[0])
            for (_, topic, msg) in non_keyframe_sensor_msgs:
                bag.write(topic, msg, msg.header.stamp)

            # move to the next sample
            cur_sample = self.nusc.get('sample', cur_sample['next']) if cur_sample.get('next') != '' else None
            idx += 1
        bag.close()
        print(f'Finished writing {bag_name}')
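To sanity-check the bag produced by this converter, a minimal ROS 1 read-back sketch can iterate over the written topics. The bag filename below is hypothetical; use whatever bag_name the converter printed.

import rosbag

# Minimal verification sketch (assumes ROS 1; the filename is hypothetical).
with rosbag.Bag('NuScenes-v1.0-mini-scene-0061.bag') as bag:
    for topic, msg, stamp in bag.read_messages(topics=['/gps', '/markers/annotations']):
        print(topic, stamp.to_sec(), type(msg).__name__)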
Exemplo n.º 24
0
def preprocess_maps(dataroot, glob_path="./*.json"):
    fs = glob(glob_path)
    for fi, f in enumerate(fs):
        with open(f, "r") as reader:
            data = json.load(reader)
        nusc_map = NuScenesMap(dataroot=dataroot, map_name=data["map_name"])
        dataset = dict()
        center, h, w = data["center"], data["height"], data["width"]
        patch = [
            center[0] - w / 2,
            center[1] - h / 2,
            center[0] + w / 2,
            center[1] + h / 2,
        ]
        dataset["patch"] = patch
        dataset["center"] = np.array([center])
        dataset["height"] = h
        dataset["width"] = w
        dataset["map_name"] = data["map_name"]
        dataset["dx"] = np.array(data["dx"])
        dataset["bx"] = np.array(data["bx"])
        dataset["road_img"] = np.array(data["road_img"])

        # Needed for lidar sensors
        pt1, pt2 = nuscenes_map_to_line_representation(nusc_map, patch, False)
        dataset["edges"] = (pt1, pt2)

        drivable_area, xs, ys = get_drivable_area_matrix(data, patch, res=500)
        dataset["plotting_utils"] = (
            drivable_area.numpy().flatten(),
            xs.numpy().flatten(),
            ys.numpy().flatten(),
            [(0.5, 0.5, 0.5) if row else (1, 1, 1)
             for row in drivable_area.numpy().flatten()],
        )

        dataset["splines"] = dict()
        signal_loc = torch.as_tensor(data["signal_locations"])
        signal_color = data["mapping"]
        dataset["signal_locations"] = signal_loc
        dataset["color_mapping"] = signal_color
        dataset["starts_to_signal"] = data["starts_to_signal"]
        dataset["signal_loc"] = []
        dataset["signal_color"] = []
        for starti, (key, paths) in enumerate(data["all_paths"].items()):
            idx = data["starts_to_signal"][starti]
            loc = signal_loc[idx]
            col = signal_color[idx]
            dataset["signal_loc"].append(loc)
            dataset["signal_color"].append(col)

            dataset["splines"][starti] = dict()
            for pathi, path in enumerate(paths):
                path = np.array(path)
                if path.shape[0] < 75:
                    print(
                        "Skipping spline as it contains very few control points"
                    )
                    continue
                dataset["splines"][starti][pathi] = []
                for i in range(0, 50, 10):
                    # np.int was removed in NumPy 1.24; the builtin int works everywhere.
                    cps = path[np.linspace(i, path.shape[0] - 15, 12, dtype=int), :2]

                    diff = cps[0] - cps[1]
                    theta = np.arctan2(diff[1], diff[0])
                    start_orientation = angle_normalize(
                        torch.as_tensor(math.pi + theta)).float().reshape(1, 1)
                    if i == 0:
                        extra_pt1 = np.array([[
                            cps[0, 0] + np.cos(theta) * 30.0,
                            cps[0, 1] + np.sin(theta) * 30.0,
                        ]])
                    else:
                        extra_pt1 = path[0:1, :2]

                    diff = cps[-1] - cps[-2]
                    theta = np.arctan2(diff[1], diff[0])
                    dest_orientation = (angle_normalize(
                        torch.as_tensor(theta)).float().reshape(1, 1))
                    extra_pt2 = np.array([[
                        cps[-1, 0] + np.cos(theta) * 30.0,
                        cps[-1, 1] + np.sin(theta) * 30.0,
                    ]])

                    cps = torch.cat([
                        torch.from_numpy(cps),
                        torch.from_numpy(extra_pt2),
                        torch.from_numpy(extra_pt1),
                    ])[None, :, :].float()

                    start_position = cps[:, 0, :]
                    destination = cps[:, -3, :]

                    dataset["splines"][starti][pathi].append((
                        start_position,
                        destination,
                        start_orientation,
                        dest_orientation,
                        cps,
                    ))
        outname = f"env{data['map_name']}_{data['center'][0]}_{data['center'][1]}.pth"
        print("saving", outname)
        torch.save(dataset, outname)
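The saved environment can be loaded back with torch.load for inspection; a minimal sketch, using a hypothetical filename that follows the env{map_name}_{center_x}_{center_y}.pth pattern above:

import torch

# Hypothetical filename following the naming pattern used by preprocess_maps.
dataset = torch.load("envboston-seaport_1000_1000.pth")
print(dataset["map_name"], dataset["patch"])
for starti, paths in dataset["splines"].items():
    for pathi, segments in paths.items():
        start_position, destination, start_orientation, dest_orientation, cps = segments[0]
        print(starti, pathi, cps.shape)  # control points have shape (1, 14, 2)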
Exemplo n.º 25
0
 def test_init_all_maps(self):
     for my_map in ['singapore-onenorth', 'singapore-hollandvillage', 'singapore-queenstown', 'boston-seaport']:
         nusc_map = NuScenesMap(map_name=my_map)
Exemplo n.º 26
0
def create_lanes_xml(
    nusc_map: NuScenesMap,
    root: ET.Element,
    data: Dict[str, Iterable[Any]],
    filename: str,
    argo_dir: str,
    lane_dict: Dict[str, Iterable[int]],
    poly_dict: Dict[str, np.ndarray],
) -> None:
    """
    Fill the XML map file with lane centerlines.
    Also create the supporting files halluc_bbox_table.npy and tableidx_to_laneid_map.json.
    """
    # ID to assign to lanes in the new XML map file. We arbitrarily start at 8000000.
    # We make up new lane_ids since the original ones are non-numerical.
    global_way_id = 8000000

    # Map that links new lane_id in the xml to its original token in the json.
    way_to_lane_id = {}

    # Maps each table index to its (new, numerical) lane ID.
    tableidx_to_laneid_map = {}
    # Array that holds (xmin, ymin, xmax, ymax) for each lane polygon.
    halluc_bbox_table = []
    table_idx_counter = 0

    ## Iterate over the lanes to create the required xml and supporting files
    for way in data["lane"] + data["lane_connector"]:
        node = ET.SubElement(root, "way")

        if way["token"] not in way_to_lane_id:
            way_to_lane_id[way["token"]] = global_way_id
            global_way_id += 1
        curr_id = way_to_lane_id[way["token"]]

        node.set("lane_id", str(curr_id))
        traffic = ET.SubElement(node, "tag")
        traffic.set("k", "has_traffic_control")
        traffic.set("v", DEFAULT_TRAFFIC_CONTROL)

        turn = ET.SubElement(node, "tag")
        turn.set("k", "turn_direction")
        turn.set("v", DEFAULT_TURN_DIRECTION)

        intersection = ET.SubElement(node, "tag")
        intersection.set("k", "is_intersection")
        intersection.set("v", DEFAULT_IS_INTERSECTION)

        ln = ET.SubElement(node, "tag")
        ln.set("k", "l_neighbor_id")
        ln.set("v", DEFAULT_L_NEIGHBOR)

        rn = ET.SubElement(node, "tag")
        rn.set("k", "r_neighbor_id")
        rn.set("v", DEFAULT_R_NEIGHBOR)

        for waypoint in lane_dict[way["token"]]:
            nd = ET.SubElement(node, "nd")
            nd.set("ref", str(waypoint))

        predecessors = nusc_map.get_incoming_lane_ids(way["token"])
        successors = nusc_map.get_outgoing_lane_ids(way["token"])

        for pred_id in predecessors:
            pre = ET.SubElement(node, "tag")
            pre.set("k", "predecessor")
            if pred_id not in way_to_lane_id:
                way_to_lane_id[pred_id] = global_way_id
                global_way_id += 1
            int_pred_id = way_to_lane_id[pred_id]
            pre.set("v", str(int_pred_id))

        for succ_id in successors:
            succ = ET.SubElement(node, "tag")
            succ.set("k", "successor")
            if succ_id not in way_to_lane_id:
                way_to_lane_id[succ_id] = global_way_id
                global_way_id += 1
            int_succ_id = way_to_lane_id[succ_id]
            succ.set("v", str(int_succ_id))

        lane_id = way_to_lane_id[way["token"]]
        tableidx_to_laneid_map[table_idx_counter] = lane_id
        table_idx_counter += 1

        xmin, ymin, xmax, ymax = compute_point_cloud_bbox(
            poly_dict[way["polygon_token"]])
        halluc_bbox_table += [(xmin, ymin, xmax, ymax)]

    halluc_bbox_table = np.array(halluc_bbox_table)
    halluc_bbox_dict = {  # assembled for reference; only its components are saved below
        "tableidx_to_laneid_map": tableidx_to_laneid_map,
        "halluc_bbox_table": halluc_bbox_table,
    }
    np.save(
        f"{argo_dir}/{filename_to_id[filename]}_halluc_bbox_table.npy",
        halluc_bbox_table,
    )
    with open(
            f"{argo_dir}/{filename_to_id[filename]}_tableidx_to_laneid_map.json",
            "w") as outfile:
        json.dump(tableidx_to_laneid_map, outfile)

    tree = ET.ElementTree(root)
    with open(
            f"{argo_dir}/pruned_nuscenes_{filename_to_id[filename]}_vector_map.xml",
            "wb") as files:
        tree.write(files)
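create_lanes_xml relies on a compute_point_cloud_bbox helper that is not shown in this snippet. A minimal sketch consistent with how it is called here (returning the axis-aligned bounds of a polygon's points) could be:

import numpy as np

def compute_point_cloud_bbox(points: np.ndarray):
    # Axis-aligned bounding box (xmin, ymin, xmax, ymax) of an (N, >=2) point array.
    xmin, ymin = points[:, 0].min(), points[:, 1].min()
    xmax, ymax = points[:, 0].max(), points[:, 1].max()
    return xmin, ymin, xmax, ymax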
Exemplo n.º 27
0
            data.sort_values('frame_id', inplace=True)
            max_timesteps = data['frame_id'].max()

            x_min = np.round(data['x'].min() - 50)
            x_max = np.round(data['x'].max() + 50)
            y_min = np.round(data['y'].min() - 50)
            y_max = np.round(data['y'].max() + 50)

            data['x'] = data['x'] - x_min
            data['y'] = data['y'] - y_min

            scene = Scene(timesteps=max_timesteps + 1, dt=0.5, name=str(scene_id))

            # Generate Maps
            map_name = nusc.get('log', nuscene['log_token'])['location']
            nusc_map = NuScenesMap(dataroot=data_path, map_name=map_name)

            type_map = dict()
            x_size = x_max - x_min
            y_size = y_max - y_min
            patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min), y_size, x_size)
            patch_angle = 0  # Default orientation where North is up
            canvas_size = (np.round(3 * y_size).astype(int), np.round(3 * x_size).astype(int))
            homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
            layer_names = ['lane', 'road_segment', 'drivable_area', 'road_divider', 'lane_divider', 'stop_line',
                           'ped_crossing', 'stop_line', 'ped_crossing', 'walkway']
            # 'stop_line' and 'ped_crossing' appear twice on purpose so that
            # map_mask[8] is ped_crossing and map_mask[9] is walkway below.
            map_mask = (nusc_map.get_map_mask(patch_box, patch_angle, layer_names, canvas_size) * 255.0).astype(
                np.uint8)
            map_mask = np.swapaxes(map_mask, 1, 2) # x axis comes first
            # PEDESTRIANS
            map_mask_pedestrian = np.stack((map_mask[9], map_mask[8], np.max(map_mask[:3], axis=0)), axis=2)
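For reference, NuScenesMap.get_map_mask takes patch_box as (center_x, center_y, height, width) in global map coordinates and returns one binary channel per requested layer at the given canvas_size. A small self-contained sketch (the dataroot and coordinates are assumptions):

from nuscenes.map_expansion.map_api import NuScenesMap

nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='boston-seaport')
patch_box = (300.0, 1700.0, 100.0, 100.0)  # (center_x, center_y, height, width) in meters
canvas_size = (300, 300)                   # 3 pixels per meter, matching the code above
mask = nusc_map.get_map_mask(patch_box, 0, ['drivable_area', 'walkway'], canvas_size)
print(mask.shape)  # (2, 300, 300): one channel per requested layer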
Exemplo n.º 28
0
def process_scene(sample_token, processed_sample_tokens, env, nusc, helper,
                  data_path, data_class, half_dt, dynamic):
    data = pd.DataFrame(columns=[
        'frame_id', 'type', 'node_id', 'robot', 'x', 'y', 'z', 'length',
        'width', 'height', 'heading'
    ])

    samples = aggregate_samples(nusc,
                                start=sample_token,
                                data_class=data_class)

    attribute_dict = defaultdict(set)

    frame_id = 0
    for sample in samples:
        annotation_tokens = sample['anns']
        for annotation_token in annotation_tokens:
            annotation = nusc.get('sample_annotation', annotation_token)
            category = annotation['category_name']
            if len(annotation['attribute_tokens']):
                attribute = nusc.get('attribute',
                                     annotation['attribute_tokens'][0])['name']
            else:
                if 'vehicle' in category:
                    attribute = ''
                else:
                    continue

            if 'pedestrian' in category and 'stroller' not in category and 'wheelchair' not in category:
                our_category = env.NodeType.PEDESTRIAN
            elif 'vehicle' in category and 'bicycle' not in category and 'motorcycle' not in category:  # and 'parked' not in attribute:
                our_category = env.NodeType.VEHICLE
            else:
                continue

            attribute_dict[annotation['instance_token']].add(attribute)

            data_point = pd.Series({
                'frame_id': frame_id,
                'type': our_category,
                'node_id': annotation['instance_token'],
                'robot': False,
                'x': annotation['translation'][0],
                'y': annotation['translation'][1],
                'z': annotation['translation'][2],
                'length': annotation['size'][0],
                'width': annotation['size'][1],
                'height': annotation['size'][2],
                'heading': Quaternion(annotation['rotation']).yaw_pitch_roll[0]
            })
            # DataFrame.append was removed in pandas 2.0; concat is the portable equivalent.
            data = pd.concat([data, data_point.to_frame().T], ignore_index=True)

        # Ego Vehicle
        our_category = env.NodeType.VEHICLE
        sample_data = nusc.get('sample_data', sample['data']['CAM_FRONT'])
        annotation = nusc.get('ego_pose', sample_data['ego_pose_token'])
        data_point = pd.Series({
            'frame_id': frame_id,
            'type': our_category,
            'node_id': 'ego',
            'robot': True,
            'x': annotation['translation'][0],
            'y': annotation['translation'][1],
            'z': annotation['translation'][2],
            'length': 4,
            'width': 1.7,
            'height': 1.5,
            'heading': Quaternion(annotation['rotation']).yaw_pitch_roll[0],
            'orientation': None
        })
        data = pd.concat([data, data_point.to_frame().T], ignore_index=True)

        processed_sample_tokens.add(sample['token'])
        frame_id += 1

    if len(data.index) == 0:
        return None

    data.sort_values('frame_id', inplace=True)
    max_timesteps = data['frame_id'].max()

    x_min = np.round(data['x'].min() - 50)
    x_max = np.round(data['x'].max() + 50)
    y_min = np.round(data['y'].min() - 50)
    y_max = np.round(data['y'].max() + 50)

    data['x'] = data['x'] - x_min
    data['y'] = data['y'] - y_min

    scene = Scene(timesteps=max_timesteps + 1,
                  dt=dt,  # dt comes from module scope; nuScenes keyframes are 0.5 s apart
                  name=sample_token,
                  aug_func=augment,
                  sample_tokens=[sample['token'] for sample in samples],
                  x_min=x_min,
                  y_min=y_min)

    # Generate Maps
    map_name = helper.get_map_name_from_sample_token(sample_token)
    nusc_map = NuScenesMap(dataroot=data_path, map_name=map_name)

    type_map = dict()
    x_size = x_max - x_min
    y_size = y_max - y_min
    patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min),
                 y_size, x_size)
    patch_angle = 0  # Default orientation where North is up
    canvas_size = (np.round(3 * y_size).astype(int),
                   np.round(3 * x_size).astype(int))
    homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
    layer_names = [
        'lane', 'road_segment', 'drivable_area', 'road_divider',
        'lane_divider', 'stop_line', 'ped_crossing', 'stop_line',
        'ped_crossing', 'walkway'
    ]
    # 'stop_line' and 'ped_crossing' are listed twice on purpose so that
    # map_mask[8] is ped_crossing and map_mask[9] is walkway below.
    map_mask = (nusc_map.get_map_mask(patch_box, patch_angle, layer_names,
                                      canvas_size) * 255.0).astype(np.uint8)
    map_mask = np.swapaxes(map_mask, 1, 2)  # x axis comes first
    # PEDESTRIANS
    map_mask_pedestrian = np.stack(
        (map_mask[9], map_mask[8], np.max(map_mask[:3], axis=0)), axis=0)
    type_map['PEDESTRIAN'] = GeometricMap(data=map_mask_pedestrian,
                                          homography=homography,
                                          description=', '.join(layer_names))
    # VEHICLES
    map_mask_vehicle = np.stack(
        (np.max(map_mask[:3], axis=0), map_mask[3], map_mask[4]), axis=0)
    type_map['VEHICLE'] = GeometricMap(data=map_mask_vehicle,
                                       homography=homography,
                                       description=', '.join(layer_names))

    map_mask_plot = np.stack(
        ((np.max(map_mask[:3], axis=0) -
          (map_mask[3] + 0.5 * map_mask[4]).clip(max=255)).clip(min=0).astype(
              np.uint8), map_mask[8], map_mask[9]),
        axis=0)
    type_map['VISUALIZATION'] = GeometricMap(
        data=map_mask_plot,
        homography=homography,
        description=', '.join(layer_names))

    scene.map = type_map
    del map_mask
    del map_mask_pedestrian
    del map_mask_vehicle
    del map_mask_plot

    for node_id in pd.unique(data['node_id']):
        node_frequency_multiplier = 1
        node_df = data[data['node_id'] == node_id]

        if dynamic:
            if ('vehicle.parked' in attribute_dict[node_id]
                    or 'vehicle.stopped' in attribute_dict[node_id]):
                continue

        elif (len(attribute_dict[node_id]) == 1
              and 'vehicle.parked' in attribute_dict[node_id]):
            # Catching instances of vehicles that were parked and then moving, but not allowing
            # only parked vehicles through.
            continue

        if node_df['x'].shape[0] < 2:
            continue

        if not np.all(np.diff(node_df['frame_id']) == 1):
            min_index = node_df['frame_id'].min()
            max_index = node_df['frame_id'].max()
            node_df = node_df.set_index('frame_id').reindex(
                range(min_index, max_index + 1)).interpolate().reset_index()
            node_df['type'] = node_df['type'].mode()[0]
            node_df['node_id'] = node_id
            node_df['robot'] = False
            # print('Occlusion')
            # continue  # TODO Make better

        node_values = node_df[['x', 'y']].values
        x = node_values[:, 0]
        y = node_values[:, 1]
        heading = node_df['heading'].values
        if node_df.iloc[0]['type'] == env.NodeType.VEHICLE and node_id != 'ego':
            # Kalman filter Agent
            vx = derivative_of(x, scene.dt)
            vy = derivative_of(y, scene.dt)
            velocity = np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1)

            filter_veh = NonlinearKinematicBicycle(dt=scene.dt,
                                                   sMeasurement=1.0)
            P_matrix = None
            for i in range(len(x)):
                if i == 0:  # initialize KF
                    # initial P_matrix
                    P_matrix = np.identity(4)
                elif i < len(x):
                    # assign new est values
                    x[i] = x_vec_est_new[0][0]
                    y[i] = x_vec_est_new[1][0]
                    heading[i] = x_vec_est_new[2][0]
                    velocity[i] = x_vec_est_new[3][0]

                if i < len(x) - 1:  # no action on last data
                    # filtering
                    x_vec_est = np.array([[x[i]], [y[i]], [heading[i]],
                                          [velocity[i]]])
                    z_new = np.array([[x[i + 1]], [y[i + 1]], [heading[i + 1]],
                                      [velocity[i + 1]]])
                    x_vec_est_new, P_matrix_new = filter_veh.predict_and_update(
                        x_vec_est=x_vec_est,
                        u_vec=np.array([[0.], [0.]]),
                        P_matrix=P_matrix,
                        z_new=z_new)
                    P_matrix = P_matrix_new

            curvature, pl, _ = trajectory_curvature(np.stack((x, y), axis=-1))
            if pl < 1.0:  # vehicle is "not" moving
                x = x[0].repeat(max_timesteps + 1)
                y = y[0].repeat(max_timesteps + 1)
                heading = heading[0].repeat(max_timesteps + 1)
            global total
            global curv_0_2
            global curv_0_1
            total += 1
            if pl > 1.0:
                if curvature > .2:
                    curv_0_2 += 1
                    node_frequency_multiplier = 3 * int(
                        np.floor(total / curv_0_2))
                elif curvature > .1:
                    curv_0_1 += 1
                    node_frequency_multiplier = 3 * int(
                        np.floor(total / curv_0_1))

        if half_dt:
            t_old = np.linspace(0, 1, x.shape[0])
            t_new = np.linspace(0, 1, 2 * x.shape[0])
            x = np.interp(t_new, t_old, x)
            y = np.interp(t_new, t_old, y)
            heading = np.interp(
                t_new, t_old, heading + np.pi, period=2 * np.pi) - np.pi

            vx = derivative_of(x, scene.dt / 2)
            vy = derivative_of(y, scene.dt / 2)
            ax = derivative_of(vx, scene.dt / 2)
            ay = derivative_of(vy, scene.dt / 2)
        else:
            vx = derivative_of(x, scene.dt)
            vy = derivative_of(y, scene.dt)
            ax = derivative_of(vx, scene.dt)
            ay = derivative_of(vy, scene.dt)

        if node_df.iloc[0]['type'] == env.NodeType.VEHICLE:
            v = np.stack((vx, vy), axis=-1)
            v_norm = np.linalg.norm(np.stack((vx, vy), axis=-1),
                                    axis=-1,
                                    keepdims=True)
            heading_v = np.divide(v,
                                  v_norm,
                                  out=np.zeros_like(v),
                                  where=(v_norm > 1.))
            heading_x = heading_v[:, 0]
            heading_y = heading_v[:, 1]

            a_norm = np.divide(ax * vx + ay * vy,
                               v_norm[..., 0],
                               out=np.zeros_like(ax),
                               where=(v_norm[..., 0] > 1.))

            if half_dt:
                d_heading = derivative_of(heading, scene.dt / 2, radian=True)
            else:
                d_heading = derivative_of(heading, scene.dt, radian=True)

            data_dict = {
                ('position', 'x'): x,
                ('position', 'y'): y,
                ('velocity', 'x'): vx,
                ('velocity', 'y'): vy,
                ('velocity', 'norm'): np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1),
                ('acceleration', 'x'): ax,
                ('acceleration', 'y'): ay,
                ('acceleration', 'norm'): a_norm,
                ('heading', 'x'): heading_x,
                ('heading', 'y'): heading_y,
                ('heading', '°'): heading,
                ('heading', 'd°'): d_heading
            }
            node_data = pd.DataFrame(data_dict, columns=data_columns_vehicle)
        else:
            data_dict = {
                ('position', 'x'): x,
                ('position', 'y'): y,
                ('velocity', 'x'): vx,
                ('velocity', 'y'): vy,
                ('acceleration', 'x'): ax,
                ('acceleration', 'y'): ay
            }
            node_data = pd.DataFrame(data_dict,
                                     columns=data_columns_pedestrian)

        node = Node(node_type=node_df.iloc[0]['type'],
                    node_id=0 if node_id == 'ego' else abs(hash(node_id)),
                    data=node_data,
                    frequency_multiplier=node_frequency_multiplier,
                    description=node_id)

        node.first_timestep = node_df['frame_id'].iloc[0]
        if half_dt:
            node.first_timestep *= 2

        if node_df.iloc[0]['robot']:
            node.is_robot = True
            scene.robot = node

        scene.nodes.append(node)

    if half_dt:
        # We are interpolating to make the overall dt half of what it was.
        scene.dt /= 2.0
        scene.timesteps *= 2

    return scene
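process_scene leans on a derivative_of helper that is not defined in this snippet. A minimal sketch consistent with its use (per-step time derivative, with angle unwrapping when radian=True) might look like the following; it is an assumption, not the original implementation:

import numpy as np

def derivative_of(x: np.ndarray, dt: float, radian: bool = False) -> np.ndarray:
    # Finite-difference time derivative; unwrap angles first so that
    # jumps across the -pi/pi boundary do not produce spikes.
    if radian:
        x = np.unwrap(x)
    if x.shape[0] < 2:
        return np.zeros_like(x)
    return np.gradient(x, dt)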
Exemplo n.º 29
0
def process_once(scene_name_list=[], data_save_dir=None, config={}):
    """get raw data from dataset, everything are lists

    :param scene_name_list: list of scene names to process
    :param data_save_dir: directory to save processed raw data
    :param config: additional configs
    :returns: one pandas dataframe for each scene

    """
    test_scene_name = config['scene_name']
    
    if config['num_workers'] == 1:
        nusc = config['other_configs']['nusc']
        helper = config['other_configs']['helper']
        rasterizer = config['other_configs']['rasterizer']
    else:
        nusc = ray.get(config['other_configs']['nusc'])
        helper = ray.get(config['other_configs']['helper'])
        rasterizer = ray.get(config['other_configs']['rasterizer'])

    dataroot = config['other_configs']['dataroot']
    
    nusc_can = NuScenesCanBus(dataroot=dataroot)
   
    df_dict = {
        #### scene info ####
        'scene_name': [],
        'scene_token':[],
        'scene_nbr_samples':[],
        'scene_description':[],
        'scene_log_token':[],
        'scene_map_token':[],
        'scene_vehicle_id': [],
        'scene_date_captured':[],
        'scene_location':[],
        #### sample info ####
        'sample_idx': [],
        'sample_token':[],
        'time_stamp': [],
        #### ego info ####
        'current_ego_pos': [],
        'current_ego_quat':[],
        'current_ego_accel':[],
        'current_ego_speed':[],
        'current_ego_steering': [],
        'current_ego_on_road_objects':[],
        'current_ego_raster_img':[],
        'current_ego_raster_img_path': [],
        #### instance info ####
        'annotation_token': [],
        'instance_token': [],
        'instance_category': [],
        'instance_attributes': [],
        'instance_nbr_annotations':[],
        'current_instance_pos': [],
        'current_instance_quat': [],
        'current_instance_speed': [],
        'current_instance_accel':[],
        'current_instance_on_road_objects':[],
        'current_instance_raster_img':[],
        'current_instance_raster_img_path': []
    }

    #### loop over scenes ####
    for scene_name in tqdm.tqdm(scene_name_list, 'processing scene'):
        if test_scene_name is not None:
            if not scene_name == test_scene_name:
                continue
        
        scene_df_dict = copy.deepcopy(df_dict)
        
        scene_token = nusc.field2token('scene', 'name', scene_name)[0]
        scene = nusc.get('scene', scene_token)
        scene_log = nusc.get('log', scene['log_token'])
        scene_map = nusc.get('map', scene_log['map_token'])
        nusc_map = NuScenesMap(dataroot=dataroot, map_name=scene_log['location'])

        raster_dir = os.path.join(data_save_dir, scene_name)
        if not os.path.isdir(raster_dir):
            os.makedirs(raster_dir)
        
        # can bus information
        if scene_name in NO_CANBUS_SCENES:
            ego_accel_traj = [None] * 50
            ego_rotation_rate_traj = [None] * 50
            ego_speed_traj = [None] * 50
        else:
            pose_msg = nusc_can.get_messages(scene_name, 'pose')
            # CAN bus pose messages arrive at 50 Hz while keyframes are at 2 Hz,
            # so every 25th message lines up approximately with a sample.
            ego_accel_traj = [pm['accel'] for pm in pose_msg][::25]
            ego_rotation_rate_traj = [pm['rotation_rate'] for pm in pose_msg][::25]
            ego_speed_traj = [pm['vel'] for pm in pose_msg][::25]
        
        #### loop over samples ####
        sample_token = scene['first_sample_token']
        sample = nusc.get('sample', sample_token)

        sample_idx = 0
        t = time.time()
        while sample['next'] != '':
            # print(sample_idx)
            # print(t-time.time())
            # t = time.time()
            cam_data = nusc.get('sample_data', sample['data']['CAM_FRONT'])
            ego_pose = nusc.get('ego_pose', cam_data['ego_pose_token'])

            # ego raster
            ego_raster = rasterizer.make_input_representation(instance_token=None, sample_token=sample_token, ego=True, ego_pose=ego_pose, include_history=False)
            ego_rel_path = str(sample_idx) + "_" + 'ego.png'
            current_ego_raster_img_path = os.path.join(raster_dir, ego_rel_path)
            ego_raster_png = im.fromarray(ego_raster)
            ego_raster_png.save(current_ego_raster_img_path)

            # ego road objects
            ego_on_road_objects = nusc_map.layers_on_point(ego_pose['translation'][0], ego_pose['translation'][1])
            
            #### loop over annotations ####
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                instance = nusc.get('instance', ann['instance_token'])

                if filter_instance(ann, nusc):
                    #### populate data ####
                    ## scene ##
                    scene_df_dict['scene_name'].append(scene_name)
                    scene_df_dict['scene_token'].append(scene_token)
                    scene_df_dict['scene_nbr_samples'].append(scene['nbr_samples'])
                    scene_df_dict['scene_description'].append(scene['description'])
                    scene_df_dict['scene_log_token'].append(scene_log['token'])
                    scene_df_dict['scene_map_token'].append(scene_map['token'])
                    scene_df_dict['scene_vehicle_id'].append(scene_log['vehicle'])
                    scene_df_dict['scene_date_captured'].append(scene_log['date_captured'])
                    scene_df_dict['scene_location'].append(scene_log['location'])
                    ## sample info ##
                    scene_df_dict['sample_idx'].append(sample_idx)
                    scene_df_dict['sample_token'].append(sample_token)
                    scene_df_dict['time_stamp'].append(sample['timestamp'])
                    ## ego info ##
                    scene_df_dict['current_ego_pos'].append(ego_pose['translation'])
                    scene_df_dict['current_ego_quat'].append(ego_pose['rotation'])
                    idx = min(sample_idx, len(ego_speed_traj)-1)
                    scene_df_dict['current_ego_speed'].append(ego_speed_traj[idx])
                    idx = min(sample_idx, len(ego_accel_traj)-1)
                    scene_df_dict['current_ego_accel'].append(ego_accel_traj[idx])
                    idx = min(sample_idx, len(ego_rotation_rate_traj)-1)
                    scene_df_dict['current_ego_steering'].append(ego_rotation_rate_traj[idx])
                    scene_df_dict['current_ego_on_road_objects'].append(ego_on_road_objects)
                    scene_df_dict['current_ego_raster_img'].append(ego_raster)
                    scene_df_dict['current_ego_raster_img_path'].append(scene_name+"/"+ego_rel_path)
                    ## instance info ##
                    scene_df_dict['annotation_token'].append(ann_token)
                    scene_df_dict['instance_token'].append(ann['instance_token'])
                    scene_df_dict['instance_category'].append(ann['category_name'])
                    instance_attributes = [nusc.get('attribute', attribute_token)['name'] for attribute_token in ann['attribute_tokens']]
                    scene_df_dict['instance_attributes'].append(instance_attributes)
                    scene_df_dict['instance_nbr_annotations'].append(instance['nbr_annotations'])
                    scene_df_dict['current_instance_pos'].append(ann['translation'])
                    scene_df_dict['current_instance_quat'].append(ann['rotation'])
                    instance_speed = helper.get_velocity_for_agent(ann['instance_token'], sample_token)
                    scene_df_dict['current_instance_speed'].append(instance_speed)
                    instance_accel = helper.get_acceleration_for_agent(ann['instance_token'], sample_token)
                    scene_df_dict['current_instance_accel'].append(instance_accel)
                    instance_on_road_objects = nusc_map.layers_on_point(ann['translation'][0], ann['translation'][1])
                    scene_df_dict['current_instance_on_road_objects'].append(instance_on_road_objects)
                    instance_raster = rasterizer.make_input_representation(instance_token=ann['instance_token'], sample_token=sample_token, ego=False, include_history=False)
                    scene_df_dict['current_instance_raster_img'].append(instance_raster)
                    instance_rel_path = str(sample_idx) + "_"+ann['instance_token']+'.png'
                    instance_raster_img_path = os.path.join(raster_dir, instance_rel_path)
                    instance_raster_png = im.fromarray(instance_raster)
                    instance_raster_png.save(instance_raster_img_path)
                    scene_df_dict['current_instance_raster_img_path'].append(scene_name+"/"+instance_rel_path)
                    
            sample_token = sample['next']
            sample = nusc.get('sample', sample_token)
            sample_idx += 1

        scene_df = pd.DataFrame(scene_df_dict)
        scene_df.to_pickle(data_save_dir+"/"+scene_name+".pkl")
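A hypothetical single-worker driver for process_once; the config keys mirror the lookups above. The stub rasterizer only satisfies the make_input_representation interface this snippet expects and returns a blank image:

import numpy as np
from nuscenes import NuScenes
from nuscenes.prediction import PredictHelper

class StubRasterizer:
    # Hypothetical stand-in for the real rasterizer used by this pipeline.
    def make_input_representation(self, instance_token, sample_token, ego=False,
                                  ego_pose=None, include_history=False):
        return np.zeros((500, 500, 3), dtype=np.uint8)

dataroot = '/data/sets/nuscenes'  # assumption: standard devkit layout
nusc = NuScenes(version='v1.0-mini', dataroot=dataroot)
config = {
    'scene_name': None,  # or a specific scene name to process only that one
    'num_workers': 1,
    'other_configs': {
        'nusc': nusc,
        'helper': PredictHelper(nusc),
        'rasterizer': StubRasterizer(),
        'dataroot': dataroot,
    },
}
process_once(['scene-0061'], data_save_dir='./processed', config=config)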
Exemplo n.º 30
0
    def plot_agent_scene(
            self,
            ego_centric: bool = False,
            ego_traj=None,
            instance_token: str = None,
            sample_token: str = None,
            map_layers=None,
            sensor_info=None,
            sensing_patch=None,
            predictions=None,
            agent_state_dict=None,
            plot_list=None,
            legend=False,
            text_box=False,
            show_axis=True,
            render_ego_pose_range=False,
            paper_ready=False,
            read_from_cached=False,
            plot_agent_trajs=True,
            animated_agent=False,
            bfig=None,  # for birdseye image
            bax=None,
            sfig=None,  # for camera
            sax=None,
            render_additional=None,
            plot_human_ego=True,
            patch_margin=30,
            sim_ego_pose=None,
            save_image_ax=False):

        if paper_ready:
            legend = False
            text_box = False
            show_axis = False
            render_ego_pose_range = False
            plot_agent_trajs = False
            animated_agent = True
        #show_axis = True
        if map_layers is None:
            map_layers = self.map_layers

        if plot_list is None:
            plot_list = self.plot_list

        sample = self.nusc.get('sample', sample_token)
        scene = self.nusc.get('scene', sample['scene_token'])
        scene_log = self.nusc.get('log', scene['log_token'])
        nusc_map = NuScenesMap(dataroot=self.dataroot,
                               map_name=scene_log['location'])

        min_diff_patch = 30

        if not ego_centric:
            agent_future = self.helper.get_future_for_agent(
                instance_token,
                sample_token,
                self.na_config['pred_horizon'],
                in_agent_frame=False,
                just_xy=True)

            agent_past = self.helper.get_past_for_agent(
                instance_token,
                sample_token,
                self.na_config['obs_horizon'],
                in_agent_frame=False,
                just_xy=True)

            #### set plot patch ####
            if agent_future.shape[0] > 0:
                p = agent_future[0]
            else:
                p = agent_past[-1]

            my_patch = (
                p[0] - patch_margin,
                p[1] - patch_margin,
                p[0] + patch_margin,
                p[1] + patch_margin,
            )

            # min_patch = np.floor(agent_future.min(axis=0) - patch_margin)
            # max_patch = np.ceil(agent_future.max(axis=0) + patch_margin)
            # diff_patch = max_patch - min_patch

            # if any(diff_patch < min_diff_patch):
            #     center_patch = (min_patch + max_patch) / 2
            #     diff_patch = np.maximum(diff_patch, min_diff_patch)
            #     min_patch = center_patch - diff_patch / 2
            #     max_patch = center_patch + diff_patch / 2
            # my_patch = (min_patch[0], min_patch[1], max_patch[0], max_patch[1])

        else:
            sample_data = self.nusc.get('sample_data',
                                        sample['data']['CAM_FRONT'])
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            my_patch = (
                ego_pose['translation'][0] - patch_margin,
                ego_pose['translation'][1] - patch_margin,
                ego_pose['translation'][0] + patch_margin,
                ego_pose['translation'][1] + patch_margin,
            )

            if sim_ego_pose is not None:
                my_patch = (
                    sim_ego_pose['translation'][0] - patch_margin,
                    sim_ego_pose['translation'][1] - patch_margin,
                    sim_ego_pose['translation'][0] + patch_margin,
                    sim_ego_pose['translation'][1] + patch_margin,
                )

        #### read from saved path if present ####
        read_img = False
        if read_from_cached:
            scene_path = os.path.join(
                scene_img_dir,
                scene['name'] + "-token-" + sample['scene_token'])
            p_scene = Path(scene_img_dir)
            saved_scene_list = [
                str(f) for f in p_scene.iterdir() if f.is_dir()
            ]

            if scene_path in saved_scene_list:
                p_sample = Path(scene_path)
                for f in p_sample.iterdir():
                    if sample_token in str(f):
                        ax = cloudpickle.load(
                            open(os.path.join(scene_path, str(f)), 'rb'))
                        fig = plt.figure(figsize=(10, 10))
                        # NOTE: _axstack is private Matplotlib API; this call is
                        # known to break on newer Matplotlib releases.
                        fig._axstack.add('ax', ax)
                        read_img = True
        if not read_img:
            fig, ax = nusc_map.render_map_patch(
                my_patch,
                map_layers,
                figsize=(10, 10),
                render_egoposes_range=render_ego_pose_range,
                render_legend=legend,
                fig=bfig,
                axes=bax)

            if not ego_centric:
                ax.set_title(scene['name'] + " instance_token: " +
                             instance_token + ", sample_token: " +
                             sample_token + "\ndescription: " +
                             scene['description'])
            else:
                ax.set_title(scene['name'] + ", sample_token: " +
                             sample_token + "\ndescription: " +
                             scene['description'])

        #### label map ####
        if 'labeled_map' in plot_list:
            records_within_patch = nusc_map.get_records_in_patch(
                my_patch, nusc_map.non_geometric_layers, mode='within')
            self.label_map(ax,
                           nusc_map,
                           records_within_patch['stop_line'],
                           text_box=text_box)

            #### Plot ego ####
            if 'ego' in plot_list:
                ego_pos, ego_quat = self.plot_ego(
                    ax,
                    sample,
                    ego_traj=ego_traj,
                    animated_agent=animated_agent,
                    plot_ego=plot_human_ego)

            #### Plot other agents ####
            if 'pedestrian' in plot_list or 'other_cars' in plot_list:
                road_agents_in_patch = self.plot_road_agents(
                    ax,
                    instance_token,
                    sample,
                    plot_list,
                    text_box,
                    sensor_info,
                    my_patch,
                    plot_traj=plot_agent_trajs,
                    animated_agent=animated_agent)

        ##################
        # Car to predict #
        ##################
        if not ego_centric:
            agent_pos, agent_quat = self.plot_car_to_predict(
                ax, agent_future, agent_past, instance_token, sample_token,
                text_box, predictions, agent_state_dict)

        #### plot all_info ####
        if 'map_info' in self.plot_list:
            if not ego_centric:
                self.plot_map_info(ax, agent_pos, nusc_map, text_box=text_box)
            else:
                self.plot_map_info(ax, ego_pos, nusc_map, text_box=text_box)

        #### plot sensor info ###
        if sensor_info is not None and 'sensor_info' in plot_list:
            self.plot_sensor_info(ax,
                                  sensor_info=sensor_info,
                                  text_box=text_box)

        #### render map layers on camera images ####
        if 'cam' in plot_list:
            #sfig, sax = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True, figsize=(9,16))

            layer_names = [
                'road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line',
                'carpark_area'
            ]
            # Deliberately cleared: render the camera image without map overlays.
            layer_names = []
            #cam_names = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
            #             'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT']
            cam_names = ['CAM_FRONT']
            k = 0
            cam_fig, cam_ax = None, None  # ensure these exist when sfig was passed in
            if sfig is None:
                if len(cam_names) == 6:
                    sfig, sax = plt.subplots(nrows=2, ncols=3)
                    for i in range(2):
                        for j in range(3):
                            sax[i, j].xaxis.set_visible(False)
                            sax[i, j].yaxis.set_visible(False)
                            cam_fig, cam_ax = nusc_map.render_map_in_image(
                                self.nusc,
                                sample_token,
                                layer_names=layer_names,
                                camera_channel=cam_names[k],
                                ax=sax[i, j])
                            k += 1
                elif len(cam_names) == 1:
                    cam_fig, cam_ax = plt.subplots()
                    cam_ax.xaxis.set_visible(False)
                    cam_ax.yaxis.set_visible(False)
                    nusc_map.render_map_in_image(self.nusc,
                                                 sample_token,
                                                 layer_names=layer_names,
                                                 camera_channel=cam_names[k],
                                                 ax=cam_ax)
                else:
                    raise ValueError(f'Unsupported number of cameras: {len(cam_names)}')

            if sfig is not None:
                sfig.tight_layout(pad=0)
                sfig.set_figheight(7)
                sfig.set_figwidth(15)

            # for car_info in road_agents_in_patch['vehicles']:
            #     instance_token = car_info['instance_token']
            #     # render annotations inside patch
            #     ann = self.helper.get_sample_annotation(instance_token, sample_token)
            #     ann_fig, ann_ax = self.nusc.render_annotation(ann['token'])
            #     if sensing_patch is not None and self.in_shapely_polygon(car_info['translation'], sensing_patch):
            #         ann_ax.set_title("Sensed")
        else:
            sfig, sax = None, None
            cam_fig, cam_ax = None, None

        #### render additional outside information ####
        if render_additional is not None:
            self.render_additional(ax, render_additional)

        if not show_axis:
            plt.axis('off')
            plt.grid(False)
            ax.grid(False)
        ax.set_aspect('equal')

        other = {
            'cam_fig': cam_fig,
            'cam_ax': cam_ax,
            'sfig': sfig,
            'sax': sax
        }
        return fig, ax, other
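A hypothetical call, assuming the surrounding class has been instantiated as viz with its nusc, helper, and dataroot attributes set:

import matplotlib.pyplot as plt

sample_token = viz.nusc.sample[0]['token']  # any keyframe sample token
fig, ax, other = viz.plot_agent_scene(
    ego_centric=True,
    sample_token=sample_token,
    plot_list=['ego', 'other_cars', 'pedestrian'],
    patch_margin=40,
)
fig.savefig('scene.png', dpi=150)
plt.close(fig)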