Example #1
    def __init__(self, root='/datasets/nuscene/v1.0-mini', sampling_time=3, agent_time=0, layer_names=None,
                 colors=None, resolution: float = 0.1,  # meters / pixel
                 meters_ahead: float = 25, meters_behind: float = 25,
                 meters_left: float = 25, meters_right: float = 25, version='v1.0-mini'):
        if layer_names is None:
            layer_names = ['drivable_area', 'road_segment', 'road_block',
                           'lane', 'ped_crossing', 'walkway', 'stop_line',
                           'carpark_area', 'road_divider', 'lane_divider']
        if colors is None:
            colors = [(255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), ]
        self.root = root
        self.nus = NuScenes(version, dataroot=self.root)
        self.scenes = self.nus.scene
        self.samples = self.nus.sample

        self.layer_names = layer_names
        self.colors = colors

        self.helper = PredictHelper(self.nus)

        self.seconds = sampling_time
        self.agent_seconds = agent_time

        self.static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors,
                                                  resolution=resolution, meters_ahead=meters_ahead,
                                                  meters_behind=meters_behind,
                                                  meters_left=meters_left, meters_right=meters_right)
        self.agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.agent_seconds,
                                                      resolution=resolution, meters_ahead=meters_ahead,
                                                      meters_behind=meters_behind,
                                                      meters_left=meters_left, meters_right=meters_right)
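
These snippets build on the nuScenes devkit's prediction rasterizers. A minimal import preamble they assume (module paths as in nuscenes-devkit); note that the generate_mask-family methods called on the rasterizers below (generate_mask, generate_mask_with_path, generate_virtual_mask) are project-local extensions, not stock devkit API (the stock classes expose make_representation):

import os
import pickle

import cv2
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import Dataset
from tqdm import tqdm

from nuscenes.nuscenes import NuScenes
from nuscenes.map_expansion.map_api import NuScenesMap
from nuscenes.prediction.helper import PredictHelper, convert_global_coords_to_local
from nuscenes.prediction.input_representation.static_layers import StaticLayerRasterizer
from nuscenes.prediction.input_representation.agents import AgentBoxesWithFadedHistory
from nuscenes.prediction.input_representation.combinators import Rasterizer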
Example #2
    def render_map(self, idx, combined=True):
        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data',
                                         sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose',
                                sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper,
                                             layer_names=self.layer_names,
                                             colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=self.seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(
            ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation,
                                               sample_token)

        if combined:
            plt.subplot(1, 2, 1)
            plt.title("combined map")
            plt.imshow(map_img)
            plt.subplot(1, 2, 2)
            plt.title("agent")
            plt.imshow(agent_mask)
            plt.show()
        else:
            num_labels = len(self.layer_names)
            num_rows = (num_labels + 2) // 3  # ceil division so every layer gets a cell
            fig, ax = plt.subplots(num_rows, 3, figsize=(10, 3 * num_rows))
            for row in range(num_rows):
                for col in range(3):
                    num = 3 * row + col
                    if num >= num_labels:
                        ax[row][col].axis('off')  # hide unused cells
                        continue
                    ax[row][col].set_title(self.layer_names[num])
                    ax[row][col].imshow(map_masks[num])
            plt.show()
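
A hedged usage sketch for render_map (the method appears on the NusLoaderQ10 dataset shown in Example #5):

loader = NusLoaderQ10(root='/datasets/nuscene/v1.0-mini')
loader.render_map(0, combined=True)   # combined map next to the agent mask
loader.render_map(0, combined=False)  # one subplot per static layer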
Example #3
    def __init__(
            self,
            root='/datasets/nuscene/v1.0-mini',
            sampling_time=3,
            agent_time=0,
            layer_names=None,
            colors=None,
            resolution: float = 0.1,  # meters / pixel
            meters_ahead: float = 32,
            meters_behind: float = 32,
            meters_left: float = 32,
            meters_right: float = 32,
            version='v1.0-mini'):
        self.layer_names = [
            'drivable_area', 'road_segment', 'road_block', 'lane',
            'ped_crossing', 'walkway', 'stop_line', 'carpark_area',
            'road_divider', 'lane_divider'
        ] if layer_names is None else layer_names
        self.colors = [
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
        ] if colors is None else colors
        self.root = root
        self.nus = NuScenes(version, dataroot=self.root)
        self.helper = PredictHelper(self.nus)

        self.sampling_time = sampling_time
        self.agent_seconds = agent_time
        self.scene_size = (64, 64)
        self.past_len = 4
        self.future_len = 6

        self.static_layer = StaticLayerRasterizer(self.helper,
                                                  layer_names=self.layer_names,
                                                  colors=self.colors,
                                                  resolution=resolution,
                                                  meters_ahead=meters_ahead,
                                                  meters_behind=meters_behind,
                                                  meters_left=meters_left,
                                                  meters_right=meters_right)
        self.agent_layer = AgentBoxesWithFadedHistory(
            self.helper,
            seconds_of_history=self.agent_seconds,
            resolution=resolution,
            meters_ahead=meters_ahead,
            meters_behind=meters_behind,
            meters_left=meters_left,
            meters_right=meters_right)
        self.combinator = Rasterizer()
        self.show_agent = True
        self.resized_ratio = 0.5  # resize ratio of image for visualizing

        self.drivable_area_idx = self.layer_names.index('drivable_area')
        self.road_divider_idx = self.layer_names.index('road_divider')
        self.lane_divider_idx = self.layer_names.index('lane_divider')
Example #4
class NusTrajectoryExtractor:
    def __init__(
            self,
            root='/datasets/nuscene/v1.0-mini',
            sampling_time=3,
            agent_time=0,
            layer_names=None,
            colors=None,
            resolution: float = 0.1,  # meters / pixel
            meters_ahead: float = 32,
            meters_behind: float = 32,
            meters_left: float = 32,
            meters_right: float = 32,
            version='v1.0-mini'):
        self.layer_names = [
            'drivable_area', 'road_segment', 'road_block', 'lane',
            'ped_crossing', 'walkway', 'stop_line', 'carpark_area',
            'road_divider', 'lane_divider'
        ] if layer_names is None else layer_names
        self.colors = [
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
            (255, 255, 255),
        ] if colors is None else colors
        self.root = root
        self.nus = NuScenes(version, dataroot=self.root)
        self.helper = PredictHelper(self.nus)

        self.sampling_time = sampling_time
        self.agent_seconds = agent_time
        self.scene_size = (64, 64)
        self.past_len = 4
        self.future_len = 6

        self.static_layer = StaticLayerRasterizer(self.helper,
                                                  layer_names=self.layer_names,
                                                  colors=self.colors,
                                                  resolution=resolution,
                                                  meters_ahead=meters_ahead,
                                                  meters_behind=meters_behind,
                                                  meters_left=meters_left,
                                                  meters_right=meters_right)
        self.agent_layer = AgentBoxesWithFadedHistory(
            self.helper,
            seconds_of_history=self.agent_seconds,
            resolution=resolution,
            meters_ahead=meters_ahead,
            meters_behind=meters_behind,
            meters_left=meters_left,
            meters_right=meters_right)
        self.combinator = Rasterizer()
        self.show_agent = True
        self.resized_ratio = 0.5  # resize ratio of image for visualizing

        self.drivable_area_idx = self.layer_names.index('drivable_area')
        self.road_divider_idx = self.layer_names.index('road_divider')
        self.lane_divider_idx = self.layer_names.index('lane_divider')

    def __len__(self):
        return len(self.nus.sample)

    def get_annotation(self, instance_token, sample_token):
        annotation = self.helper.get_sample_annotation(
            instance_token, sample_token)  # agent annotation
        # agent pose (used as the origin of the agent-centric raster)
        ego_pose_xy = annotation['translation']
        ego_pose_rotation = annotation['rotation']

        # agent attributes
        agent_attributes = {
            'category_name': annotation['category_name'],
            'translation': annotation['translation'],
            'size': annotation['size'],
            'rotation': annotation['rotation'],
            # visibility bins: 1 = 0-40%, 2 = 40-60%, 3 = 60-80%, 4 = 80-100%
            'visibility': int(annotation['visibility_token'])
        }
        # agent trajectory annotation
        agent_mask, total_agents_annotation, agent_annotation = \
            self.agent_layer.generate_mask_with_path(
                instance_token, sample_token, seconds=self.sampling_time, vehicle_only=True)
        '''
        total_agents_annotation keys:
            'instance_tokens', 'category_names',
            'translations', 'pasts', 'futures', 'velocities', 'accelerations', 'yaw_rates'
        '''
        agent_trajectories = {
            'agent_mask': agent_mask,
            'total_agents_annotation': total_agents_annotation,
            'agent_annotation': agent_annotation
        }

        # map attributes
        map_masks, lanes, map_img, map_img_with_lanes = \
            self.static_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)
        map_attributes = {
            'map_masks': map_masks,
            'lanes': lanes,
            'map_img': map_img,
            'map_img_with_lanes': map_img_with_lanes,
            'map_img_with_agents':
            self.combinator.combine([map_img, agent_mask])
        }

        scene_data = {
            'instance_token': instance_token,
            'sample_token': sample_token,
            'annotation': annotation,
            'ego_pose_xy': ego_pose_xy,
            'ego_pose_rotation': ego_pose_rotation,
            'agent_attributes': agent_attributes,
            'agent_trajectories': agent_trajectories,
            'map_attributes': map_attributes,
        }

        return scene_data

    def get_cmu_annotation(self, instance_tokens, sample_token):
        scene_data = self.get_annotation(instance_tokens[0], sample_token)
        ego_pose_xy = scene_data['ego_pose_xy']
        ego_pose_rotation = scene_data['ego_pose_rotation']

        agents_annotation = scene_data['agent_trajectories'][
            'total_agents_annotation']

        agents_past = [
            convert_global_coords_to_local(path_global_i, ego_pose_xy,
                                           ego_pose_rotation).tolist()
            if len(path_global_i) != 0 else []
            for path_global_i in agents_annotation['pasts']
        ]
        agents_future = [
            convert_global_coords_to_local(path_global_i, ego_pose_xy,
                                           ego_pose_rotation).tolist()
            if len(path_global_i) != 0 else []
            for path_global_i in agents_annotation['futures']
        ]
        agents_translation = \
            convert_global_coords_to_local(agents_annotation['translations'], ego_pose_xy, ego_pose_rotation).tolist() \
                if len(agents_annotation['translations']) != 0 else []

        drivable_area = scene_data['map_attributes']['map_masks'][
            self.drivable_area_idx]
        road_divider = scene_data['map_attributes']['map_masks'][
            self.road_divider_idx]
        lane_divider = scene_data['map_attributes']['map_masks'][
            self.lane_divider_idx]

        drivable_area = cv2.cvtColor(
            cv2.resize(drivable_area, self.scene_size), cv2.COLOR_BGR2GRAY)
        road_divider = cv2.cvtColor(cv2.resize(road_divider, self.scene_size),
                                    cv2.COLOR_BGR2GRAY)
        lane_divider = cv2.cvtColor(cv2.resize(lane_divider, self.scene_size),
                                    cv2.COLOR_BGR2GRAY)

        # distance_map, prior = self.generateDistanceMaskFromColorMap(
        #     drivable_area, image_size=self.scene_size, prior_size=self.scene_size)
        map_image_show = cv2.resize(
            scene_data['map_attributes']['map_img_with_agents'],
            dsize=(0, 0),
            fx=self.resized_ratio,
            fy=self.resized_ratio,
            interpolation=cv2.INTER_LINEAR)

        num_agents = len(agents_past)
        agents_mask = [
            tk in instance_tokens
            for tk in agents_annotation['instance_tokens']
        ]  # agents to decode
        future_agent_masks = []
        past_agents_traj = []
        future_agents_traj = []
        past_agents_traj_len = []
        future_agents_traj_len = []
        decode_start_vel = []
        decode_start_pos = []

        for idx in range(num_agents):
            past = agents_past[idx][-self.past_len +
                                    1:] + [agents_translation[idx]]
            future = agents_future[idx][:self.future_len]

            if len(past) != self.past_len:
                continue
            elif (abs(past[-1][0]) > self.scene_size[1] // 2
                  or abs(past[-1][1]) > self.scene_size[0] // 2):
                continue

            if len(future) != self.future_len:
                agents_mask[idx] = False

            future_agent_masks.append(agents_mask[idx])
            past_agents_traj.append(past)
            past_agents_traj_len.append(len(past))
            future_agents_traj.append(future)
            future_agents_traj_len.append(len(future))
            decode_start_pos.append(past[-1])
            decode_start_vel.append([
                (past[-1][0] - past[-2][0]) / self.sampling_time,
                (past[-1][1] - past[-2][1]) / self.sampling_time
            ])

        episode = [
            past_agents_traj, past_agents_traj_len, future_agents_traj,
            future_agents_traj_len, future_agent_masks, decode_start_vel,
            decode_start_pos
        ]
        episode_img = [drivable_area, road_divider, lane_divider]

        return {
            'episode': episode,
            'episode_img': episode_img,
            'img_show': map_image_show
        }

    def save_cmu_dataset(self, save_dir, partition='all'):
        from nuscenes.eval.prediction.splits import get_prediction_challenge_split

        split_types = ['mini_train', 'mini_val', 'train', 'train_val', 'val']
        if partition == 'mini':
            split_types = ['mini_train', 'mini_val']

        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)

        for split in tqdm(split_types, desc='split dataset'):
            partition_tokens = get_prediction_challenge_split(
                split, dataroot=self.root)
            tokens_dict = {}
            for token in partition_tokens:
                instance_token, sample_token = token.split('_')
                try:
                    tokens_dict[sample_token].append(instance_token)
                except KeyError:
                    tokens_dict[sample_token] = [instance_token]

            with open('{}/{}.tokens'.format(save_dir, split), 'wb') as f:
                pickle.dump(tokens_dict, f, pickle.HIGHEST_PROTOCOL)

            for sample_tk, instance_tks in tqdm(tokens_dict.items(),
                                                desc=split,
                                                total=len(tokens_dict)):
                sample_dir = os.path.join(save_dir, sample_tk)
                if not os.path.isdir(sample_dir):  # skip samples that were already extracted
                    os.mkdir(sample_dir)
                    scene_data = self.get_cmu_annotation(
                        instance_tks, sample_tk)
                    with open('{}/map.bin'.format(sample_dir), 'wb') as f:
                        pickle.dump(scene_data['episode_img'], f,
                                    pickle.HIGHEST_PROTOCOL)
                    with open('{}/viz.bin'.format(sample_dir), 'wb') as f:
                        pickle.dump(scene_data['img_show'], f,
                                    pickle.HIGHEST_PROTOCOL)
                    with open('{}/episode.bin'.format(sample_dir), 'wb') as f:
                        pickle.dump(scene_data['episode'], f,
                                    pickle.HIGHEST_PROTOCOL)
                    with open('{}/instance_tks.bin'.format(sample_dir),
                              'wb') as f:
                        pickle.dump(instance_tks, f, pickle.HIGHEST_PROTOCOL)

    def render_sample(self, sample_token):
        self.nus.render_sample(sample_token)

    def render_scene(self, sample_token):
        sample = self.nus.get('sample', sample_token)
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)
        layer_names = [
            'road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line',
            'carpark_area'
        ]
        camera_channel = 'CAM_FRONT'
        nus_map.render_map_in_image(self.nus,
                                    sample_token,
                                    layer_names=layer_names,
                                    camera_channel=camera_channel)
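
A hedged usage sketch for the extractor above (the save directory is an arbitrary example path):

extractor = NusTrajectoryExtractor(root='/datasets/nuscene/v1.0-mini')
extractor.save_cmu_dataset('./cmu_dataset', partition='mini')
# writes <save_dir>/<split>.tokens plus per-sample map.bin / viz.bin / episode.bin / instance_tks.bin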
Example #5
class NusLoaderQ10(Dataset):
    def __init__(self, root='/datasets/nuscene/v1.0-mini', sampling_time=3, agent_time=0, layer_names=None,
                 colors=None, resolution: float = 0.1,  # meters / pixel
                 meters_ahead: float = 25, meters_behind: float = 25,
                 meters_left: float = 25, meters_right: float = 25, version='v1.0-mini'):
        if layer_names is None:
            layer_names = ['drivable_area', 'road_segment', 'road_block',
                           'lane', 'ped_crossing', 'walkway', 'stop_line',
                           'carpark_area', 'road_divider', 'lane_divider']
        if colors is None:
            colors = [(255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255),
                      (255, 255, 255), (255, 255, 255), (255, 255, 255), ]
        self.root = root
        self.nus = NuScenes(version, dataroot=self.root)
        self.scenes = self.nus.scene
        self.samples = self.nus.sample

        self.layer_names = layer_names
        self.colors = colors

        self.helper = PredictHelper(self.nus)

        self.seconds = sampling_time
        self.agent_seconds = agent_time

        self.static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors,
                                                  resolution=resolution, meters_ahead=meters_ahead,
                                                  meters_behind=meters_behind,
                                                  meters_left=meters_left, meters_right=meters_right)
        self.agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.agent_seconds,
                                                      resolution=resolution, meters_ahead=meters_ahead,
                                                      meters_behind=meters_behind,
                                                      meters_left=meters_left, meters_right=meters_right)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        scene_id = idx
        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data', sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose', sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']

        # 2. Generate map mask
        map_masks, lanes, map_img = self.static_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)

        # 3. Generate Agent Trajectory
        agent_mask, xy_global = self.agent_layer.generate_mask(
            ego_pose_xy, ego_pose_rotation, sample_token, self.seconds, show_agent=False)

        xy_local = []

        # past, future trajectory
        for path_global in xy_global[:2]:
            pose_xy = []
            for path_global_i in path_global:
                if len(path_global_i) == 0:
                    pose_xy.append(path_global_i)
                else:
                    pose_xy.append(convert_global_coords_to_local(path_global_i, ego_pose_xy, ego_pose_rotation))
            xy_local.append(pose_xy)
        # current pose
        if len(xy_global[2]) == 0:
            xy_local.append(xy_global[2])
        else:
            xy_local.append(convert_global_coords_to_local(xy_global[2], ego_pose_xy, ego_pose_rotation))

        # 4. Generate Virtual Agent Trajectory
        lane_tokens = list(lanes.keys())
        lanes_disc = [np.array(lanes[token])[:, :2] for token in lane_tokens]

        virtual_mask, virtual_xy = self.agent_layer.generate_virtual_mask(
            ego_pose_xy, ego_pose_rotation, lanes_disc, sample_token, show_agent=False,
            past_trj_len=4, future_trj_len=6, min_dist=6)

        virtual_xy_local = []

        # past, future trajectory
        for path_global in virtual_xy[:2]:
            pose_xy = []
            for path_global_i in path_global:
                if len(path_global_i) == 0:
                    pose_xy.append(path_global_i)
                else:
                    pose_xy.append(convert_global_coords_to_local(path_global_i, ego_pose_xy, ego_pose_rotation))
            virtual_xy_local.append(pose_xy)
        # current pose
        if len(virtual_xy[2]) == 0:
            virtual_xy_local.append(virtual_xy[2])
        else:
            virtual_xy_local.append(convert_global_coords_to_local(virtual_xy[2], ego_pose_xy, ego_pose_rotation))

        return map_masks, map_img, agent_mask, xy_local, virtual_mask, virtual_xy_local, scene_id

    def render_sample(self, idx):
        sample = self.samples[idx]
        sample_token = sample['token']
        self.nus.render_sample(sample_token)

    def render_scene(self, idx):
        sample = self.samples[idx]
        sample_token = sample['token']
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)
        layer_names = ['road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line', 'carpark_area']
        camera_channel = 'CAM_FRONT'
        nus_map.render_map_in_image(self.nus, sample_token, layer_names=layer_names, camera_channel=camera_channel)

    def render_map(self, idx, combined=True):
        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data', sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose', sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)

        if combined:
            plt.subplot(1, 2, 1)
            plt.title("combined map")
            plt.imshow(map_img)
            plt.subplot(1, 2, 2)
            plt.title("agent")
            plt.imshow(agent_mask)
            plt.show()
        else:
            num_labels = len(self.layer_names)
            num_rows = (num_labels + 2) // 3  # ceil division so every layer gets a cell
            fig, ax = plt.subplots(num_rows, 3, figsize=(10, 3 * num_rows))
            for row in range(num_rows):
                for col in range(3):
                    num = 3 * row + col
                    if num >= num_labels:
                        ax[row][col].axis('off')  # hide unused cells
                        continue
                    ax[row][col].set_title(self.layer_names[num])
                    ax[row][col].imshow(map_masks[num])
            plt.show()
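
Since __getitem__ returns ragged lists and differently-shaped masks, PyTorch's default collate cannot batch these samples; a minimal sketch that disables automatic batching:

from torch.utils.data import DataLoader

dataset = NusLoaderQ10(root='/datasets/nuscene/v1.0-mini')
loader = DataLoader(dataset, batch_size=None, shuffle=True)  # batch_size=None: yield samples as-is
map_masks, map_img, agent_mask, xy_local, virtual_mask, virtual_xy_local, scene_id = next(iter(loader))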
Example #6
    def __getitem__(self, idx):
        # 1. past_agents_traj : (Num obv agents in batch_i X 20 X 2)
        # 2. past_agents_traj_len : (Num obv agents in batch_i, )
        # 3. future_agents_traj : (Num pred agents in batch_i X 20 X 2)
        # 4. future_agents_traj_len : (Num pred agents in batch_i, )
        # 5. future_agent_masks : (Num obv agents in batch_i)
        # 6. decode_rel_pos: (Num pred agents in batch_i X 2)
        # 7. decode_start_pos: (Num pred agents in batch_i X 2)
        # 8. map_image : (3 X 224 X 224)
        # 9. scene ID: (string)

        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data', sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose', sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        # timestamp
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)

        # 3. Generate Agent Trajectory
        annotation_tokens = sample['anns']
        num_agent = len(annotation_tokens)
        agents = []
        for ans_token in annotation_tokens:
            agent = self.nus.get('sample_annotation', ans_token)
            instance_token = agent['instance_token']

            # agent trajectory (global frame)
            xy_global = agent['translation']
            past_xy_global = self.helper.get_past_for_agent(
                instance_token, sample_token, seconds=self.seconds, in_agent_frame=False)
            future_xy_global = self.helper.get_future_for_agent(
                instance_token, sample_token, seconds=self.seconds, in_agent_frame=False)

            # trajectory in the local (ego) frame; translation is 3D, so keep only x, y
            xy_local = convert_global_coords_to_local(np.array([xy_global[:2]]), ego_pose_xy, ego_pose_rotation)
            past_xy_local = convert_global_coords_to_local(past_xy_global, ego_pose_xy, ego_pose_rotation)
            future_xy_local = convert_global_coords_to_local(future_xy_global, ego_pose_xy, ego_pose_rotation)

            # agent dynamics
            rot = agent['rotation']
            vel = self.helper.get_velocity_for_agent(instance_token, sample_token)
            accel = self.helper.get_acceleration_for_agent(instance_token, sample_token)
            yaw_rate = self.helper.get_heading_change_rate_for_agent(instance_token, sample_token)

            agent_states = {'present_pos': xy_global, 'past_pos': past_xy_global, 'future_pos': future_xy_global,
                            'rot': rot, 'vel': vel, 'accel': accel, 'yaw_rate': yaw_rate,
                            'present_local_xy': xy_local, 'past_local_xy': past_xy_local,
                            'future_local_xy': future_xy_local}

            agents.append(agent_states)

        return map_masks, agents
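
A small consumption sketch (assuming this __getitem__ belongs to the same Dataset class as Example #5):

map_masks, agents = dataset[0]
for state in agents[:3]:
    print(state['vel'], state['yaw_rate'], np.asarray(state['future_local_xy']).shape)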
Example #7
class NusTrajectoryExtractor:
    def __init__(self, root='/datasets/nuscene/v1.0-mini', sampling_time=3, agent_time=0, layer_names=None,
                 colors=None, resolution: float = 0.1,  # meters / pixel
                 meters_ahead: float = 32, meters_behind: float = 32,
                 meters_left: float = 32, meters_right: float = 32, version='v1.0-mini'):
        self.layer_names = ['drivable_area', 'road_segment', 'road_block',
                            'lane', 'ped_crossing', 'walkway', 'stop_line',
                            'carpark_area', 'road_divider', 'lane_divider'] if layer_names is None else layer_names
        self.colors = [(255, 255, 255), (255, 255, 255), (255, 255, 255),
                       (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255),
                       (255, 255, 255), (255, 255, 255), (255, 255, 255), ] if colors is None else colors
        self.root = root
        self.nus = NuScenes(version, dataroot=self.root)
        self.helper = PredictHelper(self.nus)

        self.seconds = sampling_time
        self.agent_seconds = agent_time

        self.static_layer = StaticLayerRasterizer(self.helper, layer_names=self.layer_names, colors=self.colors,
                                                  resolution=resolution, meters_ahead=meters_ahead,
                                                  meters_behind=meters_behind,
                                                  meters_left=meters_left, meters_right=meters_right)
        self.agent_layer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=self.agent_seconds,
                                                      resolution=resolution, meters_ahead=meters_ahead,
                                                      meters_behind=meters_behind,
                                                      meters_left=meters_left, meters_right=meters_right)
        self.combinator = Rasterizer()
        self.show_agent = True

    def __len__(self):
        return len(self.nus.sample)

    def get_ego_pose(self, idx):
        sample_data_lidar = self.nus.get('sample_data', self.nus.sample[idx]['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose', sample_data_lidar['ego_pose_token'])
        return ego_pose['translation'], ego_pose['rotation']

    def get_trajectory(self, idx):
        scene_id = idx
        sample = self.nus.sample[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        ego_pose_xy, ego_pose_rotation = self.get_ego_pose(idx)
        # 2. Generate map mask
        map_masks, lanes, map_img = self.static_layer.generate_mask(ego_pose_xy, ego_pose_rotation, sample_token)
        # 3. Generate Agent Trajectory
        # global
        agent_mask, xy_global = self.agent_layer.generate_mask(
            ego_pose_xy, ego_pose_rotation, sample_token, self.seconds, show_agent=self.show_agent)
        # local
        xy_local = []
        for path_global in xy_global[:2]:
            pose_xy = []
            for path_global_i in path_global:
                if len(path_global_i) == 0:
                    pose_xy.append(path_global_i)
                else:
                    pose_xy.append(
                        convert_global_coords_to_local(path_global_i, ego_pose_xy, ego_pose_rotation).tolist())
            xy_local.append(pose_xy)
        if len(xy_global[2]) == 0:
            xy_local.append(xy_global[2])
        else:
            xy_local.append(convert_global_coords_to_local(xy_global[2], ego_pose_xy, ego_pose_rotation).tolist())

        map_img = self.combinator.combine([map_img, agent_mask])

        scene_data = {
            'ego_pose_xy': ego_pose_xy,
            'ego_pose_rotation': ego_pose_rotation,

            'map_masks': map_masks,
            'map_img': map_img,
            'driving_lines': lanes,

            'agent_mask': agent_mask,
            'xy_global': xy_global,
            'xy_local': xy_local,
            'scene_id': scene_id
        }

        return scene_data

    def render_sample(self, idx):
        sample = self.nus.sample[idx]
        sample_token = sample['token']
        self.nus.render_sample(sample_token)

    def render_scene(self, idx):
        sample = self.nus.sample[idx]
        sample_token = sample['token']
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)
        layer_names = ['road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line', 'carpark_area']
        camera_channel = 'CAM_FRONT'
        nus_map.render_map_in_image(self.nus, sample_token, layer_names=layer_names, camera_channel=camera_channel)
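
A hedged end-to-end sketch for this extractor:

extractor = NusTrajectoryExtractor(root='/datasets/nuscene/v1.0-mini')
scene = extractor.get_trajectory(0)
plt.imshow(scene['map_img'])  # rasterized map with the faded agent history composited on top
plt.show()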
Example #8
    def __getitem__(self, idx):
        sample = self.samples[idx]
        sample_token = sample['token']

        # 1. calculate ego pose
        sample_data_lidar = self.nus.get('sample_data',
                                         sample['data']['LIDAR_TOP'])
        ego_pose = self.nus.get('ego_pose',
                                sample_data_lidar['ego_pose_token'])
        ego_pose_xy = ego_pose['translation']
        ego_pose_rotation = ego_pose['rotation']
        # timestamp
        timestamp = ego_pose['timestamp']

        # 2. Generate Map & Agent Masks
        scene = self.nus.get('scene', sample['scene_token'])
        log = self.nus.get('log', scene['log_token'])
        location = log['location']
        nus_map = NuScenesMap(dataroot=self.root, map_name=location)

        static_layer = StaticLayerRasterizer(self.helper,
                                             layer_names=self.layer_names,
                                             colors=self.colors)
        agent_layer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=self.agent_seconds)

        map_masks, lanes, map_img = static_layer.generate_mask(
            ego_pose_xy, ego_pose_rotation, sample_token)
        agent_mask = agent_layer.generate_mask(ego_pose_xy, ego_pose_rotation,
                                               sample_token)

        lane_tokens = list(lanes.keys())
        num_lanes = len(lane_tokens)
        lanes_disc = [np.array(lanes[token])[:, :2] for token in lane_tokens]
        lanes_arc = []
        for seq in lanes_disc:
            seq_len = len(seq)
            lane_middle = seq[seq_len // 2]
            opt_middle = (seq[0] + seq[-1]) / 2
            lane_h = np.linalg.norm(lane_middle - opt_middle)  # sagitta: midpoint offset from the chord
            lane_w = np.linalg.norm(seq[-1] - seq[0])  # chord length
            if lane_h == 0:  # perfectly straight lane: avoid division by zero
                lane_h = 0.001
            # sagitta formula: R = h/2 + w^2/(8h) is the radius of the arc through the
            # endpoints and the midpoint (larger R = straighter lane)
            curve = lane_h / 2 + lane_w**2 / (8 * lane_h)
            lanes_arc.append(curve)
        lanes_arc = np.array(lanes_arc)

        # 3. Generate Agent Trajectory
        annotation_tokens = sample['anns']
        num_agent = len(annotation_tokens)
        agents = []
        for ans_token in annotation_tokens:
            agent = self.nus.get('sample_annotation', ans_token)
            instance_token = agent['instance_token']

            # agent trajectory (global frame)
            xy_global = agent['translation']
            past_xy_global = self.helper.get_past_for_agent(
                instance_token,
                sample_token,
                seconds=self.seconds,
                in_agent_frame=False)
            future_xy_global = self.helper.get_future_for_agent(
                instance_token,
                sample_token,
                seconds=self.seconds,
                in_agent_frame=False)

            # path curvature (sagitta-based radius, as for the lanes above)
            agents_arc = []
            for seq in [past_xy_global, future_xy_global]:
                seq_len = len(seq)
                if seq_len < 2:
                    continue
                path_middle = seq[seq_len // 2]
                opt_middle = (seq[0] + seq[-1]) / 2
                path_h = np.linalg.norm(path_middle - opt_middle)
                path_w = np.linalg.norm(seq[-1] - seq[0])
                if path_h == 0:
                    path_h = 0.001
                if path_w == 0:
                    path_w = 0.001
                curve = path_h / 2 + (path_w * path_w) / (8 * path_h)
                agents_arc.append(curve)
            agents_arc = np.array(agents_arc)

            # trajectories in the local (ego) frame
            xy_local = convert_global_coords_to_local(
                np.array([xy_global[:2]]), ego_pose_xy, ego_pose_rotation)
            if len(past_xy_global) < 1:  # pad empty histories so the transform gets valid input
                past_xy_global = np.append(past_xy_global, [0., 0.])
            if len(future_xy_global) < 1:
                future_xy_global = np.append(future_xy_global, [0., 0.])
            past_xy_local = convert_global_coords_to_local(
                past_xy_global, ego_pose_xy, ego_pose_rotation)
            future_xy_local = convert_global_coords_to_local(
                future_xy_global, ego_pose_xy, ego_pose_rotation)

            # agent dynamics
            rot = agent['rotation']
            vel = self.helper.get_velocity_for_agent(instance_token,
                                                     sample_token)
            accel = self.helper.get_acceleration_for_agent(
                instance_token, sample_token)
            yaw_rate = self.helper.get_heading_change_rate_for_agent(
                instance_token, sample_token)

            agent_states = {
                'present_pos': xy_global,
                'past_pos': past_xy_global,
                'future_pos': future_xy_global,
                'rot': rot,
                'vel': vel,
                'accel': accel,
                'yaw_rate': yaw_rate,
                'present_local_xy': xy_local,
                'past_local_xy': past_xy_local,
                'future_local_xy': future_xy_local,
                'curvature': agents_arc
            }

            agents.append(agent_states)

        return map_masks, agent_mask, agents, idx
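
A quick standalone check of the sagitta-radius formula used above (hypothetical snippet, not part of the extractor): sample a circular arc of known radius and recover it.

import numpy as np

R, theta = 10.0, np.linspace(0.25 * np.pi, 0.75 * np.pi, 21)
seq = np.stack([R * np.cos(theta), R * np.sin(theta)], axis=1)  # arc of radius 10

h = np.linalg.norm(seq[len(seq) // 2] - (seq[0] + seq[-1]) / 2)  # sagitta
w = np.linalg.norm(seq[-1] - seq[0])                             # chord
print(h / 2 + w**2 / (8 * h))  # ~10.0: recovers the true radius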