Example 1
    def generate_mask(self, translation, rotation, sample_token: str):
        """
        Generates map-layer masks and an agent-centered lane image.
        :param translation: Global (x, y, z) translation of the agent.
        :param rotation: Rotation quaternion of the agent.
        :param sample_token: Token for sample.
        :return: Map masks, lane records, and the cropped lane image.
        """

        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        x, y = translation[:2]
        yaw = quaternion_yaw(Quaternion(rotation))
        yaw_corrected = correct_yaw(yaw)

        image_side_length = 2 * max(self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)
        patchbox = get_patchbox(x, y, image_side_length)

        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi
        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox, angle_in_degrees, self.layer_names, canvas_size=canvas_size)  # map-layer masks
        agent_pixels = int(image_side_length_pixels / 2), int(image_side_length_pixels / 2)
        base_image = np.zeros((image_side_length_pixels, image_side_length_pixels, 3))

        lanes = get_lanes_in_radius(x, y, radius=50, discretization_meters=1, map_api=self.maps[map_name])
        image_with_lanes = draw_lanes_on_image(base_image, lanes, (x, y), yaw,
                                               agent_pixels, self.resolution, color_function=color_by_yaw)
        rotation_mat = get_rotation_matrix(image_with_lanes.shape, yaw)
        rotated_image = cv2.warpAffine(image_with_lanes, rotation_mat, image_with_lanes.shape[:2])
        rotated_image = rotated_image.astype("uint8")

        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind, self.meters_left,
                                       self.meters_right, self.resolution,
                                       int(image_side_length / self.resolution))

        image_show = rotated_image[row_crop, col_crop, :]

        return masks, lanes, image_show
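The pieces above can be sanity-checked visually; a minimal sketch, assuming `rasterizer` is an instance of the class this method belongs to and that `translation`, `rotation`, and `sample_token` come from a loaded nuScenes split (all placeholder names):

import matplotlib.pyplot as plt

masks, lanes, image_show = rasterizer.generate_mask(translation, rotation, sample_token)
print(len(masks), 'layer masks,', len(lanes), 'lane records')
plt.imshow(image_show)  # cropped, agent-centered lane image
plt.show()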
Example 2
    def plot_ego(self,
                 ax,
                 sample,
                 ego_traj=None,
                 animated_agent=False,
                 plot_ego=True):
        """
        Plots the ego vehicle and any named ego trajectories on the axes.
        """
        sample_data = self.nusc.get('sample_data', sample['data']['CAM_FRONT'])
        ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])

        pos = [ego_pose['translation'][0], ego_pose['translation'][1]]
        # quaternion -> yaw (rad) -> rotation angle (rad) -> degrees
        ego_yaw = Quaternion(ego_pose['rotation'])
        ego_yaw = quaternion_yaw(ego_yaw)
        ego_yaw = angle_of_rotation(ego_yaw)
        ego_yaw = np.rad2deg(ego_yaw)

        if plot_ego:
            self.plot_elements(pos,
                               ego_yaw,
                               'ego',
                               ax,
                               animated_agent=animated_agent)

        if ego_traj is not None:
            for name, traj_dict in ego_traj.items():
                ax.scatter(traj_dict['traj'][:, 0],
                           traj_dict['traj'][:, 1],
                           c=traj_dict['color'],
                           s=60,
                           zorder=80)

        return pos, ego_pose['rotation']
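The `ego_traj` argument is consumed as a dict of named trajectories; a minimal sketch of the shape it expects, with an illustrative entry name and values:

import numpy as np

ego_traj = {
    'planned': {                                                 # name is arbitrary
        'traj': np.array([[0.0, 0.0], [1.0, 0.5], [2.0, 1.0]]),  # (N, 2) points
        'color': 'green',
    },
}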
Example 3
    def make_representation(self, instance_token: str,
                            sample_token: str) -> np.ndarray:
        """
        Makes rasterized representation of static map layers.
        :param instance_token: Token for instance.
        :param sample_token: Token for sample.
        :return: Three channel image.
        """

        sample_annotation = self.helper.get_sample_annotation(
            instance_token, sample_token)
        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        x, y = sample_annotation['translation'][:2]

        yaw = quaternion_yaw(Quaternion(sample_annotation['rotation']))

        yaw_corrected = correct_yaw(yaw)

        image_side_length = 2 * max(self.meters_ahead, self.meters_behind,
                                    self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)

        patchbox = get_patchbox(x, y, image_side_length)

        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi

        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox,
                                                 angle_in_degrees,
                                                 self.layer_names,
                                                 canvas_size=canvas_size)

        images = []
        for mask, color in zip(masks, self.colors):
            images.append(
                change_color_of_binary_mask(
                    np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))

        lanes = draw_lanes_in_agent_frame(image_side_length_pixels,
                                          x,
                                          y,
                                          yaw,
                                          radius=50,
                                          image_resolution=self.resolution,
                                          discretization_resolution_meters=1,
                                          map_api=self.maps[map_name])

        images.append(lanes)

        image = self.combinator.combine(images)

        row_crop, col_crop = get_crops(
            self.meters_ahead, self.meters_behind, self.meters_left,
            self.meters_right, self.resolution,
            int(image_side_length / self.resolution))

        return image[row_crop, col_crop, :]
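A usage sketch for `make_representation`, assuming `rasterizer` is an instance of this class; the tokens are placeholders for real annotation tokens:

import matplotlib.pyplot as plt

img = rasterizer.make_representation(instance_token, sample_token)
assert img.ndim == 3 and img.shape[2] == 3  # three-channel image, as documented
plt.imshow(img.astype('uint8'))
plt.show()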
Example 4
def get_rotation_matrix(image_shape: Tuple[int, int, int],
                        yaw_in_radians: float) -> np.ndarray:
    """
    Gets a rotation matrix to rotate a three channel image so that
    yaw_in_radians points along the positive y-axis.
    :param image_shape: (Height, width, n_channels).
    :param yaw_in_radians: Angle to rotate the image by.
    :return: The rotation matrix.
    """

    rotation_in_degrees = angle_of_rotation(yaw_in_radians) * 180 / np.pi

    return cv2.getRotationMatrix2D((image_shape[1] / 2, image_shape[0] / 2),
                                   rotation_in_degrees, 1)
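`get_rotation_matrix` is self-contained apart from `angle_of_rotation`, so it can be exercised directly; a minimal sketch, assuming the helper's usual nuScenes devkit import path:

import cv2
import numpy as np
from nuscenes.prediction.input_representation.utils import angle_of_rotation

image = np.zeros((500, 500, 3), dtype=np.uint8)  # blank square canvas
rot = get_rotation_matrix(image.shape, yaw_in_radians=np.pi / 4)
# cv2.warpAffine takes dsize as (width, height); on a square canvas this
# coincides with the (rows, cols) order used elsewhere in these examples.
rotated = cv2.warpAffine(image, rot, (image.shape[1], image.shape[0]))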
Example 5
    def generate_mask(self, translation, rotation, sample_token: str):
        """
        Generates map masks, lane records, and combined map images
        centered on the agent.
        """

        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        # translation factors (ego frame)
        x, y = translation[:2]
        yaw = quaternion_yaw(Quaternion(rotation))
        yaw_corrected = correct_yaw(yaw)

        # 1. generate map masks
        image_side_length = 2 * max(self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)
        patchbox = get_patchbox(x, y, image_side_length)
        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi
        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox, angle_in_degrees, self.layer_names, canvas_size=canvas_size)

        # 2. generate guided lanes
        agent_pixels = int(image_side_length_pixels / 2), int(image_side_length_pixels / 2)
        base_image = np.zeros((image_side_length_pixels, image_side_length_pixels, 3))

        meter_resolution = 0.5
        radius = 50.0
        lanes = get_lanes_in_radius(x, y, radius=radius, discretization_meters=meter_resolution, map_api=self.maps[map_name])
        image_with_lanes = draw_lanes_on_image(base_image, lanes, (x, y), yaw,
                                               agent_pixels, self.resolution, color_function=color_by_yaw)
        rotation_mat = get_rotation_matrix(image_with_lanes.shape, yaw)
        rotated_image = cv2.warpAffine(image_with_lanes, rotation_mat, image_with_lanes.shape[:2])
        rotated_image_lanes = rotated_image.astype("uint8")

        # 3. combine masks
        images = []
        for mask, color in zip(masks, self.colors):
            images.append(change_color_of_binary_mask(np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))
        map_img = self.combinator.combine(images)
        images.append(rotated_image_lanes)
        map_img_with_lanes = self.combinator.combine(images)

        # crop
        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind, self.meters_left,
                                       self.meters_right, self.resolution,
                                       int(image_side_length / self.resolution))

        # Stack every channel image and crop each to the agent-centered viewport.
        return np.array(images)[:, row_crop, col_crop, :], lanes, map_img, map_img_with_lanes
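This variant returns four values; a sketch of unpacking them, again assuming an instance of the owning class:

layers, lanes, map_img, map_img_with_lanes = rasterizer.generate_mask(
    translation, rotation, sample_token)
# `layers` holds each cropped channel image, `map_img` the combined static
# layers, and `map_img_with_lanes` the same composite with lanes drawn on top.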
Example 6
def ado_maneuver_filter(r):
    """
    Classifies an agent's recent maneuver from its heading history
    as 'turn_left', 'turn_right', or 'straight'.
    """
    ado_heading = []
    quat_history = r.past_agent_quat + [r.current_agent_quat]
    for q in quat_history[-4:]:
        #### convert from global quat to local steering ####
        yaw = Quaternion(q)
        yaw = quaternion_yaw(yaw)
        ado_heading.append(-angle_of_rotation(yaw))

    ado_heading = np.array(ado_heading)
    # Finite-difference heading rate; keyframes are 0.5 s apart (2 Hz).
    steering = (ado_heading[1:] - ado_heading[:-1]) / 0.5

    if steering[-4:].mean() < -0.1:
        return 'turn_left'
    elif steering[-4:].mean() > 0.1:
        return 'turn_right'
    else:
        return 'straight'
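A minimal sketch exercising the filter on synthetic data: yaw-only quaternions with a steady counter-clockwise heading increase. `SimpleNamespace` stands in for whatever record type `r` actually is, and the devkit import paths for the two helpers are assumed:

from types import SimpleNamespace
from pyquaternion import Quaternion
from nuscenes.eval.common.utils import quaternion_yaw
from nuscenes.prediction.input_representation.utils import angle_of_rotation

# Heading grows 0.1 rad per 0.5 s step.
quats = [Quaternion(axis=[0, 0, 1], angle=a).elements for a in (0.0, 0.1, 0.2, 0.3)]
r = SimpleNamespace(past_agent_quat=quats[:-1], current_agent_quat=quats[-1])
# Under this function's sign convention the positive heading rate
# classifies as 'turn_right'.
print(ado_maneuver_filter(r))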
Example 7
    def plot_road_agents(self,
                         ax,
                         fig,
                         sample,
                         plot_list,
                         text_box,
                         sensor_info,
                         my_patch,
                         plot_traj=False,
                         contour_func=None,
                         animated_agent=False):

        road_agents_in_patch = {'pedestrians': [], 'vehicles': []}
        for ann_token in sample['anns']:
            ann = self.nusc.get('sample_annotation', ann_token)
            category = ann['category_name']
            if len(ann['attribute_tokens']) != 0:
                attribute = self.nusc.get('attribute',
                                          ann['attribute_tokens'][0])['name']
            else:
                attribute = ""

            pos = [ann['translation'][0], ann['translation'][1]]
            instance_token = ann['instance_token']
            sample_token = sample['token']
            #### Plot other agents ####
            valid_agent = False
            if ('other_cars' in plot_list and 'vehicle' in category
                    and 'parked' not in attribute
                    and self.in_my_patch(pos, my_patch)):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)
                self.plot_elements(pos,
                                   agent_yaw,
                                   'other_cars',
                                   ax,
                                   animated_agent=animated_agent)

                car_info = {
                    'instance_token': ann['instance_token'],
                    'category': category,
                    'attribute': attribute,
                    'translation': pos,
                    'rotation_quat': ann['rotation'],
                    'rotation_deg': agent_yaw
                }
                road_agents_in_patch['vehicles'].append(car_info)

                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])
                    self.plot_text_box(ax, ann['instance_token'], [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] + 1.2
                    ])

            #### Plot pedestrians ####
            if ('pedestrian' in plot_list and 'pedestrian' in category
                    and 'stroller' not in category
                    and 'wheelchair' not in category
                    and self.in_my_patch(pos, my_patch)):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)

                self.plot_elements(pos, agent_yaw, 'pedestrian', ax)
                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])

            if valid_agent and plot_traj:
                agent_future = self.helper.get_future_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['pred_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_past = self.helper.get_past_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['obs_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                if len(agent_future) > 0:
                    ax.scatter(agent_future[:, 0],
                               agent_future[:, 1],
                               s=10,
                               c='y',
                               alpha=1.0,
                               zorder=200)
                if len(agent_past) > 0:
                    ax.scatter(agent_past[:, 0],
                               agent_past[:, 1],
                               s=10,
                               c='k',
                               alpha=0.2,
                               zorder=200)

        return road_agents_in_patch
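A call sketch, assuming `viz` is the plotting object these methods live on, `sample` is a nuScenes sample record, and `my_patch` is whatever `in_my_patch` expects (left as a placeholder):

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(10, 10))
agents = viz.plot_road_agents(ax, fig, sample,
                              plot_list=['other_cars', 'pedestrian'],
                              text_box=False,
                              sensor_info=None,
                              my_patch=my_patch,  # placeholder
                              plot_traj=True)
print(len(agents['vehicles']), 'vehicles,', len(agents['pedestrians']), 'pedestrians')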
Example 8
    def plot_car_to_predict(self,
                            ax,
                            agent_future: np.ndarray,
                            agent_past: np.ndarray,
                            instance_token: str,
                            sample_token: str,
                            text_box: bool = True,
                            predictions: list = None,
                            agent_state_dict: dict = None):
        '''
        predictions = {
            'name': {'data': <data>, 'color': <color>, 'frame': <frame>, 'style': '.'}
        }
        '''

        #### Plot car ####
        ann = self.helper.get_sample_annotation(instance_token, sample_token)

        category = ann['category_name']
        if len(ann['attribute_tokens']) != 0:
            attribute = self.nusc.get('attribute',
                                      ann['attribute_tokens'][0])['name']
        else:
            attribute = ""

        agent_yaw = Quaternion(ann['rotation'])
        agent_yaw = quaternion_yaw(agent_yaw)
        agent_yaw = angle_of_rotation(agent_yaw)
        agent_yaw = np.rad2deg(agent_yaw)

        self.plot_elements([ann['translation'][0], ann['translation'][1]],
                           agent_yaw, 'current_car', ax)
        if text_box:
            self.plot_text_box(
                ax, category,
                [ann['translation'][0] + 1.2, ann['translation'][1]])
            self.plot_text_box(
                ax, attribute,
                [ann['translation'][0] + 1.2, ann['translation'][1] + 1.2])
            if agent_state_dict is not None:
                state_str = ""
                for k, v in agent_state_dict.items():
                    state_str += f"{k[0]}:{v:.2f}, "
                self.plot_text_box(
                    ax, state_str,
                    [ann['translation'][0] + 1.2, ann['translation'][1] + 3.2])

        agent_pos = [ann['translation'][0], ann['translation'][1]]

        # plot ground truth
        if len(agent_future) > 0:
            ax.scatter(agent_future[:, 0],
                       agent_future[:, 1],
                       s=20,
                       c='yellow',
                       alpha=1.0,
                       zorder=200)
        if len(agent_past) > 0:
            ax.scatter(agent_past[:, 0],
                       agent_past[:, 1],
                       s=20,
                       c='k',
                       alpha=0.5,
                       zorder=200)

        # plot predictions
        if predictions is not None:
            for k, v in viewitems(predictions):
                if v['frame'] == 'local':
                    v['data'] = convert_local_coords_to_global(
                        v['data'], np.array(agent_pos),
                        np.array(ann['rotation']))
                if 'style' not in v.keys():
                    v['style'] = '.'
                if v['style'] == '.':
                    ax.scatter(v['data'][:, 0],
                               v['data'][:, 1],
                               s=20,
                               c=v['color'],
                               alpha=1.0,
                               zorder=2)
                elif v['style'] == '-':
                    ax.plot(v['data'][:, 0],
                            v['data'][:, 1],
                            c=v['color'],
                            alpha=1.0,
                            zorder=2)
                else:
                    raise ValueError('style not supported')

        return agent_pos, ann['rotation']
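The `predictions` layout documented above, in concrete form; the entry name, trajectory, and color are illustrative:

import numpy as np

predictions = {
    'my_model': {
        'data': np.zeros((12, 2)),  # (N, 2) trajectory
        'color': 'red',
        'frame': 'local',           # 'local' trajectories get converted to global
        'style': '-',               # '.' scatters points, '-' draws a line
    },
}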
Example 9
def render(graphics, render_info, config=None):
    # Avoid the mutable-default pitfall; an explicit config is expected.
    config = config or {}

    if 'image' in config['render_type']:
        sim_ego_yaw = Quaternion(render_info['sim_ego_quat_gb'])
        sim_ego_yaw = quaternion_yaw(sim_ego_yaw)
        sim_ego_yaw = angle_of_rotation(sim_ego_yaw)
        sim_ego_yaw = np.rad2deg(sim_ego_yaw)

        ego_traj = None
        if 'sim_ego' in config['render_elements']:
            ego_traj = {
                # 'lane': {
                #     'traj': self.all_info['future_lanes'][0],
                #     'color': 'green'
                # },
                'sim_ego': {
                    'pos': render_info['sim_ego_pos_gb'],
                    'yaw': sim_ego_yaw,
                    'traj': np.zeros((4, 2)),
                    'color': 'yellow'
                }
            }

        #### control plots ####
        '''
        if 'control_plots' in config['render_elements']:
            fig, ax = plt.subplots(2,1)
            ax[0].plot(list(ap_timesteps), list(ap_speed), 'o-')
            ax[0].set_xlabel('timestep', fontsize=20)
            ax[0].set_ylabel('speed', fontsize=20)
            ax[1].plot(list(ap_timesteps), list(ap_steering), 'o-')
            ax[1].set_xlabel('timestep', fontsize=20)
            ax[1].set_ylabel('steering', fontsize=20)
            canvas = FigureCanvas(fig)
            canvas.draw()
            width, height = fig.get_size_inches() * fig.get_dpi()
            image = np.fromstring(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
            if 'image' not in render_info.keys():
                render_info['image'] = {}
            render_info['image'].update({'cmd': image})
        '''

        save_img_dir = None
        if render_info['save_image_dir'] is not None:
            save_img_dir = os.path.join(render_info['save_image_dir'],
                                        str(render_info['scene_name']))
            os.makedirs(save_img_dir, exist_ok=True)

        sensor_info = None
        if 'sensor_info' in config['render_elements']:
            sensor_info = render_info['all_info']['sensor_info']

        ado_traj_dict = None
        if 'ado_traj_dict' in render_info.keys():
            ado_traj_dict = render_info['ado_traj_dict']

        costmap_contour = None
        if 'costmap_contour' in render_info.keys():
            costmap_contour = render_info['costmap_contour']

        other_images_to_be_saved = None
        if ('sim_ego_raster_image' in render_info['all_info']
                and render_info['all_info']['sim_ego_raster_image'] is not None):
            raster = render_info['all_info']['sim_ego_raster_image']
            if raster.shape == (3, 250, 250):
                raster = np.transpose(raster, (1, 2, 0))
            assert_shape(raster, 'raster', (250, 250, 3))
            other_images_to_be_saved = {'raster': raster}

        if 'image' in render_info.keys():
            if other_images_to_be_saved is None:
                other_images_to_be_saved = render_info['image']
            else:
                other_images_to_be_saved.update(render_info['image'])

        render_additional = {}
        if 'lines' in render_info.keys():
            render_additional['lines'] = render_info['lines']
        if 'scatters' in render_info.keys():
            render_additional['scatters'] = render_info['scatters']
        if 'text_boxes' in render_info.keys():
            render_additional['text_boxes'] = render_info['text_boxes']

        plot_human_ego = 'human_ego' in config['render_elements']

        # Ego-centric when following the ego vehicle or when no instance is given.
        ego_centric = (render_info['instance_token'] is None
                       or render_info['instance_token'] == 'ego')

        plot_list = [
            'ego', 'other_cars', 'labeled_map', 'sensing_patch', 'sensor_info',
            'cam'
        ]
        fig, ax, other = graphics.plot_ego_scene(
            ego_centric=ego_centric,
            sample_token=render_info['sample_token'],
            instance_token=render_info['instance_token'],
            ego_traj=ego_traj,
            ado_traj=ado_traj_dict,
            contour=costmap_contour,
            save_img_dir=save_img_dir,
            idx=str(render_info['sample_idx']).zfill(2),
            sensor_info=sensor_info,
            paper_ready=config['render_paper_ready'],
            other_images_to_be_saved=other_images_to_be_saved,
            render_additional=render_additional,
            plot_human_ego=plot_human_ego,
            patch_margin=config['patch_margin'],
            plot_list=plot_list)

        return fig, ax, other
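For reference, the `config` keys that `render` reads, gathered in one sketch; the values are illustrative:

config = {
    'render_type': ['image'],
    'render_elements': ['sim_ego', 'human_ego', 'sensor_info'],
    'render_paper_ready': False,
    'patch_margin': 30,  # illustrative value
}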
Example 10
    def get_info(self,
                 sample_token: str,
                 ego_pos: np.ndarray = None,
                 ego_quat: np.ndarray = None,
                 instance_based=False):

        sample = self.nusc.get('sample', sample_token)
        scene = self.nusc.get('scene', sample['scene_token'])
        scene_log = self.nusc.get('log', scene['log_token'])
        nusc_map = NuScenesMap(dataroot=self.dataroot,
                               map_name=scene_log['location'])
        sample_data = self.nusc.get('sample_data', sample['data']['CAM_FRONT'])

        if ego_pos is None:
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            ego_pos = np.array(ego_pose['translation'])
        if ego_quat is None:
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            ego_quat = np.array(ego_pose['rotation'])

        ego_yaw = Quaternion(ego_quat)
        ego_yaw = quaternion_yaw(ego_yaw)
        ego_yaw_rad = angle_of_rotation(ego_yaw)
        ego_yaw_degrees = np.rad2deg(ego_yaw_rad)

        ego_on_road_objects = None
        if self.agent_road_objects:
            ego_on_road_objects = self.get_road_objects(ego_pos, nusc_map)

        ego_info = {
            'translation': ego_pos,
            'rotation_deg': ego_yaw_degrees,
            'rotation_quat': ego_quat,
            'velocity': 0,
            'acceleration': 0,
            'heading_change_rate': 0,
            'road_objects': ego_on_road_objects
        }

        #### define patch ####
        sensing_patch_width = self.config['sensing_patch_size'][0]
        sensing_patch_length = self.config['sensing_patch_size'][1]

        patch_center_before_rotation = np.array([ego_pos[0], ego_pos[1]])

        sensing_patch_coord_before_rotation = [
            ego_pos[0] - sensing_patch_width / 2,
            ego_pos[1] - sensing_patch_length / 2,
            ego_pos[0] + sensing_patch_width / 2,
            ego_pos[1] + sensing_patch_length / 2
        ]

        ## generate sensing patch mesh
        x = np.arange(sensing_patch_coord_before_rotation[0],
                      sensing_patch_coord_before_rotation[2], 0.2)
        y = np.arange(sensing_patch_coord_before_rotation[1],
                      sensing_patch_coord_before_rotation[3], 0.2)

        X, Y = np.meshgrid(x, y)
        ### apply rotation
        X, Y = rotate_mesh2D(pos=ego_pos,
                             rot_rad=ego_yaw_rad,
                             X=X,
                             Y=Y,
                             frame='current')

        ## generate sensing patch shapely polygon
        sensing_patch = self.get_patch_coord(
            patch_box=(patch_center_before_rotation[0],
                       patch_center_before_rotation[1], sensing_patch_length,
                       sensing_patch_width),
            rotate_center=(ego_pos[0], ego_pos[1]),
            patch_angle=-ego_yaw_degrees)

        sensing_patch_info = {'mesh': [X, Y], 'polygon': sensing_patch}
        # get agent_info in patch
        agent_info = self.get_agent_info(sample, sensing_patch, nusc_map)

        # get map info in patch
        map_info = self.get_map_info(ego_pos, ego_yaw_degrees, nusc_map,
                                     sensing_patch)

        # get canbus info
        can_info = None
        if not instance_based:
            can_info = self.get_can_info(scene['name'])

        sensing_info = {
            'sample_token': sample_token,
            'sensing_patch': sensing_patch_info,
            'ego_info': ego_info,
            'agent_info': agent_info,
            'map_info': map_info,
            'can_info': can_info
        }

        return sensing_info
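A consumption sketch for the returned dict, assuming `env` is an instance of this class and `sample_token` comes from the dataset:

info = env.get_info(sample_token)
print(info['ego_info']['translation'])  # ego position
print(len(info['agent_info']))          # agents inside the sensing patch
X, Y = info['sensing_patch']['mesh']    # rotated patch mesh, e.g. for plotting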
Example 11
    def get_agent_info(self, sample, sensing_patch, nusc_map):
        """
        Collects state, history, and lane context for every car and
        pedestrian inside the sensing patch.
        """
        agent_info = []
        for ann_token in sample['anns']:
            ann = self.nusc.get('sample_annotation', ann_token)
            instance_token = ann['instance_token']
            category = ann['category_name']
            if len(ann['attribute_tokens']) != 0:
                attribute = self.nusc.get('attribute',
                                          ann['attribute_tokens'][0])['name']
            else:
                attribute = ""

            agent_pos = [ann['translation'][0], ann['translation'][1]]

            include = False
            #### Cars ####
            if ('vehicle' in category and 'parked' not in attribute
                    and self.in_shapely_polygon(agent_pos, sensing_patch)):
                include = True
                agent_type = 'car'

            #### pedestrians ####
            if ('pedestrian' in category and 'stroller' not in category
                    and 'wheelchair' not in category
                    and self.in_shapely_polygon(agent_pos, sensing_patch)):
                include = True
                agent_type = "pedestrian"

            if include:
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw_deg = np.rad2deg(agent_yaw)

                agent_vel = self.helper.get_velocity_for_agent(
                    instance_token, sample['token'])
                agent_acc = self.helper.get_acceleration_for_agent(
                    instance_token, sample['token'])
                agent_heading_change_rate = self.helper.get_heading_change_rate_for_agent(
                    instance_token, sample['token'])

                agent_future = self.helper.get_future_for_agent(
                    instance_token,
                    sample['token'],
                    self.na_config['pred_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_past = self.helper.get_past_for_agent(
                    instance_token,
                    sample['token'],
                    self.na_config['obs_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_on_road_objects = None
                if self.agent_road_objects:
                    agent_on_road_objects = self.get_road_objects(
                        agent_pos, nusc_map)

                ## agent nearest lane ##
                closest_lane_data, incoming_lane_data, outgoing_lane_data = self.get_closest_lane(
                    ann['translation'], nusc_map)

                lane_data = {
                    'closest_lane_data': closest_lane_data,
                    'incoming_lane_data': incoming_lane_data,
                    'outgoing_lane_data': outgoing_lane_data
                }

                past_lane, future_lanes = self.get_past_and_future_lanes(
                    ann['translation'][:2], ann['rotation'], lane_data)

                tmp_agent_info = {
                    'instance_token': instance_token,
                    'type': agent_type,
                    'category': category,
                    'attribute': attribute,
                    'translation': agent_pos,
                    'rotation_deg': agent_yaw_deg,
                    'rotation_quat': ann['rotation'],
                    'velocity': agent_vel,
                    'acceleration': agent_acc,
                    'heading_change_rate': agent_heading_change_rate,
                    'past': agent_past,
                    'future': agent_future,
                    'road_objects': agent_on_road_objects,
                    'past_lane': past_lane,
                    'future_lanes': future_lanes
                }

                agent_info.append(tmp_agent_info)

        return agent_info
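And a matching sketch for iterating the per-agent records this method returns (again, `env` and the arguments are placeholders):

for agent in env.get_agent_info(sample, sensing_patch, nusc_map):
    print(agent['type'], agent['instance_token'], agent['velocity'])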