Example #1
    def __getitem__(self, idx):
        ref_index = self.valid_index[idx]

        ref_sd_token = self.sample_data_tokens[ref_index]
        ref_scene_token = self.scene_tokens[ref_index]
        flip_flag = self.flip_flags[ref_index]

        # reference coordinate frame
        ref_from_global = self.get_global_pose(ref_sd_token, inverse=True)

        # NOTE: input
        input_sds = []
        sd_token = ref_sd_token
        while sd_token != "" and len(input_sds) < self.n_input:
            curr_sd = self.nusc.get("sample_data", sd_token)
            input_sds.append(curr_sd)
            sd_token = curr_sd["prev"]

        # warn when there are fewer than the desired number of input sweeps (disabled)
        # if len(input_sds) < self.n_input:
        #     warnings.warn(f"The number of input sweeps {len(input_sds)} is less than {self.n_input}.", RuntimeWarning)

        # get input sweep frames
        input_points_list = []
        for i, curr_sd in enumerate(input_sds):
            # load the current lidar sweep
            curr_lidar_pc = MyLidarPointCloud.from_file(f"{self.nusc_root}/{curr_sd['filename']}")

            # remove ego returns
            curr_lidar_pc.remove_ego()

            # transform from the current lidar frame to global and then to the reference lidar frame
            global_from_curr = self.get_global_pose(curr_sd["token"], inverse=False)
            ref_from_curr = ref_from_global.dot(global_from_curr)
            curr_lidar_pc.transform(ref_from_curr)

            # NOTE: check if we are in Singapore (if so flip x)
            if flip_flag:
                curr_lidar_pc.points[0] *= -1

            # stack (x, y, z) and tag each point with its sweep index i
            points = np.asarray(curr_lidar_pc.points[:3].T)
            tindex = np.full((len(points), 1), i)
            points = np.concatenate((points, tindex), axis=1)

            #
            input_points_list.append(points.astype(np.float32))

        # NOTE: output
        # get output sample frames and ground truth trajectory
        output_origin_list = []
        output_points_list = []
        gt_trajectory = np.zeros((self.n_output, 3), np.float64)
        for i in range(self.n_output):
            if self.nusc_split == "train" and self.train_on_all_sweeps:
                index = ref_index + i * self.N_SWEEPS_PER_SAMPLE
            else:
                index = ref_index + i

            # check whether this index is a valid target frame in the same scene
            if index < len(self.scene_tokens) and self.scene_tokens[index] == ref_scene_token:
                curr_sd_token = self.sample_data_tokens[index]
                curr_sd = self.nusc.get("sample_data", curr_sd_token)

                # load the current lidar sweep
                curr_lidar_pc = LidarPointCloud.from_file(f"{self.nusc_root}/{curr_sd['filename']}")

                # transform from the current lidar frame to global and then to the reference lidar frame
                global_from_curr = self.get_global_pose(curr_sd_token, inverse=False)
                ref_from_curr = ref_from_global.dot(global_from_curr)
                curr_lidar_pc.transform(ref_from_curr)

                # yaw of the target frame relative to the reference frame
                theta = quaternion_yaw(Quaternion(matrix=ref_from_curr))

                # NOTE: check if we are in Singapore (if so flip x)
                if flip_flag:
                    ref_from_curr[0, 3] *= -1
                    curr_lidar_pc.points[0] *= -1
                    theta *= -1

                origin = np.array(ref_from_curr[:3, 3])
                points = np.array(curr_lidar_pc.points[:3].T)
                gt_trajectory[i, :] = [origin[0], origin[1], theta]

                tindex = np.full(len(points), i)

                labels = self.load_ground_segmentation(curr_sd_token)
                assert(len(labels) == len(points))
                mask = np.logical_and(labels >= 1, labels <= 30)

                points = np.concatenate((points, tindex[:, None], labels[:, None]), axis=1)
                points = points[mask, :]

            else:
                # no valid target frame is available in this scene
                raise RuntimeError(f"The {i}-th output frame is not available")
                # unreachable filler values, kept for reference:
                # origin = np.array([0.0, 0.0, 0.0])
                # points = np.full((0, 5), -1)

            # origin
            output_origin_list.append(origin.astype(np.float32))

            # points
            output_points_list.append(points.astype(np.float32))

        # NOTE: trajectory sampling
        ref_scene = self.nusc.get("scene", ref_scene_token)

        # NOTE: speed and steering come from the pose and steeranglefeedback CAN messages; vehicle_monitor is only used for the turn signals
        vm_msgs = self.nusc_can.get_messages(ref_scene["name"], "vehicle_monitor")
        vm_uts = [msg["utime"] for msg in vm_msgs]
        pose_msgs = self.nusc_can.get_messages(ref_scene["name"], "pose")
        pose_uts = [msg["utime"] for msg in pose_msgs]
        steer_msgs = self.nusc_can.get_messages(ref_scene["name"], "steeranglefeedback")
        steer_uts = [msg["utime"] for msg in steer_msgs]

        # locate the closest message by universal timestamp
        ref_sd = self.nusc.get("sample_data", ref_sd_token)
        ref_utime = ref_sd["timestamp"]
        vm_index = locate_message(vm_uts, ref_utime)
        vm_data = vm_msgs[vm_index]
        pose_index = locate_message(pose_uts, ref_utime)
        pose_data = pose_msgs[pose_index]
        steer_index = locate_message(steer_uts, ref_utime)
        steer_data = steer_msgs[steer_index]

        # initial speed
        # v0 = vm_data["vehicle_speed"] / 3.6  # km/h to m/s
        v0 = pose_data["vel"][0]  # [0] means longitudinal velocity

        # curvature (positive: turn left)
        # steering = np.deg2rad(vm_data["steering"])
        steering = steer_data["value"]
        if flip_flag:
            steering *= -1
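        # NOTE: 2.588 m is assumed to be the ego vehicle's wheelbase (the nuScenes
        # ego car is a Renault Zoe); the expression below is a bicycle-model style
        # curvature estimate derived from the steering-angle feedback.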
        Kappa = 2 * steering / 2.588

        #
        left_signal = vm_data["left_signal"]
        right_signal = vm_data["right_signal"]
        if flip_flag:
            left_signal, right_signal = right_signal, left_signal
        drive_command = [left_signal, right_signal]

        # initial state
        T0 = np.array([0.0, 1.0])  # initial tangent (heading) direction
        N0 = np.array([1.0, 0.0]) if Kappa <= 0 else np.array([-1.0, 0.0])  # initial normal, chosen by the turn direction

        #
        # tt = np.arange(self.n_output) * self.SAMPLE_INTERVAL
        # tt = np.arange(0, self.n_output + self.SAMPLE_INTERVAL, self.SAMPLE_INTERVAL)
        t_start = 0  # second
        t_end = (self.n_output-1) * self.SAMPLE_INTERVAL  # second
        t_interval = self.SAMPLE_INTERVAL / 10
        tt = np.arange(t_start, t_end + t_interval, t_interval)
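        # sample candidate trajectories on a 10x finer time grid, then keep every
        # 10th point below so the coarse trajectories line up with the output frames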
        sampled_trajectories_fine = trajectory_sampler.sample(v0, Kappa, T0, N0, tt, self.n_samples)
        sampled_trajectories = sampled_trajectories_fine[:, ::10]

        # object boxes and shadows for the reference frame
        obj_boxes = self.load_object_boxes(ref_sd_token)
        obj_shadows = self.load_object_shadows(ref_sd_token)

        # future visible freespace (fvf) maps for the reference frame
        fvf_maps = self.load_future_visible_freespace(ref_sd_token)

        # assemble the training example
        example = {
            "scene_token": ref_scene_token,
            "sample_data_token": ref_sd_token,
            "input_points": torch.from_numpy(np.concatenate(input_points_list)),
            "sampled_trajectories_fine": torch.from_numpy(sampled_trajectories_fine),
            "sampled_trajectories": torch.from_numpy(sampled_trajectories),
            "drive_command": torch.tensor(drive_command),
            "output_origin": torch.from_numpy(np.stack(output_origin_list)),
            "output_points": torch.from_numpy(np.concatenate(output_points_list)),
            "gt_trajectory": torch.from_numpy(gt_trajectory),
            "obj_boxes": torch.from_numpy(obj_boxes),
            "obj_shadows": torch.from_numpy(obj_shadows),
            "fvf_maps": torch.from_numpy(fvf_maps),
        }
        return example
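# locate_message is not shown in this snippet. A minimal sketch of what it is
# assumed to do (return the index of the CAN message whose utime is closest to
# the reference timestamp); the actual helper may differ:
import bisect

def locate_message_sketch(utimes, ref_utime):
    """Index of the entry in the sorted list `utimes` closest to `ref_utime`."""
    i = bisect.bisect_left(utimes, ref_utime)
    if i == 0:
        return 0
    if i == len(utimes):
        return len(utimes) - 1
    # pick whichever neighbour is closer to the reference time
    return i if utimes[i] - ref_utime < ref_utime - utimes[i - 1] else i - 1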
Example #2
    def update_all_info(self):
        #### scene info ####
        scene_info = {
            'scene_token': self.scene['token'],
            'scene_description': self.scene['description'],
            'scene_name': self.scene['name'],
            'scene_nbr_samples': self.scene['nbr_samples']
        }
        self.all_info['scene_info'] = scene_info

        #### sensor info ####
        instance_based = self.instance_token is not None
        sensor_info = self.sensor.get_info(self.sample['token'],
                                           ego_pos=self.sim_ego_pos_gb,
                                           ego_quat=self.sim_ego_quat_gb,
                                           instance_based=instance_based)

        filtered_agent_info = self.filter_agent_info(sensor_info['agent_info'])
        sensor_info['agent_info'] = filtered_agent_info
        self.all_info['sensor_info'] = sensor_info

        #### sample info ####
        self.all_info['sample_idx'] = self.sample_idx
        self.all_info['sample_token'] = self.sample['token']
        self.all_info['time'] = self.time

        if self.instance_token is None:
            sample_data = self.helper.data.get(
                'sample_data', self.sample['data']['CAM_FRONT'])
            ego_pose = self.helper.data.get('ego_pose',
                                            sample_data['ego_pose_token'])
        else:
            ego_pose = {
                'translation': self.inst_ann['translation'],
                'rotation': self.inst_ann['rotation']
            }

        #### ego pose ####
        ego_yaw = Quaternion(ego_pose['rotation'])
        self.ego_yaw = quaternion_yaw(ego_yaw)
        #self.ego_yaw = angle_of_rotation(ego_yaw)

        self.all_info['ego_init_pos_gb'] = np.array(self.init_ego_pos)
        self.all_info['ego_init_quat_gb'] = np.array(self.init_ego_quat)
        self.all_info['ego_pos_gb'] = np.array(ego_pose['translation'])[:2]
        self.all_info['ego_quat_gb'] = np.array(ego_pose['rotation'])
        self.all_info['ego_yaw_rad'] = self.ego_yaw

        idx = min(self.sample_idx,
                  len(sensor_info['can_info']['ego_speed_traj']) - 1)
        ego_speed = sensor_info['can_info']['ego_speed_traj'][idx]
        idx = min(self.sample_idx,
                  len(sensor_info['can_info']['ego_rotation_rate_traj']) - 1)
        ego_yaw_rate = sensor_info['can_info']['ego_rotation_rate_traj'][idx][
            -1]
        idx_plus_one = min(
            self.sample_idx + 1,
            len(sensor_info['can_info']['ego_rotation_rate_traj']) - 1)
        self.next_ego_yaw_rate = sensor_info['can_info'][
            'ego_rotation_rate_traj'][idx_plus_one][-1]

        self.all_info['ego_speed'] = ego_speed
        self.all_info['ego_yaw_rate'] = ego_yaw_rate

        self.all_info['ego_pos_traj'] = np.array(self.true_ego_pos_traj)
        self.all_info['ego_quat_traj'] = np.array(self.true_ego_quat_traj)
        self.all_info['ego_speed_traj'] = np.array(
            sensor_info['can_info']['ego_speed_traj'])
        self.all_info['ego_yaw_rate_traj'] = np.array([
            steering[-1]
            for steering in sensor_info['can_info']['ego_rotation_rate_traj']
        ])

        self.all_info['ego_past_pos'] = np.array(
            self.true_ego_pos_traj)[0:self.sample_idx]
        self.all_info['ego_future_pos'] = np.array(
            self.true_ego_pos_traj)[self.sample_idx:]
        self.all_info['ego_past_quat'] = np.array(
            self.true_ego_quat_traj)[0:self.sample_idx]
        self.all_info['ego_future_quat'] = np.array(
            self.true_ego_quat_traj)[self.sample_idx:]
        self.all_info['scene_goal'] = self.all_info['ego_pos_traj'][-1]

        if self.sim_ego_pos_gb is None:
            self.sim_ego_pos_gb = np.array(ego_pose['translation'])[:2]
            self.sim_ego_quat_gb = np.array(ego_pose['rotation'])
            self.sim_ego_speed = self.all_info['ego_speed']
            self.sim_ego_yaw_rate = self.all_info['ego_yaw_rate']

            sim_ego_yaw = Quaternion(self.sim_ego_quat_gb)
            self.sim_ego_yaw = quaternion_yaw(sim_ego_yaw)
            #self.sim_ego_yaw = angle_of_rotation(sim_ego_yaw)

        #### sim ego pose ####
        self.sim_ego_pos_traj.append(self.sim_ego_pos_gb.copy())
        self.sim_ego_quat_traj.append(self.sim_ego_quat_gb.copy())

        self.all_info['sim_ego_pos_gb'] = self.sim_ego_pos_gb
        self.all_info['sim_ego_quat_gb'] = self.sim_ego_quat_gb
        self.all_info['sim_ego_yaw_rad'] = self.sim_ego_yaw
        self.all_info['sim_ego_speed'] = self.sim_ego_speed
        self.all_info['sim_ego_yaw_rate'] = self.sim_ego_yaw_rate
        self.all_info['sim_ego_pos_traj'] = self.sim_ego_pos_traj
        self.all_info['sim_ego_quat_traj'] = self.sim_ego_quat_traj

        #### future lanes ####
        # self.all_info['gt_future_lanes'] = get_future_lanes(self.nusc_map, self.all_info['ego_pos_gb'], self.all_info['ego_quat_gb'], frame='global')

        # self.all_info['future_lanes'] = get_future_lanes(self.nusc_map, self.sim_ego_pos_gb, self.sim_ego_quat_gb, frame='global')

        #### neighbor pos ####
        current_sim_neighbor_pos = []
        for agent in sensor_info['agent_info']:
            current_sim_neighbor_pos.append(agent['translation'])

        self.all_info['current_sim_neighbor_pos'] = np.array(
            current_sim_neighbor_pos)

        #### rasterized image ####
        if 'raster_image' in self.config[
                'all_info_fields'] and self.rasterizer is not None:
            #### ego raster img ####
            ego_raster_img = self.rasterizer.make_input_representation(
                instance_token=None,
                sample_token=self.sample_token,
                ego=True,
                ego_pose=ego_pose,
                include_history=False)
            #ego_raster_img = np.transpose(ego_raster_img, (2,0,1))
            self.all_info['raster_image'] = ego_raster_img

            #### sim ego raster img ####
            sim_ego_pose = {
                'translation': self.sim_ego_pos_gb,
                'rotation': self.sim_ego_quat_gb
            }

            sim_ego_raster_img = self.rasterizer.make_input_representation(
                instance_token=None,
                sample_token=self.sample_token,
                ego=True,
                ego_pose=sim_ego_pose,
                include_history=False)
            #sim_ego_raster_img = np.transpose(sim_ego_raster_img, (2,0,1))
            self.all_info['sim_ego_raster_image'] = sim_ego_raster_img
        else:
            self.all_info['raster_image'] = None
            self.all_info['sim_ego_raster_image'] = None
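# The yaw extraction used throughout these examples goes through the nuScenes
# devkit helpers quaternion_yaw and angle_of_rotation. A rough sketch of what they
# are assumed to compute (heading of the rotated x-axis, and the rotation needed
# to align that heading with +y); the devkit implementations may differ in detail:
import numpy as np
from pyquaternion import Quaternion

def quaternion_yaw_sketch(q: Quaternion) -> float:
    # project the rotated x-axis onto the ground plane and measure its angle
    v = np.dot(q.rotation_matrix, np.array([1.0, 0.0, 0.0]))
    return float(np.arctan2(v[1], v[0]))

def angle_of_rotation_sketch(yaw: float) -> float:
    # angle to rotate by so that the given yaw ends up aligned with +y (pi / 2)
    return (np.pi / 2) + np.sign(-yaw) * np.abs(yaw)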
Example #3
    def generate_mask(self, translation, rotation, sample_token: str):

        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        # position and heading (global frame) used to center the map patch
        x, y = translation[:2]
        yaw = quaternion_yaw(Quaternion(rotation))
        yaw_corrected = correct_yaw(yaw)

        # 1. generate map masks
        image_side_length = 2 * max(self.meters_ahead, self.meters_behind,
                                    self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)
        patchbox = get_patchbox(x, y, image_side_length)
        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi
        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox,
                                                 angle_in_degrees,
                                                 self.layer_names,
                                                 canvas_size=canvas_size)

        # 2. generate guided lanes
        agent_pixels = int(image_side_length_pixels / 2), int(
            image_side_length_pixels / 2)
        base_image = np.zeros(
            (image_side_length_pixels, image_side_length_pixels, 3))

        meter_resolution = 0.5
        radius = 50.0
        lanes = get_lanes_in_radius(x,
                                    y,
                                    radius=radius,
                                    discretization_meters=meter_resolution,
                                    map_api=self.maps[map_name])
        image_with_lanes = draw_lanes_on_image(base_image,
                                               lanes, (x, y),
                                               yaw,
                                               agent_pixels,
                                               self.resolution,
                                               color_function=color_by_yaw)
        rotation_mat = get_rotation_matrix(image_with_lanes.shape, yaw)
        rotated_image = cv2.warpAffine(image_with_lanes, rotation_mat,
                                       image_with_lanes.shape[:2])
        rotated_image_lanes = rotated_image.astype("uint8")

        # 3. combine masks
        images = []
        for mask, color in zip(masks, self.colors):
            images.append(
                change_color_of_binary_mask(
                    np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))
        images.append(rotated_image_lanes)
        image_show = self.combinator.combine(images)

        # crop
        row_crop, col_crop = get_crops(
            self.meters_ahead, self.meters_behind, self.meters_left,
            self.meters_right, self.resolution,
            int(image_side_length / self.resolution))

        return np.array(images)[:, row_crop, col_crop, :], lanes, image_show
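# A quick sanity check of the raster geometry above, assuming typical nuScenes
# prediction settings (illustrative numbers, not values read from this class):
# with meters_ahead=40, meters_behind=10, meters_left=25, meters_right=25 and
# resolution=0.1 m/px,
#   image_side_length        = 2 * max(40, 10, 25, 25) = 80 m
#   image_side_length_pixels = int(80 / 0.1)           = 800 px
# so the map masks are drawn on an 800 x 800 canvas before being cropped back to
# the agent-centered view.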
Example #4
    def plot_car_to_predict(self,
                            ax,
                            agent_future: np.ndarray,
                            agent_past: np.ndarray,
                            instance_token: str,
                            sample_token: str,
                            text_box: bool = True,
                            predictions: dict = None,
                            agent_state_dict: dict = None):
        '''
        predictions = {
            'name': {'data': <data>, 'color': <color>, 'frame': <frame>, 'style': '.'}
        }
        '''

        #### plot car ####
        ann = self.helper.get_sample_annotation(instance_token, sample_token)

        category = ann['category_name']
        if len(ann['attribute_tokens']) != 0:
            attribute = self.nusc.get('attribute',
                                      ann['attribute_tokens'][0])['name']
        else:
            attribute = ""

        agent_yaw = Quaternion(ann['rotation'])
        agent_yaw = quaternion_yaw(agent_yaw)
        agent_yaw = angle_of_rotation(agent_yaw)
        agent_yaw = np.rad2deg(agent_yaw)

        self.plot_elements([ann['translation'][0], ann['translation'][1]],
                           agent_yaw, 'current_car', ax)
        if text_box:
            self.plot_text_box(
                ax, category,
                [ann['translation'][0] + 1.2, ann['translation'][1]])
            self.plot_text_box(
                ax, attribute,
                [ann['translation'][0] + 1.2, ann['translation'][1] + 1.2])
            if agent_state_dict is not None:
                state_str = ""
                for k, v in agent_state_dict.items():
                    state_str += f"{k[0]}:{v:.2f}, "
                self.plot_text_box(
                    ax, state_str,
                    [ann['translation'][0] + 1.2, ann['translation'][1] + 3.2])

        agent_pos = [ann['translation'][0], ann['translation'][1]]
        agent_yaw_deg = agent_yaw

        # plot ground truth
        if len(agent_future) > 0:
            ax.scatter(agent_future[:, 0],
                       agent_future[:, 1],
                       s=20,
                       c='yellow',
                       alpha=1.0,
                       zorder=200)
        if len(agent_past) > 0:
            ax.scatter(agent_past[:, 0],
                       agent_past[:, 1],
                       s=20,
                       c='k',
                       alpha=0.5,
                       zorder=200)

        # plot predictions
        if predictions is not None:
            for k, v in viewitems(predictions):
                if v['frame'] == 'local':
                    v['data'] = convert_local_coords_to_global(
                        v['data'], np.array(agent_pos),
                        np.array(ann['rotation']))
                if 'style' not in v.keys():
                    v['style'] = '.'
                if v['style'] == '.':
                    ax.scatter(v['data'][:, 0],
                               v['data'][:, 1],
                               s=20,
                               c=v['color'],
                               alpha=1.0,
                               zorder=2)
                elif v['style'] == '-':
                    ax.plot(v['data'][:, 0],
                            v['data'][:, 1],
                            c=v['color'],
                            alpha=1.0,
                            zorder=2)
                else:
                    raise ValueError('style not supported')

        return agent_pos, ann['rotation']
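# A minimal, hypothetical usage sketch for plot_car_to_predict; the plotter
# instance, tokens and prediction array below are placeholders, not values from
# this repository.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
predictions = {
    'my_model': {
        'data': np.zeros((12, 2)),  # 12 predicted (x, y) waypoints
        'color': 'red',
        'frame': 'local',           # converted to global coordinates before plotting
        'style': '-',
    }
}
# plotter.plot_car_to_predict(ax, agent_future, agent_past,
#                             instance_token, sample_token,
#                             text_box=True, predictions=predictions)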
Example #5
    def plot_road_agents(self,
                         ax,
                         fig,
                         sample,
                         plot_list,
                         text_box,
                         sensor_info,
                         my_patch,
                         plot_traj=False,
                         contour_func=None,
                         animated_agent=False):

        road_agents_in_patch = {'pedestrians': [], 'vehicles': []}
        for ann_token in sample['anns']:
            ann = self.nusc.get('sample_annotation', ann_token)
            category = ann['category_name']
            if len(ann['attribute_tokens']) != 0:
                attribute = self.nusc.get('attribute',
                                          ann['attribute_tokens'][0])['name']
            else:
                attribute = ""

            pos = [ann['translation'][0], ann['translation'][1]]
            instance_token = ann['instance_token']
            sample_token = sample['token']
            #### Plot other agents ####
            valid_agent = False
            if 'other_cars' in plot_list and 'vehicle' in category and 'parked' not in attribute and self.in_my_patch(
                    pos, my_patch):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)
                self.plot_elements(pos,
                                   agent_yaw,
                                   'other_cars',
                                   ax,
                                   animated_agent=animated_agent)

                car_info = {
                    'instance_token': ann['instance_token'],
                    'category': category,
                    'attribute': attribute,
                    'translation': pos,
                    'rotation_quat': ann['rotation'],
                    'rotation_deg': agent_yaw
                }
                road_agents_in_patch['vehicles'].append(car_info)

                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])
                    self.plot_text_box(ax, ann['instance_token'], [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] + 1.2
                    ])

            #### Plot pedestrians ####
            if 'pedestrian' in plot_list and 'pedestrian' in category and not 'stroller' in category and not 'wheelchair' in category and self.in_my_patch(
                    pos, my_patch):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)

                self.plot_elements(pos, agent_yaw, 'pedestrian', ax)
                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])

            if valid_agent and plot_traj:
                agent_future = self.helper.get_future_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['pred_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_past = self.helper.get_past_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['obs_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                if len(agent_future) > 0:
                    ax.scatter(agent_future[:, 0],
                               agent_future[:, 1],
                               s=10,
                               c='y',
                               alpha=1.0,
                               zorder=200)
                if len(agent_past) > 0:
                    ax.scatter(agent_past[:, 0],
                               agent_past[:, 1],
                               s=10,
                               c='k',
                               alpha=0.2,
                               zorder=200)

        return road_agents_in_patch
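# in_my_patch is not shown in this snippet. A minimal sketch, assuming my_patch is
# an axis-aligned (x_min, y_min, x_max, y_max) box in global coordinates; the real
# method may use a different patch representation:
def in_my_patch_sketch(pos, my_patch):
    x_min, y_min, x_max, y_max = my_patch
    return x_min <= pos[0] <= x_max and y_min <= pos[1] <= y_max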
Example #6
    def make_representation(self,
                            instance_token: str = None,
                            sample_token: str = None,
                            ego=False,
                            ego_pose=None) -> np.ndarray:
        """
        Makes rasterized representation of static map layers.
        :param instance_token: Token for instance.
        :param sample_token: Token for sample.
        :return: Three channel image.
        """

        if not ego:
            sample_annotation = self.helper.get_sample_annotation(
                instance_token, sample_token)
        else:
            if ego_pose is None:
                sample_ = self.helper.data.get('sample', sample_token)
                sample_data = self.helper.data.get(
                    'sample_data', sample_['data']['CAM_FRONT'])
                ego_pose = self.helper.data.get('ego_pose',
                                                sample_data['ego_pose_token'])
            sample_annotation = {
                'translation': ego_pose['translation'],
                'rotation': ego_pose['rotation'],
                'instance_token': None
            }

        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        x, y = sample_annotation['translation'][:2]

        yaw = quaternion_yaw(Quaternion(sample_annotation['rotation']))

        yaw_corrected = correct_yaw(yaw)

        image_side_length = 2 * max(self.meters_ahead, self.meters_behind,
                                    self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)

        patchbox = get_patchbox(x, y, image_side_length)

        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi

        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox,
                                                 angle_in_degrees,
                                                 self.layer_names,
                                                 canvas_size=canvas_size)

        images = []
        for mask, color in zip(masks, self.colors):
            images.append(
                change_color_of_binary_mask(
                    np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))

        lanes = draw_lanes_in_agent_frame(image_side_length_pixels,
                                          x,
                                          y,
                                          yaw,
                                          radius=50,
                                          image_resolution=self.resolution,
                                          discretization_resolution_meters=1,
                                          map_api=self.maps[map_name])

        images.append(lanes)

        image = self.combinator.combine(images)

        row_crop, col_crop = get_crops(
            self.meters_ahead, self.meters_behind, self.meters_left,
            self.meters_right, self.resolution,
            int(image_side_length / self.resolution))

        return image[row_crop, col_crop, :]
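# get_crops comes from the nuScenes prediction devkit and is not reproduced here.
# A sketch of what it is assumed to return: row/column slices keeping
# meters_ahead/behind vertically and meters_left/right horizontally around the
# agent at the canvas center; the actual devkit code may differ:
def get_crops_sketch(meters_ahead, meters_behind, meters_left, meters_right,
                     resolution, image_side_length_pixels):
    row_crop = slice(0, int((meters_ahead + meters_behind) / resolution))
    col_crop = slice(int(image_side_length_pixels / 2 - meters_left / resolution),
                     int(image_side_length_pixels / 2 + meters_right / resolution))
    return row_crop, col_crop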
Example #7
def render(graphics, render_info, config=None):
    if config is None:
        config = {}

    if 'image' in config['render_type']:
        sim_ego_yaw = Quaternion(render_info['sim_ego_quat_gb'])
        sim_ego_yaw = quaternion_yaw(sim_ego_yaw)
        sim_ego_yaw = angle_of_rotation(sim_ego_yaw)
        sim_ego_yaw = np.rad2deg(sim_ego_yaw)

        ego_traj = None
        if 'sim_ego' in config['render_elements']:
            ego_traj = {
                # 'lane': {
                #     'traj': self.all_info['future_lanes'][0],
                #     'color': 'green'
                # },
                'sim_ego': {
                    'pos': render_info['sim_ego_pos_gb'],
                    'yaw': sim_ego_yaw,
                    'traj': np.zeros((4, 2)),
                    'color': 'yellow'
                }
            }

        #### control plots ####
        '''
        if 'control_plots' in config['render_elements']:
            fig, ax = plt.subplots(2,1)
            ax[0].plot(list(ap_timesteps), list(ap_speed), 'o-')
            ax[0].set_xlabel('timestep', fontsize=20)
            ax[0].set_ylabel('speed', fontsize=20)
            ax[1].plot(list(ap_timesteps), list(ap_steering), 'o-')
            ax[1].set_xlabel('timestep', fontsize=20)
            ax[1].set_ylabel('steering', fontsize=20)
            canvas = FigureCanvas(fig)
            canvas.draw()
            width, height = fig.get_size_inches() * fig.get_dpi()
            image = np.fromstring(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
            if 'image' not in render_info.keys():
                render_info['image'] = {}
            render_info['image'].update({'cmd': image})
        '''

        save_img_dir = None
        if render_info['save_image_dir'] is not None:
            save_img_dir = os.path.join(render_info['save_image_dir'],
                                        str(render_info['scene_name']))
            if not os.path.exists(save_img_dir):
                os.makedirs(save_img_dir, exist_ok=True)

        sensor_info = None
        if 'sensor_info' in config['render_elements']:
            sensor_info = render_info['all_info']['sensor_info']

        ado_traj_dict = None
        if 'ado_traj_dict' in render_info.keys():
            ado_traj_dict = render_info['ado_traj_dict']

        costmap_contour = None
        if 'costmap_contour' in render_info.keys():
            costmap_contour = render_info['costmap_contour']

        other_images_to_be_saved = None
        if 'sim_ego_raster_image' in render_info['all_info'].keys(
        ) and render_info['all_info']['sim_ego_raster_image'] is not None:
            raster = render_info['all_info']['sim_ego_raster_image']
            if raster.shape == (3, 250, 250):
                raster = np.transpose(raster, (1, 2, 0))
            assert_shape(raster, 'raster', (250, 250, 3))
            other_images_to_be_saved = {'raster': raster}

        if 'image' in render_info.keys():
            if other_images_to_be_saved is None:
                other_images_to_be_saved = render_info['image']
            else:
                other_images_to_be_saved.update(render_info['image'])

        render_additional = {}
        if 'lines' in render_info.keys():
            render_additional['lines'] = render_info['lines']
        if 'scatters' in render_info.keys():
            render_additional['scatters'] = render_info['scatters']
        if 'text_boxes' in render_info.keys():
            render_additional['text_boxes'] = render_info['text_boxes']

        plot_human_ego = 'human_ego' in config['render_elements']

        # ego-centric rendering when no instance token is given or the token is 'ego'
        ego_centric = render_info['instance_token'] in ('ego', None)

        plot_list = [
            'ego', 'other_cars', 'labeled_map', 'sensing_patch', 'sensor_info',
            'cam'
        ]
        fig, ax, other = graphics.plot_ego_scene(
            ego_centric=ego_centric,
            sample_token=render_info['sample_token'],
            instance_token=render_info['instance_token'],
            ego_traj=ego_traj,
            ado_traj=ado_traj_dict,
            contour=costmap_contour,
            save_img_dir=save_img_dir,
            idx=str(render_info['sample_idx']).zfill(2),
            sensor_info=sensor_info,
            paper_ready=config['render_paper_ready'],
            other_images_to_be_saved=other_images_to_be_saved,
            render_additional=render_additional,
            plot_human_ego=plot_human_ego,
            patch_margin=config['patch_margin'],
            plot_list=plot_list)

        return fig, ax, other
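# A minimal sketch of the dictionaries render() reads above; every value here is a
# placeholder chosen for illustration, not a value taken from this repository:
example_config = {
    'render_type': ['image'],
    'render_elements': ['sim_ego'],
    'render_paper_ready': False,
    'patch_margin': 30,
}
example_render_info = {
    'sim_ego_quat_gb': [1.0, 0.0, 0.0, 0.0],
    'sim_ego_pos_gb': [0.0, 0.0],
    'save_image_dir': None,
    'scene_name': 'scene-0001',
    'instance_token': None,          # None -> ego-centric rendering
    'sample_token': '<sample_token>',
    'sample_idx': 0,
    'all_info': {'sensor_info': None, 'sim_ego_raster_image': None},
}
# fig, ax, other = render(graphics, example_render_info, example_config)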
Example #8
    def get_info(self,
                 sample_token: str,
                 ego_pos: np.ndarray = None,
                 ego_quat: np.ndarray = None,
                 instance_based=False):

        sample = self.nusc.get('sample', sample_token)
        scene = self.nusc.get('scene', sample['scene_token'])
        scene_log = self.nusc.get('log', scene['log_token'])
        nusc_map = NuScenesMap(dataroot=self.dataroot,
                               map_name=scene_log['location'])
        sample_data = self.nusc.get('sample_data', sample['data']['CAM_FRONT'])

        if ego_pos is None:
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            ego_pos = np.array(ego_pose['translation'])
        if ego_quat is None:
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            ego_quat = np.array(ego_pose['rotation'])

        ego_yaw = Quaternion(ego_quat)
        ego_yaw = quaternion_yaw(ego_yaw)
        ego_yaw_rad = angle_of_rotation(ego_yaw)
        ego_yaw_degrees = np.rad2deg(ego_yaw_rad)

        ego_on_road_objects = None
        if self.agent_road_objects:
            ego_on_road_objects = self.get_road_objects(ego_pos, nusc_map)

        ego_info = {
            'translation': ego_pos,
            'rotation_deg': ego_yaw_degrees,
            'rotation_quat': ego_quat,
            'velocity': 0,
            'acceleration': 0,
            'heading_change_rate': 0,
            'road_objects': ego_on_road_objects
        }

        #### define patch ####
        sensing_patch_width = self.config['sensing_patch_size'][0]
        sensing_patch_length = self.config['sensing_patch_size'][1]

        # patch_center_before_rotation = np.array([ego_pos[0],
        #                                          ego_pos[1] + sensing_patch_length/2])

        # lower_left_before_rotation = np.array([ego_pos[0] - sensing_patch_width/2,
        #                                        ego_pos[1] - sensing_patch_length/2])

        # sensing_patch = self.get_patch_coord(patch_box=(patch_center_before_rotation[0],
        #                                                 patch_center_before_rotation[1],
        #                                                 sensing_patch_length,
        #                                                 sensing_patch_width),
        #                                      rotate_center=(ego_pos[0], ego_pos[1]),
        #                                      patch_angle=-ego_yaw_degrees)

        patch_center_before_rotation = np.array([ego_pos[0], ego_pos[1]])

        lower_left_before_rotation = np.array([
            ego_pos[0] - sensing_patch_width / 2,
            ego_pos[1] - sensing_patch_length / 2
        ])

        sensing_patch_coord_before_rotation = [
            ego_pos[0] - sensing_patch_width / 2,
            ego_pos[1] - sensing_patch_length / 2,
            ego_pos[0] + sensing_patch_width / 2,
            ego_pos[1] + sensing_patch_length / 2
        ]

        ## generate sensing patch mesh
        x = np.arange(sensing_patch_coord_before_rotation[0],
                      sensing_patch_coord_before_rotation[2], 0.2)
        y = np.arange(sensing_patch_coord_before_rotation[1],
                      sensing_patch_coord_before_rotation[3], 0.2)

        X, Y = np.meshgrid(x, y)
        ### apply rotation
        X, Y = rotate_mesh2D(pos=ego_pos,
                             rot_rad=ego_yaw_rad,
                             X=X,
                             Y=Y,
                             frame='current')

        ## generate sensing patch shapely polygon
        sensing_patch = self.get_patch_coord(
            patch_box=(patch_center_before_rotation[0],
                       patch_center_before_rotation[1], sensing_patch_length,
                       sensing_patch_width),
            rotate_center=(ego_pos[0], ego_pos[1]),
            patch_angle=-ego_yaw_degrees)

        sensing_patch_info = {'mesh': [X, Y], 'polygon': sensing_patch}
        # get agent_info in patch
        agent_info = self.get_agent_info(sample, sensing_patch, nusc_map)

        # get map info in patch
        map_info = self.get_map_info(ego_pos, ego_yaw_degrees, nusc_map,
                                     sensing_patch)

        # get canbus info
        can_info = None
        if not instance_based:
            can_info = self.get_can_info(scene['name'])

        sensing_info = {
            'sample_token': sample_token,
            'sensing_patch': sensing_patch_info,
            'ego_info': ego_info,
            'agent_info': agent_info,
            'map_info': map_info,
            'can_info': can_info
        }

        return sensing_info
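# rotate_mesh2D is not shown in this snippet. A sketch of what it is assumed to do
# (rotate every meshgrid point about `pos` by `rot_rad`); the real helper, including
# the meaning of its `frame` argument, may differ:
import numpy as np

def rotate_mesh2D_sketch(pos, rot_rad, X, Y):
    c, s = np.cos(rot_rad), np.sin(rot_rad)
    dx, dy = X - pos[0], Y - pos[1]
    return pos[0] + c * dx - s * dy, pos[1] + s * dx + c * dy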
Example #9
    def get_agent_info(self, sample, sensing_patch, nusc_map):
        agent_info = []
        for ann_token in sample['anns']:
            ann = self.nusc.get('sample_annotation', ann_token)
            instance_token = ann['instance_token']
            category = ann['category_name']
            if len(ann['attribute_tokens']) != 0:
                attribute = self.nusc.get('attribute',
                                          ann['attribute_tokens'][0])['name']
            else:
                attribute = ""

            agent_pos = [ann['translation'][0], ann['translation'][1]]

            include = False
            #### Cars ####
            if 'vehicle' in category and 'parked' not in attribute and self.in_shapely_polygon(
                    agent_pos, sensing_patch):
                include = True
                agent_type = 'car'

            #### pedestrians ####
            if 'pedestrian' in category and not 'stroller' in category and not 'wheelchair' in category and self.in_shapely_polygon(
                    agent_pos, sensing_patch):
                include = True
                agent_type = "pedestrian"

            if include:
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw_deg = np.rad2deg(agent_yaw)

                agent_vel = self.helper.get_velocity_for_agent(
                    instance_token, sample['token'])
                agent_acc = self.helper.get_acceleration_for_agent(
                    instance_token, sample['token'])
                agent_heading_change_rate = self.helper.get_heading_change_rate_for_agent(
                    instance_token, sample['token'])

                agent_future = self.helper.get_future_for_agent(
                    instance_token,
                    sample['token'],
                    self.na_config['pred_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_past = self.helper.get_past_for_agent(
                    instance_token,
                    sample['token'],
                    self.na_config['obs_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_on_road_objects = None
                if self.agent_road_objects:
                    agent_on_road_objects = self.get_road_objects(
                        agent_pos, nusc_map)

                ## agent nearest lane ##
                closest_lane_data, incoming_lane_data, outgoing_lane_data = self.get_closest_lane(
                    ann['translation'], nusc_map)

                lane_data = {
                    'closest_lane_data': closest_lane_data,
                    'incoming_lane_data': incoming_lane_data,
                    'outgoing_lane_data': outgoing_lane_data
                }

                # past_lane = self.get_past_lane(ann['translation'][:2], ann['rotation'], lane_data)
                # future_lanes = self.get_past_lane(ann['translation'][:2], ann['rotation'], lane_data)

                past_lane, future_lanes = self.get_past_and_future_lanes(
                    ann['translation'][:2], ann['rotation'], lane_data)

                tmp_agent_info = {
                    'instance_token': instance_token,
                    'type': agent_type,
                    'category': category,
                    'attribute': attribute,
                    'translation': agent_pos,
                    'rotation_deg': agent_yaw_deg,
                    'rotation_quat': ann['rotation'],
                    'velocity': agent_vel,
                    'acceleration': agent_acc,
                    'heading_change_rate': agent_heading_change_rate,
                    'past': agent_past,
                    'future': agent_future,
                    'road_objects': agent_on_road_objects,
                    'past_lane': past_lane,
                    'future_lanes': future_lanes
                }

                agent_info.append(tmp_agent_info)

        return agent_info
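    # in_shapely_polygon (used above) is not shown in this snippet. A minimal
    # sketch, assuming it simply tests point membership in the shapely
    # sensing-patch polygon:
    #
    #     from shapely.geometry import Point
    #     def in_shapely_polygon(pos, polygon):
    #         return polygon.contains(Point(pos[0], pos[1]))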
    def get_new_format(self,
                       samples_agents,
                       format_for_model,
                       out_file="./transformer_format.txt",
                       num_seconds=None):
        # for current_sample in samples_agents:
        #     instance_token, sample_token = current_sample.split("_")
        #     traj = self.helper.get_future_for_agent(instance_token, sample_token, 6, True)
        #     past_traj = self.helper.get_past_for_agent(instance_token, sample_token, 6, True)
        #
        #     if len(past_traj) + len(traj) + 1 < 20:
        #         print(len(past_traj) + len(traj) + 1)
        #
        # exit()
        ####################
        # Map each sample token (frame) to a sequential id:
        # for each agent sample, get the scene it belongs to and its first sample (frame),
        # loop over all samples from that first sample until the end of the scene,
        # and store a sequential id for every sample (and every instance) in a dictionary
        splitting_format = '\t'

        if format_for_model.value == FORMAT_FOR_MODEL.TRAFFIC_PREDICT.value:
            splitting_format = " "

        instance_token_to_id_dict = {}
        sample_token_to_id_dict = {}
        scene_token_dict = {}
        sample_id = 0
        instance_id = 0

        for current_sample in samples_agents:
            instance_token, sample_token = current_sample.split("_")
            scene_token = self.nuscenes.get('sample',
                                            sample_token)["scene_token"]

            if scene_token in scene_token_dict:
                continue

            # get the first sample in this sequence
            scene_token_dict[scene_token] = True
            first_sample_token = self.nuscenes.get(
                "scene", scene_token)["first_sample_token"]
            current_sample = self.nuscenes.get('sample', first_sample_token)

            while True:
                if current_sample['token'] not in sample_token_to_id_dict:
                    sample_token_to_id_dict[
                        current_sample['token']] = sample_id
                    sample_id += 1
                else:
                    print("duplicate sample token; this should not happen")

                instances_in_sample = self.helper.get_annotations_for_sample(
                    current_sample['token'])

                for sample_instance in instances_in_sample:
                    if sample_instance[
                            'instance_token'] not in instance_token_to_id_dict:
                        instance_token_to_id_dict[
                            sample_instance['instance_token']] = instance_id
                        instance_id += 1

                if current_sample['next'] == "":
                    break

                current_sample = self.nuscenes.get('sample',
                                                   current_sample['next'])

        #############
        # Converting to the transformer network format
        # frame_id, agent_id, pos_x, pos_y
        # todo:
        # loop over all the agents; if an agent has not been taken yet:
        # 1- add it to the taken agents (do not process the same agent twice)
        # 2- get the number of appearances of this agent
        # 3- skip this agent if it appears for less than 10 s (4 s past + 6 s future)
        # 4- get the agent's middle sample token
        # 5- get the agent's past and future locations relative to that middle sample
        samples_new_format = []
        taken_instances = {}
        ds_size = 0

        for current_sample in samples_agents:
            instance_token, sample_token = current_sample.split("_")
            instance_id, sample_id = instance_token_to_id_dict[
                instance_token], sample_token_to_id_dict[sample_token]

            if instance_id in taken_instances:
                continue

            taken_instances[instance_id] = True

            trajectory = self.get_trajectory_around_sample(
                instance_token, sample_token)
            trajectory_full_instances = self.get_trajectory_around_sample(
                instance_token, sample_token, just_xy=False)
            # traj_samples_token = [instance['sample_token'] for instance in trajectory_full_instances]

            if len(trajectory) < 20:
                print("length is less than 20 samples, trajectory length is: ",
                      len(trajectory))
                continue

            ds_size += 1

            if num_seconds is not None:
                start, end = len(trajectory) // 2 - 9, len(trajectory) // 2 + 11
                starting_frame = (start + end) // 2

                middle_sample_token = trajectory_full_instances[
                    starting_frame]["sample_token"]
                trajectory = self.get_trajectory_around_sample(
                    instance_token,
                    middle_sample_token,
                    just_xy=True,
                    num_seconds=num_seconds,
                    in_agent_frame=True)
                trajectory_full_instances = self.get_trajectory_around_sample(
                    instance_token,
                    middle_sample_token,
                    just_xy=False,
                    num_seconds=num_seconds,
                    in_agent_frame=True)
                # traj_samples_token = [instance['sample_token'] for instance in trajectory_full_instances]

            # get_trajectory at this position
            for i in range(trajectory.shape[0]):
                traj_sample, sample_token = trajectory[
                    i], trajectory_full_instances[i]["sample_token"]
                sample_id = sample_token_to_id_dict[sample_token]
                if format_for_model.value == FORMAT_FOR_MODEL.TRANSFORMER_NET.value:
                    yaw = quaternion_yaw(
                        Quaternion(trajectory_full_instances[i]["rotation"]))

                    # TRANSFORMER_NET row: frame_id, agent_id, pos_x, pos_y, yaw
                    # (an earlier variant wrote the agent-frame traj_sample[0/1] instead
                    # of the global translation; z and the box size are unused here)
                    x, y, z = trajectory_full_instances[i]["translation"]
                    w, l, h = trajectory_full_instances[i]["size"]

                    samples_new_format.append(
                        splitting_format.join([
                            str(sample_id),
                            str(instance_id),
                            str(x),
                            str(y),
                            str(yaw)
                        ]) + "\n")
                elif format_for_model.value == FORMAT_FOR_MODEL.TRAFFIC_PREDICT.value:
                    # raise Exception("not implemented yet")
                    category_token = self.nuscenes.get(
                        "instance", instance_token)["category_token"]
                    object_type = self.category_token_to_id[category_token]
                    # frame_id, object_id, object_type,
                    # position_x, position_y, position_z,
                    # object_length, object_width, object_height,
                    # heading
                    x, y, z = trajectory_full_instances[i]["translation"]
                    w, l, h = trajectory_full_instances[i]["size"]
                    # yaw = angle_of_rotation(quaternion_yaw(Quaternion(trajectory_full_instances[i]["rotation"])))
                    yaw = quaternion_yaw(
                        Quaternion(trajectory_full_instances[i]["rotation"]))

                    samples_new_format.append(
                        splitting_format.join([
                            str(sample_id),
                            str(instance_id),
                            str(object_type),
                            str(x),
                            str(y),
                            str(z),
                            str(l),
                            str(w),
                            str(h),
                            str(yaw)
                        ]) + "\n")
            # annotations = helper.get_annotations_for_sample(sample_token)

            # for ann in annotations:
            #     # q = ann['rotation']
            #     # yaw = math.atan2(2.0 * (q[3] * q[0] + q[1] * q[2]), - 1.0 + 2.0 * (q[0] * q[0] + q[1] * q[1]))*180/math.pi
            #     # if yaw < 0:
            #     #     yaw += 360
            #     # selected_sample_data = [sample_id, instance_id] + ann['translation'] + [yaw] + ann['size']
            #     selected_sample_data = str(sample_id) + " " + str(instance_token_to_id_dict[ann['instance_token']])\
            #                            + " " + str(ann['translation'][0]) + " " + str(ann['translation'][2]) + "\n"
            #     samples_new_format.append(selected_sample_data)

        # sorting is not strictly needed here (the TransformerNet data loader sorts
        # again); it is kept for parity with that loader

        samples_new_format.sort(
            key=lambda x: int(x.split(splitting_format)[0]))

        with open(out_file, 'w') as fw:
            fw.writelines(samples_new_format)

    print(out_file + " size: " + str(ds_size))
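# A small sketch of how the rows written above can be read back (TRANSFORMER_NET
# variant: frame_id, agent_id, pos_x, pos_y, yaw separated by tabs); the file name
# is just the default out_file used above:
rows = []
with open("./transformer_format.txt") as f:
    for line in f:
        frame_id, agent_id, x, y, yaw = line.rstrip("\n").split("\t")
        rows.append((int(frame_id), int(agent_id), float(x), float(y), float(yaw)))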