Code Example #1
def init_nu():
    """ -------------------------------------------------------------------------------------------------------------
    Initialize nuScenes

    return:         [tuple] NuScenes instance, NuScenesCanBus instance
    ------------------------------------------------------------------------------------------------------------- """
    nu              = NuScenes( version=nuscenes_ver, dataroot=dir_orig, verbose=verbose )      # NuScenes database
    nc              = NuScenesCanBus( dataroot=dir_orig )                                       # NuScenes CAN bus
    return nu, nc
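
init_nu reads module-level settings rather than taking arguments. A minimal sketch of the globals it assumes, with illustrative values:

nuscenes_ver = 'v1.0-mini'             # nuScenes version string
dir_orig = '/data/sets/nuscenes'       # dataset root (must also contain can_bus/)
verbose = True

nu, nc = init_nu()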
Code Example #2
File: process_raw.py Project: xli4217/nuscenes_env
def process_once(scene_name_list=[], data_save_dir=None, config={}):
    """Get raw data from the dataset; every field is collected into a list.

    :param scene_name_list: list of scene names to process
    :param data_save_dir: directory to save processed raw data
    :param config: additional configs
    :returns: None; one pandas DataFrame per scene is pickled to data_save_dir

    """
    test_scene_name = config['scene_name']
    
    if config['num_workers'] == 1:
        nusc = config['other_configs']['nusc']
        helper = config['other_configs']['helper']
        rasterizer = config['other_configs']['rasterizer']
    else:
        nusc = ray.get(config['other_configs']['nusc'])
        helper = ray.get(config['other_configs']['helper'])
        rasterizer = ray.get(config['other_configs']['rasterizer'])

    dataroot = config['other_configs']['dataroot']
    
    nusc_can = NuScenesCanBus(dataroot=dataroot)
   
    df_dict = {
        #### scene info ####
        'scene_name': [],
        'scene_token':[],
        'scene_nbr_samples':[],
        'scene_description':[],
        'scene_log_token':[],
        'scene_map_token':[],
        'scene_vehicle_id': [],
        'scene_date_captured':[],
        'scene_location':[],
        #### sample info ####
        'sample_idx': [],
        'sample_token':[],
        'time_stamp': [],
        #### ego info ####
        'current_ego_pos': [],
        'current_ego_quat':[],
        'current_ego_accel':[],
        'current_ego_speed':[],
        'current_ego_steering': [],
        'current_ego_on_road_objects':[],
        'current_ego_raster_img':[],
        'current_ego_raster_img_path': [],
        #### instance info ####
        'annotation_token': [],
        'instance_token': [],
        'instance_category': [],
        'instance_attributes': [],
        'instance_nbr_annotations':[],
        'current_instance_pos': [],
        'current_instance_quat': [],
        'current_instance_speed': [],
        'current_instance_accel':[],
        'current_instance_on_road_objects':[],
        'current_instance_raster_img':[],
        'current_instance_raster_img_path': []
    }

    #### loop over scenes ####
    for scene_name in tqdm.tqdm(scene_name_list, 'processing scene'):
        if test_scene_name is not None and scene_name != test_scene_name:
            continue
        
        scene_df_dict = copy.deepcopy(df_dict)
        
        scene_token = nusc.field2token('scene', 'name', scene_name)[0]
        scene = nusc.get('scene', scene_token)
        scene_log = nusc.get('log', scene['log_token'])
        scene_map = nusc.get('map', scene_log['map_token'])
        nusc_map = NuScenesMap(dataroot=dataroot, map_name=scene_log['location'])

        raster_dir = os.path.join(data_save_dir, scene_name)
        if not os.path.isdir(raster_dir):
            os.makedirs(raster_dir)
        
        # can bus information
        if scene_name in NO_CANBUS_SCENES:
            ego_accel_traj = [None] * 50
            ego_rotation_rate_traj = [None] * 50
            ego_speed_traj = [None] * 50
        else:
            pose_msg = nusc_can.get_messages(scene_name, 'pose')
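            # pose CAN messages arrive at ~50 Hz while keyframe samples come at 2 Hz,
            # so taking every 25th message roughly aligns the CAN data with the samples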
            ego_accel_traj = [pm['accel'] for pm in pose_msg][::25]
            ego_rotation_rate_traj = [pm['rotation_rate'] for pm in pose_msg][::25]
            ego_speed_traj = [pm['vel'] for pm in pose_msg][::25]
        
        #### loop over samples ####
        sample_token = scene['first_sample_token']
        sample = nusc.get('sample', sample_token)

        sample_idx = 0
        t = time.time()
        while sample['next'] != '':
            # print(sample_idx)
            # print(t-time.time())
            # t = time.time()
            cam_data = nusc.get('sample_data', sample['data']['CAM_FRONT'])
            ego_pose = nusc.get('ego_pose', cam_data['ego_pose_token'])

            # ego raster
            ego_raster = rasterizer.make_input_representation(instance_token=None, sample_token=sample_token, ego=True, ego_pose=ego_pose, include_history=False)
            ego_rel_path = str(sample_idx) + "_" + 'ego.png'
            current_ego_raster_img_path = os.path.join(raster_dir, ego_rel_path)
            ego_raster_png = im.fromarray(ego_raster)
            ego_raster_png.save(current_ego_raster_img_path)

            # ego road objects
            ego_on_road_objects = nusc_map.layers_on_point(ego_pose['translation'][0], ego_pose['translation'][1])
            
            #### loop over annotations ####
            for ann_token in sample['anns']:
                ann = nusc.get('sample_annotation', ann_token)
                instance = nusc.get('instance', ann['instance_token'])

                if filter_instance(ann, nusc):
                    #### populate data ####
                    ## scene ##
                    scene_df_dict['scene_name'].append(scene_name)
                    scene_df_dict['scene_token'].append(scene_token)
                    scene_df_dict['scene_nbr_samples'].append(scene['nbr_samples'])
                    scene_df_dict['scene_description'].append(scene['description'])
                    scene_df_dict['scene_log_token'].append(scene_log['token'])
                    scene_df_dict['scene_map_token'].append(scene_map['token'])
                    scene_df_dict['scene_vehicle_id'].append(scene_log['vehicle'])
                    scene_df_dict['scene_date_captured'].append(scene_log['date_captured'])
                    scene_df_dict['scene_location'].append(scene_log['location'])
                    ## sample info ##
                    scene_df_dict['sample_idx'].append(sample_idx)
                    scene_df_dict['sample_token'].append(sample_token)
                    scene_df_dict['time_stamp'].append(sample['timestamp'])
                    ## ego info ##
                    scene_df_dict['current_ego_pos'].append(ego_pose['translation'])
                    scene_df_dict['current_ego_quat'].append(ego_pose['rotation'])
                    idx = min(sample_idx, len(ego_speed_traj)-1)
                    scene_df_dict['current_ego_speed'].append(ego_speed_traj[idx])
                    idx = min(sample_idx, len(ego_accel_traj)-1)
                    scene_df_dict['current_ego_accel'].append(ego_accel_traj[idx])
                    idx = min(sample_idx, len(ego_rotation_rate_traj)-1)
                    scene_df_dict['current_ego_steering'].append(ego_rotation_rate_traj[idx])
                    scene_df_dict['current_ego_on_road_objects'].append(ego_on_road_objects)
                    scene_df_dict['current_ego_raster_img'].append(ego_raster)
                    scene_df_dict['current_ego_raster_img_path'].append(scene_name+"/"+ego_rel_path)
                    ## instance info ##
                    scene_df_dict['annotation_token'].append(ann_token)
                    scene_df_dict['instance_token'].append(ann['instance_token'])
                    scene_df_dict['instance_category'].append(ann['category_name'])
                    instance_attributes = [nusc.get('attribute', attribute_token)['name'] for attribute_token in ann['attribute_tokens']]
                    scene_df_dict['instance_attributes'].append(instance_attributes)
                    scene_df_dict['instance_nbr_annotations'].append(instance['nbr_annotations'])
                    scene_df_dict['current_instance_pos'].append(ann['translation'])
                    scene_df_dict['current_instance_quat'].append(ann['rotation'])
                    instance_speed = helper.get_velocity_for_agent(ann['instance_token'], sample_token)
                    scene_df_dict['current_instance_speed'].append(instance_speed)
                    instance_accel = helper.get_acceleration_for_agent(ann['instance_token'], sample_token)
                    scene_df_dict['current_instance_accel'].append(instance_accel)
                    instance_on_road_objects = nusc_map.layers_on_point(ann['translation'][0], ann['translation'][1])
                    scene_df_dict['current_instance_on_road_objects'].append(instance_on_road_objects)
                    instance_raster = rasterizer.make_input_representation(instance_token=ann['instance_token'], sample_token=sample_token, ego=False, include_history=False)
                    scene_df_dict['current_instance_raster_img'].append(instance_raster)
                    instance_rel_path = str(sample_idx) + "_"+ann['instance_token']+'.png'
                    instance_raster_img_path = os.path.join(raster_dir, instance_rel_path)
                    instance_raster_png = im.fromarray(instance_raster)
                    instance_raster_png.save(instance_raster_img_path)
                    scene_df_dict['current_instance_raster_img_path'].append(scene_name+"/"+instance_rel_path)
                    
            sample_token = sample['next']
            sample = nusc.get('sample', sample_token)
            sample_idx += 1

        scene_df = pd.DataFrame(scene_df_dict)
        scene_df.to_pickle(data_save_dir+"/"+scene_name+".pkl")
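
Each scene's DataFrame is pickled to data_save_dir and can be read back with pandas. A minimal sketch, assuming a hypothetical save directory and scene name:

import pandas as pd

# "processed" and "scene-0061" are illustrative, not paths from the project
df = pd.read_pickle("processed/scene-0061.pkl")
print(df.columns.tolist())                              # the df_dict keys above
print(df[["sample_idx", "current_ego_speed"]].head())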
Code Example #3
class nuScenesDataset(Dataset):
    N_SWEEPS_PER_SAMPLE = 10
    SAMPLE_INTERVAL = 0.5  # second

    def __init__(self, nusc, nusc_split, kwargs, seed=0):
        super(nuScenesDataset, self).__init__()

        # set seed for split
        np.random.seed(seed)

        self.nusc = nusc
        self.nusc_root = self.nusc.dataroot
        self.nusc_can = NuScenesCanBus(dataroot=self.nusc_root)
        self.nusc_split = nusc_split

        # number of input samples
        self.n_input = kwargs["n_input"]

        # number of sampled trajectories
        self.n_samples = kwargs["n_samples"]

        # number of output samples
        self.n_output = kwargs["n_output"]
        assert(self.n_output == 7)

        #
        self.train_on_all_sweeps = kwargs["train_on_all_sweeps"]

        # scene-0419 does not have vehicle monitor data
        blacklist = [419] + self.nusc_can.can_blacklist

        # NOTE: use the official split (minus the ones in the blacklist)
        if "scene_token" in kwargs and kwargs["scene_token"] != "":
            scene = self.nusc.get("scene", kwargs["scene_token"])
            scenes = [scene]
        else:
            scene_splits = create_splits_scenes(verbose=False)
            scene_names = scene_splits[self.nusc_split]
            scenes = []
            for scene in self.nusc.scene:
                scene_name = scene["name"]
                scene_no = int(scene_name[-4:])
                if (scene_name in scene_names) and (scene_no not in blacklist):
                    scenes.append(scene)

        # list all sample data
        self.valid_index = []
        self.flip_flags = []
        self.scene_tokens = []
        self.sample_data_tokens = []
        for scene in scenes:
            scene_token = scene["token"]
            # location
            log = self.nusc.get("log", scene["log_token"])
            # flip x axis if in left-hand traffic (singapore)
            flip_flag = log["location"].startswith("singapore")
            # record the token of every key frame
            start_index = len(self.sample_data_tokens)
            first_sample = self.nusc.get("sample", scene["first_sample_token"])
            sample_data_token = first_sample["data"]["LIDAR_TOP"]
            while sample_data_token != "":
                sample_data = self.nusc.get("sample_data", sample_data_token)
                if (self.nusc_split == "train" and self.train_on_all_sweeps) or (sample_data["is_key_frame"]):
                    self.flip_flags.append(flip_flag)
                    self.scene_tokens.append(scene_token)
                    self.sample_data_tokens.append(sample_data_token)
                sample_data_token = sample_data["next"]
            end_index = len(self.sample_data_tokens)
            # NOTE: make sure we have enough number of sweeps for input and output
            if self.nusc_split == "train" and self.train_on_all_sweeps:
                valid_start_index = start_index + self.n_input - 1
                valid_end_index = end_index - (self.n_output - 1) * self.N_SWEEPS_PER_SAMPLE
            else:
                # NEW: acknowledge the fact and skip the first sample
                n_input_samples = self.n_input // self.N_SWEEPS_PER_SAMPLE
                valid_start_index = start_index + n_input_samples
                valid_end_index = end_index - self.n_output + 1
            self.valid_index += list(range(valid_start_index, valid_end_index))
        self._n_examples = len(self.valid_index)
        print(f"{self.nusc_split}: {self._n_examples} valid samples over {len(scenes)} scenes")

    def __len__(self):
        return self._n_examples

    def get_global_pose(self, sd_token, inverse=False):
        sd = self.nusc.get("sample_data", sd_token)
        sd_ep = self.nusc.get("ego_pose", sd["ego_pose_token"])
        sd_cs = self.nusc.get("calibrated_sensor", sd["calibrated_sensor_token"])
        if inverse is False:
            global_from_ego = transform_matrix(sd_ep["translation"], Quaternion(sd_ep["rotation"]), inverse=False)
            ego_from_sensor = transform_matrix(sd_cs["translation"], Quaternion(sd_cs["rotation"]), inverse=False)
            pose = global_from_ego.dot(ego_from_sensor)
        else:
            sensor_from_ego = transform_matrix(sd_cs["translation"], Quaternion(sd_cs["rotation"]), inverse=True)
            ego_from_global = transform_matrix(sd_ep["translation"], Quaternion(sd_ep["rotation"]), inverse=True)
            pose = sensor_from_ego.dot(ego_from_global)
        return pose

    def load_ground_segmentation(self, sample_data_token):
        path = f"{self.nusc.dataroot}/grndseg/{self.nusc.version}/{sample_data_token}_grndseg.bin"
        labels = np.fromfile(path, np.uint8)
        return labels

    def load_future_visible_freespace(self, sample_data_token):
        path = f"{self.nusc.dataroot}/fvfmaps/{self.nusc.version}/{sample_data_token}.bin"
        if os.path.exists(path):
            fvf_maps = np.fromfile(path, dtype=np.int8)
            fvf_maps = fvf_maps.reshape((7, 704, 400))
        else:
            fvf_maps = np.zeros((7, 704, 400), dtype=np.int8)
            warnings.warn(f"Cannot find fvf_maps at {path}")
        return fvf_maps

    def load_object_boxes(self, sample_data_token):
        path = f"{self.nusc.dataroot}/obj_boxes/{self.nusc.version}/{sample_data_token}.bin"
        if os.path.exists(path):
            obj_boxes = np.fromfile(path, dtype=bool)
            obj_boxes = obj_boxes.reshape((7, 704, 400))
        else:
            obj_boxes = np.zeros((7, 704, 400))
        return obj_boxes

    def load_object_shadows(self, sample_data_token):
        path = f"{self.nusc.dataroot}/obj_shadows/{self.nusc.version}/{sample_data_token}.bin"
        if os.path.exists(path):
            obj_shadows = np.fromfile(path, dtype=bool)
            obj_shadows = obj_shadows.reshape((7, 704, 400))
        else:
            obj_shadows = np.zeros((7, 704, 400))
        return obj_shadows

    def __getitem__(self, idx):
        ref_index = self.valid_index[idx]

        ref_sd_token = self.sample_data_tokens[ref_index]
        ref_scene_token = self.scene_tokens[ref_index]
        flip_flag = self.flip_flags[ref_index]

        # reference coordinate frame
        ref_from_global = self.get_global_pose(ref_sd_token, inverse=True)

        # NOTE: input
        input_sds = []
        sd_token = ref_sd_token
        while sd_token != "" and len(input_sds) < self.n_input:
            curr_sd = self.nusc.get("sample_data", sd_token)
            input_sds.append(curr_sd)
            sd_token = curr_sd["prev"]

        # call out when we have less than the desired number of input sweeps
        # if len(input_sds) < self.n_input:
        #     warnings.warn(f"The number of input sweeps {len(input_sds)} is less than {self.n_input}.", RuntimeWarning)

        # get input sweep frames
        input_points_list = []
        for i, curr_sd in enumerate(input_sds):
            # load the current lidar sweep
            curr_lidar_pc = MyLidarPointCloud.from_file(f"{self.nusc_root}/{curr_sd['filename']}")

            # remove ego returns
            curr_lidar_pc.remove_ego()

            # transform from the current lidar frame to global and then to the reference lidar frame
            global_from_curr = self.get_global_pose(curr_sd["token"], inverse=False)
            ref_from_curr = ref_from_global.dot(global_from_curr)
            curr_lidar_pc.transform(ref_from_curr)

            # NOTE: check if we are in Singapore (if so flip x)
            if flip_flag:
                curr_lidar_pc.points[0] *= -1

            #
            points = np.asarray(curr_lidar_pc.points[:3].T)
            tindex = np.full((len(points), 1), i)
            points = np.concatenate((points, tindex), axis=1)

            #
            input_points_list.append(points.astype(np.float32))

        # NOTE: output
        # get output sample frames and ground truth trajectory
        output_origin_list = []
        output_points_list = []
        gt_trajectory = np.zeros((self.n_output, 3), np.float64)
        for i in range(self.n_output):
            if self.nusc_split == "train" and self.train_on_all_sweeps:
                index = ref_index + i * self.N_SWEEPS_PER_SAMPLE
            else:
                index = ref_index + i

            # check whether a valid target exists for this frame
            if index < len(self.scene_tokens) and self.scene_tokens[index] == ref_scene_token:
                curr_sd_token = self.sample_data_tokens[index]
                curr_sd = self.nusc.get("sample_data", curr_sd_token)

                # load the current lidar sweep
                curr_lidar_pc = LidarPointCloud.from_file(f"{self.nusc_root}/{curr_sd['filename']}")

                # transform from the current lidar frame to global and then to the reference lidar frame
                global_from_curr = self.get_global_pose(curr_sd_token, inverse=False)
                ref_from_curr = ref_from_global.dot(global_from_curr)
                curr_lidar_pc.transform(ref_from_curr)

                #
                theta = quaternion_yaw(Quaternion(matrix=ref_from_curr))

                # NOTE: check if we are in Singapore (if so flip x)
                if flip_flag:
                    ref_from_curr[0, 3] *= -1
                    curr_lidar_pc.points[0] *= -1
                    theta *= -1

                origin = np.array(ref_from_curr[:3, 3])
                points = np.array(curr_lidar_pc.points[:3].T)
                gt_trajectory[i, :] = [origin[0], origin[1], theta]

                tindex = np.full(len(points), i)

                labels = self.load_ground_segmentation(curr_sd_token)
                assert(len(labels) == len(points))
                mask = np.logical_and(labels >= 1, labels <= 30)

                points = np.concatenate((points, tindex[:, None], labels[:, None]), axis=1)
                points = points[mask, :]

            else:  # filler; the assignments below are dead code since the raise aborts first
                raise RuntimeError(f"The {i}-th output frame is not available")
                origin = np.array([0.0, 0.0, 0.0])
                points = np.full((0, 5), -1)

            # origin
            output_origin_list.append(origin.astype(np.float32))

            # points
            output_points_list.append(points.astype(np.float32))

        # NOTE: trajectory sampling
        ref_scene = self.nusc.get("scene", ref_scene_token)

        # NOTE: rely on pose and steeranglefeedback data instead of vehicle_monitor
        vm_msgs = self.nusc_can.get_messages(ref_scene["name"], "vehicle_monitor")
        vm_uts = [msg["utime"] for msg in vm_msgs]
        pose_msgs = self.nusc_can.get_messages(ref_scene["name"], "pose")
        pose_uts = [msg["utime"] for msg in pose_msgs]
        steer_msgs = self.nusc_can.get_messages(ref_scene["name"], "steeranglefeedback")
        steer_uts = [msg["utime"] for msg in steer_msgs]

        # locate the closest message by universal timestamp
        ref_sd = self.nusc.get("sample_data", ref_sd_token)
        ref_utime = ref_sd["timestamp"]
        vm_index = locate_message(vm_uts, ref_utime)
        vm_data = vm_msgs[vm_index]
        pose_index = locate_message(pose_uts, ref_utime)
        pose_data = pose_msgs[pose_index]
        steer_index = locate_message(steer_uts, ref_utime)
        steer_data = steer_msgs[steer_index]

        # initial speed
        # v0 = vm_data["vehicle_speed"] / 3.6  # km/h to m/s
        v0 = pose_data["vel"][0]  # [0] means longitudinal velocity

        # curvature (positive: turn left)
        # steering = np.deg2rad(vm_data["steering"])
        steering = steer_data["value"]
        if flip_flag:
            steering *= -1
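        # 2.588 m is presumably the wheelbase of the nuScenes data-collection vehicle (a Renault Zoe)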
        Kappa = 2 * steering / 2.588

        #
        left_signal = vm_data["left_signal"]
        right_signal = vm_data["right_signal"]
        if flip_flag:
            left_signal, right_signal = right_signal, left_signal
        drive_command = [left_signal, right_signal]

        # initial state
        T0 = np.array([0.0, 1.0])  # define front
        N0 = np.array([1.0, 0.0]) if Kappa <= 0 else np.array([-1.0, 0.0])  # define side

        #
        # tt = np.arange(self.n_output) * self.SAMPLE_INTERVAL
        # tt = np.arange(0, self.n_output + self.SAMPLE_INTERVAL, self.SAMPLE_INTERVAL)
        t_start = 0  # second
        t_end = (self.n_output-1) * self.SAMPLE_INTERVAL  # second
        t_interval = self.SAMPLE_INTERVAL / 10
        tt = np.arange(t_start, t_end + t_interval, t_interval)
        sampled_trajectories_fine = trajectory_sampler.sample(v0, Kappa, T0, N0, tt, self.n_samples)
        sampled_trajectories = sampled_trajectories_fine[:, ::10]

        #
        obj_boxes = self.load_object_boxes(ref_sd_token)
        obj_shadows = self.load_object_shadows(ref_sd_token)

        #
        fvf_maps = self.load_future_visible_freespace(ref_sd_token)

        #
        example = {
            "scene_token": ref_scene_token,
            "sample_data_token": ref_sd_token,
            "input_points": torch.from_numpy(np.concatenate(input_points_list)),
            "sampled_trajectories_fine": torch.from_numpy(sampled_trajectories_fine),
            "sampled_trajectories": torch.from_numpy(sampled_trajectories),
            "drive_command": torch.tensor(drive_command),
            "output_origin": torch.from_numpy(np.stack(output_origin_list)),
            "output_points": torch.from_numpy(np.concatenate(output_points_list)),
            "gt_trajectory": torch.from_numpy(gt_trajectory),
            "obj_boxes": torch.from_numpy(obj_boxes),
            "obj_shadows": torch.from_numpy(obj_shadows),
            "fvf_maps": torch.from_numpy(fvf_maps),
        }
        return example
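
A minimal construction sketch, assuming the imports used by the class (torch, the nuscenes devkit) are available and that the dataroot also contains the can_bus/ tables; every kwargs value except n_output (pinned to 7 by the assert in __init__) is an illustrative assumption:

from torch.utils.data import DataLoader
from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version="v1.0-mini", dataroot="/data/nuscenes", verbose=True)
kwargs = {
    "n_input": 20,                  # illustrative
    "n_samples": 100,               # illustrative
    "n_output": 7,                  # required by the assert in __init__
    "train_on_all_sweeps": False,
}
dataset = nuScenesDataset(nusc, "mini_train", kwargs)
# batch_size=1 because each example carries variable-length point clouds
loader = DataLoader(dataset, batch_size=1, shuffle=True)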
Code Example #4
def get_can_data(scene_name, data_root):

    # This py file has a function inside called 'get_can_data(scene_name, data_root)'.
    # It requires two string inputs: the scene name (e.g. 'scene-0565') and the data root
    # (e.g. '../data/sets/nuscenes'). The can_bus data (as .json files) must sit in a
    # can_bus folder in data/sets/nuscenes. The two dots before data go up a level.
    # The can_bus data is kept in this folder in the repo but is ignored when pushing
    # to GitHub, as the folder is larger than GitHub recommends.

    # To bring the internal function 'get_can_data' into the workspace, run the following code:
    # exec(open("../python-functions/can_data_fcn.py").read())

    # This will allow you to use the get_can_data function as you please. An example of
    # how it is used is located in python-scripts/test_script.py.

    # When you get the data as a variable name of your choice, the breakdown of the data is as follows:
    # a. Pick the CAN Message you want.
    # b. Pick the CAN Signal you want.

    # Available messages and their signals:
    #   veh_msgs:             utimes, fl_wheel_speed, fr_wheel_speed, rl_wheel_speed, rr_wheel_speed
    #   pose_msgs:            utimes, veh_speed, x, y, rot_quaternion_mat
    #   imu_msgs:             utimes, accel_x, accel_y, accel_z, rot_quaternion_mat, roll_rate, pitch_rate, yaw_rate
    #   veh_monitor_msgs:     utimes, swa_data, swa_rate_data, veh_speed, yaw_rate

    # Data frequency (time-step):
    #   veh_msgs:             100 Hz
    #   pose_msgs:             50 Hz
    #   imu_msgs:             100 Hz
    #   veh_monitor_msgs:       2 Hz

    from nuscenes.can_bus.can_bus_api import NuScenesCanBus
    import numpy as np
    import scipy.io as sio
    import math
    import matplotlib.pyplot as plt

    nusc_can = NuScenesCanBus(dataroot=data_root)
    

    ############# VEHICLE INFO MESSAGE ##################################
    # Obtain time array of vehicle info message
    veh_messages = nusc_can.get_messages(scene_name, 'zoe_veh_info')
    veh_utimes = np.array([m['utime'] for m in veh_messages])
    veh_msgs_utimes = (veh_utimes - min(veh_utimes)) / 1e6 # Units: sec

    # Obtain the four different wheel speed sensor data
    veh_fl_wheel_speed_data = np.array([m['FL_wheel_speed'] for m in veh_messages]) # Units: rpm
    veh_fr_wheel_speed_data = np.array([m['FR_wheel_speed'] for m in veh_messages]) # Units: rpm
    veh_rl_wheel_speed_data = np.array([m['RL_wheel_speed'] for m in veh_messages]) # Units: rpm
    veh_rr_wheel_speed_data = np.array([m['RR_wheel_speed'] for m in veh_messages]) # Units: rpm

    # Convert wheel speed data to linear velocity for each wheel
    veh_r_wheel = 0.305 # Units: m
    veh_msgs_fl_wheel_speed_data = veh_fl_wheel_speed_data * 2 * math.pi * veh_r_wheel / 60 # Units: mps
    veh_msgs_fr_wheel_speed_data = veh_fr_wheel_speed_data * 2 * math.pi * veh_r_wheel / 60 # Units: mps
    veh_msgs_rl_wheel_speed_data = veh_rl_wheel_speed_data * 2 * math.pi * veh_r_wheel / 60 # Units: mps
    veh_msgs_rr_wheel_speed_data = veh_rr_wheel_speed_data * 2 * math.pi * veh_r_wheel / 60 # Units: mps

    # Obtain steering wheel angle (swa) sensor data
    veh_msgs_swa_data = np.array([m['steer_corrected'] for m in veh_messages]) # Units: deg

    veh_msgs = {
                'utimes': veh_msgs_utimes,
                'fl_wheel_speed': veh_msgs_fl_wheel_speed_data,
                'fr_wheel_speed': veh_msgs_fr_wheel_speed_data,
                'rl_wheel_speed': veh_msgs_rl_wheel_speed_data,
                'rr_wheel_speed': veh_msgs_rr_wheel_speed_data,
                }


    ############# POSE MESSAGE ##################################
    # Obtain time array of pose message
    pose_messages = nusc_can.get_messages(scene_name, 'pose')
    pose_utimes = np.array([m['utime'] for m in pose_messages])
    pose_msgs_utimes = (pose_utimes - min(pose_utimes)) / 1e6 # Units: sec

    # Obtain vehicle speed; in the ego vehicle frame
    pose_msgs_veh_speed = np.array([m['vel'] for m in pose_messages]) # Units: mps

    # Obtain x position from southeast corner of the map
    pose_msgs_x = np.array([m['pos'][0] for m in pose_messages]) # Units: m

    # Obtain y position from southeast corner of the map
    pose_msgs_y = np.array([m['pos'][1] for m in pose_messages]) # Units: m

    # Obtain Quaternion Rotation matrix - outputs as an array; in the ego vehicle frame
    pose_msgs_rot_quaternion_mat = np.array([m['orientation'] for m in pose_messages]) # Units: dimension-less

    pose_msgs = {
                 'utimes': pose_msgs_utimes,
                 'veh_speed': pose_msgs_veh_speed,
                 'x': pose_msgs_x,
                 'y': pose_msgs_y,
                 'rot_quaternion_mat': pose_msgs_rot_quaternion_mat,
                }
    
    
    ############# IMU MESSAGE ##################################
    # Obtain time array of IMU message
    imu_messages = nusc_can.get_messages(scene_name, 'ms_imu')
    imu_utimes = np.array([m['utime'] for m in imu_messages])
    imu_msgs_utimes = (imu_utimes - min(imu_utimes)) / 1e6 # Units: sec

    # Obtain Acceleration in x direction
    imu_msgs_accel_x = np.array([m['linear_accel'][0] for m in imu_messages]) # Units: m/s/s

    # Obtain Acceleration in y direction
    imu_msgs_accel_y = np.array([m['linear_accel'][1] for m in imu_messages]) # Units: m/s/s

    # Obtain Acceleration in z direction
    imu_msgs_accel_z = np.array([m['linear_accel'][2] for m in imu_messages]) # Units: m/s/s

    # Obtain Quaternion Rotation matrix - outputs as an array
    imu_msgs_rot_quaternion_mat = np.array([m['q'] for m in imu_messages]) # Units: dimension-less

    # Obtain Roll Rate
    imu_msgs_roll_rate = np.array([m['rotation_rate'][0] for m in imu_messages]) # Units: rad/s

    # Obtain Pitch Rate
    imu_msgs_pitch_rate = np.array([m['rotation_rate'][1] for m in imu_messages]) # Units: rad/s

    # Obtain Yaw Rate
    imu_msgs_yaw_rate = np.array([m['rotation_rate'][2] for m in imu_messages]) # Units: rad/s

    imu_msgs = {
                'utimes': imu_msgs_utimes,
                'accel_x': imu_msgs_accel_x,
                'accel_y': imu_msgs_accel_y,
                'accel_z': imu_msgs_accel_z,
                'rot_quaternion_mat': imu_msgs_rot_quaternion_mat,
                'roll_rate': imu_msgs_roll_rate,
                'pitch_rate': imu_msgs_pitch_rate,
                'yaw_rate': imu_msgs_yaw_rate,
                }
                

    ############# VEHICLE MONITOR MESSAGE ##################################
    # Note: this is different from the Vehicle Info message and is recorded at a slower frequency.
    # Obtain time array of Vehicle Monitor message
    veh_monitor_messages = nusc_can.get_messages(scene_name, 'vehicle_monitor')
    veh_monitor_utimes = np.array([m['utime'] for m in veh_monitor_messages])
    veh_monitor_msgs_utimes = (veh_monitor_utimes - min(veh_monitor_utimes)) / 1e6 # Units: sec

    # Obtain steering wheel angle (swa) sensor data
    veh_monitor_msgs_swa_data = np.array([m['steering'] for m in veh_monitor_messages]) # Units: deg

    # Obtain steering wheel angle rate (swa_dot) sensor data
    veh_monitor_msgs_swa_rate_data = np.array([m['steering_speed'] for m in veh_monitor_messages]) # Units: deg/s

    # Obtain vehicle speed sensor data
    veh_monitor_msgs_veh_speed = np.array([m['vehicle_speed'] for m in veh_monitor_messages]) # Units: km/h

    # Obtain yaw rate sensor data
    veh_monitor_msgs_yaw_rate = np.array([m['yaw_rate'] for m in veh_monitor_messages]) # Units: deg/s

    veh_monitor_msgs = {
                        'utimes': veh_monitor_msgs_utimes,
                        'swa_data': veh_monitor_msgs_swa_data,
                        'swa_rate_data': veh_monitor_msgs_swa_rate_data,
                        'veh_speed': veh_monitor_msgs_veh_speed,
                        'yaw_rate': veh_monitor_msgs_yaw_rate,
                        }



    # Pass all structures into can_data_out
    can_data_out = {
                    'veh_msgs': veh_msgs,
                    'pose_msgs': pose_msgs,
                    'imu_msgs': imu_msgs,
                    'veh_monitor_msgs': veh_monitor_msgs,
                    }

    # Save the long_accel data into a mat file:
    # sio.savemat('np_vector.mat', {'long_accel': data})

    # Plot, e.g., the pose trace or the baseline route:
    # plt.plot(pose_x, pose_y)
    # plt.show()
    # nusc_can.plot_baseline_route(scene_name)
    
    return can_data_out
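
Following the notes at the top of the function, a usage sketch; the scene name and data root mirror the examples in those comments:

# bring get_can_data into the workspace as described above
exec(open("../python-functions/can_data_fcn.py").read())

can_data = get_can_data('scene-0565', '../data/sets/nuscenes')

# a. pick the CAN message, b. pick the CAN signal
yaw_rate = can_data['imu_msgs']['yaw_rate']      # rad/s, ~100 Hz
veh_speed = can_data['pose_msgs']['veh_speed']   # m/s, ~50 Hz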
Code Example #5
def create_nuscenes_infos(root_path,
                          modality,
                          radar_version,
                          version="v1.0-trainval",
                          max_sweeps=10):
    from nuscenes.nuscenes import NuScenes
    from nuscenes.can_bus.can_bus_api import NuScenesCanBus
    nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
    nusc_can = NuScenesCanBus(dataroot=root_path)
    from nuscenes.utils import splits
    available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"]
    assert version in available_vers
    if version == "v1.0-trainval":
        train_scenes = splits.train
        val_scenes = splits.val
    elif version == "v1.0-test":
        train_scenes = splits.test
        val_scenes = []
    elif version == "v1.0-mini":
        train_scenes = splits.mini_train
        val_scenes = splits.mini_val
    else:
        raise ValueError("unknown")
    test = "test" in version
    root_path = Path(root_path)
    output_path = Path('/home/spalab/jskim/second.pytorch/second/dataset')
    # filter exist scenes. you may only download part of dataset.
    available_scenes = _get_available_scenes(nusc, nusc_can)
    available_scene_names = [s["name"] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes))
    train_scenes = set([
        available_scenes[available_scene_names.index(s)]["token"]
        for s in train_scenes
    ])
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]["token"]
        for s in val_scenes
    ])
    if test:
        print(f"test scene: {len(train_scenes)}")
    else:
        print(
            f"train scene: {len(train_scenes)}, val scene: {len(val_scenes)}")
    train_nusc_infos, val_nusc_infos = _fill_trainval_infos(
        nusc,
        nusc_can,
        train_scenes,
        val_scenes,
        modality,
        radar_version,
        test=test,
        max_sweeps=max_sweeps)
    metadata = {
        "version": version,
    }
    if test:
        print(f"test sample: {len(train_nusc_infos)}")
        data = {
            "infos": train_nusc_infos,
            "metadata": metadata,
        }
        with open(output_path / "infos_test.pkl", 'wb') as f:
            pickle.dump(data, f)
    else:
        print(
            f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}"
        )
        data = {
            "infos": train_nusc_infos,
            "metadata": metadata,
        }
        if max_sweeps == 6:
            with open(
                    output_path /
                    f"infos_train_multisweep_{radar_version}.pkl", 'wb') as f:
                pickle.dump(data, f)
            data["infos"] = val_nusc_infos
            with open(
                    output_path / f"infos_val_multisweep_{radar_version}.pkl",
                    'wb') as f:
                pickle.dump(data, f)
        elif max_sweeps == 10:
            with open(
                    output_path /
                    f"infos_train_multisweep_{radar_version}_sweeps{max_sweeps}.pkl",
                    'wb') as f:
                pickle.dump(data, f)
            data["infos"] = val_nusc_infos
            with open(
                    output_path /
                    f"infos_val_multisweep_{radar_version}_sweeps{max_sweeps}.pkl",
                    'wb') as f:
                pickle.dump(data, f)
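
A hedged invocation sketch; the modality and radar_version values are placeholders rather than options documented by this project:

create_nuscenes_infos(
    root_path="/data/nuscenes",   # dataset root (must also contain can_bus/)
    modality="radar",             # placeholder
    radar_version="v1",           # placeholder
    version="v1.0-trainval",
    max_sweeps=10,                # selects the infos_*_sweeps10 pickle names above
)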
Code Example #6
    def __init__(self,
                 config: dict = {},
                 helper: PredictHelper = None,
                 py_logger=None,
                 tb_logger=None):
        self.na_config = Configuration({
            'debug': False,
            'pred_horizon': 6,
            'obs_horizon': 2,
            'freq': 2,
            'load_dataset': False,
            'version': 'v1.0-mini',
            'py_logging_path': None,
            'tb_logging_path': None
        })

        self.na_config.update(config)
        self.name = None

        self.py_logger = py_logger
        self.tb_logger = tb_logger

        self.dataroot = None
        if 'mini' in self.na_config['version']:
            self.dataroot = mini_path
        else:
            self.dataroot = full_path
        if self.py_logger is None and self.na_config[
                'py_logging_path'] is not None:
            print(f"py logging path: {self.na_config['py_logging_path']}")
            self.py_logger = logger
            self.py_logger.add(self.na_config['py_logging_path'] + "/log.txt")

        #     self.py_logger = logging.getLogger(self.name)
        #     print(f"py logging path: {self.na_config['py_logging_path']}")
        #     self.py_logger.addHandler(logging.FileHandler(os.path.join(self.na_config['py_logging_path'], 'log.txt'),  mode='w'))
        # if self.py_logger is not None:
        #     self.py_logger.propagate = False

        if self.tb_logger is None and self.na_config[
                'tb_logging_path'] is not None:
            self.tb_logger = SummaryWriter(
                log_dir=os.path.join(self.na_config['tb_logging_path']))

        self.helper = helper
        self.nusc = None
        if self.helper is not None:
            self.nusc = self.helper.data
        else:
            if self.dataroot is not None and self.na_config[
                    'version'] is not None and self.na_config[
                        'load_dataset'] and self.helper is None:
                self.nusc = NuScenes(dataroot=self.dataroot,
                                     version=self.na_config['version'],
                                     verbose=True)
                self.helper = PredictHelper(self.nusc)

        #### Initialize Map ####
        self.nusc_map_dict = {
            'boston-seaport':
            NuScenesMap(dataroot=self.dataroot, map_name='boston-seaport'),
            'singapore-hollandvillage':
            NuScenesMap(dataroot=self.dataroot,
                        map_name='singapore-hollandvillage'),
            'singapore-onenorth':
            NuScenesMap(dataroot=self.dataroot, map_name='singapore-onenorth'),
            'singapore-queenstown':
            NuScenesMap(dataroot=self.dataroot,
                        map_name='singapore-queenstown'),
        }

        #### Initialize CAN ####
        self.nusc_can = NuScenesCanBus(dataroot=self.dataroot)

        ####
        self.all_info = {'config': self.na_config}
Code Example #7
File: convert_scene.py Project: xli4217/nuscenes_env
class ConvertScene(object):
    
    def __init__(self, config={}):
        self.config = {
            'version': 'v1.0-mini',
            'dataroot': os.path.join(os.environ['PKG_PATH'],'data')
        }
        self.config.update(config)
        
        
        self.nusc = NuScenes(version=self.config['version'], dataroot=self.config['dataroot'], verbose=True)
        self.nusc_can = NuScenesCanBus(dataroot=self.config['dataroot'])
    
        self.utils = Utils(self.nusc, self.nusc_can)

    
    def load_experiment_rollout_data(self, experiment_path: str):
        experiment_result_dict = {
            'figures':{
                'bev/bev': {},
                'ctrl/ctrl': {},
                'input_images/current_raster':{},
                'q_transitions/q_transitions':{},
                'agn_node_cams/agn_node_cams':{}
            },
            'sim_ego_pos': {},
            'sim_ego_quat': {}
        }
        
        #### figures ####
        for img_folder_name in experiment_result_dict['figures'].keys():
            p_list = np.array([str(p) for p in Path(experiment_path+'/'+img_folder_name).glob('*.png')])
            idx = np.argsort([int(p.split('/')[-1][:2]) for p in p_list])
            p_list = p_list[idx].tolist()
            
            for p in p_list:
                with open(p, 'rb') as png_file:
                    png = png_file.read()
                    idx = int(p.split('/')[-1][:2])
                    experiment_result_dict['figures'][img_folder_name][idx] = png
        
        
        #### ego ####
        d = cloudpickle.load(open(os.path.join(experiment_path, 'rollout.pkl'), 'rb'))
        sim_ego_pos = {}
        sim_ego_quat = {}
        for obs in d['obs']:
            sim_ego_pos[obs['sample_idx']] = obs['sim_ego_pos_gb'].tolist() + [0.01] 
            sim_ego_quat[obs['sample_idx']] = obs['sim_ego_quat_gb'].tolist()
        
        experiment_result_dict['sim_ego_pos'] = sim_ego_pos
        experiment_result_dict['sim_ego_quat'] = sim_ego_quat

        return experiment_result_dict
    
    def load_experiment_rollout_data_pandas(self, experiment_path: str):
        experiment_result_dict = {
            'figures':{
                'feature': {},
                'ioc_param': {},
                'monitor':{}
            },
            'sim_ego_pos': {},
            'sim_ego_quat': {}
        }
        
        #### figures ####
        for img_folder_name in experiment_result_dict['figures'].keys():
            p_list = np.array([str(p) for p in Path(experiment_path+'/'+img_folder_name).glob('*.png')])
            idx = np.argsort([int(p.split('/')[-1][:2]) for p in p_list])
            p_list = p_list[idx].tolist()
            
            for p in p_list:
                with open(p, 'rb') as png_file:
                    png = png_file.read()
                    idx = int(p.split('/')[-1][:2])
                    experiment_result_dict['figures'][img_folder_name][idx] = png
        
        
        #### ego ####
        d_pd = pd.read_pickle(os.path.join(experiment_path, 'rollout.pkl'))
        d = d_pd['env_info']
        sim_ego_pos = {}
        sim_ego_quat = {}
        for obs in d['obs']:
            sim_ego_pos[obs['sample_idx']] = obs['sim_ego_pos_gb'].tolist() + [0.01] 
            sim_ego_quat[obs['sample_idx']] = obs['sim_ego_quat_gb'].tolist()
        
        experiment_result_dict['sim_ego_pos'] = sim_ego_pos
        experiment_result_dict['sim_ego_quat'] = sim_ego_quat
        
        return experiment_result_dict
    
    def convert(self, scene_data_p, scene_name=None, model_name=None, up_to_step=None, ckpt_name=None):
        self.scene_name = scene_name
        self.model_name = model_name
        self.ckpt_name = ckpt_name

        self.scene_data_p = scene_data_p
        self.experiment_result_dict = self.load_experiment_rollout_data(self.scene_data_p)
        #self.experiment_result_dict = self.load_experiment_rollout_data_pandas(self.scene_data_p)
        self.convert_scene(scene_name, 
                           self.utils, 
                           self.config['dataroot'], 
                           up_to_step=up_to_step,
                           experiment_result_dict=self.experiment_result_dict
                        )
    
    def convert_scene(self, scene_name, utils, dataroot, up_to_step,  *args, **kwargs):
        experiment_result_dict = None
        if 'experiment_result_dict' in kwargs.keys():
            experiment_result_dict = kwargs['experiment_result_dict']
        
        scene = None
        for scene_i in self.nusc.scene:
            if scene_i['name'] == scene_name:
                scene = scene_i
                break
        if scene is None:
            raise ValueError(f'scene {scene_name} not found')
            
        scene_name = scene['name']
        log = self.nusc.get('log', scene['log_token'])
        location = log['location']
        print(f'Loading map "{location}"')
        self.nusc_map = NuScenesMap(dataroot=dataroot, map_name=location)
        print(f'Loading bitmap "{self.nusc_map.map_name}"')
        bitmap = BitMap(self.nusc_map.dataroot, self.nusc_map.map_name, 'basemap')
        print(f'Loaded {bitmap.image.shape} bitmap')

        cur_sample = self.nusc.get('sample', scene['first_sample_token'])

        can_parsers = [
            [self.nusc_can.get_messages(scene_name, 'ms_imu'), 0, utils.get_imu_msg],
            [self.nusc_can.get_messages(scene_name, 'pose'), 0, utils.get_odom_msg],
            [self.nusc_can.get_messages(scene_name, 'steeranglefeedback'), 0, lambda x: utils.get_basic_can_msg('Steering Angle', x)],
            [self.nusc_can.get_messages(scene_name, 'vehicle_monitor'), 0, lambda x: utils.get_basic_can_msg('Vehicle Monitor', x)],
            [self.nusc_can.get_messages(scene_name, 'zoesensors'), 0, lambda x: utils.get_basic_can_msg('Zoe Sensors', x)],
            [self.nusc_can.get_messages(scene_name, 'zoe_veh_info'), 0, lambda x: utils.get_basic_can_msg('Zoe Vehicle Info', x)],
        ]

        bag_name = f"{self.model_name}_{self.ckpt_name}_{self.config['scene_name']}.bag"
        bag_path = os.path.join(self.scene_data_p, bag_name)
        print(f'Writing to {bag_path}')
        bag = rosbag.Bag(bag_path, 'w', compression='lz4')

        stamp = utils.get_time(self.nusc.get('ego_pose', self.nusc.get('sample_data', cur_sample['data']['LIDAR_TOP'])['ego_pose_token']))
        map_msg = utils.get_scene_map(scene, self.nusc_map, bitmap, stamp)
        centerlines_msg = utils.get_centerline_markers(scene, self.nusc_map, stamp)
        bag.write('/map', map_msg, stamp)
        bag.write('/semantic_map', centerlines_msg, stamp)
        last_map_stamp = stamp

        #idx = list(experiment_result_dict['sim_ego_pos'].keys())[0]
        idx = 0
        while cur_sample is not None:
            if up_to_step is not None and idx > up_to_step:
                break
            sample_lidar = self.nusc.get('sample_data', cur_sample['data']['LIDAR_TOP'])
            ego_pose = self.nusc.get('ego_pose', sample_lidar['ego_pose_token'])
            stamp = utils.get_time(ego_pose)
            
            #### write experiment_result to bag ####
            ego_marker = None
            if experiment_result_dict is not None:
                # figures #
                for k,v in experiment_result_dict['figures'].items():
                    if idx in v.keys():
                        png = v[idx]
                        msg = CompressedImage()
                        msg.header.frame_id = k
                        msg.header.stamp = stamp
                        msg.format = 'png'
                        msg.data = png
                        bag.write("/"+k+'/image_rect_compressed', msg, stamp)
                #print(idx)
                #print(experiment_result_dict['sim_ego_pos'].keys())
                if idx in experiment_result_dict['sim_ego_pos'].keys():
                    ego_pose = {
                        'translation': experiment_result_dict['sim_ego_pos'][idx],
                        'rotation': experiment_result_dict['sim_ego_quat'][idx],
                        'token': ego_pose['token'],
                        'timestamp': ego_pose['timestamp']
                    }
                    
                    if idx == list(experiment_result_dict['sim_ego_pos'].keys())[-1]:
                        next_idx = idx
                        break
                    else:
                        next_idx = idx+1
                    next_sample = utils.nusc.get('sample',cur_sample['next'])
                    sample_lidar = utils.nusc.get('sample_data', next_sample['data']['LIDAR_TOP'])
                    real_next_ego_pose = utils.nusc.get('ego_pose', sample_lidar['ego_pose_token'])

                    next_ego_pose = {
                        'translation': experiment_result_dict['sim_ego_pos'][next_idx],
                        'rotation': experiment_result_dict['sim_ego_quat'][next_idx],
                        'token': real_next_ego_pose['token'],
                        'timestamp': real_next_ego_pose['timestamp']
                    }
                    # publish /tf
                    tf_array = utils.get_tfmessage(cur_sample, current_pose=ego_pose, next_pose=next_ego_pose)
                    bag.write('/tf', tf_array, stamp)

                else:
                    # publish /tf
                    tf_array = utils.get_tfmessage(cur_sample)
                    bag.write('/tf', tf_array, stamp)
            else:
                # publish /tf
                tf_array = utils.get_tfmessage(cur_sample)
                bag.write('/tf', tf_array, stamp)
                
            
            # write map topics every two seconds
            if stamp - rospy.Duration(2.0) >= last_map_stamp:
                map_msg.header.stamp = stamp
                for marker in centerlines_msg.markers:
                    marker.header.stamp = stamp
                bag.write('/map', map_msg, stamp)
                bag.write('/semantic_map', centerlines_msg, stamp)
                last_map_stamp = stamp

            # write CAN messages to /pose, /odom, and /diagnostics
            can_msg_events = []
            for i in range(len(can_parsers)):
                (can_msgs, index, msg_func) = can_parsers[i]
                while index < len(can_msgs) and utils.get_utime(can_msgs[index]) < stamp:
                    can_msg_events.append(msg_func(can_msgs[index]))
                    index += 1
                    can_parsers[i][1] = index
            can_msg_events.sort(key=lambda x: x[0])
            for (msg_stamp, topic, msg) in can_msg_events:
                bag.write(topic, msg, stamp)

            
            # /driveable_area occupancy grid
            utils.write_occupancy_grid(bag, self.nusc_map, ego_pose, stamp)
                    
            # iterate sensors
            for (sensor_id, sample_token) in cur_sample['data'].items():
                sample_data = self.nusc.get('sample_data', sample_token)
                topic = '/' + sensor_id

                # write the sensor data
                # if sample_data['sensor_modality'] == 'radar':
                #     msg = utils.get_radar(sample_data, sensor_id)
                #     bag.write(topic, msg, stamp)
                # elif sample_data['sensor_modality'] == 'lidar':
                #     msg = utils.get_lidar(sample_data, sensor_id)
                #     bag.write(topic, msg, stamp)
                
                # if sample_data['sensor_modality'] == 'camera':
                #     msg = utils.get_camera(sample_data, sensor_id, dataroot)
                #     bag.write(topic + '/image_rect_compressed', msg, stamp)
                #     msg = utils.get_camera_info(sample_data, sensor_id)
                #     bag.write(topic + '/camera_info', msg, stamp)

                # if sample_data['sensor_modality'] == 'camera':
                #     msg = utils.get_lidar_imagemarkers(sample_lidar, sample_data, sensor_id)
                #     bag.write(topic + '/image_markers_lidar', msg, stamp)
                #     utils.write_boxes_imagemarkers(bag, cur_sample['anns'], sample_data, sensor_id, topic, stamp)

            # publish /markers/annotations
            marker_array = MarkerArray()
            
            ego_marker = Marker()
            ego_marker.header.frame_id = 'base_link'
            ego_marker.header.stamp = stamp
            ego_marker.id = 10000
            ego_marker.text = 'ego'
            ego_marker.type = Marker.CUBE
            #ego_marker.pose = utils.get_pose(next_ego_pose)
            ego_marker.frame_locked = True
            ego_marker.scale.x = 4
            ego_marker.scale.y = 1
            ego_marker.scale.z = 2
            ego_marker.color = utils.make_color((255,61,99), 0.5)
            marker_array.markers.append(ego_marker)


            # publish /pose
            pose_stamped = PoseStamped()
            pose_stamped.header.frame_id = 'base_link'
            pose_stamped.header.stamp = stamp
            pose_stamped.pose.orientation.w = 1
            bag.write('/pose', pose_stamped, stamp)

            # publish /gps
            coordinates = utils.derive_latlon(location, ego_pose)
            gps = NavSatFix()
            gps.header.frame_id = 'base_link'
            gps.header.stamp = stamp
            gps.status.status = 1
            gps.status.service = 1
            gps.latitude = coordinates['latitude']
            gps.longitude = coordinates['longitude']
            gps.altitude = utils.get_transform(ego_pose).translation.z
            bag.write('/gps', gps, stamp)

            for annotation_id in cur_sample['anns']:
                ann = self.nusc.get('sample_annotation', annotation_id)
                marker_id = int(ann['instance_token'][:4], 16)
                c = np.array(self.nusc.explorer.get_color(ann['category_name'])) / 255.0

                marker = Marker()
                marker.header.frame_id = 'map'
                marker.header.stamp = stamp
                marker.id = marker_id
                marker.text = ann['instance_token'][:4]
                marker.type = Marker.CUBE
                marker.pose = utils.get_pose(ann)
                marker.frame_locked = True
                marker.scale.x = ann['size'][1]
                marker.scale.y = ann['size'][0]
                marker.scale.z = ann['size'][2]
                marker.color = utils.make_color(c, 0.5)
                marker_array.markers.append(marker)

            # ego marker #
            # marker = Marker()
            # marker.header.frame_id = 'map'
            # marker.header.stamp = stamp
            # marker.id = marker_id
            # marker.text = 'ego'
            # marker.type = Marker.CUBE
            # marker.pose = utils.get_pose(ego_pose)
            # marker.frame_locked = True
            # marker.scale.x = 4.3
            # marker.scale.y = 1.8
            # marker.scale.z = 1.6
            # c = [0.1,0.1,0.6]
            # marker.color = utils.make_color(c, 0.9)
            # marker_array.markers.append(marker)
                    
            bag.write('/markers/annotations', marker_array, stamp)

            # collect all sensor frames after this sample but before the next sample
            non_keyframe_sensor_msgs = []
            for (sensor_id, sample_token) in cur_sample['data'].items():
                topic = '/' + sensor_id

                next_sample_token = self.nusc.get('sample_data', sample_token)['next']
                while next_sample_token != '':
                    next_sample_data = self.nusc.get('sample_data', next_sample_token)
                    # if next_sample_data['is_key_frame'] or get_time(next_sample_data).to_nsec() > next_stamp.to_nsec():
                    #     break
                    if next_sample_data['is_key_frame']:
                        break

                    # if next_sample_data['sensor_modality'] == 'radar':
                    #     msg = utils.get_radar(next_sample_data, sensor_id)
                    #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic, msg))
                    # elif next_sample_data['sensor_modality'] == 'lidar':
                    #     msg = utils.get_lidar(next_sample_data, sensor_id)
                    #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic, msg))
                    
                    # if next_sample_data['sensor_modality'] == 'camera':
                    #     msg = utils.get_camera(next_sample_data, sensor_id, dataroot)
                    #     camera_stamp_nsec = msg.header.stamp.to_nsec()
                    #     non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/image_rect_compressed', msg))

                    #     msg = utils.get_camera_info(next_sample_data, sensor_id)
                    #     non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/camera_info', msg))

                        # closest_lidar = utils.find_closest_lidar(cur_sample['data']['LIDAR_TOP'], camera_stamp_nsec)
                        # if closest_lidar is not None:
                        #     msg = utils.get_lidar_imagemarkers(closest_lidar, next_sample_data, sensor_id)
                        #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic + '/image_markers_lidar', msg))
                        # else:
                        #     msg = utils.get_remove_imagemarkers(sensor_id, 'LIDAR_TOP', msg.header.stamp)
                        #     non_keyframe_sensor_msgs.append((msg.header.stamp.to_nsec(), topic + '/image_markers_lidar', msg))

                        # Delete all image markers on non-keyframe camera images
                        # msg = get_remove_imagemarkers(sensor_id, 'LIDAR_TOP', msg.header.stamp)
                        # non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/image_markers_lidar', msg))
                        # msg = get_remove_imagemarkers(sensor_id, 'annotations', msg.header.stamp)
                        # non_keyframe_sensor_msgs.append((camera_stamp_nsec, topic + '/image_markers_annotations', msg))

                    next_sample_token = next_sample_data['next']
            
                    
            # sort and publish the non-keyframe sensor msgs
            non_keyframe_sensor_msgs.sort(key=lambda x: x[0])
            for (_, topic, msg) in non_keyframe_sensor_msgs:
                bag.write(topic, msg, msg.header.stamp)

            # move to the next sample
            cur_sample = self.nusc.get('sample', cur_sample['next']) if cur_sample.get('next') != '' else None
            idx += 1
        bag.close()
        print(f'Finished writing {bag_name}')
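
A conversion sketch, assuming PKG_PATH is set and an experiment rollout directory exists; every name and path below is hypothetical:

converter = ConvertScene({'version': 'v1.0-mini', 'scene_name': 'scene-0061'})
converter.convert(
    scene_data_p='/tmp/experiments/scene-0061',  # hypothetical rollout directory
    scene_name='scene-0061',                     # hypothetical scene
    model_name='my_model',                       # hypothetical, used in the bag name
    ckpt_name='ckpt_10',                         # hypothetical, used in the bag name
)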
Code Example #8
    trajectories = np.select(conditions, choices)

    if debug:
        trajectories = trajs

    # return trajectories, t_selections

    return trajectories


if __name__ == "__main__":
    from nuscenes.nuscenes import NuScenes
    from nuscenes.can_bus.can_bus_api import NuScenesCanBus
    import matplotlib.pyplot as plt
    nusc = NuScenes("v1.0-mini", "/data/nuscenes")
    nusc_can = NuScenesCanBus(dataroot="/data/nuscenes")

    for scene in nusc.scene:
        scene_name = scene["name"]
        scene_id = int(scene_name[-4:])
        if scene_id in nusc_can.can_blacklist:
            print(f"skipping {scene_name}")
            continue
        pose = nusc_can.get_messages(scene_name, "pose")
        saf = nusc_can.get_messages(scene_name, "steeranglefeedback")
        vm = nusc_can.get_messages(scene_name, "vehicle_monitor")
        # NOTE: I tried to verify whether the relevant measurements are consistent
        # across the multiple tables that contain redundant information
        # NOTE: verified that pose's velocity matches vehicle monitor's,
        # but the pose table offers it at a much higher frequency
        # NOTE: likewise, steeranglefeedback's steering angle matches vehicle monitor's
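        # A sketch of the check described above (an assumption, not the author's
        # original code); np is available since numpy is used earlier in this file.
        pose_ut = np.array([m["utime"] for m in pose])
        pose_v = np.array([m["vel"][0] for m in pose])             # longitudinal, m/s
        vm_ut = np.array([m["utime"] for m in vm])
        vm_v = np.array([m["vehicle_speed"] for m in vm]) / 3.6    # km/h -> m/s
        # compare each vehicle_monitor reading with the nearest pose message in time
        nearest = np.abs(pose_ut[None, :] - vm_ut[:, None]).argmin(axis=1)
        print(scene_name, "max |v_pose - v_vm| =", np.abs(pose_v[nearest] - vm_v).max())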