def __process_carla_scene(self, data):
    # Get extent and sizing from trajectory data.
    traj_data = data.trajectory_data
    comp_key = np.logical_and(traj_data['node_id'] == 'ego', traj_data['frame_id'] == 0)
    s = traj_data[comp_key].iloc[0]
    ego_initx, ego_inity = s['x'], s['y']
    max_timesteps = traj_data['frame_id'].max()
    x_min = round_to_int(traj_data['x'].min() - 50)
    x_max = round_to_int(traj_data['x'].max() + 50)
    y_min = round_to_int(traj_data['y'].min() - 50)
    y_max = round_to_int(traj_data['y'].max() + 50)
    x_size = x_max - x_min
    y_size = y_max - y_min

    # Filter road from LIDAR points.
    points = data.overhead_points
    z_mask = np.logical_and(points[:, 2] > self.Z_LOWERBOUND, points[:, 2] < self.Z_UPPERBOUND)
    points = data.overhead_points[z_mask]
    labels = data.overhead_labels[z_mask]
    road_label_mask = np.logical_or(labels == SegmentationLabel.Road.value,
                                    labels == SegmentationLabel.Vehicle.value)
    road_points = points[road_label_mask]

    # Adjust and filter occlusions from trajectory data.
    bitmap = points_to_2d_histogram(road_points, x_min, x_max, y_min, y_max,
                                    data.scene_config.pixels_per_m)
    bitmap[bitmap > 0] = 1
    traj_data['x'] = traj_data['x'] - x_min
    traj_data['y'] = traj_data['y'] - y_min
    transform = scipy.ndimage.distance_transform_cdt(-bitmap + 1)
    X = traj_data[['x', 'y']].values
    Xind = (data.scene_config.pixels_per_m * X).astype(int)
    vals = transform[Xind.T[0], Xind.T[1]]
    comp_key = np.logical_or(traj_data['node_id'] == 'ego', vals < self.DISTANCE_FROM_ROAD)
    traj_data = traj_data[comp_key]

    # Create bitmap from map data.
    pixels_per_m = data.scene_config.pixels_per_m
    road_polygons = data.map_data.road_polygons
    yellow_lines = data.map_data.yellow_lines
    white_lines = data.map_data.white_lines
    dim = (int(pixels_per_m * y_size), int(pixels_per_m * x_size), 3)
    bitmap = np.zeros(dim)
    # NuScenes bitmap format: scene.map[...].as_image() has shape (y, x, c)
    # Channel 1: lane, road_segment, drivable_area
    # Channel 2: road_divider
    # Channel 3: lane_divider
    for polygon in road_polygons:
        rzpoly = (pixels_per_m * (polygon[:, :2] - np.array([x_min, y_min]))) \
            .astype(int).reshape((-1, 1, 2))
        cv.fillPoly(bitmap, [rzpoly], (255, 0, 0))
    for line in yellow_lines:
        rzline = (pixels_per_m * (line[:, :2] - np.array([x_min, y_min]))) \
            .astype(int).reshape((-1, 1, 2))
        cv.polylines(bitmap, [rzline], False, (0, 255, 0), thickness=2)
    for line in white_lines:
        rzline = (pixels_per_m * (line[:, :2] - np.array([x_min, y_min]))) \
            .astype(int).reshape((-1, 1, 2))
        cv.polylines(bitmap, [rzline], False, (0, 0, 255), thickness=2)
    bitmap = bitmap.astype(np.uint8).transpose((2, 1, 0))

    # Create scene.
    dt = data.fixed_delta_seconds * data.scene_config.record_interval
    scene = Scene(timesteps=max_timesteps + 1, dt=dt, name=data.scene_name, aug_func=augment)
    scene.ego_initx = ego_initx
    scene.ego_inity = ego_inity
    scene.x_min = x_min
    scene.y_min = y_min
    scene.x_max = x_max
    scene.y_max = y_max
    scene.map_name = data.map_name
    scene.x_size = x_size
    scene.y_size = y_size
    patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min), y_size, x_size)
    patch_angle = 0
    canvas_size = (np.round(3 * y_size).astype(int), np.round(3 * x_size).astype(int))
    homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
    layer_names = ['drivable_area']
    scene.patch_box = patch_box
    scene.patch_angle = patch_angle
    scene.canvas_size = canvas_size
    scene.homography = homography
    scene.layer_names = layer_names
    type_map = dict()
    type_map['VEHICLE'] = GeometricMap(data=bitmap, homography=homography,
                                       description=', '.join(layer_names))
    type_map['VISUALIZATION'] = GeometricMap(data=bitmap, homography=homography,
                                             description=', '.join(layer_names))
    scene.map = type_map
    scene_config = data.scene_config
    return scene, traj_data, max_timesteps, scene_config
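
# ---------------------------------------------------------------------------
# Minimal, self-contained sketch of the distance-transform trick used in
# __process_carla_scene above (illustration only, not called anywhere).
# A binary occupancy grid marks road cells with 1; distance_transform_cdt on
# the inverted grid then gives, for every empty cell, the chessboard distance
# to the nearest road cell, which is thresholded to drop agents far from the
# road. `occupancy`, `agent_ij`, and the threshold are made-up example values.
def _demo_distance_to_road_filter():
    import numpy as np
    import scipy.ndimage

    occupancy = np.zeros((10, 10), dtype=int)
    occupancy[4:6, :] = 1                      # a horizontal "road" band
    dist_to_road = scipy.ndimage.distance_transform_cdt(1 - occupancy)

    agent_ij = np.array([[5, 2], [9, 9]])      # (row, col) cells of two agents
    near_road = dist_to_road[agent_ij[:, 0], agent_ij[:, 1]] < 3
    print(near_road)                           # [ True False]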
def process_scene(self, data):
    # Remove trajectory points of hidden vehicles.
    # This method introduces occlusion to trajectories.
    comp_keys = pd.DataFrame.from_dict(data.vehicle_visibility, orient='index')
    comp_keys = comp_keys.stack().to_frame().reset_index().drop('level_1', axis=1)
    comp_keys.columns = ['frame_id', 'node_id']
    data.trajectory_data = pd.merge(data.trajectory_data, comp_keys,
                                    how='inner', on=['frame_id', 'node_id'])

    # Trim points above/below certain Z levels.
    points = data.overhead_points
    z_mask = np.logical_and(points[:, 2] > self.Z_LOWERBOUND, points[:, 2] < self.Z_UPPERBOUND)
    points = data.overhead_points[z_mask]
    labels = data.overhead_labels[z_mask]

    # Select road LIDAR points.
    road_label_mask = labels == SegmentationLabel.Road.value
    road_points = points[road_label_mask]

    # Remove trajectory points that are far from road LIDAR points.
    # This method is just too computationally expensive:
    # X = traj_data[['x', 'y']].values
    # D = scipy.spatial.distance_matrix(X, road_points[:, :2])
    # traj_mask = np.min(D, axis=1) < self.DISTANCE_FROM_ROAD
    # traj_data = traj_data[np.logical_or(traj_mask, traj_data['node_id'] == 'ego')]

    # Get extent and other data.
    traj_data = data.trajectory_data
    comp_key = np.logical_and(traj_data['node_id'] == 'ego', traj_data['frame_id'] == 0)
    s = traj_data[comp_key].iloc[0]
    ego_initx, ego_inity = s['x'], s['y']
    max_timesteps = traj_data['frame_id'].max()
    x_min = round_to_int(traj_data['x'].min() - 50)
    x_max = round_to_int(traj_data['x'].max() + 50)
    y_min = round_to_int(traj_data['y'].min() - 50)
    y_max = round_to_int(traj_data['y'].max() + 50)

    # Form bitmap.
    bitmap = points_to_2d_histogram(road_points, x_min, x_max, y_min, y_max,
                                    data.scene_config.pixels_per_m)
    bitmap[bitmap > 0.] = 255.
    bitmap = np.stack((bitmap, np.zeros(bitmap.shape), np.zeros(bitmap.shape)), axis=0)
    bitmap = bitmap.astype(np.uint8)

    # Adjust trajectory data.
    traj_data['x'] = traj_data['x'] - x_min
    traj_data['y'] = traj_data['y'] - y_min

    # Create scene.
    dt = data.fixed_delta_seconds * data.scene_config.record_interval
    scene = Scene(timesteps=max_timesteps + 1, dt=dt, name=data.scene_name, aug_func=augment)
    scene.ego_initx = ego_initx
    scene.ego_inity = ego_inity
    scene.x_min = x_min
    scene.y_min = y_min
    scene.x_max = x_max
    scene.y_max = y_max
    type_map = dict()
    x_size = x_max - x_min
    y_size = y_max - y_min
    scene.map_name = data.map_name
    scene.x_size = x_size
    scene.y_size = y_size
    patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min), y_size, x_size)
    patch_angle = 0
    canvas_size = (np.round(3 * y_size).astype(int), np.round(3 * x_size).astype(int))
    homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
    layer_names = ['drivable_area']
    scene.patch_box = patch_box
    scene.patch_angle = patch_angle
    scene.canvas_size = canvas_size
    scene.homography = homography
    scene.layer_names = layer_names
    type_map['VEHICLE'] = GeometricMap(data=bitmap, homography=homography,
                                       description=', '.join(layer_names))
    type_map['VISUALIZATION'] = GeometricMap(data=bitmap, homography=homography,
                                             description=', '.join(layer_names))
    scene.map = type_map
    scene_config = data.scene_config
    del bitmap
    del points
    del labels
    del road_points
    del data
    return process_trajectron_scene(scene, traj_data, max_timesteps, scene_config)
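
# ---------------------------------------------------------------------------
# Stand-alone sketch of the visibility filter at the top of process_scene
# (illustration only; `vehicle_visibility` and `traj` are made-up examples).
# A dict mapping frame_id -> node ids visible in that frame is flattened into
# a (frame_id, node_id) table and inner-joined with the trajectory table, so
# rows for vehicles the ego cannot see in a given frame are dropped.
def _demo_visibility_merge():
    import pandas as pd

    vehicle_visibility = {0: ['ego', 'car_1'], 1: ['ego']}
    keys = pd.DataFrame.from_dict(vehicle_visibility, orient='index')
    keys = keys.stack().to_frame().reset_index().drop('level_1', axis=1)
    keys.columns = ['frame_id', 'node_id']

    traj = pd.DataFrame({'frame_id': [0, 0, 1, 1],
                         'node_id': ['ego', 'car_1', 'ego', 'car_1'],
                         'x': [0.0, 5.0, 1.0, 6.0]})
    visible = pd.merge(traj, keys, how='inner', on=['frame_id', 'node_id'])
    print(visible)  # the (frame 1, car_1) row is gone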
def process_scene(sample_token, processed_sample_tokens, env, nusc, helper,
                  data_path, data_class, half_dt, dynamic):
    data = pd.DataFrame(columns=['frame_id', 'type', 'node_id', 'robot',
                                 'x', 'y', 'z', 'length', 'width', 'height', 'heading'])
    samples = aggregate_samples(nusc, start=sample_token, data_class=data_class)

    attribute_dict = defaultdict(set)

    frame_id = 0
    for sample in samples:
        annotation_tokens = sample['anns']
        for annotation_token in annotation_tokens:
            annotation = nusc.get('sample_annotation', annotation_token)
            category = annotation['category_name']
            if len(annotation['attribute_tokens']):
                attribute = nusc.get('attribute', annotation['attribute_tokens'][0])['name']
            else:
                if 'vehicle' in category:
                    attribute = ''
                else:
                    continue

            if 'pedestrian' in category and 'stroller' not in category and 'wheelchair' not in category:
                our_category = env.NodeType.PEDESTRIAN
            elif 'vehicle' in category and 'bicycle' not in category and 'motorcycle' not in category:  # and 'parked' not in attribute:
                our_category = env.NodeType.VEHICLE
            else:
                continue

            attribute_dict[annotation['instance_token']].add(attribute)

            data_point = pd.Series({
                'frame_id': frame_id,
                'type': our_category,
                'node_id': annotation['instance_token'],
                'robot': False,
                'x': annotation['translation'][0],
                'y': annotation['translation'][1],
                'z': annotation['translation'][2],
                'length': annotation['size'][0],
                'width': annotation['size'][1],
                'height': annotation['size'][2],
                'heading': Quaternion(annotation['rotation']).yaw_pitch_roll[0]
            })
            data = data.append(data_point, ignore_index=True)

        # Ego vehicle.
        our_category = env.NodeType.VEHICLE
        sample_data = nusc.get('sample_data', sample['data']['CAM_FRONT'])
        annotation = nusc.get('ego_pose', sample_data['ego_pose_token'])
        data_point = pd.Series({
            'frame_id': frame_id,
            'type': our_category,
            'node_id': 'ego',
            'robot': True,
            'x': annotation['translation'][0],
            'y': annotation['translation'][1],
            'z': annotation['translation'][2],
            'length': 4,
            'width': 1.7,
            'height': 1.5,
            'heading': Quaternion(annotation['rotation']).yaw_pitch_roll[0],
            'orientation': None
        })
        data = data.append(data_point, ignore_index=True)

        processed_sample_tokens.add(sample['token'])
        frame_id += 1

    if len(data.index) == 0:
        return None

    data.sort_values('frame_id', inplace=True)
    max_timesteps = data['frame_id'].max()

    x_min = np.round(data['x'].min() - 50)
    x_max = np.round(data['x'].max() + 50)
    y_min = np.round(data['y'].min() - 50)
    y_max = np.round(data['y'].max() + 50)

    data['x'] = data['x'] - x_min
    data['y'] = data['y'] - y_min

    scene = Scene(timesteps=max_timesteps + 1, dt=dt, name=sample_token, aug_func=augment,
                  sample_tokens=[sample['token'] for sample in samples],
                  x_min=x_min, y_min=y_min)

    # Generate maps.
    map_name = helper.get_map_name_from_sample_token(sample_token)
    nusc_map = NuScenesMap(dataroot=data_path, map_name=map_name)

    type_map = dict()
    x_size = x_max - x_min
    y_size = y_max - y_min
    patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min), y_size, x_size)
    patch_angle = 0  # Default orientation where North is up
    canvas_size = (np.round(3 * y_size).astype(int), np.round(3 * x_size).astype(int))
    homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
    layer_names = ['lane', 'road_segment', 'drivable_area', 'road_divider', 'lane_divider',
                   'stop_line', 'ped_crossing', 'stop_line', 'ped_crossing', 'walkway']
    # The repeated 'stop_line'/'ped_crossing' entries keep the channel order such that
    # map_mask[8] is ped_crossing and map_mask[9] is walkway, as used in the stacks below.
    map_mask = (nusc_map.get_map_mask(patch_box, patch_angle, layer_names, canvas_size) * 255.0) \
        .astype(np.uint8)
    map_mask = np.swapaxes(map_mask, 1, 2)  # x axis comes first

    # PEDESTRIANS
    map_mask_pedestrian = np.stack((map_mask[9], map_mask[8], np.max(map_mask[:3], axis=0)), axis=0)
    type_map['PEDESTRIAN'] = GeometricMap(data=map_mask_pedestrian, homography=homography,
                                          description=', '.join(layer_names))

    # VEHICLES
    map_mask_vehicle = np.stack((np.max(map_mask[:3], axis=0), map_mask[3], map_mask[4]), axis=0)
    type_map['VEHICLE'] = GeometricMap(data=map_mask_vehicle, homography=homography,
                                       description=', '.join(layer_names))

    map_mask_plot = np.stack(((np.max(map_mask[:3], axis=0) -
                               (map_mask[3] + 0.5 * map_mask[4]).clip(max=255)).clip(min=0).astype(np.uint8),
                              map_mask[8], map_mask[9]), axis=0)
    type_map['VISUALIZATION'] = GeometricMap(data=map_mask_plot, homography=homography,
                                             description=', '.join(layer_names))

    scene.map = type_map
    del map_mask
    del map_mask_pedestrian
    del map_mask_vehicle
    del map_mask_plot

    for node_id in pd.unique(data['node_id']):
        node_frequency_multiplier = 1
        node_df = data[data['node_id'] == node_id]

        if dynamic:
            if 'vehicle.parked' in attribute_dict[node_id] or 'vehicle.stopped' in attribute_dict[node_id]:
                continue
        elif len(attribute_dict[node_id]) == 1 and 'vehicle.parked' in attribute_dict[node_id]:
            # Skip vehicles that were only ever parked, while still keeping vehicles
            # that were parked at some point and then started moving.
            continue

        if node_df['x'].shape[0] < 2:
            continue

        if not np.all(np.diff(node_df['frame_id']) == 1):
            min_index = node_df['frame_id'].min()
            max_index = node_df['frame_id'].max()
            node_df = node_df.set_index('frame_id').reindex(
                range(min_index, max_index + 1)).interpolate().reset_index()
            node_df['type'] = node_df['type'].mode()[0]
            node_df['node_id'] = node_id
            node_df['robot'] = False
            # print('Occlusion')
            # continue  # TODO Make better

        node_values = node_df[['x', 'y']].values
        x = node_values[:, 0]
        y = node_values[:, 1]
        heading = node_df['heading'].values
        if node_df.iloc[0]['type'] == env.NodeType.VEHICLE and not node_id == 'ego':
            # Kalman filter agent.
            vx = derivative_of(x, scene.dt)
            vy = derivative_of(y, scene.dt)
            velocity = np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1)

            filter_veh = NonlinearKinematicBicycle(dt=scene.dt, sMeasurement=1.0)
            P_matrix = None
            for i in range(len(x)):
                if i == 0:  # initialize KF
                    # initial P_matrix
                    P_matrix = np.identity(4)
                elif i < len(x):
                    # assign new est values
                    x[i] = x_vec_est_new[0][0]
                    y[i] = x_vec_est_new[1][0]
                    heading[i] = x_vec_est_new[2][0]
                    velocity[i] = x_vec_est_new[3][0]

                if i < len(x) - 1:  # no action on last data
                    # filtering
                    x_vec_est = np.array([[x[i]], [y[i]], [heading[i]], [velocity[i]]])
                    z_new = np.array([[x[i + 1]], [y[i + 1]], [heading[i + 1]], [velocity[i + 1]]])
                    x_vec_est_new, P_matrix_new = filter_veh.predict_and_update(
                        x_vec_est=x_vec_est,
                        u_vec=np.array([[0.], [0.]]),
                        P_matrix=P_matrix,
                        z_new=z_new)
                    P_matrix = P_matrix_new

            curvature, pl, _ = trajectory_curvature(np.stack((x, y), axis=-1))
            if pl < 1.0:  # vehicle is "not" moving
                x = x[0].repeat(max_timesteps + 1)
                y = y[0].repeat(max_timesteps + 1)
                heading = heading[0].repeat(max_timesteps + 1)
            global total
            global curv_0_2
            global curv_0_1
            total += 1
            if pl > 1.0:
                if curvature > .2:
                    curv_0_2 += 1
                    node_frequency_multiplier = 3 * int(np.floor(total / curv_0_2))
                elif curvature > .1:
                    curv_0_1 += 1
                    node_frequency_multiplier = 3 * int(np.floor(total / curv_0_1))

        if half_dt:
            t_old = np.linspace(0, 1, x.shape[0])
            t_new = np.linspace(0, 1, 2 * x.shape[0])
            x = np.interp(t_new, t_old, x)
            y = np.interp(t_new, t_old, y)
            heading = np.interp(t_new, t_old, heading + np.pi, period=2 * np.pi) - np.pi

            vx = derivative_of(x, scene.dt / 2)
            vy = derivative_of(y, scene.dt / 2)
            ax = derivative_of(vx, scene.dt / 2)
            ay = derivative_of(vy, scene.dt / 2)
        else:
            vx = derivative_of(x, scene.dt)
            vy = derivative_of(y, scene.dt)
            ax = derivative_of(vx, scene.dt)
            ay = derivative_of(vy, scene.dt)

        if node_df.iloc[0]['type'] == env.NodeType.VEHICLE:
            v = np.stack((vx, vy), axis=-1)
            v_norm = np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1, keepdims=True)
            heading_v = np.divide(v, v_norm, out=np.zeros_like(v), where=(v_norm > 1.))
            heading_x = heading_v[:, 0]
            heading_y = heading_v[:, 1]
            a_norm = np.divide(ax * vx + ay * vy, v_norm[..., 0],
                               out=np.zeros_like(ax), where=(v_norm[..., 0] > 1.))

            if half_dt:
                d_heading = derivative_of(heading, scene.dt / 2, radian=True)
            else:
                d_heading = derivative_of(heading, scene.dt, radian=True)

            data_dict = {('position', 'x'): x,
                         ('position', 'y'): y,
                         ('velocity', 'x'): vx,
                         ('velocity', 'y'): vy,
                         ('velocity', 'norm'): np.linalg.norm(np.stack((vx, vy), axis=-1), axis=-1),
                         ('acceleration', 'x'): ax,
                         ('acceleration', 'y'): ay,
                         ('acceleration', 'norm'): a_norm,
                         ('heading', 'x'): heading_x,
                         ('heading', 'y'): heading_y,
                         ('heading', '°'): heading,
                         ('heading', 'd°'): d_heading}
            node_data = pd.DataFrame(data_dict, columns=data_columns_vehicle)
        else:
            data_dict = {('position', 'x'): x,
                         ('position', 'y'): y,
                         ('velocity', 'x'): vx,
                         ('velocity', 'y'): vy,
                         ('acceleration', 'x'): ax,
                         ('acceleration', 'y'): ay}
            node_data = pd.DataFrame(data_dict, columns=data_columns_pedestrian)

        node = Node(node_type=node_df.iloc[0]['type'],
                    node_id=0 if node_id == 'ego' else abs(hash(node_id)),
                    data=node_data,
                    frequency_multiplier=node_frequency_multiplier,
                    description=node_id)
        node.first_timestep = node_df['frame_id'].iloc[0]
        if half_dt:
            node.first_timestep *= 2
        if node_df.iloc[0]['robot'] == True:
            node.is_robot = True
            scene.robot = node

        scene.nodes.append(node)

    if half_dt:
        # We are interpolating to make the overall dt half of what it was.
        scene.dt /= 2.0
        scene.timesteps *= 2

    return scene
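
# ---------------------------------------------------------------------------
# Illustration of the masked normalisation used when building the vehicle
# data_dict above (not part of the pipeline; the arrays are made-up values).
# Velocity vectors are turned into unit heading vectors only where the speed
# exceeds 1 m/s, so near-stationary timesteps keep a zero heading vector
# instead of amplifying noise.
def _demo_heading_from_velocity():
    import numpy as np

    vx = np.array([0.2, 3.0, -4.0])
    vy = np.array([0.1, 4.0, 0.0])
    v = np.stack((vx, vy), axis=-1)
    v_norm = np.linalg.norm(v, axis=-1, keepdims=True)
    heading_v = np.divide(v, v_norm, out=np.zeros_like(v), where=(v_norm > 1.))
    print(heading_v)  # first row stays [0, 0]; the others are unit vectors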
def process_scene(self, data):
    # Get extent and other data.
    traj_data = data.trajectory_data
    comp_key = np.logical_and(traj_data['node_id'] == 'ego', traj_data['frame_id'] == 0)
    s = traj_data[comp_key].iloc[0]
    ego_initx, ego_inity = s['x'], s['y']
    max_timesteps = traj_data['frame_id'].max()
    x_min = round_to_int(traj_data['x'].min() - 50)
    x_max = round_to_int(traj_data['x'].max() + 50)
    y_min = round_to_int(traj_data['y'].min() - 50)
    y_max = round_to_int(traj_data['y'].max() + 50)
    x_size = x_max - x_min
    y_size = y_max - y_min
    traj_data['x'] = traj_data['x'] - x_min
    traj_data['y'] = traj_data['y'] - y_min

    # Trim points above/below certain Z levels.
    points = data.overhead_points
    z_mask = np.logical_and(points[:, 2] > self.Z_LOWERBOUND, points[:, 2] < self.Z_UPPERBOUND)
    points = data.overhead_points[z_mask]
    labels = data.overhead_labels[z_mask]

    # Select road LIDAR points.
    road_label_mask = labels == SegmentationLabel.Road.value
    road_points = points[road_label_mask]

    # Form bitmap.
    bitmap = points_to_2d_histogram(road_points, x_min, x_max, y_min, y_max,
                                    data.scene_config.pixels_per_m)
    bitmap[bitmap > 0] = 1

    # Filter trajectory points that are far from the road.
    transform = scipy.ndimage.distance_transform_cdt(-bitmap + 1)
    X = traj_data[['x', 'y']].values
    Xind = (data.scene_config.pixels_per_m * X).astype(int)
    vals = transform[Xind.T[0], Xind.T[1]]
    traj_data = traj_data[vals < self.DISTANCE_FROM_ROAD]

    # Form color image bitmap.
    bitmap[bitmap > 0] = 255
    bitmap = np.stack((bitmap, np.zeros(bitmap.shape), np.zeros(bitmap.shape)), axis=0)
    bitmap = bitmap.astype(np.uint8)

    # Create scene.
    dt = data.fixed_delta_seconds * data.scene_config.record_interval
    scene = Scene(timesteps=max_timesteps + 1, dt=dt, name=data.scene_name, aug_func=augment)
    scene.ego_initx = ego_initx
    scene.ego_inity = ego_inity
    scene.x_min = x_min
    scene.y_min = y_min
    scene.x_max = x_max
    scene.y_max = y_max
    type_map = dict()
    scene.map_name = data.map_name
    scene.x_size = x_size
    scene.y_size = y_size
    patch_box = (x_min + 0.5 * (x_max - x_min), y_min + 0.5 * (y_max - y_min), y_size, x_size)
    patch_angle = 0
    canvas_size = (np.round(3 * y_size).astype(int), np.round(3 * x_size).astype(int))
    homography = np.array([[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]])
    layer_names = ['drivable_area']
    scene.patch_box = patch_box
    scene.patch_angle = patch_angle
    scene.canvas_size = canvas_size
    scene.homography = homography
    scene.layer_names = layer_names
    type_map['VEHICLE'] = GeometricMap(data=bitmap, homography=homography,
                                       description=', '.join(layer_names))
    type_map['VISUALIZATION'] = GeometricMap(data=bitmap, homography=homography,
                                             description=', '.join(layer_names))
    scene.map = type_map
    scene_config = data.scene_config
    del bitmap
    del points
    del labels
    del road_points
    del data
    return process_trajectron_scene(scene, traj_data, max_timesteps, scene_config)
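
# ---------------------------------------------------------------------------
# Hedged sketch of what a points_to_2d_histogram-style rasterisation could
# look like. This is an assumption for illustration, not the helper's actual
# implementation: LIDAR x/y coordinates are binned into a grid with
# `pixels_per_m` cells per metre over the [x_min, x_max] x [y_min, y_max]
# extent, and any cell containing points can then be marked as road.
def _demo_rasterize_points():
    import numpy as np

    def rasterize_points(points_xy, x_min, x_max, y_min, y_max, pixels_per_m):
        bins_x = int((x_max - x_min) * pixels_per_m)
        bins_y = int((y_max - y_min) * pixels_per_m)
        hist, _, _ = np.histogram2d(points_xy[:, 0], points_xy[:, 1],
                                    bins=(bins_x, bins_y),
                                    range=[[x_min, x_max], [y_min, y_max]])
        return hist  # shape (bins_x, bins_y); point counts per cell

    pts = np.array([[1.0, 2.0], [1.2, 2.1], [8.0, 9.0]])
    grid = rasterize_points(pts, 0, 10, 0, 10, pixels_per_m=2)
    print(grid.shape, grid.max())  # (20, 20) 2.0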