def task(self, **kwargs):
    """Run AMCL+ICP localization over a pre-recorded scan bag on a canopies map.

    Builds the canopies map from the configured map image, brings up the ROS
    localization stack, plays the scan bag for its full duration, and then
    stops the output-bag recording.

    Keyword Args:
        launch_rviz (bool): when True, start RVIZ with the AMCL config and
            kill it when the task ends. Defaults to False.
    """
    launch_rviz = kwargs.get('launch_rviz', False)
    origin_map_image_path = self.data_sources['map_image_path']
    map_semantic_trunks = self.data_sources['map_semantic_trunks']
    scan_bag_path = self.data_sources['scan_bag_path']
    bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']

    # Build the canopies map, cropped to the semantic trunks' bounding box.
    source_image = cv2.imread(origin_map_image_path)
    cv2.imwrite(os.path.join(self.repetition_dir, 'map_image.jpg'), source_image)
    canopies_image = maps_generation.generate_canopies_map(source_image)
    upper_left, lower_right = cv_utils.get_bounding_box(canopies_image,
                                                        map_semantic_trunks.values(),
                                                        expand_ratio=bounding_box_expand_ratio)
    canopies_image = canopies_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    canopies_map_yaml_path, _ = ros_utils.save_image_to_map(canopies_image,
                                                            resolution=self.params['resolution'],
                                                            map_name='canopies_map',
                                                            dir_name=self.repetition_dir)

    # Bring up ROS and, optionally, RVIZ.
    ros_utils.start_master()
    if launch_rviz:
        rviz_config_path = os.path.join(config.root_dir_path,
                                        'src/experiments_framework/framework/amcl.rviz')
        ros_utils.launch_rviz(rviz_config_path)

    # Canopies localization stack: static TF, map server, AMCL, ICP.
    self._launch_base_to_scan_static_tf(namespace='canopies')
    self._launch_map_server(canopies_map_yaml_path, namespace='canopies')
    self._launch_amcl(namespace='canopies')
    self._launch_icp(namespace='canopies')

    # Play the input bag and block until it finishes.
    _, bag_duration = ros_utils.play_bag(scan_bag_path)  # TODO: need to read from data_sources
    time.sleep(bag_duration)

    # Stop recording output bag.
    ros_utils.stop_recording_bags()

    if launch_rviz:
        ros_utils.kill_rviz()
def task(self, **kwargs):
    """Play back recorded scans/poses/odometry against a canopies map with AMCL.

    Builds the canopies map from the configured map image (with a currently
    hard-coded crop), brings up the ROS localization stack with global AMCL
    initialization, and launches the scan/pose/odom playback.

    Keyword Args:
        launch_rviz (bool): when True, start RVIZ with the AMCL-video config
            and kill it when the task ends. Defaults to False.
    """
    launch_rviz = kwargs.get('launch_rviz', False)
    map_image_path = self.data_sources['map_image_path']
    map_semantic_trunks = self.data_sources['map_semantic_trunks']
    scans_and_poses_pickle_path = self.data_sources['scans_and_poses_pickle_path']
    odom_pickle_path = self.data_sources['odom_pickle_path']
    video_path = self.data_sources['video_path']
    bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']
    map_resolution = self.params['map_resolution']

    # Build the canopies map image and crop it to the region of interest.
    source_image = cv2.imread(map_image_path)
    cv2.imwrite(os.path.join(self.experiment_dir, 'image_for_map.jpg'), source_image)
    canopies_image = maps_generation.generate_canopies_map(source_image)
    trunk_xs = [trunk[0] for trunk in map_semantic_trunks.values()]
    # Only consumed by the disabled get_bounding_box call below; kept for parity.
    lower_boundaries = [[canopies_image.shape[0] - 600, min(trunk_xs)],
                        [canopies_image.shape[0] - 600, max(trunk_xs)]]
    # upper_left, lower_right = cv_utils.get_bounding_box(canopies_image, map_semantic_trunks.values() + lower_boundaries, expand_ratio=bounding_box_expand_ratio)
    upper_left = (700, 700)
    lower_right = (3200, 2700)
    canopies_image = canopies_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    canopies_map_yaml_path, _ = ros_utils.save_image_to_map(canopies_image,
                                                            resolution=map_resolution,
                                                            map_name='canopies_map',
                                                            dir_name=self.experiment_dir)

    # Bring up ROS (wall-clock time) and, optionally, RVIZ.
    ros_utils.start_master(use_sim_time=False)
    if launch_rviz:
        rviz_config_path = os.path.join(config.root_dir_path,
                                        'src/experiments_framework/framework/amcl_video.rviz')
        ros_utils.launch_rviz(rviz_config_path)

    # Canopies localization stack with global AMCL initialization.
    self._launch_base_to_scan_static_tf(namespace='canopies')
    self._launch_map_server(canopies_map_yaml_path, namespace='canopies')
    self._launch_amcl(namespace='canopies')
    self._initialize_global_localization(namespace='canopies')

    # Replay the recorded scans, poses and odometry.
    playback_args = {'ns': 'canopies',
                     'scans_and_poses_pickle_path': scans_and_poses_pickle_path,
                     'odom_pickle_path': odom_pickle_path,
                     'video_path': video_path}
    ros_utils.launch(package='localization',
                     launch_file='scan_pose_odom_playback.launch',
                     argv=playback_args)
    time.sleep(500)  # TODO: remove

    # Stop recording output bag
    # ros_utils.stop_recording_bags()

    if launch_rviz:
        ros_utils.kill_rviz()
def prologue(self):
    """Prepare maps, localization images, trajectory and offline scans.

    Builds canopies/trunks map images from the map image, builds canopies/
    trunks localization images (warping the localization image onto the map
    image when they differ), plans a trajectory through the configured
    waypoints, writes it to a ROS bag, pre-generates synthetic scans for both
    map flavors, and records the valid-scans rates in ``self.results``.

    NOTE(review): ``path_planning_experiment_summary_path``,
    ``canopies_scans_pickle_path`` and ``trunks_scans_pickle_path`` are read
    from enclosing (presumably module-level) scope -- confirm they are
    defined there.
    """
    origin_map_image_path = self.data_sources['map_image_path']
    map_semantic_trunks = self.data_sources['map_semantic_trunks']
    map_external_trunks = self.data_sources['map_external_trunks']
    localization_semantic_trunks = self.data_sources['localization_semantic_trunks']
    localization_external_trunks = self.data_sources['localization_external_trunks']
    origin_localization_image_path = self.data_sources['localization_image_path']
    trajectory_waypoints = self.data_sources['trajectory_waypoints']
    bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']
    mean_trunk_radius_for_map = self.params['mean_trunk_radius_for_map']
    mean_trunk_radius_for_localization = self.params['mean_trunk_radius_for_localization']
    std_trunk_radius_for_map = self.params['std_trunk_radius_for_map']
    std_trunk_radius_for_localization = self.params['std_trunk_radius_for_localization']

    # Generate canopies and trunks map images
    map_image = cv2.imread(origin_map_image_path)
    cv2.imwrite(os.path.join(self.experiment_dir, 'image_for_map.jpg'), map_image)
    canopies_map_image = maps_generation.generate_canopies_map(map_image)
    # NOTE: dict.values() + list works on Python 2 only (values() is a list there).
    trunks_map_image = maps_generation.generate_trunks_map(map_image,
                                                           map_semantic_trunks.values() + map_external_trunks,
                                                           mean_trunk_radius_for_map,
                                                           std_trunk_radius_for_map,
                                                           np_random_state=self.np_random_state)
    # The bounding box of the semantic trunks defines the crop used for BOTH
    # map flavors and, later, for the localization images.
    upper_left, lower_right = cv_utils.get_bounding_box(canopies_map_image,
                                                        map_semantic_trunks.values(),
                                                        expand_ratio=bounding_box_expand_ratio)
    canopies_map_image = canopies_map_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    trunks_map_image = trunks_map_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    self.canopies_map_yaml_path, _ = ros_utils.save_image_to_map(canopies_map_image,
                                                                 resolution=self.params['map_resolution'],
                                                                 map_name='canopies_map',
                                                                 dir_name=self.experiment_dir)
    self.trunks_map_yaml_path, _ = ros_utils.save_image_to_map(trunks_map_image,
                                                               resolution=self.params['map_resolution'],
                                                               map_name='trunks_map',
                                                               dir_name=self.experiment_dir)

    # Generate canopies and trunks localization images
    localization_image = cv2.imread(origin_localization_image_path)
    if origin_localization_image_path != origin_map_image_path:
        # Align the localization image onto the map image using the matched
        # semantic trunks, then map all trunk coordinates through the same
        # affine transform.
        localization_image, affine_transform = cv_utils.warp_image(localization_image,
                                                                   localization_semantic_trunks.values(),
                                                                   map_semantic_trunks.values(),
                                                                   transformation_type='affine')
        # Pad the 2x3 affine matrix to 3x3 so it can be fed to perspectiveTransform.
        affine_transform = np.insert(affine_transform, [2], [0, 0, 1], axis=0)
        localization_semantic_trunks_np = np.float32(localization_semantic_trunks.values()).reshape(-1, 1, 2)
        localization_semantic_trunks_np = cv2.perspectiveTransform(localization_semantic_trunks_np, affine_transform)
        # Re-key the transformed semantic trunks by the map trunk labels.
        # NOTE(review): this assumes localization_semantic_trunks.values() and
        # map_semantic_trunks.keys() iterate in matching order -- confirm.
        localization_semantic_trunks = {key: (int(np.round(value[0])), int(np.round(value[1])))
                                        for key, value in zip(map_semantic_trunks.keys(),
                                                              localization_semantic_trunks_np[:, 0, :].tolist())}
        localization_external_trunks_np = np.float32(localization_external_trunks).reshape(-1, 1, 2)
        localization_external_trunks_np = cv2.perspectiveTransform(localization_external_trunks_np, affine_transform)
        localization_external_trunks = localization_external_trunks_np[:, 0, :].tolist()
        image_for_trajectory_path = os.path.join(self.experiment_dir, 'aligned_image_for_localization.jpg')
        cv2.imwrite(image_for_trajectory_path, localization_image)
    else:
        image_for_trajectory_path = origin_localization_image_path
    canopies_localization_image = maps_generation.generate_canopies_map(localization_image)
    trunks_localization_image = maps_generation.generate_trunks_map(localization_image,
                                                                    localization_semantic_trunks.values() + localization_external_trunks,
                                                                    mean_trunk_radius_for_localization,
                                                                    std_trunk_radius_for_localization,
                                                                    np_random_state=self.np_random_state)
    # Crop the localization images with the SAME bounding box as the map images.
    canopies_localization_image = canopies_localization_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    trunks_localization_image = trunks_localization_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    self.canopies_localization_image_path = os.path.join(self.experiment_dir, 'canopies_localization.jpg')
    cv2.imwrite(self.canopies_localization_image_path, canopies_localization_image)
    self.trunks_localization_image_path = os.path.join(self.experiment_dir, 'trunks_localization.jpg')
    cv2.imwrite(self.trunks_localization_image_path, trunks_localization_image)
    self.canopies_localization_image_height = canopies_localization_image.shape[0]
    self.trunks_localization_image_height = trunks_localization_image.shape[0]
    viz_image = cv2.imread(image_for_trajectory_path)[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    self.viz_image_path = os.path.join(self.experiment_dir, 'image_for_visualization.jpg')
    cv2.imwrite(self.viz_image_path, viz_image)

    # Get trajectory
    # Waypoints come in three shapes: a 2-tuple of trunk labels (midpoint of
    # the two trunks), a 6-tuple of two labels + per-trunk (dx, dy) offsets,
    # or a single trunk label (the trunk itself).
    waypoints_coordinates = []
    for waypoint in trajectory_waypoints:
        if type(waypoint) is tuple and len(waypoint) == 2:
            point1 = localization_semantic_trunks[waypoint[0]]
            point2 = localization_semantic_trunks[waypoint[1]]
            # Python 2 '/' is floor division for int pixel coordinates.
            waypoints_coordinates.append(((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2))
        elif type(waypoint) is tuple and len(waypoint) == 6:
            point1 = np.array(localization_semantic_trunks[waypoint[0]]) + np.array([waypoint[2], waypoint[3]])
            point2 = np.array(localization_semantic_trunks[waypoint[1]]) + np.array([waypoint[4], waypoint[5]])
            waypoints_coordinates.append(((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2))
        else:
            waypoints_coordinates.append(localization_semantic_trunks[waypoint])
    if path_planning_experiment_summary_path is not None:
        # Reuse a previous path-planning run; JSON object keys are strings,
        # hence ['1'] here versus .results[1] in the else branch.
        with open(path_planning_experiment_summary_path) as f:
            path_planning_experiment = json.load(f)
        self.results['trajectory'] = path_planning_experiment['results']['1']['trajectory']
    else:
        path_planning_experiment = PathPlanningExperiment(name='path_planning',
                                                          data_sources={'map_image_path': image_for_trajectory_path,
                                                                        'map_upper_left': upper_left,
                                                                        'map_lower_right': lower_right,
                                                                        'waypoints': waypoints_coordinates},
                                                          working_dir=self.experiment_dir,
                                                          metadata=self.metadata)
        path_planning_experiment.run(repetitions=1)
        self.results['trajectory'] = path_planning_experiment.results[1]['trajectory']
    trajectory = self.results['trajectory']
    # Assign timestamps at the target frequency; epsilon avoids a t=0 stamp.
    freq = self.params['target_frequency']
    epsilon_t = 1e-3
    timestamps = np.linspace(start=epsilon_t,
                             stop=epsilon_t + (1.0 / freq) * len(trajectory),
                             num=len(trajectory))
    pose_time_tuples_list = [(x, y, t) for (x, y), t in zip(trajectory, timestamps)]
    self.trajectory_bag_path = os.path.join(self.experiment_dir, 'trajectory.bag')
    ros_utils.trajectory_to_bag(pose_time_tuples_list, self.trajectory_bag_path)

    # Generate scan offline
    # Reuse externally supplied scan pickles when available; otherwise
    # generate them from the trajectory bag.
    if canopies_scans_pickle_path is not None:
        self.canopies_scans_pickle_path = canopies_scans_pickle_path
    else:
        self.canopies_scans_pickle_path = os.path.join(self.experiment_dir, 'canopies_scan.pkl')
        offline_synthetic_scan_generator.generate_scans_pickle(self.trajectory_bag_path,
                                                               canopies_localization_image,
                                                               self.params['min_angle'],
                                                               self.params['max_angle'],
                                                               self.params['samples_num'],
                                                               self.params['min_distance'],
                                                               self.params['max_distance'],
                                                               self.params['localization_resolution'],
                                                               self.params['r_primary_search_samples'],
                                                               self.params['r_secondary_search_step'],
                                                               output_pickle_path=self.canopies_scans_pickle_path)
    if trunks_scans_pickle_path is not None:
        self.trunks_scans_pickle_path = trunks_scans_pickle_path
    else:
        self.trunks_scans_pickle_path = os.path.join(self.experiment_dir, 'trunks_scan.pkl')
        offline_synthetic_scan_generator.generate_scans_pickle(self.trajectory_bag_path,
                                                               trunks_localization_image,
                                                               self.params['min_angle'],
                                                               self.params['max_angle'],
                                                               self.params['samples_num'],
                                                               self.params['min_distance'],
                                                               self.params['max_distance'],
                                                               self.params['localization_resolution'],
                                                               self.params['r_primary_search_samples'],
                                                               self.params['r_secondary_search_step'],
                                                               output_pickle_path=self.trunks_scans_pickle_path)

    # Calculate valid scans rate
    # A scan whose samples are all NaN counts as invalid.
    with open(self.canopies_scans_pickle_path) as f:
        canopies_scans = pickle.load(f)
    with open(self.trunks_scans_pickle_path) as f:
        trunks_scans = pickle.load(f)
    canopies_invalid_scans = np.sum([np.all(np.isnan(scan)) for scan in canopies_scans.values()])
    canopies_total_timestamps = len(canopies_scans.values())
    self.results['canopies_valid_scans_rate'] = float(canopies_total_timestamps - canopies_invalid_scans) / canopies_total_timestamps
    trunks_invalid_scans = np.sum([np.all(np.isnan(scan)) for scan in trunks_scans.values()])
    trunks_total_timestamps = len(trunks_scans.values())
    self.results['trunks_valid_scans_rate'] = float(trunks_total_timestamps - trunks_invalid_scans) / trunks_total_timestamps
raise NotImplemented group_idx = 1 for snapshots_group, markers_locations_json_path in zip( snapshots_groups, markers_locations_json_paths): execution_dir = utils.create_new_execution_folder( 'trajectory_tagging_%d' % group_idx) group_idx += 1 with open(markers_locations_json_path) as f: markers_locations = json.load(f) for key, data_descriptor in snapshots_group.items(): if relevant_keys is not None: if key not in relevant_keys: continue points = markers_locations[key] image = cv2.imread(data_descriptor.path) map_image = maps_generation.generate_canopies_map(image) (upper_left_x, upper_left_y), (lower_right_x, lower_right_y) = cv_utils.get_bounding_box( map_image, points, expand_ratio=config.bounding_box_expand_ratio) map_image = map_image[upper_left_y:lower_right_y, upper_left_x:lower_right_x] if fork_shaped: _logger.info('Mark fork-shaped trajectory') pose_time_tuples_list = cv_utils.mark_trajectory_on_image( map_image) ros_utils.trajectory_to_bag( pose_time_tuples_list,
def prologue(self):
    """Prepare localization images, plan the trajectory and pre-generate scans.

    Builds the canopies and trunks localization images from the configured
    localization image (cropped to the semantic trunks' bounding box), plans a
    trajectory through the configured waypoints, writes it to a ROS bag, and
    pre-generates the synthetic scan pickles for both map flavors.
    """
    localization_semantic_trunks = self.data_sources['localization_semantic_trunks']
    origin_localization_image_path = self.data_sources['localization_image_path']
    trajectory_waypoints = self.data_sources['trajectory_waypoints']
    localization_external_trunks = self.data_sources['localization_external_trunks']
    params = self.params
    bounding_box_expand_ratio = params['bounding_box_expand_ratio']
    mean_trunk_radius_for_localization = params['mean_trunk_radius_for_localization']
    std_trunk_radius_for_localization = params['std_trunk_radius_for_localization']

    # Canopies localization image, cropped to the semantic trunks' bounding box.
    localization_image = cv2.imread(origin_localization_image_path)
    image_for_trajectory_path = origin_localization_image_path
    canopies_localization_image = maps_generation.generate_canopies_map(localization_image)
    upper_left, lower_right = cv_utils.get_bounding_box(canopies_localization_image,
                                                        localization_semantic_trunks.values(),
                                                        expand_ratio=bounding_box_expand_ratio)

    def _crop(image):
        # Crop to the bounding box computed above (rows then columns).
        return image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]

    canopies_localization_image = _crop(canopies_localization_image)
    self.canopies_localization_image_path = os.path.join(self.experiment_dir, 'canopies_localization.jpg')
    cv2.imwrite(self.canopies_localization_image_path, canopies_localization_image)
    self.canopies_localization_image_height = canopies_localization_image.shape[0]

    # Trunks localization image on the same crop.
    trunks_localization_image = maps_generation.generate_trunks_map(
        localization_image,
        localization_semantic_trunks.values() + localization_external_trunks,
        mean_trunk_radius_for_localization,
        std_trunk_radius_for_localization,
        np_random_state=self.np_random_state)
    trunks_localization_image = _crop(trunks_localization_image)
    self.trunks_localization_image_path = os.path.join(self.experiment_dir, 'trunks_localization.jpg')
    cv2.imwrite(self.trunks_localization_image_path, trunks_localization_image)
    self.trunks_localization_image_height = trunks_localization_image.shape[0]

    # Translate each waypoint spec into pixel coordinates.
    def _waypoint_to_point(waypoint):
        # A 2-tuple of trunk labels -> midpoint between the two trunks.
        if type(waypoint) is tuple and len(waypoint) == 2:
            point1 = localization_semantic_trunks[waypoint[0]]
            point2 = localization_semantic_trunks[waypoint[1]]
            return ((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2)
        # A 6-tuple adds per-trunk (dx, dy) offsets before taking the midpoint.
        if type(waypoint) is tuple and len(waypoint) == 6:
            point1 = np.array(localization_semantic_trunks[waypoint[0]]) + np.array([waypoint[2], waypoint[3]])
            point2 = np.array(localization_semantic_trunks[waypoint[1]]) + np.array([waypoint[4], waypoint[5]])
            return ((point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2)
        # Otherwise the waypoint is a single trunk label.
        return localization_semantic_trunks[waypoint]

    waypoints_coordinates = [_waypoint_to_point(waypoint) for waypoint in trajectory_waypoints]

    # Plan the trajectory through the waypoints.
    path_planning_experiment = PathPlanningExperiment(name='path_planning',
                                                      data_sources={'map_image_path': image_for_trajectory_path,
                                                                    'map_upper_left': upper_left,
                                                                    'map_lower_right': lower_right,
                                                                    'waypoints': waypoints_coordinates},
                                                      working_dir=self.experiment_dir,
                                                      metadata=self.metadata)
    path_planning_experiment.run(repetitions=1)
    self.results['trajectory'] = path_planning_experiment.results[1]['trajectory']
    trajectory = self.results['trajectory']

    # Stamp the trajectory at the target frequency and dump it to a bag.
    freq = params['target_frequency']
    epsilon_t = 1e-3
    timestamps = np.linspace(start=epsilon_t,
                             stop=epsilon_t + (1.0 / freq) * len(trajectory),
                             num=len(trajectory))
    pose_time_tuples_list = [(x, y, t) for (x, y), t in zip(trajectory, timestamps)]
    self.trajectory_bag_path = os.path.join(self.experiment_dir, 'trajectory.bag')
    ros_utils.trajectory_to_bag(pose_time_tuples_list, self.trajectory_bag_path)

    # Pre-generate the synthetic scans: canopies first, then trunks.
    self.canopies_scans_pickle_path = os.path.join(self.experiment_dir, 'canopies_scan.pkl')
    self.trunks_scans_pickle_path = os.path.join(self.experiment_dir, 'trunks_scan.pkl')
    for scan_source_image, scans_pickle_path in ((canopies_localization_image, self.canopies_scans_pickle_path),
                                                 (trunks_localization_image, self.trunks_scans_pickle_path)):
        offline_synthetic_scan_generator.generate_scans_pickle(self.trajectory_bag_path,
                                                               scan_source_image,
                                                               params['min_angle'],
                                                               params['max_angle'],
                                                               params['samples_num'],
                                                               params['min_distance'],
                                                               params['max_distance'],
                                                               params['localization_resolution'],
                                                               params['r_primary_search_samples'],
                                                               params['r_secondary_search_step'],
                                                               output_pickle_path=scans_pickle_path)
def generate_scans_pickle(video_path, output_pickle_path, resolution):
    """Extract synthetic laser scans and UGV poses from a video and pickle them.

    Iterates over the frames of *video_path*, segments the vehicle in each
    frame, generates a contours-based laser scan around the detected vehicle
    position, and stores a ``(LaserScan, PointStamped)`` tuple per video
    timestamp in an ``OrderedDict`` that is dumped to *output_pickle_path*.

    :param video_path: path of the input video file
    :param output_pickle_path: path of the output pickle file
    :param resolution: meters-per-pixel scale used to convert scan ranges
    :raises Exception: if the video stream cannot be opened

    NOTE(review): min_angle, max_angle, samples_num, min_distance,
    max_distance, frame_id, r_primary_search_samples and
    r_secondary_search_step are read from enclosing (presumably module-level)
    scope -- confirm they are defined there.
    """
    scans_and_ugv_poses = OrderedDict()
    prev_scan_time = None
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():  # idiom fix: was `cap.isOpened() == False`
        raise Exception('Error opening video stream')
    try:
        frame_idx = 0
        frames_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        while cap.isOpened():
            is_success, frame = cap.read()
            frame_idx += 1
            # NOTE(review): this stops BEFORE processing the final frame
            # (frame_idx == frames_count) -- confirm that is intentional.
            if frame_idx >= frames_count:
                break
            if not is_success:
                print('skipped!!!!!!')
                continue
            if prev_scan_time is None:
                # The first readable frame only initializes the scan-time reference.
                prev_scan_time = datetime.datetime.now()
                continue
            curr_scan_time = datetime.datetime.now()
            vehicle_pose = segmentation.extract_vehicle(frame)
            if vehicle_pose is None:
                continue
            contours_image = maps_generation.generate_canopies_map(frame)
            scan_ranges = contours_scan.generate(contours_image,
                                                 center_x=vehicle_pose[0],
                                                 center_y=vehicle_pose[1],
                                                 min_angle=min_angle,
                                                 max_angle=max_angle,
                                                 samples_num=samples_num,
                                                 min_distance=min_distance,
                                                 max_distance=max_distance,
                                                 resolution=resolution,
                                                 r_primary_search_samples=r_primary_search_samples,
                                                 r_secondary_search_step=r_secondary_search_step)
            video_timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) * 1e-3  # msec -> sec
            laser_scan = LaserScan()
            laser_scan.header.stamp = rospy.rostime.Time.from_sec(video_timestamp)
            laser_scan.header.frame_id = frame_id
            laser_scan.header.seq = frame_idx
            laser_scan.angle_min = min_angle
            laser_scan.angle_max = max_angle
            laser_scan.angle_increment = (max_angle - min_angle) / samples_num
            # Bug fix: timedelta.seconds truncates to the whole-seconds component,
            # which is almost always 0 between consecutive frames; use
            # total_seconds() to get the fractional inter-scan time.
            laser_scan.scan_time = (curr_scan_time - prev_scan_time).total_seconds()
            laser_scan.range_min = min_distance * resolution
            laser_scan.range_max = max_distance * resolution
            laser_scan.ranges = np.asanyarray(scan_ranges)
            prev_scan_time = curr_scan_time
            vehicle_pose_point = PointStamped()
            vehicle_pose_point.point.x = vehicle_pose[0]
            vehicle_pose_point.point.y = vehicle_pose[1]
            vehicle_pose_point.header.stamp = rospy.rostime.Time.from_sec(video_timestamp)
            vehicle_pose_point.header.seq = frame_idx
            scans_and_ugv_poses[video_timestamp] = (laser_scan, vehicle_pose_point)
            print(video_timestamp)
    finally:
        # Release the capture device even if scan generation fails mid-video.
        cap.release()
    with open(output_pickle_path, 'wb') as f:
        pickle.dump(scans_and_ugv_poses, f)
tracker = object_tracking.PointMassTracker( frequency=config.target_system_frequency, transition_variances=(1e-5, 1e-5, 1e-3, 1e-3), observation_variances=(1e-9, 1e-9), init_pose=(vehicle_pose[0], vehicle_pose[1]), init_variances=(1e-3, 1e-3, 1e-3, 1e-3), change_thresholds=(20, 20)) vehicle_x, vehicle_y = vehicle_pose[0], vehicle_pose[1] else: filtered_pose = tracker.update_and_get_estimation(vehicle_pose) # vehicle_x, vehicle_y = filtered_pose[0], filtered_pose[1] if vehicle_pose is None: # TODO: temp continue # TODO: temp vehicle_x, vehicle_y = vehicle_pose[0], vehicle_pose[ 1] # TODO: temp contours_image = maps_generation.generate_canopies_map(frame) scan_ranges = contours_scan.generate( contours_image, center_x=vehicle_x, center_y=vehicle_y, min_angle=min_angle, max_angle=max_angle, samples_num=samples_num, min_distance=min_distance, max_distance=max_distance, resolution=resolution, r_primary_search_samples=r_primary_search_samples, r_secondary_search_step=r_secondary_search_step) video_timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) * 1e-3 ros_timestamp = rospy.rostime.Time.from_sec(video_timestamp)