def fly(client: airsim.MultirotorClient, args: argparse.Namespace) -> None:
    if args.verbose:
        print(f"[ff] HomeGeoPoint: {Vec3.from_GeoPoint(client.getHomeGeoPoint())}\n")
        print(f"[ff] VehiclePose.position: {Vec3.from_Vector3r(client.simGetVehiclePose().position)}\n")

    initial_state = client.getMultirotorState()
    if initial_state.landed_state == airsim.LandedState.Landed:
        print("[ff] Taking off")
        client.takeoffAsync(timeout_sec=8).join()
    else:
        client.hoverAsync().join()  # airsim.LandedState.Flying

    #__move_on_path(client, args.flight_path, args.flight_velocity)
    #__move_on_box(client, z=-20, side=20, velocity=args.flight_velocity)

    if not args.use_viewpoints:
        future = client.moveOnPathAsync(
            [airsim.Vector3r(coord.x, coord.y, coord.z) for coord in args.flight_path],
            args.flight_velocity,
        )
    else:
        import viewpoints
        future = client.moveOnPathAsync(
            [airsim.Vector3r(*position) for position in viewpoints.Positions],
            args.flight_velocity,
        )

    print("[ff] Press [space] to take pictures")
    ch, img_count, img_responses = msvcrt.getch(), 0, []
    while ch == b' ':
        img_responses.extend(
            client.simGetImages([
                airsim.ImageRequest(
                    ff.CameraName.bottom_center,
                    airsim.ImageType.Scene,
                    False,
                    True,  # compressed PNG image
                )
            ])
        )
        img_count += 1
        print(f"     {img_count} pictures taken", end="\r")
        ch = msvcrt.getch()
    print()

    print("[ff] Waiting for drone to finish path...", end=" ", flush=True)
    future.join()
    print("done.")

    for i, response in enumerate(img_responses):
        airsim.write_file(
            os.path.join(args.output_folder, f"out_{i}.png"),
            response.image_data_uint8,
        )

    time.sleep(4)
    print("[ff] Drone reset")
    client.reset()
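# NOTE: hypothetical driver code, not part of the original script. It shows the minimal
# wiring fly() above expects: an AirSim client with API control plus an argparse
# Namespace carrying flight_path (objects with .x/.y/.z attributes), flight_velocity,
# use_viewpoints, output_folder, and verbose. All values below are placeholders.
if __name__ == "__main__":
    from collections import namedtuple

    Coord = namedtuple("Coord", "x y z")

    client = airsim.MultirotorClient()
    client.confirmConnection()
    client.enableApiControl(True)
    client.armDisarm(True)

    args = argparse.Namespace(
        verbose=True,
        use_viewpoints=False,
        flight_velocity=5.0,
        output_folder=".",
        flight_path=[Coord(0, 0, -10), Coord(10, 0, -10)],  # placeholder NED waypoints
    )

    fly(client, args)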
def fly(client: airsim.MultirotorClient, args: argparse.Namespace) -> None:
    initial_pose = client.simGetVehiclePose()
    initial_state = client.getMultirotorState()

    if args.verbose:
        ff.print_pose(initial_pose, to_eularian_angles=airsim.to_eularian_angles)

    if initial_state.landed_state == airsim.LandedState.Landed:
        print("[ff] Taking off")
        client.takeoffAsync(timeout_sec=8).join()
    else:
        client.hoverAsync().join()  # airsim.LandedState.Flying

    print("[ff] Flying viewpoints...")
    print("[ff] Press [space] to take pictures (or any other key to stop)")  # TODO
    future = _move_by_path(client, args)

    img_count = 0
    while True:
        if msvcrt.kbhit():
            if msvcrt.getch() != b" ":
                break  # https://stackoverflow.com/a/13207813

            response, *_ = client.simGetImages([
                airsim.ImageRequest(
                    ff.CameraName.front_center,
                    airsim.ImageType.Scene,
                    False,
                    True,  # compressed PNG image
                )
            ])
            img_count += 1
            print(f"     {img_count} pictures taken", end="\r")

            airsim.write_file(
                os.path.join(args.output_folder, f"out_{img_count}.png"),
                response.image_data_uint8,
            )

            # TODO save poses to .log file
            print("camera_position:", response.camera_position)  # Vector3r
            print("camera_orientation:", response.camera_orientation)  # Quaternionr
    print()

    print("[ff] Waiting for drone to finish path...", end=" ", flush=True)
    future.join()
    print("done")

    time.sleep(4)
    client.reset()
    print("[ff] Drone reset")
class ImagePublisher(Node):
    def __init__(self, rate=60):
        super().__init__("airsim_images")

        # Create timer for calling publish at the predefined rate
        self.create_timer(1 / rate, self.publish)

        # AirSim variables
        self._airsim_client = MultirotorClient(ip=os.environ["WSL_HOST_IP"])
        # self._camera_name = "front_center"
        self._camera_name = "bottom_center"
        self._camera_frame_id = "realsense"
        self._vehicle_name = self.get_namespace().split("/")[1]

        # ROS publishers
        # self._pub_ir = self.create_publisher(Image, "ir/image_raw", 1)
        self._pub_color = self.create_publisher(Image, "color/image_raw", 1)
        # self._pub_depth = self.create_publisher(Image, "depth/image_raw", 1)
        # self._pub_info_ir = self.create_publisher(CameraInfo, "ir/camera_info", 1)
        self._pub_info_color = self.create_publisher(CameraInfo, "color/camera_info", 1)

        # TF related variables
        self.br = TransformBroadcaster(self)

        # CV
        self.bridge = CvBridge()

        # Internal variables
        self._cam_info_msgs = {}

        self.get_logger().info("Initialized image publisher")

    def publish(self):
        """Publish images from AirSim to ROS"""
        responses = self._airsim_client.simGetImages([
            # uncompressed RGB array bytes
            ImageRequest(self._camera_name, ImageType.Scene, compress=False),
            # # infrared uncompressed image
            # ImageRequest(self._camera_name, ImageType.Infrared, compress=False),
            # # floating point uncompressed image
            # ImageRequest(self._camera_name, ImageType.DepthPlanner, pixels_as_float=True, compress=False),
        ], self._vehicle_name)

        color_response = responses[0]
        # ir_response = responses[1]
        # depth_response = responses[2]

        header = Header()
        header.stamp = self.get_clock().now().to_msg()
        # TODO: implement a parameter for the frame id, and decide whether each image type
        # should have a different frame id (this may mean loading the ids via ROS parameters)
        header.frame_id = self._camera_frame_id

        # Build the camera info message if it has not been built yet
        if self._vehicle_name not in self._cam_info_msgs:
            self._cam_info_msgs[self._vehicle_name] = {}

            cam_info = self._airsim_client.simGetCameraInfo(self._camera_name, self._vehicle_name)
            d_params = self._airsim_client.simGetDistortionParams(self._camera_name, self._vehicle_name)
            self.get_logger().info(f"{d_params}")
            self.get_logger().info(f"""
                HFOV: {cam_info.fov}, PROJ: {cam_info.proj_mat}
            """)

            # TODO: implement multiple cameras for each lens on the realsense and update this method
            self._cam_info_msgs[self._vehicle_name]["color"] = construct_info(
                header, cam_info, color_response.height, color_response.width)
            # self._cam_info_msgs[self._vehicle_name]["ir"] = self._cam_info_msgs[self._vehicle_name]["color"]

        image_color = construct_image(header, color_response, "bgr8")
        # image_ir = construct_image(header, ir_response, "rgb8")
        # image_depth = construct_image(header, depth_response, "rgb8")

        # TODO: use camera pose from AirSim
        tfmsg = TransformStamped()
        tfmsg.transform.translation = Vector3(x=0., y=0., z=0.)
        tfmsg.transform.rotation = Quaternion(x=0., y=0., z=0., w=1.)
        tfmsg.child_frame_id = self._camera_frame_id
        tf_header = Header()
        tf_header.stamp = header.stamp
        tfmsg.header = tf_header
        tfmsg.header.frame_id = "world"
        self.br.sendTransform(tfmsg)

        self._pub_color.publish(image_color)
        # self._pub_ir.publish(image_ir)
        # self._pub_depth.publish(image_depth)
        self._pub_info_color.publish(self._cam_info_msgs[self._vehicle_name]["color"])
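# NOTE: a minimal, hypothetical entry point for running ImagePublisher; it is not part of
# the original node file. It assumes the node is launched under a namespace such as
# /Drone1 (so get_namespace().split("/")[1] yields the AirSim vehicle name) and that
# WSL_HOST_IP is set in the environment.
def main(args=None):
    import rclpy
    rclpy.init(args=args)
    node = ImagePublisher(rate=60)
    try:
        rclpy.spin(node)  # services the publish timer until interrupted
    except KeyboardInterrupt:
        pass
    finally:
        node.destroy_node()
        rclpy.shutdown()


if __name__ == "__main__":
    main()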
class Drone(Thread):
    """Background thread that polls AirSim telemetry and reacts to imminent collisions."""

    def __init__(self, telemetry: Telemetry):
        super(Drone, self).__init__()
        self.daemon = True
        self._exit = False
        self.telemetry = telemetry
        self.client: Optional[MultirotorClient] = None
        self.connect()
        self.controller = Controller(self.client, self.telemetry)
        self.collision_type = CollisionType.NONE

    def run(self):
        while not self._exit:
            try:
                self._process()
            except BufferError:
                pass
            except KeyboardInterrupt:
                self.shutdown()

    def _process(self):
        # One control iteration: refresh telemetry, then either react to a detected
        # collision, leave collision mode, or let the controller continue the mission.
        self.update_telemetry()
        if self.is_collision():
            self.process_collision()
        elif self.telemetry.collision_mode:
            self.stop_collision()
        else:
            self.controller.check_progress()
        time.sleep(0.1)

    def shutdown(self):
        self._exit = True
        self.client.enableApiControl(False)
        self.client.reset()

    def connect(self):
        self.client = MultirotorClient()
        self.client.confirmConnection()
        self.client.enableApiControl(True)
        self.client.getBarometerData()

    # region TELEMETRY

    def update_telemetry(self):
        multirotor_state = self.client.getMultirotorState()
        self.telemetry.landed_state = multirotor_state.landed_state
        self.telemetry.ned_position = multirotor_state.kinematics_estimated.position
        self.telemetry.linear_velocity = multirotor_state.kinematics_estimated.linear_velocity
        self.telemetry.gps_position = self.client.getGpsData().gnss.geo_point
        self.telemetry.gps_home = self.client.getHomeGeoPoint()
        self.telemetry.quaternion = self.client.simGetVehiclePose().orientation
        self.get_front_camera_image()
        self.get_bottom_camera_image()

    def get_bottom_camera_image(self):
        # Tilt camera "0" downwards and feed its depth image to the terrain collision detector
        self.client.simSetCameraOrientation('0', Quaternionr(0.0, -0.7, 0.0, 0.5))
        bottom_camera_data = self.client.simGetImages([
            airsim.ImageRequest("0", airsim.ImageType.DepthPlanner, pixels_as_float=True)
        ])
        self.telemetry.terrain_collision.process_image(bottom_camera_data[0])

    def get_front_camera_image(self):
        # Point camera "0" straight ahead and feed its depth image to the wall collision detector
        self.client.simSetCameraOrientation('0', Quaternionr(0.0, 0.0, 0.0, 1.0))
        front_camera_data = self.client.simGetImages([
            airsim.ImageRequest("0", airsim.ImageType.DepthPlanner, pixels_as_float=True)
        ])
        self.telemetry.wall_collision.process_image(front_camera_data[0])

    # endregion

    # region COLLISION

    def is_collision(self):
        collision = (self.telemetry.wall_collision.collision
                     or self.telemetry.terrain_collision.collision)
        return collision

    def process_collision(self):
        wall_collision = self.telemetry.wall_collision.collision
        terrain_collision = self.telemetry.terrain_collision.collision
        if wall_collision:
            if terrain_collision:
                self.wall_collision()
                self.collision_type = CollisionType.BOTH
            else:
                self.wall_collision()
                self.collision_type = CollisionType.WALL
        else:
            if terrain_collision:
                self.terrain_collision()
                self.collision_type = CollisionType.TERRAIN
            else:
                self.collision_type = CollisionType.NONE

    def wall_collision(self):
        # Climb away from the obstacle unless a wall evasion is already in progress
        if (self.collision_type != CollisionType.WALL
                and self.collision_type != CollisionType.BOTH):
            self.telemetry.collision_mode = True
            self.client.moveToZAsync(-500, settings.COLLISION_SPEED)

    def terrain_collision(self):
        # Climb one meter above the current altitude while keeping the horizontal target
        if self.collision_type != CollisionType.TERRAIN:
            if self.telemetry.ned_position.z_val < -8:
                self.telemetry.collision_mode = True
                target = self.telemetry.target_position
                actual = self.telemetry.ned_position
                self.client.moveToPositionAsync(target.x_val, target.y_val,
                                                actual.z_val - 1, settings.COLLISION_SPEED)

    def stop_collision(self):
        self.controller.send_position()
        self.telemetry.collision_mode = False
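# NOTE: hypothetical usage sketch, not from the original module. Drone is a daemon
# thread, so the main thread has to stay alive while it runs; the no-argument
# Telemetry() constructor below is an assumption.
if __name__ == "__main__":
    telemetry = Telemetry()
    drone = Drone(telemetry)
    drone.start()  # runs Drone.run() in the background
    try:
        while drone.is_alive():
            time.sleep(1.0)
    except KeyboardInterrupt:
        drone.shutdown()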