def fly(client: airsim.MultirotorClient, args: argparse.Namespace) -> None:
    initial_pose = client.simGetVehiclePose()
    if args.verbose:
        ff.print_pose(initial_pose, airsim.to_eularian_angles)

    points, poses, names = [], [], []
    for position, orientation in args.viewpoints:
        position = airsim.Vector3r(*position)
        orientation = airsim.Quaternionr(*orientation)  # xyzw
        points.append(position)
        poses.append(airsim.Pose(position, orientation))
        names.append(ff.to_xyz_str(position) + "\n" + ff.to_xyzw_str(orientation))

    plot_linestrips = True  # else plot_transforms
    curr_press_keys = set()
    key_combination = {keyboard.Key.space, keyboard.Key.shift}

    def on_press(key):
        nonlocal plot_linestrips, curr_press_keys, key_combination
        if key in key_combination:
            curr_press_keys.add(key)
            if curr_press_keys == {keyboard.Key.space}:
                if (plot_linestrips := not plot_linestrips):
                    client.simPlotLineStrip(points, thickness=3, duration=10)
                else:
                    client.simPlotTransforms(poses, thickness=3, scale=40, duration=10)
def convert_uavmvs_to_airsim_pose(camera: TrajectoryCamera, translation=None, scaling=None):
    import airsim

    assert camera.kind in [TRAJ, CSV]
    position = convert_uavmvs_to_airsim_position(camera.position, translation, scaling)

    qw, qx, qy, qz = map(float, camera._rotation_into(CSV))
    # qx = -qx  # XXX
    qy = -qy
    qz = -qz
    orientation = airsim.Quaternionr(qx, qy, qz, qw)
    length = orientation.get_length()
    assert 0.99 <= length <= 1.01, orientation
    # orientation /= length

    # NOTE ignore translation and scaling for orientation (we'd only care for rotation)
    pose = airsim.Pose(position, orientation)
    del airsim
    return pose
def convert_ned_to_enu(pos_ned, orientation_ned):
    pos_enu = airsim.Vector3r(pos_ned.x_val, -pos_ned.y_val, -pos_ned.z_val)
    # Note: Quaternionr takes (x_val, y_val, z_val, w_val); double-check that
    # passing w_val in the x slot matches your intended NED->ENU convention.
    orientation_enu = airsim.Quaternionr(orientation_ned.w_val, -orientation_ned.z_val,
                                         orientation_ned.x_val, orientation_ned.y_val)
    return pos_enu, orientation_enu  # was: orientation_ned (returned the unconverted input)
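# Minimal sanity check for convert_ned_to_enu (a sketch; assumes the airsim
# package is importable and the corrected return value above):
import airsim

pos_ned = airsim.Vector3r(1.0, 2.0, -3.0)
q_ned = airsim.Quaternionr(0.0, 0.0, 0.0, 1.0)  # identity, (x, y, z, w)
pos_enu, q_enu = convert_ned_to_enu(pos_ned, q_ned)
assert (pos_enu.x_val, pos_enu.y_val, pos_enu.z_val) == (1.0, -2.0, 3.0)
assert abs(q_enu.get_length() - 1.0) < 1e-6  # the conversion must preserve unit norm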
def AirView(W, X, Y, Z, skeleton_recv, dir=1, rate=1):
    pp = pprint.PrettyPrinter(indent=4)
    client = airsim.VehicleClient()
    client.confirmConnection()
    # airsim.wait_key('Press any key to start the tracking')
    x_init, y_init, z_init = 0, 0, -1.6
    while True:
        sk = skeleton_recv.recv()
        # print('AirSimCV received:', sk)
        if isinstance(sk, list) and len(sk) == 25:
            FacePos = sk[0]
            x_shift = -FacePos[2] / 50
            y_shift = FacePos[0] / 212
            z_shift = FacePos[1] / 256
            # print("received HeadPose:", HeadPose[0])
            n_w, n_qx, n_qy, n_qz = W.value, X.value, Y.value, Z.value
            if dir:
                client.simSetVehiclePose(
                    airsim.Pose(
                        airsim.Vector3r(x_init + x_shift, y_init + rate * y_shift,
                                        z_init + rate * z_shift),
                        airsim.Quaternionr(n_qx, n_qy, n_qz, n_w)), True)
            else:
                client.simSetVehiclePose(
                    airsim.Pose(
                        airsim.Vector3r(x_init - x_shift, y_init - rate * y_shift,
                                        z_init - rate * z_shift),
                        airsim.Quaternionr(n_qx, n_qy, n_qz, n_w)), True)
        elif sk == "Break":
            print("Tracking terminating...")
            # was client.simSetPose(), which is not in the current Python API
            client.simSetVehiclePose(
                airsim.Pose(airsim.Vector3r(0, 0, 0), airsim.to_quaternion(0.0, 0, 0)), True)
            break
        elif sk == 'Empty':
            i = 1
            client.simSetVehiclePose(
                airsim.Pose(airsim.Vector3r(0, 0, 0), airsim.to_quaternion(0, 0, 0)), True)
def location(client):
    return client.simSetVehiclePose(
        airsim.Pose(
            airsim.Vector3r(x_val=71.37133026123047,
                            y_val=173.48350524902344,
                            z_val=-0.6527218222618103),
            airsim.Quaternionr(w_val=0.7069156765937805,
                               x_val=2.5558463676134124e-05,
                               y_val=-1.7646530977799557e-05,
                               z_val=-0.7072978019714355)), True)
def CreateTFMessage(pose_msg, img_time, sequence):
    # down = airsim.Quaternionr(0, -0.7071067690849304, 0, 0.7071067094802856)
    down = airsim.Quaternionr(0, 0, 0, 1)
    enu = airsim.Quaternionr(0.7071068, 0.7071068, 0, 0)
    final = enu * down  # quaternion composition (was enu.__mul__(down))

    msg_tf = TFMessage()
    msg_tf.transforms.append(TransformStamped())
    msg_tf.transforms[0].header.seq = sequence
    msg_tf.transforms[0].header.stamp = img_time
    msg_tf.transforms[0].header.frame_id = "/down_camera_frame"
    msg_tf.transforms[0].child_frame_id = "/world"
    msg_tf.transforms[0].transform.translation.x = pose_msg.position.x_val
    msg_tf.transforms[0].transform.translation.y = pose_msg.position.y_val
    msg_tf.transforms[0].transform.translation.z = pose_msg.position.z_val
    msg_tf.transforms[0].transform.rotation.x = final.x_val
    msg_tf.transforms[0].transform.rotation.y = final.y_val
    msg_tf.transforms[0].transform.rotation.z = final.z_val
    msg_tf.transforms[0].transform.rotation.w = final.w_val
    return msg_tf
def location(client):
    return client.simSetVehiclePose(
        airsim.Pose(
            airsim.Vector3r(x_val=resources.x_location,
                            y_val=resources.y_location,
                            z_val=resources.z_location),
            airsim.Quaternionr(w_val=resources.w_orientation,
                               x_val=resources.x_orientation,
                               y_val=resources.y_orientation,
                               z_val=resources.z_orientation)), True)
def _move_by_vehicle_poses(client: airsim.MultirotorClient, args: argparse.Namespace) -> None:
    # NOTE this raise makes the code below unreachable until it is removed;
    # check https://github.com/microsoft/AirSim/pull/2324
    raise Warning("simSetVehiclePose() is meant for ComputerVision mode")
    for position, orientation in args.viewpoints:
        pose = airsim.Pose(airsim.Vector3r(*position), airsim.Quaternionr(*orientation))
        client.simSetVehiclePose(pose, ignore_collision=True)
        client.hoverAsync().join()
        time.sleep(2)
def generate_random_quaternion():
    # draw a random rotation axis, rejecting near-zero vectors to avoid
    # dividing by a tiny norm
    norm = 0.
    while norm < 1e-4:
        d = np.random.randn(3)
        norm = np.linalg.norm(d)
    theta = 2. * np.pi * np.random.rand()
    s = np.sin(theta)
    c = np.cos(theta)
    d *= s / norm  # scale the unit axis by sin(theta)
    return airsim.Quaternionr(d[0], d[1], d[2], c)  # (sin*axis, cos) has unit norm
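# Quick check (a sketch): the quaternion above is unit-norm by construction,
# since (sin(theta) * unit_axis, cos(theta)) satisfies sin^2 + cos^2 = 1.
q = generate_random_quaternion()
assert abs((q.x_val**2 + q.y_val**2 + q.z_val**2 + q.w_val**2)**0.5 - 1.0) < 1e-9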
def location(client):
    return client.simSetVehiclePose(
        airsim.Pose(
            airsim.Vector3r(x_val=60.330116271972656,
                            y_val=-272.38421630859375,
                            z_val=-0.6507290601730347),
            airsim.Quaternionr(w_val=0.7091798782348633,
                               x_val=4.51675005024299e-05,
                               y_val=-2.079392288578674e-06,
                               z_val=0.7050275802612305)), True)
def get_image(client, pos, q):
    x, y, z = pos
    qw, qx, qy, qz = q.elements  # pyquaternion-style (w, x, y, z) ordering
    client.simSetVehiclePose(
        airsim.Pose(airsim.Vector3r(x, y, z), airsim.Quaternionr(qx, qy, qz, qw)), True)
    responses = client.simGetImages([
        airsim.ImageRequest("0", airsim.ImageType.Scene),
    ])
    return responses[0]
def runSingleCar(id: int):
    client = airsim.CarClient()
    client.confirmConnection()

    vehicle_name = f"Car_{id}"
    # identity orientation is (x, y, z, w) = (0, 0, 0, 1); (0, 0, 0, 0) is not a valid quaternion
    pose = airsim.Pose(airsim.Vector3r(0, 7 * id, 0), airsim.Quaternionr(0, 0, 0, 1))

    print(f"Creating {vehicle_name}")
    success = client.simAddVehicle(vehicle_name, "Physxcar", pose)
    if not success:
        print(f"Failed to create {vehicle_name}")
        return

    # Sleep for some time to wait for other vehicles to be created
    time.sleep(1)

    # driveCar(vehicle_name, client)
    print(f"Driving {vehicle_name} for a few secs...")
    client.enableApiControl(True, vehicle_name)

    car_controls = airsim.CarControls()

    # go forward
    car_controls.throttle = 0.5
    car_controls.steering = 0
    client.setCarControls(car_controls, vehicle_name)
    time.sleep(3)  # let the car drive a bit

    # go forward + steer right
    car_controls.throttle = 0.5
    car_controls.steering = 1
    client.setCarControls(car_controls, vehicle_name)
    time.sleep(3)

    # go reverse
    car_controls.throttle = -0.5
    car_controls.is_manual_gear = True
    car_controls.manual_gear = -1
    car_controls.steering = 0
    client.setCarControls(car_controls, vehicle_name)
    time.sleep(3)
    car_controls.is_manual_gear = False  # change back gear to auto
    car_controls.manual_gear = 0

    # apply brakes
    car_controls.brake = 1
    client.setCarControls(car_controls, vehicle_name)
    time.sleep(3)
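# Hedged usage sketch: each runSingleCar() call opens its own CarClient
# connection, so several cars can be spawned and driven from separate threads.
import threading

if __name__ == "__main__":
    threads = [threading.Thread(target=runSingleCar, args=(i,)) for i in range(1, 4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()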
def toQuaternion(pitch, roll, yaw):
    t0 = math.cos(yaw * 0.5)
    t1 = math.sin(yaw * 0.5)
    t2 = math.cos(roll * 0.5)
    t3 = math.sin(roll * 0.5)
    t4 = math.cos(pitch * 0.5)
    t5 = math.sin(pitch * 0.5)

    q = airsim.Quaternionr()
    q.w_val = t0 * t2 * t4 + t1 * t3 * t5  # w
    q.x_val = t0 * t3 * t4 - t1 * t2 * t5  # x
    q.y_val = t0 * t2 * t5 + t1 * t3 * t4  # y
    q.z_val = t1 * t2 * t4 - t0 * t3 * t5  # z
    return q
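# Round-trip check (a sketch): toQuaternion above should agree with the client
# helper airsim.to_quaternion, which uses the same (pitch, roll, yaw) radian
# convention.
import math
import airsim

q1 = toQuaternion(pitch=0.1, roll=-0.2, yaw=0.3)
q2 = airsim.to_quaternion(pitch=0.1, roll=-0.2, yaw=0.3)
assert all(
    math.isclose(a, b, abs_tol=1e-12)
    for a, b in [(q1.w_val, q2.w_val), (q1.x_val, q2.x_val),
                 (q1.y_val, q2.y_val), (q1.z_val, q2.z_val)])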
def _move_by_positions(client: airsim.MultirotorClient, args: argparse.Namespace) -> None:
    for position, orientation in args.viewpoints:
        _pitch, _roll, yaw = airsim.to_eularian_angles(airsim.Quaternionr(*orientation))
        client.moveToPositionAsync(
            *position,
            args.flight_velocity,
            drivetrain=airsim.DrivetrainType.MaxDegreeOfFreedom,
            # to_eularian_angles() returns radians, but YawMode expects degrees
            # (requires `import math`)
            yaw_mode=airsim.YawMode(is_rate=False, yaw_or_rate=math.degrees(yaw)),
        ).join()
        client.hoverAsync().join()
        time.sleep(2)
def setAtPosition(self, x: float, y: float, z: float):
    """ Set the drone at position instantly

    Args:
        x : float - x coordinate
        y : float - y coordinate
        z : float - z coordinate

    Returns:
        none
    """
    pos = airsim.Vector3r(x, y, z)
    # identity orientation; Quaternionr takes (x, y, z, w), so w goes last
    # (the original Quaternionr(1, 0, 0, 0) was a 180-degree roll, not identity)
    q = airsim.Quaternionr(0, 0, 0, 1)
    pose = airsim.Pose(pos, q)
    self.client.simSetVehiclePose(pose, True)
    self.flyToPosition(x, y, z, 1)
def publishState(self):
    drone_state = self.client.getMultirotorState(vehicle_name=self.name)
    pos_ned = drone_state.kinematics_estimated.position
    orientation_ned = drone_state.kinematics_estimated.orientation
    pos = airsim.Vector3r(pos_ned.x_val, -pos_ned.y_val, -pos_ned.z_val)
    # Note: Quaternionr takes (x_val, y_val, z_val, w_val); double-check that
    # passing w_val in the x slot matches the intended NED->ENU convention.
    orientation = airsim.Quaternionr(orientation_ned.w_val, -orientation_ned.z_val,
                                     -orientation_ned.x_val, orientation_ned.y_val)

    sim_pose_msg = PoseStamped()
    sim_pose_msg.pose.position.x = pos.x_val
    sim_pose_msg.pose.position.y = pos.y_val
    sim_pose_msg.pose.position.z = pos.z_val
    sim_pose_msg.pose.orientation.w = orientation.w_val
    sim_pose_msg.pose.orientation.x = orientation.x_val
    sim_pose_msg.pose.orientation.y = orientation.y_val
    sim_pose_msg.pose.orientation.z = orientation.z_val
    self.state_pub.publish(sim_pose_msg)
def calcularPoseRelativa(distancia, theta, phi, poseVisor):
    # start from an identity pose and offset it by `distancia` along the
    # direction given by the spherical angles (phi, theta), in degrees
    poseMovido = airsim.Pose(airsim.Vector3r(), airsim.Quaternionr())
    rot = Rotation.from_euler('ZYX', [phi, theta, 0], True)
    position = rot.apply([distancia, 0, 0])
    poseMovido.position.x_val += position[0]
    poseMovido.position.y_val += position[1]
    poseMovido.position.z_val += position[2]

    # rotate the offset into the viewer's frame, then translate by the viewer's position
    rot = Rotation.from_quat([
        poseVisor.orientation.x_val, poseVisor.orientation.y_val,
        poseVisor.orientation.z_val, poseVisor.orientation.w_val
    ])
    nuevaPosition = rot.apply([
        poseMovido.position.x_val, poseMovido.position.y_val, poseMovido.position.z_val
    ])
    nuevaPosition += [
        poseVisor.position.x_val, poseVisor.position.y_val, poseVisor.position.z_val
    ]
    poseMovido.position = airsim.Vector3r(nuevaPosition[0], nuevaPosition[1], nuevaPosition[2])
    return poseMovido
def main(camera_name, align_to_left_camera, image_width, image_height, requests,
         level_name, human_name, item_name, num_capture):
    timestamp = int(time.time())
    client = airsim.VehicleClient()
    camera_info = camera_info_list[camera_name]
    level_info = level_info_list[level_name]
    item_info = item_info_list[item_name]

    # camera config
    # TODO: we might add random lens distortion to get more realistic images.
    lc, rc = (0.0, 1.0) if align_to_left_camera else (-0.5, 0.5)
    client.simSetCameraPose(
        'front_left',
        airsim.Pose(airsim.Vector3r(0., lc * camera_info.baseline, 0.),
                    airsim.Quaternionr()).UE4ToAirSim())
    client.simSetCameraFov('front_left', camera_info.HFOV)
    client.simSetCameraImageSize('front_left', image_width, image_height)
    client.simSetCameraPose(
        'front_right',
        airsim.Pose(airsim.Vector3r(0., rc * camera_info.baseline, 0.),
                    airsim.Quaternionr()).UE4ToAirSim())
    client.simSetCameraFov('front_right', camera_info.HFOV)
    client.simSetCameraImageSize('front_right', image_width, image_height)

    # load level
    # client.simLoadLevel(level_name)
    # TODO: after loading a different level, the position reference differs from what we expect. fix it.

    # spawn humans/items
    # TODO: find a way to immediately reflect segID changes
    human_actor_name = 'human_{}'.format(timestamp)
    human_BP_name = metahumans_bp_path_template.format(human_name)
    client.simSpawnObject(human_actor_name, human_BP_name, physics_enabled=False,
                          from_actorBP=True)
    client.simSetSegmentationObjectID(human_actor_name, 1)
    if item_name != 'none':
        item_actor_name = 'item_{}'.format(timestamp)
        client.simSpawnObject(item_actor_name, item_name, physics_enabled=False,
                              from_actorBP=False)
        client.simSetSegmentationObjectID(item_actor_name, 2)

    # sometimes the metahuman's hands are split away from the body in the first
    # frame... wait a little to avoid it
    time.sleep(0.5)

    # generate data
    capture_folder = './capture_{}_{}_{}_{}/'.format(level_name, human_name, item_name, timestamp)
    os.makedirs(capture_folder, exist_ok=True)
    for idx in range(num_capture):
        # TODO: some metahumans' faces are unnaturally lit in the Room environment. find how to fix it!
        # Configure objects in the scene. We follow UE4's coordinates/units
        # convention for ease of working with the UE4 environment: AirSim uses
        # NED coordinates with unit 'm' while UE4 uses NEU coordinates with unit 'cm'.
        # The base position/horizontal rotation is where we arrange the stereo
        # camera, humans, and items; here we set the base position near the
        # bottom center of the booth shelf.
        base_position = generate_random_position(level_info.position_range)
        base_rotation = generate_random_rotation((0, 0, 1), (0, 360))
        # base_rotation = airsim.Quaternionr()
        # base_rotation = generate_random_rotation((0, 0, 1), (90, 90))
        world_to_base = airsim.Pose(base_position, base_rotation)

        # derive the stereo camera position
        camera_roll_error = 0  # 5
        camera_pitch_error = 0  # 3
        camera_height_error = 0  # 5
        camera_horiz_error = 0  # 3
        # camera_position = generate_random_position((80, 80, 40, 40, 160, 160))
        # camera_rotation = generate_random_rotation((0, 0, 1), (-150, -150))
        camera_position = generate_random_position(
            (-17 - camera_horiz_error, -17 + camera_horiz_error,
             0 - camera_horiz_error, 0 + camera_horiz_error,
             212.5 - camera_height_error, 212.5 + camera_height_error))
        camera_rotation = generate_random_rotation(
            (0, 0, 1), (-camera_roll_error, camera_roll_error)) * generate_random_rotation(
                (0, 1, 0), (90 - camera_pitch_error, 90 + camera_pitch_error))
        base_to_camera = airsim.Pose(camera_position, camera_rotation)
        world_to_camera = base_to_camera * world_to_base
        client.simSetVehiclePose(world_to_camera.UE4ToAirSim())

        # derive the human position
        # TODO: add human skeleton scaling variation
        crouching_diff = 0  # TODO: impl
        human_position = generate_random_position(
            (-70, -30, -20, 20, -crouching_diff, -crouching_diff))
        # human_position = generate_random_position((-40, -40, 0, 0, -crouching_diff, -crouching_diff))
        human_rotation_diff = 20  # 20 # 60
        human_rotation = generate_random_rotation(
            (0, 0, 1), (-90 - human_rotation_diff, -90 + human_rotation_diff))
        base_to_human = airsim.Pose(human_position, human_rotation)
        world_to_human = base_to_human * world_to_base
        client.simSetObjectPose(human_actor_name, world_to_human.UE4ToAirSim())

        # derive the human pose
        left_hand_IKposition = airsim.Vector3r(50, 0, -100)  # relative to the clavicle
        left_hand_rotation = generate_random_position((-30, -30, 0, 0, 0, 0))  # TODO: find natural range
        right_hand_IKposition = generate_random_hand_reach((60, 100)) * -1  # relative to the clavicle
        right_hand_rotation = generate_random_position(
            metahumans_hand_rotation_range)  # relative to the bone
        # TODO: find a way to make a crouching pose, natural feet, and waist pose
        client.simSetMetahumanPose(human_actor_name, left_hand_IKposition, left_hand_rotation,
                                   right_hand_IKposition, right_hand_rotation)

        # derive the item pose
        if item_name != 'none':
            hand_pose_world = client.simGetMetahumanBonePose(human_actor_name,
                                                             'middle_01_r').AirSimToUE4()
            hand_to_item = airsim.Pose(
                airsim.Vector3r(0, item_info.grab_offset, 0),
                generate_random_rotation((0, 0, 1), (0, 360)))
            world_to_item = hand_to_item * hand_pose_world
            client.simSetObjectPose(item_actor_name, world_to_item.UE4ToAirSim())

        # request image captures
        exposure_bias = generate_random_scalar(level_info.auto_exposure_bias_range)
        for camera_name in ["front_left", "front_right"]:
            client.simSetCameraPostProcess(
                camera_name,
                auto_exposure_bias=exposure_bias,
                auto_exposure_max_brightness=1.0,
                auto_exposure_min_brightness=1.0,
                lens_flare_intensity=0.0,
                motion_blur_amount=0.0,
            )
        responses = client.simGetImages(requests)

        # request attribute data
        # TODO: gather data useful for machine learning (camera params, key points, etc.)

        # save images and attribute data
        for response, request in zip(responses, requests):
            img = airsim.decode_image_response(response)
            if img.dtype == np.float16:
                # convert to a uint16 depth image with unit mm
                # (we expect the depth camera range to be up to 65 m)
                img = np.asarray((img * 1000).clip(0, 65535), dtype=np.uint16)
            if request.image_type == airsim.ImageType.Scene:
                name = 'rgb'
            elif request.image_type == airsim.ImageType.DepthPlanar:
                # be careful: simulated depth values are not very accurate at far
                # range due to the limited bit depth of the rendering depth buffer
                name = 'depth'
            elif request.image_type == airsim.ImageType.Segmentation:
                name = 'mask'
            else:
                raise Exception('no impl')
            if request.camera_name == 'front_left':
                name += 'L'
            elif request.camera_name == 'front_right':
                name += 'R'
            else:
                raise Exception('no impl')
            airsim.write_png('{}{:0>8}_{}.png'.format(capture_folder, idx, name), img)

    if item_name != 'none':
        client.simDestroyObject(item_actor_name)
    client.simDestroyObject(human_actor_name)
def mess2quat(message):
    quaternion = airsim.Quaternionr(message.x_val, message.y_val, message.z_val, message.w_val)
    return quaternion
# assumed setup for this fragment (the client construction is not shown in the
# original; VehicleClient is an assumption and could equally be CarClient)
import os
import airsim

client = airsim.VehicleClient()
client.confirmConnection()
client.enableApiControl(True)

if not os.path.exists('c:/temp/'):
    os.makedirs('c:/temp/')

filename = 'c:/temp/' + "none"
png_image = client.simGetImage(str(0), airsim.ImageType.Scene)
airsim.write_file(os.path.normpath(filename + ".png"), png_image)

w = client.simGetCameraInfo("0").pose.orientation.w_val
x = client.simGetCameraInfo("0").pose.orientation.x_val
y = client.simGetCameraInfo("0").pose.orientation.y_val
z = client.simGetCameraInfo("0").pose.orientation.z_val

q = airsim.Quaternionr(x, y, z, w).conjugate()
client.simSetCameraOrientation("0", q)
filename = 'c:/temp/' + "conjugate"
png_image = client.simGetImage(str(0), airsim.ImageType.Scene)
airsim.write_file(os.path.normpath(filename + ".png"), png_image)

q = airsim.Quaternionr(x, y, z, w).inverse()
client.simSetCameraOrientation("0", q)
filename = 'c:/temp/' + "inverse"
png_image = client.simGetImage(str(0), airsim.ImageType.Scene)
airsim.write_file(os.path.normpath(filename + ".png"), png_image)

q = airsim.Quaternionr(x, y, z, w).conjugate().inverse()
client.simSetCameraOrientation("0", q)
filename = 'c:/temp/' + "conjugate_inverse"
png_image = client.simGetImage(str(0), airsim.ImageType.Scene)
airsim.write_file(os.path.normpath(filename + ".png"), png_image)  # completes the pattern above
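# Note (a sketch): for a unit quaternion, conjugate() and inverse() coincide
# (the inverse divides the conjugate by the squared norm, which is 1), so the
# "conjugate" and "inverse" captures above should look identical whenever the
# camera orientation is normalized.
import math

q_unit = airsim.Quaternionr(0.0, 0.0, 0.7071068, 0.7071068)
assert math.isclose(q_unit.conjugate().z_val, q_unit.inverse().z_val, abs_tol=1e-6)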
try:
    os.makedirs(OUT_DIR)  # assumption: the elided try block creates the output directory used below
except OSError as error:
    print(error)

client = airsim.VehicleClient()
client.confirmConnection()

data = pd.read_csv(os.path.join(ROOT, WORKING, 'airsim_rec.txt'),
                   sep=r"\s+|;", usecols=[1, 2, 3, 4, 5, 6, 7, 8])
for index, row in data.iterrows():
    client.simSetVehiclePose(
        airsim.Pose(
            airsim.Vector3r(row['POS_X'], row['POS_Y'], row['POS_Z']),
            airsim.Quaternionr(row['Q_X'], row['Q_Y'], row['Q_Z'], row['Q_W'])), True)
    png_image = client.simGetImage("0", airsim.ImageType.Scene)
    # airsim.write_file(row['ImageFile'], png_image)
    airsim.write_file(os.path.join(OUT_DIR, 'gt_' + row['ImageFile']), png_image)

# client = airsim.VehicleClient()
# client.confirmConnection()
# pose = client.simGetVehiclePose()
# client.simSetVehiclePose(airsim.Pose(airsim.Vector3r(0, 0, 0), airsim.to_quaternion(0, 0, 0)), True)
# pose = client.simGetVehiclePose()
# print("x={}, y={}, z={}".format(pose.position.x_val, pose.position.y_val, pose.position.z_val))
# angles = airsim.to_eularian_angles(client.simGetVehiclePose().orientation)
import read_data as traj

path = '/home/sim/data'
MONO = True
STEREO = False
LIDAR = True
TRUTH_IMU = False
minimum_altitude = 0

IMAGE_COLLECTION = MONO or STEREO or LIDAR

# 90-degree pitch-down camera orientation
# (the y component was missing its "0." prefix: -7071067690849304)
down = airsim.Quaternionr(0, -0.7071067690849304, 0, 0.7071067094802856)
imu = utils.qnorm(airsim.Quaternionr(1, 0, 0, 0), 10)

if IMAGE_COLLECTION:
    client = airsim.VehicleClient()
    client.confirmConnection()
    # Camera Information
    camera = client.simGetCameraInfo("3")
    test_camera = client.simGetCameraInfo("1")
    # compare orientations, not a Pose against a Quaternionr (the original
    # comparison could never be equal)
    if test_camera.pose.orientation != down:
        client.simSetCameraOrientation("1", down)
        client.simSetCameraOrientation("2", down)
    FoV = 90
    # Coordinate transformation
def main():
    depth_model = tf.keras.models.load_model(
        filepath="./depth/model.h5",
        custom_objects={
            "relu6": tf.nn.relu6,
            "tf": tf.nn
        },
        compile=False,
    )
    print("depth model loaded")

    client = airsim.MultirotorClient()
    client.confirmConnection()
    client.enableApiControl(True)
    client.armDisarm(True)
    client.takeoffAsync().join()
    print("multirotor took off")

    client.simSetVehiclePose(
        airsim.Pose(
            airsim.Vector3r(*START_POS),
            airsim.Quaternionr(0, 0, 0, 1),
        ),
        ignore_collision=True,  # was misspelled "ignore_collison"
    )
    move_to_target(client)

    record = pathlib.Path(RECORD_PATH).joinpath(
        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
    scene_record = record.joinpath("scene")
    depth_record = record.joinpath("depth")
    scene_record.mkdir(mode=0o755, parents=True)
    depth_record.mkdir(mode=0o755, parents=True)
    record_counter = 1

    try:
        while True:
            pose = client.simGetVehiclePose().position
            if pose.distance_to(airsim.Vector3r(*TARGET_POS)) < 2:
                print("target position arrived")
                client.moveByVelocityAsync(0, 0, 0, 5).join()
                break

            response = client.simGetImages([
                airsim.ImageRequest(
                    camera_name="front_center",
                    image_type=airsim.ImageType.Scene,
                    compress=False,
                )
            ])
            scene = airsim.string_to_uint8_array(
                response[0].image_data_uint8).reshape(response[0].height,
                                                      response[0].width, 3)
            depth = np.maximum(
                np.squeeze(depth_model.predict(np.expand_dims(scene, axis=0))[0]), 1)

            result = algorithm.two_step_decision(depth)
            print(result)

            filename = str(record_counter).zfill(3) + ".png"
            Image.frombytes(
                mode="RGB",
                size=(response[0].width, response[0].height),
                data=response[0].image_data_uint8,
            ).save(scene_record.joinpath(filename))
            util.save_algorithm(
                depth_data=depth,
                algo_result=result,
                path=depth_record.joinpath(filename),
            )
            record_counter += 1

            dodge_duration = DODGE_DIST / FLY_SPEED
            if result == algorithm.Result.UP_RIGHT:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED / 4,
                    vy=FLY_SPEED / 2**0.5,
                    vz=-FLY_SPEED / 2**0.5,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.RIGHT:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED,
                    vy=FLY_SPEED,
                    vz=0,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.DOWN_RIGHT:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED / 4,
                    vy=FLY_SPEED / 2**0.5,
                    vz=FLY_SPEED / 2**0.5,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.DOWN:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED / 4,
                    vy=0,
                    vz=FLY_SPEED,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.DOWN_LEFT:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED / 4,
                    vy=-FLY_SPEED / 2**0.5,
                    vz=FLY_SPEED / 2**0.5,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.LEFT:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED,
                    vy=-FLY_SPEED,
                    vz=0,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.UP_LEFT:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED / 4,
                    vy=-FLY_SPEED / 2**0.5,
                    vz=-FLY_SPEED / 2**0.5,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.UP:
                client.moveByVelocityAsync(
                    vx=FLY_SPEED / 4,
                    vy=0,
                    vz=-FLY_SPEED,
                    duration=dodge_duration,
                ).join()
                move_to_target(client)
            elif result == algorithm.Result.STOP:
                pass
            else:
                move_to_target(client)
    except Exception:
        traceback.print_exc()

    client.reset()
def __init__(self):
    self.client = airsim.CarClient()
    self.client.confirmConnection()
    self.client.enableApiControl(True)
    self.left_cam_name = '2'
    self.right_cam_name = '1'
    self.forward_cam_name = '0'
    self.backward_cam_name = '4'
    self.setup_my_cameras()

    # i just drove around and hit ';' and found the vectors and
    # quaternions of those locations
    self.emergency_reset_poses = [
        airsim.Pose(airsim.Vector3r(46, -530, -.7), airsim.Quaternionr(0, 0, .01, 1.02)),
        airsim.Pose(airsim.Vector3r(320, -18, -.7), airsim.Quaternionr(0, 0, 1.0, .01)),
        airsim.Pose(airsim.Vector3r(229, -313, -.7), airsim.Quaternionr(0, 0, -.73, .69)),
        airsim.Pose(airsim.Vector3r(-800, -4, -.7), airsim.Quaternionr(0, 0, .02, 1.0))
    ]

    # Quaternionr is x, y, z, w for whatever reason
    # (note: the sim displays w, x, y, z when you press ';')
    self.normal_reset_poses = [
        airsim.Pose(airsim.Vector3r(63, 50, -.7), airsim.Quaternionr(0, 0, .71, .7)),
        airsim.Pose(airsim.Vector3r(62, 140, -.7), airsim.Quaternionr(0, 0, .71, .7)),
        airsim.Pose(airsim.Vector3r(114, 228, -.7), airsim.Quaternionr(0, 0, -0.37, .99)),
        airsim.Pose(airsim.Vector3r(296, 51, -.7), airsim.Quaternionr(0, 0, -.0675, .738)),
        airsim.Pose(airsim.Vector3r(298, 23, -.7), airsim.Quaternionr(0, 0, -.653, .721)),
        airsim.Pose(airsim.Vector3r(342, -14, -.7), airsim.Quaternionr(0, 0, -1.06, .014)),
        airsim.Pose(airsim.Vector3r(200, 10.4, -.7), airsim.Quaternionr(0, 0, 1.0, .009)),
        airsim.Pose(airsim.Vector3r(166, -65, -.7), airsim.Quaternionr(0, 0, -.661, 0.75)),
        airsim.Pose(airsim.Vector3r(230, 304, -.7), airsim.Quaternionr(0, 0, -.708, .711)),
        airsim.Pose(airsim.Vector3r(241, -481, -.7), airsim.Quaternionr(0, 0, 1.006, 0)),
        airsim.Pose(airsim.Vector3r(64, -520, -.7), airsim.Quaternionr(0, 0, .712, .707)),
        airsim.Pose(airsim.Vector3r(-24, -529, -.7), airsim.Quaternionr(0, 0, .004, 1.0)),
        airsim.Pose(airsim.Vector3r(-5.5, -300, -.7), airsim.Quaternionr(0, 0, .7, .7)),
        airsim.Pose(airsim.Vector3r(-236, -11, -.7), airsim.Quaternionr(0, 0, 1.0, 0)),
        airsim.Pose(airsim.Vector3r(-356, 94, -.7), airsim.Quaternionr(0, 0, -1.0, 0)),
        airsim.Pose(airsim.Vector3r(-456, -11, -.7), airsim.Quaternionr(0, 0, 1.0, .007)),
        airsim.Pose(airsim.Vector3r(-553, 22.5, -.7), airsim.Quaternionr(0, 0, .702, .712)),
        airsim.Pose(airsim.Vector3r(-661, 148.3, -.7), airsim.Quaternionr(0, 0, -.03, 1.0)),
        airsim.Pose(airsim.Vector3r(-480, 241, -.7), airsim.Quaternionr(0, 0, -.07, 1.0)),
        airsim.Pose(airsim.Vector3r(-165, 85, -.7), airsim.Quaternionr(0, 0, -.687, .72)),
        airsim.Pose(airsim.Vector3r(89, 89, -.7), airsim.Quaternionr(0, 0, .01, 1.0))
    ]
def generate_random_rotation(axis, angle_range):
    # sample an angle (degrees) uniformly from angle_range, then build the
    # axis-angle quaternion (sin(a/2)*axis, cos(a/2));
    # the parameter was renamed from `range` to avoid shadowing the builtin
    a = angle_to_rad(angle_range[0] + (angle_range[1] - angle_range[0]) * np.random.rand())
    c = np.cos(0.5 * a)
    s = np.sin(0.5 * a)
    return airsim.Quaternionr(axis[0] * s, axis[1] * s, axis[2] * s, c)
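# Usage sketch (assumptions: np and airsim imported, angle_to_rad defined as
# above): compose a random yaw about +z with a random pitch about +y, as the
# capture loop earlier does; Quaternionr's * is quaternion composition.
yaw_q = generate_random_rotation((0, 0, 1), (0, 360))
pitch_q = generate_random_rotation((0, 1, 0), (85, 95))
combined = yaw_q * pitch_q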
client = airsim.CarClient()
client.confirmConnection()
client.enableApiControl(True)

if not os.path.exists('c:/temp/'):
    os.makedirs('c:/temp/')

filename = 'c:/temp/' + "none"
png_image = client.simGetImage(str(0), airsim.ImageType.Scene)
airsim.write_file(os.path.normpath(filename + ".png"), png_image)

wf = client.simGetCameraInfo("0").pose.orientation.w_val
xf = client.simGetCameraInfo("0").pose.orientation.x_val
yf = client.simGetCameraInfo("0").pose.orientation.y_val
zf = client.simGetCameraInfo("0").pose.orientation.z_val
q = airsim.Quaternionr(xf, yf, zf, wf)
client.simSetCameraOrientation("0", q)

w = client.simGetCameraInfo("0").pose.orientation.w_val
x = client.simGetCameraInfo("0").pose.orientation.x_val
y = client.simGetCameraInfo("0").pose.orientation.y_val
z = client.simGetCameraInfo("0").pose.orientation.z_val

# unit quaternions: 180-degree rotations about the x, y, and z axes, plus the identity
xr = airsim.Quaternionr(1, 0, 0, 0)
yr = airsim.Quaternionr(0, 1, 0, 0)
zr = airsim.Quaternionr(0, 0, 1, 0)
wr = airsim.Quaternionr(0, 0, 0, 1)

q = airsim.Quaternionr(x, y, z, w).rotate(xr)
client.simSetCameraOrientation("0", q)
filename = 'c:/temp/' + "xr"
png_image = client.simGetImage(str(0), airsim.ImageType.Scene)
def main():
    # create data directory
    data_directory = os.path.join(
        DATA_PATH,
        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"),
    )
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)

    # connect to the AirSim simulator
    client = airsim.MultirotorClient()
    client.confirmConnection()

    # make the multirotor take off
    client.enableApiControl(True)
    client.armDisarm(True)
    client.takeoffAsync().join()

    try:
        # initialize collision record
        collision_record = []
        while len(collision_record) < COLLISION_COUNT:
            # initialize flying position and orientation
            while True:
                position = [
                    random.uniform(start_range[0], start_range[1])
                    for start_range in FLYING_START
                ]
                orientation = random.uniform(-math.pi, math.pi)
                client.simSetVehiclePose(
                    airsim.Pose(
                        airsim.Vector3r(*position),
                        airsim.Quaternionr(
                            0, 0, math.sin(orientation / 2), math.cos(orientation / 2)),
                    ),
                    ignore_collision=True,  # was misspelled "ignore_collison"
                )
                if not client.simGetCollisionInfo().has_collided:
                    break

            # initialize flying deadline
            deadline = datetime.datetime.now() + datetime.timedelta(
                seconds=FLYING_DURATION)

            # initialize image queue
            frame_queue = collections.deque(maxlen=COLLISION_FRAME)

            # start flying
            client.moveByVelocityZAsync(
                vx=FLYING_SPEED * math.cos(orientation),
                vy=FLYING_SPEED * math.sin(orientation),
                z=position[2],
                drivetrain=airsim.DrivetrainType.ForwardOnly,
                duration=FLYING_DURATION,
            )
            while True:
                # gather images from the camera
                responses = client.simGetImages([
                    airsim.ImageRequest(
                        "front_center",
                        airsim.ImageType.Scene,
                        pixels_as_float=False,
                    ),
                    airsim.ImageRequest(
                        "front_center",
                        airsim.ImageType.DepthPlanner,
                        pixels_as_float=True,
                    ),
                    airsim.ImageRequest(
                        "front_center",
                        airsim.ImageType.DepthPerspective,
                        pixels_as_float=True,
                    ),
                    airsim.ImageRequest(
                        "front_center",
                        airsim.ImageType.Segmentation,
                        pixels_as_float=False,
                    ),
                ])

                # push images into the queue
                frame_queue.appendleft({
                    "scene": responses[0].image_data_uint8,
                    "depth_planner": airsim.get_pfm_array(responses[1]),
                    "depth_perspective": airsim.get_pfm_array(responses[2]),
                    "segmentation": responses[3].image_data_uint8,
                })

                # check if a collision occurred
                collision_info = client.simGetCollisionInfo()
                if collision_info.has_collided:
                    # check if the number of pictures is enough
                    if len(frame_queue) < COLLISION_FRAME:
                        print(f"not enough pictures before this collision: "
                              f"{len(frame_queue)}")
                        break

                    # save the collision info with a custom timestamp
                    timestamp = str(
                        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
                    collision_record.append({
                        "normal": collision_info.normal.to_numpy_array().tolist(),
                        "impact_point": collision_info.impact_point.to_numpy_array().tolist(),
                        "position": collision_info.position.to_numpy_array().tolist(),
                        "penetration_depth": collision_info.penetration_depth,
                        "time_stamp": timestamp,
                        "object_name": collision_info.object_name,
                        "object_id": collision_info.object_id,
                    })
                    with open(os.path.join(data_directory, "collision_record.json"),
                              "w") as record_file:
                        json.dump(collision_record, record_file)

                    # create image directories
                    scene_directory = os.path.join(data_directory, "scene", timestamp)
                    depth_planner_directory = os.path.join(
                        data_directory, "depth_planner", timestamp)
                    depth_perspective_directory = os.path.join(
                        data_directory, "depth_perspective", timestamp)
                    segmentation_directory = os.path.join(
                        data_directory, "segmentation", timestamp)
                    os.makedirs(scene_directory)
                    os.makedirs(depth_planner_directory)
                    os.makedirs(depth_perspective_directory)
                    os.makedirs(segmentation_directory)

                    # save the captured images
                    frame_index = 1
                    while len(frame_queue) > 0:
                        frame = frame_queue.pop()
                        base_name = str(frame_index).zfill(3)
                        airsim.write_file(
                            os.path.join(scene_directory, base_name + ".png"),
                            frame["scene"],
                        )
                        airsim.write_pfm(
                            os.path.join(depth_planner_directory, base_name + ".pfm"),
                            frame["depth_planner"],
                        )
                        # was writing into depth_planner_directory, overwriting
                        # the planner depth files just saved above
                        airsim.write_pfm(
                            os.path.join(depth_perspective_directory, base_name + ".pfm"),
                            frame["depth_perspective"],
                        )
                        airsim.write_file(
                            os.path.join(segmentation_directory, base_name + ".png"),
                            frame["segmentation"],
                        )
                        frame_index += 1

                    # finish this collision
                    print(f"collision record #{len(collision_record)} is saved")
                    break
                elif datetime.datetime.now() > deadline:
                    print("overdue the deadline")
                    break

        print(f"{COLLISION_COUNT} collision datasets are saved.")
        print("returning to origin state...")
        time.sleep(5)
    except Exception:
        logging.exception("")

    client.reset()
def __init__(
        self,
        num_steering_angles,
        depth_settings_md_size,
        scene_settings_md_size,
        max_num_steps_in_episode=10**4,
        fraction_of_top_of_scene_to_drop=0.0,
        fraction_of_bottom_of_scene_to_drop=0.0,
        fraction_of_top_of_depth_to_drop=0.0,
        fraction_of_bottom_of_depth_to_drop=0.0,
        seconds_pause_between_steps=0.6,  # gives the random number generator time to work (wasn't working before)
        seconds_between_collision_in_sim_and_register=1.0,  # note: avg 4.12 steps per IRL sec on school computer
        lambda_function_to_apply_to_depth_pixels=None,
        need_channel_dimension=False,
        proximity_instead_of_depth_planner=False,
        concat_x_y_coords_to_channel_dim=False,
        convert_scene_to_grayscale=False,
        want_depth_image=True,
        train_frequency=4):
    """
    Note: lambda_function_to_apply_to_depth_pixels is applied to each pixel,
    and the looping through the image is handled by this class. Therefore,
    it takes only one parameter, call it pixel_value or something. The
    default is a do-nothing function.
    """
    self.train_frequency = train_frequency
    self.want_depth_image = want_depth_image
    if proximity_instead_of_depth_planner:
        self.want_depth_image = True

    # the first two must reflect settings.json
    self.convert_scene_to_grayscale = convert_scene_to_grayscale
    if convert_scene_to_grayscale:
        self.SCENE_INPUT_SHAPE = (scene_settings_md_size[0], scene_settings_md_size[1] * 3)
    else:
        self.SCENE_INPUT_SHAPE = (scene_settings_md_size[0], scene_settings_md_size[1] * 3, 3)
    self.DEPTH_PLANNER_INPUT_SHAPE = (depth_settings_md_size[0], depth_settings_md_size[1] * 3)
    self.SENSOR_INPUT_SHAPE = (7, )
    self.PROXIMITY_INPUT_SHAPE = (0, )

    self.proximity_instead_of_depth_planner = proximity_instead_of_depth_planner
    if self.proximity_instead_of_depth_planner is True:
        self.proximity_sensor = ProximitySensor(max_distance=13.0,
                                                kill_distance=3.0,
                                                num_proximity_sectors=16)
        self.PROXIMITY_INPUT_SHAPE = (self.proximity_sensor.num_proximity_sectors, )

    self.need_channel_dimension = need_channel_dimension
    self.concat_x_y_coords_to_channel_dim = concat_x_y_coords_to_channel_dim
    if self.concat_x_y_coords_to_channel_dim is True:
        assert self.need_channel_dimension == True

    # sim admin stuff
    self.seconds_pause_between_steps = seconds_pause_between_steps
    self.seconds_between_collision_in_sim_and_register = seconds_between_collision_in_sim_and_register

    # image stuff
    if lambda_function_to_apply_to_depth_pixels is None:
        self.PHI = None  # PHI from the DQN algorithm
    else:
        # np.vectorize is ~10x faster than for-looping through pixels
        self.PHI = np.vectorize(lambda_function_to_apply_to_depth_pixels)
    self.first_scene_row_idx = int(self.SCENE_INPUT_SHAPE[0] *
                                   fraction_of_top_of_scene_to_drop)
    self.last_scene_row_idx = int(self.SCENE_INPUT_SHAPE[0] *
                                  (1 - fraction_of_bottom_of_scene_to_drop))
    self.first_depth_planner_row_idx = int(self.DEPTH_PLANNER_INPUT_SHAPE[0] *
                                           fraction_of_top_of_depth_to_drop)
    self.last_depth_planner_row_idx = int(self.DEPTH_PLANNER_INPUT_SHAPE[0] *
                                          (1 - fraction_of_bottom_of_depth_to_drop))
    # print('frac bottom depth', fraction_of_bottom_of_depth_to_drop)  # debug
    # print('1st row idx scene', self.first_scene_row_idx)  # debug
    # print('last row idx depth', self.last_depth_planner_row_idx)  # debug
    assert self.first_scene_row_idx < self.last_scene_row_idx
    assert self.first_depth_planner_row_idx < self.last_depth_planner_row_idx

    # note the shapes of inputs to the neural network; can retrieve in keras_drl_steering....py
    self.SCENE_INPUT_SHAPE = (self.last_scene_row_idx - self.first_scene_row_idx,
                              self.SCENE_INPUT_SHAPE[1])
    self.DEPTH_PLANNER_INPUT_SHAPE = (self.last_depth_planner_row_idx -
                                      self.first_depth_planner_row_idx,
                                      self.DEPTH_PLANNER_INPUT_SHAPE[1])
    self.SENSOR_INPUT_SHAPE = self.SENSOR_INPUT_SHAPE

    # steering stuff
    self.action_space = spaces.Discrete(num_steering_angles)
    self.action_space_steering = np.linspace(-1.0, 1.0, num_steering_angles).tolist()
    self.car_controls = airsim.CarControls(
        throttle=0.6, steering=0.0, is_manual_gear=True,
        manual_gear=1)  # should constrain speed to < ~18 mph
    # m/s = mph x 0.44704
    self.max_meters_per_second = 35.0 * 0.44704  # 35.0 mph is a reasonable max for that windy road

    # in-sim episode handling
    self.episode_step_count = 0
    self.total_step_count = 0
    self.steps_per_episode = max_num_steps_in_episode
    self.total_num_episodes = 0  # over all episodes
    self.total_num_steps = 0  # over all episodes

    # collision info for emergency resets and reward function calculation
    self.collisions_in_a_row = 0
    self.max_acceptable_collisions_in_a_row = 6
    # note: if stuck, collisions will keep piling on;
    # also, being stuck in collisions can cause a glitch through the map
    self.obj_id_of_last_collision = -123456789  # anything < -1 is unassociated w/ an obj in sim (afaik)

    # connect to the client; we're ready to set up our cameras
    self.client = airsim.CarClient()
    self.client.confirmConnection()
    self.client.enableApiControl(True)
    self.left_cam_name = '2'
    self.right_cam_name = '1'
    self.forward_cam_name = '0'
    self.backward_cam_name = '4'
    self._setup_my_cameras()
    self.depth_planner_dilation_kernel = np.ones((3, 5), np.uint8) * 255

    # requests are also returned in this order; ordered by how the images are
    # concatenated, from left to right
    if self.want_depth_image:
        self.list_of_img_request_objects = [
            airsim.ImageRequest(camera_name=self.left_cam_name,
                                image_type=airsim.ImageType.Scene,
                                pixels_as_float=False,
                                compress=False),
            airsim.ImageRequest(camera_name=self.forward_cam_name,
                                image_type=airsim.ImageType.Scene,
                                pixels_as_float=False,
                                compress=False),
            airsim.ImageRequest(camera_name=self.right_cam_name,
                                image_type=airsim.ImageType.Scene,
                                pixels_as_float=False,
                                compress=False),
            airsim.ImageRequest(camera_name=self.left_cam_name,
                                image_type=airsim.ImageType.DepthPlanner,
                                pixels_as_float=True,
                                compress=False),
            airsim.ImageRequest(camera_name=self.forward_cam_name,
                                image_type=airsim.ImageType.DepthPlanner,
                                pixels_as_float=True,
                                compress=False),
            airsim.ImageRequest(camera_name=self.right_cam_name,
                                image_type=airsim.ImageType.DepthPlanner,
                                pixels_as_float=True,
                                compress=False)
        ]
    else:
        self.list_of_img_request_objects = [
            airsim.ImageRequest(camera_name=self.left_cam_name,
                                image_type=airsim.ImageType.Scene,
                                pixels_as_float=False,
                                compress=False),
            airsim.ImageRequest(camera_name=self.forward_cam_name,
                                image_type=airsim.ImageType.Scene,
                                pixels_as_float=False,
                                compress=False),
            airsim.ImageRequest(camera_name=self.right_cam_name,
                                image_type=airsim.ImageType.Scene,
                                pixels_as_float=False,
                                compress=False)
        ]

    # ';' ordering: the sim window shows 1 2 3 4 (w x y z) while Quaternionr()
    # takes 2 3 4 1 (x y z w)
    self.reset_poses = [
        airsim.Pose(
            airsim.Vector3r(-727.012, -7.407, -.7),  # safe
            airsim.Quaternionr(0, 0, .016, 1.0)),
        airsim.Pose(
            airsim.Vector3r(-169.546, -316.207, -0.72),  # safe
            airsim.Quaternionr(0.0, 0.0, .712, 0.702)),
        airsim.Pose(
            airsim.Vector3r(-292.415, 32.229, -0.7),  # safe
            airsim.Quaternionr(0.0, 0.0, -.679, 0.734)),
        airsim.Pose(
            airsim.Vector3r(222.984, -491.947, -0.7),  # safe
            airsim.Quaternionr(0.0, 0.0, .716, .698)),
        airsim.Pose(
            airsim.Vector3r(311.298, -10.177, -0.688),  # safe
            airsim.Quaternionr(0.0, 0.0, -1.0, .006)),
        airsim.Pose(
            airsim.Vector3r(-191.452, -474.923, -0.689),  # safe
            airsim.Quaternionr(0.0, 0.0, .008, 1.0)),
        airsim.Pose(
            airsim.Vector3r(-316.513, 144.906, -0.688),  # safe
            airsim.Quaternionr(0.0, 0.0, -1.0, 0.003)),
        airsim.Pose(
            airsim.Vector3r(16.631, -38.537, -.9),  # safe - aim @ roundabout center and can go L or R
            airsim.Quaternionr(-0.005, 0.011, 0.296, 0.955)),
        airsim.Pose(
            airsim.Vector3r(-316.513, 144.906, -0.688),  # safe - aim @ roundabout but better to go L
            airsim.Quaternionr(0.0, 0.0, -1.0, .003)),
        airsim.Pose(
            airsim.Vector3r(33.322, -528.89, -0.688),  # safe
            airsim.Quaternionr(0.0, 0.0, -0.002, 1.0))
    ]

    # instead of driving around aimlessly, try to arrive at one destination
    # from one starting point
    """
    self.beginning_coords = airsim.Pose(airsim.Vector3r(12.314, -31.069, -0.93),  # safe
                                        airsim.Quaternionr(-0.004, 0.008, 0.236, 0.972))
    self.ending_coords = airsim.Vector3r(122.288, 12.283, -1.0)  # appx 298 m from start; mostly straight
    """
    self.beginning_coords = airsim.Pose(
        airsim.Vector3r(-758.236, -175.518, -0.66),  # safe
        airsim.Quaternionr(0.0, 0.0, -0.723, 0.691))
    # self.ending_coords = airsim.Vector3r(109.694, -396.685, -1.0)  # appx 298 m from start; mostly straight
    self.ending_coords = airsim.Vector3r(-702.551, -228.107, -1.0)  # appx 298 m from start; mostly straight

    # units are meters
    self.ending_circle_radius = 10.0  # if the car is within a circle of this radius, it has arrived at the destination

    # stuff used in the reward function
    self.total_distance_to_destination = self._manhattan_distance(
        self.ending_coords.x_val, self.beginning_coords.position.x_val,
        self.ending_coords.y_val, self.beginning_coords.position.y_val)
    self.current_distance_from_destination = self.total_distance_to_destination
    self.previous_coords = self.beginning_coords.position
    self.elapsed_episode_time_in_simulation_secs = 1.0  # tracks time between unpause and pause in step()
    self.current_distance_travelled_towards_destination = 0.0
    self.episode_time_in_irl_seconds = 0.0  # only used for debugging and steps/second measurement,
    # not for tracking progress in sim; note: sim steps per real-life second is ~6
    self.sim_is_paused = False
    print('The car will need to travel {} simulation meters to arrive at the destination'
          .format(self.total_distance_to_destination))
def publishState(self):
    drone_state = self.client.getMultirotorState(vehicle_name="Drone1")
    # drone_state = self.client.simGetVehiclePose()
    pos_ned = drone_state.kinematics_estimated.position
    orientation_ned = drone_state.kinematics_estimated.orientation
    pos = airsim.Vector3r(pos_ned.x_val, pos_ned.y_val, pos_ned.z_val)
    # Note: Quaternionr takes (x_val, y_val, z_val, w_val); double-check this
    # argument order against the intended frame convention.
    orientation = airsim.Quaternionr(orientation_ned.w_val, orientation_ned.z_val,
                                     orientation_ned.x_val, orientation_ned.y_val)

    if (distance.euclidean([self.xGoal, self.yGoal], [pos.x_val, pos.y_val]) < 5):
        if (self.moving):
            self.client.moveByVelocityAsync(0, 0, 0, 10, vehicle_name="Drone1")
            print("CLOSE ENOUGH STOP")
            msg = Int16()
            msg.data = 1
            self.goalReached_pub.publish(msg)
            self.moving = False
    else:
        self.moving = True

    sim_pose_msg = PoseStamped()
    sim_pose_msg.pose.position.x = pos.x_val
    sim_pose_msg.pose.position.y = pos.y_val
    sim_pose_msg.pose.position.z = pos.z_val
    sim_pose_msg.pose.orientation.w = orientation.w_val
    sim_pose_msg.pose.orientation.x = orientation.x_val
    sim_pose_msg.pose.orientation.y = orientation.y_val
    sim_pose_msg.pose.orientation.z = orientation.z_val
    self.state_pub.publish(sim_pose_msg)

    # tarCarPose = self.client.simGetObjectPose("Target_Transform_46")
    tarCarPose = self.client.simGetObjectPose("Target")
    # print(tarCarPose)
    tarPoseMsg = PoseStamped()
    tarPoseMsg.pose.position.x = tarCarPose.position.x_val
    tarPoseMsg.pose.position.y = tarCarPose.position.y_val
    tarPoseMsg.pose.position.z = tarCarPose.position.z_val
    tarPoseMsg.pose.orientation.w = 1
    tarPoseMsg.pose.orientation.x = 0
    tarPoseMsg.pose.orientation.y = 0
    tarPoseMsg.pose.orientation.z = 0
    # tarFootPose = self.client.simGetObjectPose("ThirdPersonCharacter")
    # print("Target Car Pose>>>>>>>>>>>>>>")
    # print(tarCarPose)
    # print("Target Foot Pose>>>>>>>>>>>>>")
    # print(tarFootPose)
    self.tarCar_pub.publish(tarPoseMsg)
    # self.tarFoot_pub.publish(tarFootPose)

    # the second term used x_val in the original, a y/x typo
    dis = np.sqrt((pos.x_val - tarCarPose.position.x_val)**2 +
                  (pos.y_val - tarCarPose.position.y_val)**2)
    obs = "Null"
    if (dis < 75):
        obs = "Captured"
    elif (dis < 150):
        obs = "Detect"
    self.obs_pub.publish(obs)
    return
def vec2quat(vector):
    # build a pure quaternion (w = 0) from a 3D vector
    quaternion = airsim.Quaternionr(vector.x_val, vector.y_val, vector.z_val, 0)
    return quaternion
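# Usage sketch (assumes Quaternionr's * is the Hamilton product, as used in the
# snippets above): a pure quaternion is the form needed to rotate a vector v by
# a unit quaternion q via q * vec2quat(v) * q.conjugate().
v = airsim.Vector3r(1.0, 0.0, 0.0)
q = airsim.Quaternionr(0.0, 0.0, 0.7071068, 0.7071068)  # 90-degree yaw
rotated = q * vec2quat(v) * q.conjugate()  # rotated.y_val ~ 1, x_val ~ 0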