Example #1
    def __init__(self, instance_id=0, cv_mode=False, is_real=False):
        self.instance_id = instance_id
        self.env_id = None
        self.params = get_current_parameters()["PomdpInterface"]
        step_interval = self.params["step_interval"]
        flight_height = self.params["flight_height"]
        self.max_horizon = self.params["max_horizon"]
        self.scale = self.params["scale"]
        self.is_real = is_real

        self.drone = drone_controller_factory(simulator=not is_real)(instance=instance_id, flight_height=flight_height)
        rate = step_interval / self.drone.get_real_time_rate()

        self.rate = Rate(rate)
        print("Adjusted rate: " + str(rate), "Step interval: ", step_interval)

        self.segment_path = None

        self.rewards = []
        self.reward_weights = []

        self.instruction_set = None
        self.current_segment_ordinal = None
        self.current_segment_idx = None
        self.cv_mode = cv_mode

        self.seg_start = 0
        self.seg_end = 0
        self.stepcount = 0

        self.instruction_override = None
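Example #1's constructor divides the configured step_interval by the controller's real-time rate and wraps the result in Rate. Rate itself is not shown in these examples; the following is a minimal sketch of a fixed-interval sleeper with the reset(), sleep(), and sleep_n_intervals() methods the snippets call, written from how it is used here rather than from the actual drones.airsim_interface.rate module.

import time

class Rate:
    """Minimal sketch of a fixed-interval rate limiter (assumed interface)."""
    def __init__(self, interval):
        self.interval = interval  # seconds between ticks
        self.last = time.time()

    def reset(self):
        self.last = time.time()

    def sleep(self, quiet=False):
        # Sleep for whatever remains of the current interval
        remaining = self.interval - (time.time() - self.last)
        if remaining > 0:
            time.sleep(remaining)
        elif not quiet:
            print("Rate: falling behind by", -remaining, "s")
        self.last = time.time()

    def sleep_n_intervals(self, n):
        # Used by the controllers below to wait several ticks in a row
        for _ in range(n):
            self.sleep(quiet=True)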
Example #2
    def __init__(self, instance=0, flight_height=100.0):
        self.instance = instance
        self.port = PORTS[instance]
        self.clock_speed = self._read_clock_speed()
        self.control_interval = 0.5
        self.flight_height = flight_height  # Flight height is in config units, as with everything
        self.units = UnrealUnits()
        print("DroneController Initialize")
        self.rate = Rate(0.1 / self.clock_speed)

        self.samples = []

        killAirSim()
        self._write_airsim_settings()
        startAirSim(self, instance, self.port)
Example #3
    def __init__(self, instance=0, flight_height=100.0):
        super(DroneController, self).__init__()
        self.instance = instance
        self.port = PORTS[instance]
        self.clock_speed = self._read_clock_speed()
        self.control_interval = 5.0
        self.flight_height = -flight_height

        print("DroneController Initialize")
        self.rate = Rate(0.1 / self.clock_speed)
        self.height_offset = P.get_current_parameters()["DroneController"].get("start_height_offset")

        self.samples = []

        killAirSim()
        self._write_airsim_settings()
        startAirSim(self, instance, self.port)
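Example #1 obtains its controller via drone_controller_factory(simulator=not is_real), while Examples #2 and #3 show simulated controllers' constructors directly. The factory itself is not included in these examples; below is a plausible sketch. The module paths and the real-drone class name DroneControllerReal are assumptions for illustration, not the project's actual layout.

def drone_controller_factory(simulator=True):
    # Return the controller *class* (not an instance); the caller then
    # invokes it with instance= and flight_height=, as in Example #1.
    if simulator:
        from drones.airsim_interface.droneController import DroneController  # assumed path
        return DroneController
    else:
        from drones.real.droneControllerReal import DroneControllerReal  # hypothetical path
        return DroneControllerReal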
Example #4
    def __init__(self, instance_id=0, cv_mode=False):
        self.instance_id = instance_id
        self.env_id = None
        self.params = get_current_parameters()["PomdpInterface"]
        step_interval = self.params["step_interval"]
        flight_height = self.params["flight_height"]
        self.scale = self.params["scale"]
        self.drone = DroneController(instance=instance_id,
                                     flight_height=flight_height)

        rate = step_interval / self.drone.clock_speed
        self.rate = Rate(rate)
        print("Adjusted rate: " + str(rate), "Step interval: ", step_interval)
        self.path = None
        self.reward = None
        self.reward_shaping = None
        self.instruction_set = None
        self.current_segment = None
        self.done = None
        self.cv_mode = cv_mode

        self.seg_start = 0
        self.seg_end = 0
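reset() and step() in Examples #4 and #8 return DroneState(image, drone_state) objects, Example #6 reads state.image, and Example #10 calls state.get_pos_2d(). The class itself is imported from elsewhere; a minimal sketch, assuming it is a simple container pairing the camera image with the state vector:

class DroneState:
    """Minimal sketch: pairs a camera image with the drone's state vector."""
    def __init__(self, image, state):
        self.image = image  # HxWxC numpy array from DroneController.get_state()
        self.state = state  # concatenated position/orientation/velocity/camera vector

    def get_pos_2d(self):
        # Example #10's _is_out_of_bounds uses this; assume x, y are the
        # first two entries of the state vector.
        return self.state[0:2]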
Example #5
    def __init__(self):
        self.mon = MonitorSuper()
        self.thread = threading.Thread(target=self.run, args=())
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        self.mon.run()

    def get_command(self):
        return self.mon.current_vel


initialize_experiment("nl_datacollect_cage")

teleoper = KeyTeleop()
rate = Rate(0.1)

env = PomdpInterface()

train_instructions, dev_instructions, test_instructions, _ = get_all_instructions(
)

count = 0
stuck_count = 0


def show_depth(image):
    grayscale = np.mean(image[:, :, 0:3], axis=2)
    depth = image[:, :, 3]
    comb = np.stack([grayscale, grayscale, depth], axis=2)
    comb -= comb.min()
    comb /= (comb.max() + 1e-9)  # normalize to [0, 1]; matches the fuller variant of this helper in Example #12
Example #6
def interactive_demo():

    P.initialize_experiment()
    InteractAPI.launch_ui()

    rate = Rate(0.1)

    env = PomdpInterface(
        is_real=get_current_parameters()["Setup"]["real_drone"])
    train_instructions, dev_instructions, test_instructions, corpus = get_all_instructions(
    )
    all_instr = {
        **train_instructions,
        **dev_instructions,
        **test_instructions  # was **train_instructions twice; test_instructions was unpacked above but never merged
    }
    token2term, word2token = get_word_to_token_map(corpus)

    # Run on dev set
    interact_instructions = dev_instructions

    env_range_start = get_current_parameters()["Setup"].get(
        "env_range_start", 0)
    env_range_end = get_current_parameters()["Setup"].get(
        "env_range_end", 10e10)
    interact_instructions = {
        k: v
        for k, v in interact_instructions.items()
        if env_range_start < k < env_range_end
    }

    count = 0
    stuck_count = 0

    model, _ = load_model(get_current_parameters()["Setup"]["model"])

    InteractAPI.write_empty_instruction()
    InteractAPI.write_real_instruction("None")
    instruction_str = InteractAPI.read_instruction_file()
    print("Initial instruction: ", instruction_str)

    for instruction_sets in interact_instructions.values():
        for set_idx, instruction_set in enumerate(instruction_sets):
            env_id = instruction_set['env']
            env.set_environment(env_id, instruction_set["instructions"])

            presenter = Presenter()
            cumulative_reward = 0
            for seg_idx in range(len(instruction_set["instructions"])):

                print(f"RUNNING ENV {env_id} SEG {seg_idx}")

                real_instruction_str = instruction_set["instructions"][
                    seg_idx]["instruction"]
                InteractAPI.write_real_instruction(real_instruction_str)
                valid_segment = env.set_current_segment(seg_idx)
                if not valid_segment:
                    continue
                state = env.reset(seg_idx)

                keep_going = True
                while keep_going:
                    InteractAPI.write_real_instruction(real_instruction_str)

                    while True:
                        cv2.waitKey(200)
                        instruction = InteractAPI.read_instruction_file()
                        if instruction == "CMD: Next":
                            print("Advancing")
                            keep_going = False
                            InteractAPI.write_empty_instruction()
                            break
                        elif instruction == "CMD: Reset":
                            print("Resetting")
                            env.reset(seg_idx)
                            InteractAPI.write_empty_instruction()
                        elif len(instruction.split(" ")) > 1:
                            instruction_str = instruction
                            break

                    if not keep_going:
                        continue

                    env.override_instruction(instruction_str)
                    tok_instruction = tokenize_instruction(
                        instruction_str, word2token)

                    state = env.reset(seg_idx)
                    print("Executing: f{instruction_str}")
                    while True:
                        rate.sleep()
                        action, internals = model.get_action(
                            state, tok_instruction)

                        state, reward, done, expired, oob = env.step(action)
                        cumulative_reward += reward
                        presenter.show_sample(state, action, reward,
                                              cumulative_reward,
                                              instruction_str)
                        #show_depth(state.image)
                        if done:
                            break
                    InteractAPI.write_empty_instruction()
                    print("Segment finished!")
        print("Env finished!")
Example #7
import os

import scipy.misc

from drones.airsim_interface.rate import Rate
from env_config.definitions.landmarks import LANDMARK_RADII
from data_io.paths import get_landmark_images_dir
from pomdp.pomdp_interface import PomdpInterface
from visualization import Presenter
"""
This script is used to take pictures of various landmarks
"""
from parameters import parameter_server as P
P.initialize_experiment("nl_datacollect")

rate = Rate(0.1)

IMAGES_PER_LANDMARK_TRAIN = 1000
IMAGES_PER_LANDMARK_TEST = 200

env = PomdpInterface()

count = 0

presenter = Presenter()


def save_landmark_img(state, landmark_name, i, eval):
    data_dir = get_landmark_images_dir(landmark_name, eval)
    os.makedirs(data_dir, exist_ok=True)
    full_path = os.path.join(data_dir, landmark_name + "_" + str(i) + ".jpg")
    # Assumed completion: persist the RGB channels; scipy.misc is imported above for this purpose
    scipy.misc.imsave(full_path, state.image[:, :, 0:3])
Example #8
class PomdpInterface:
    """Unreal must be running before this is called for now..."""
    def __init__(self, instance_id=0, cv_mode=False):
        self.instance_id = instance_id
        self.env_id = None
        self.params = get_current_parameters()["PomdpInterface"]
        step_interval = self.params["step_interval"]
        flight_height = self.params["flight_height"]
        self.scale = self.params["scale"]
        self.drone = DroneController(instance=instance_id,
                                     flight_height=flight_height)

        rate = step_interval / self.drone.clock_speed
        self.rate = Rate(rate)
        print("Adjusted rate: " + str(rate), "Step interval: ", step_interval)
        self.path = None
        self.reward = None
        self.reward_shaping = None
        self.instruction_set = None
        self.current_segment = None
        self.done = None
        self.cv_mode = cv_mode

        self.seg_start = 0
        self.seg_end = 0

    def _get_reward(self, state, drone_action):
        # If we don't have an instruction set, we can't provide a reward
        if self.instruction_set is None:
            return 0, False

        # If the agent already executed all the actions, return 0 reward and done thereafter
        if self.done:
            return 0, self.done

        # Obtain the reward from the reward function
        reward, seg_complete = self.reward.get_reward(state, drone_action)
        reward += self.reward_shaping.get_reward(state, drone_action)

        # If the segment was completed, tell the reward function that we will be following the next segment
        if seg_complete:
            self.current_segment += 1
            if self.current_segment < len(self.instruction_set):
                self.seg_start = self.seg_end
                self.seg_end = self.instruction_set[
                    self.current_segment]["end_idx"]
                self.reward.set_current_segment(self.seg_start, self.seg_end)
            # Unless this was the last segment, in which case we set this pomdp as done and return accordingly
            else:
                self.done = True
        return reward, self.done

    def set_environment(self, env_id, instruction_set=None, fast=False):
        """
        Switch the simulation to env_id. Causes the environment configuration from
        configs/configs/random_config_<env_id>.json to be loaded and landmarks arranged in the simulator
        :param env_id: integer ID
        :param instruction_set: Instruction set to follow for displaying instructions
        :param fast: Set to True to skip a delay, at the risk of the environment not loading before subsequent function calls
        :return:
        """
        self.env_id = env_id

        self.drone.set_current_env_id(env_id, self.instance_id)
        self.drone.reset_environment()

        # This is necessary to allow the new frame to be rendered with the new pomdp, so that the drone doesn't
        # accidentally see the old pomdp at the start
        if not fast:
            time.sleep(0.1)

        self.current_segment = 0
        self.seg_start = 0
        self.seg_end = 0

        self.path = load_path(env_id)
        if self.path is not None:
            self.reward = FollowPathReward(self.path)
            self.reward_shaping = ImitationReward(self.path)
            self.seg_end = len(self.path) - 1

        self.instruction_set = instruction_set

        if instruction_set is not None:
            self.reward.set_current_segment(self.seg_start, self.seg_end)
            if len(instruction_set) == 0:
                print("OOOPS! Instruction set of length 0!" + str(env_id))
                return

            self.seg_end = instruction_set[self.current_segment]["end_idx"]

            if self.seg_end >= len(self.path):
                print("OOOPS! For env " + str(env_id) + " segment " +
                      str(self.current_segment) + " end oob: " +
                      str(self.seg_end))
                self.seg_end = len(self.path) - 1

    def reset(self, seg_idx=0, landmark_pos=None, random_yaw=0):
        self.rate.reset()
        self.drone.reset_environment()
        start_pos, start_angle = self.get_start_pos(seg_idx, landmark_pos)

        if self.cv_mode:
            start_rpy = start_angle
            self.drone.teleport_3d(start_pos, start_rpy, pos_in_airsim=False)

        else:
            start_yaw = start_angle
            if self.params["randomize_init_pos"]:
                yaw_offset = float(
                    np.random.normal(0, self.params["init_yaw_variance"], 1))
                pos_offset = np.random.normal(0,
                                              self.params["init_pos_variance"],
                                              2)
                print("offset:", pos_offset, yaw_offset)

                start_pos = np.asarray(start_pos) + pos_offset
                start_yaw = start_yaw + yaw_offset
            self.drone.teleport_to(start_pos, start_yaw)

        self.rate.sleep(quiet=True)
        time.sleep(0.2)
        drone_state, image = self.drone.get_state()
        self.done = False

        return DroneState(image, drone_state)

    def get_start_pos(self, seg_idx=0, landmark_pos=None):

        # If we are not in CV mode, then we have a path to follow and track reward for following it closely
        if not self.cv_mode:
            if self.instruction_set:
                start_pos = self.instruction_set[seg_idx]["start_pos"]
                start_angle = self.instruction_set[seg_idx]["start_yaw"]
            else:
                start_pos = [0, 0, 0]
                start_angle = 0

        # If we are in CV mode, there is no path to be followed. Instead we are collecting images of landmarks.
        # Initialize the drone to face the position provided. TODO: Generalize this to other CV uses
        else:
            drone_angle = random.uniform(0, 2 * math.pi)
            drone_dist_mult = random.uniform(0, 1)
            drone_dist = 60 + drone_dist_mult * 300
            drone_pos_dir = np.asarray(
                [math.cos(drone_angle),
                 math.sin(drone_angle)])

            start_pos = landmark_pos + drone_pos_dir * drone_dist
            start_height = random.uniform(-1.5, -2.5)
            start_pos = [start_pos[0], start_pos[1], start_height]

            drone_dir = -drone_pos_dir
            start_yaw = vec_to_yaw(drone_dir)
            start_roll = 0
            start_pitch = 0
            start_angle = [start_roll, start_pitch, start_yaw]

        return start_pos, start_angle

    def reset_to_random_cv_env(self, landmark_name=None):
        config, pos_x, pos_z = make_config_with_landmark(landmark_name)
        self.drone.set_current_env_from_config(config,
                                               instance_id=self.instance_id)
        time.sleep(0.2)
        landmark_pos_2d = np.asarray([pos_x, pos_z])
        self.cv_mode = True
        return self.reset(landmark_pos=landmark_pos_2d)

    def snap_birdseye(self, fast=False, small_env=False):
        self.drone.reset_environment()
        # TODO: Check environment dimensions
        if small_env:
            pos_birdseye_as = [2.25, 2.35, -3.92]
            rpy_birdseye_as = [-1.3089, 0, 0]  # For 15 deg camera
        else:
            pos_birdseye_as = [15, 15, -25]
            rpy_birdseye_as = [-1.3089, 0, 0]  # For 15 deg camera
        self.drone.teleport_3d(pos_birdseye_as, rpy_birdseye_as, fast=fast)
        _, image = self.drone.get_state(depth=False)
        return image

    def snap_cv(self, pos, quat):
        self.drone.reset_environment()
        pos_birdseye_as = pos
        rpy_birdseye_as = euler.quat2euler(quat)
        self.drone.teleport_3d(pos_birdseye_as,
                               rpy_birdseye_as,
                               pos_in_airsim=True,
                               fast=True)
        time.sleep(0.3)
        self.drone.teleport_3d(pos_birdseye_as,
                               rpy_birdseye_as,
                               pos_in_airsim=True,
                               fast=True)
        time.sleep(0.3)
        _, image = self.drone.get_state(depth=False)
        return image

    def get_current_nl_command(self):
        if self.current_segment < len(self.instruction_set):
            return self.instruction_set[self.current_segment]["instruction"]
        return "FINISHED!"

    def step(self, action):
        """
        Takes an action, executes it in the simulation and returns the state, reward and done indicator
        :param action: array of length 4: [forward velocity, left velocity, yaw rate, stop probability]
        :return: DroneState object, reward (float), done (bool)
        """
        #Action
        drone_action = action[0:3]
        drone_stop = action[3]
        #print("Action: ", action)
        if drone_stop > 0.99:
            drone_action = np.array([0, 0, 0])

        self.drone.send_local_velocity_command(
            unnormalize_action(drone_action))
        self.rate.sleep(quiet=True)
        drone_state, image = self.drone.get_state()
        state = DroneState(image, drone_state)

        reward, done = self._get_reward(state, action)

        return state, reward, done
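The step() docstring above defines the action format as [forward velocity, left velocity, yaw rate, stop probability]. A minimal usage sketch against this Example #8 interface; the environment id and the action values are placeholders, not from the project:

import numpy as np

env = PomdpInterface()
env.set_environment(env_id=0)  # placeholder env id; no instruction set, so reward stays 0
state = env.reset()

for _ in range(100):
    action = np.array([0.5, 0.0, 0.0, 0.0])  # fly forward at half speed, don't stop
    state, reward, done = env.step(action)
    if done:
        break

# A stop probability above 0.99 makes step() zero out the velocity command
env.step(np.array([0.0, 0.0, 0.0, 1.0]))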
Example #9
def automatic_demo():

    P.initialize_experiment()
    instruction_display = InstructionDisplay()

    rate = Rate(0.1)

    env = PomdpInterface(
        is_real=get_current_parameters()["Setup"]["real_drone"])
    train_instructions, dev_instructions, test_instructions, corpus = get_all_instructions(
    )
    all_instr = {
        **train_instructions,
        **dev_instructions,
        **test_instructions  # was **train_instructions twice; test_instructions was unpacked above but never merged
    }
    token2term, word2token = get_word_to_token_map(corpus)

    # Run on dev set
    interact_instructions = dev_instructions

    env_range_start = get_current_parameters()["Setup"].get(
        "env_range_start", 0)
    env_range_end = get_current_parameters()["Setup"].get(
        "env_range_end", 10e10)
    interact_instructions = {
        k: v
        for k, v in interact_instructions.items()
        if env_range_start < k < env_range_end
    }

    model, _ = load_model(get_current_parameters()["Setup"]["model"])

    # Loop over the select few examples
    while True:

        for instruction_sets in interact_instructions.values():
            for set_idx, instruction_set in enumerate(instruction_sets):
                env_id = instruction_set['env']
                found_example = None
                for example in examples:
                    if example[0] == env_id:
                        found_example = example
                if found_example is None:
                    continue
                env.set_environment(env_id, instruction_set["instructions"])

                presenter = Presenter()
                cumulative_reward = 0
                for seg_idx in range(len(instruction_set["instructions"])):
                    if seg_idx != found_example[2]:
                        continue

                    print(f"RUNNING ENV {env_id} SEG {seg_idx}")

                    real_instruction_str = instruction_set["instructions"][
                        seg_idx]["instruction"]
                    instruction_display.show_instruction(real_instruction_str)
                    valid_segment = env.set_current_segment(seg_idx)
                    if not valid_segment:
                        continue
                    state = env.reset(seg_idx)

                    for i in range(START_PAUSE):
                        instruction_display.tick()
                        time.sleep(1)

                    # Tokenize once, after the pause, rather than on every pause tick
                    tok_instruction = tokenize_instruction(
                        real_instruction_str, word2token)

                    state = env.reset(seg_idx)
                    print("Executing: f{instruction_str}")
                    while True:
                        instruction_display.tick()
                        rate.sleep()
                        action, internals = model.get_action(
                            state, tok_instruction)
                        state, reward, done, expired, oob = env.step(action)
                        cumulative_reward += reward
                        #presenter.show_sample(state, action, reward, cumulative_reward, real_instruction_str)
                        #show_depth(state.image)
                        if done:
                            break

                    for i in range(END_PAUSE):
                        instruction_display.tick()
                        time.sleep(1)
                    print("Segment finished!")
                    instruction_display.show_instruction("...")

            print("Env finished!")
Example #10
class PomdpInterface:

    class EnvException(Exception):
        def __init__(self, message):
            super(PomdpInterface.EnvException, self).__init__(message)

    """Unreal must be running before this is called for now..."""
    def __init__(self, instance_id=0, cv_mode=False, is_real=False):
        self.instance_id = instance_id
        self.env_id = None
        self.params = get_current_parameters()["PomdpInterface"]
        step_interval = self.params["step_interval"]
        flight_height = self.params["flight_height"]
        self.max_horizon = self.params["max_horizon"]
        self.scale = self.params["scale"]
        self.is_real = is_real

        self.drone = drone_controller_factory(simulator=not is_real)(instance=instance_id, flight_height=flight_height)
        rate = step_interval / self.drone.get_real_time_rate()

        self.rate = Rate(rate)
        print("Adjusted rate: " + str(rate), "Step interval: ", step_interval)

        self.segment_path = None

        self.rewards = []
        self.reward_weights = []

        self.instruction_set = None
        self.current_segment_ordinal = None
        self.current_segment_idx = None
        self.cv_mode = cv_mode

        self.seg_start = 0
        self.seg_end = 0
        self.stepcount = 0

        self.instruction_override = None

    def _get_reward(self, state, drone_action, done_now):
        # If we don't have an instruction set, we can't provide a reward
        if self.instruction_set is None:
            raise ValueError("No instruction set: Can't provide a reward.")

        # Obtain the reward from the reward function
        rewards = [w * r(state, drone_action, done_now) for r, w in zip(self.rewards, self.reward_weights)]
        total_reward = sum(rewards)

        return total_reward

    def _is_out_of_bounds(self, state):
        curr_pos = state.get_pos_2d()

        if len(self.segment_path) == 0:
            print("OH NO OH NO OH NO!")
            return True

        distances = np.asarray([np.linalg.norm(p - curr_pos) for p in self.segment_path])
        minidx = np.argmin(distances)
        mindist = distances[minidx]
        #print(f"Idx: {minidx} / {len(self.segment_path)}. Dst to path: {mindist}")
        #done = (mindist > END_DISTANCE and minidx >= len(self.segment_path) - 1) or mindist > PATH_MAX_DISTANCE
        done = mindist > PATH_MAX_DISTANCE
        return done

    def land(self):
        """
        If using the real drone, this causes it to land and disarm
        :return:
        """
        try:
            self.drone.land()
        except RolloutException as e:
            raise PomdpInterface.EnvException("Retry rollout")

    def set_environment(self, env_id, instruction_set=None, fast=False):
        """
        Switch the simulation to env_id. Causes the environment configuration from
        configs/configs/random_config_<env_id>.json to be loaded and landmarks arranged in the simulator
        :param env_id: integer ID
        :param instruction_set: Instruction set to follow for displaying instructions
        :param fast: Set to True to skip a delay, at the risk of the environment not loading before subsequent function calls
        :return:
        """
        self.env_id = env_id

        try:
            self.drone.set_current_env_id(env_id, self.instance_id)
            self.drone.reset_environment()
        except RolloutException as e:
            raise PomdpInterface.EnvException("Retry rollout")

        # This is necessary to allow the new frame to be rendered with the new pomdp, so that the drone doesn't
        # accidentally see the old pomdp at the start
        if not fast:
            time.sleep(0.1)

        self.instruction_set = instruction_set
        self.stepcount = 0
        self.instruction_override = None

    def set_current_segment(self, seg_idx):
        self.current_segment_ordinal = None
        for i, seg in enumerate(self.instruction_set):
            if seg["seg_idx"] == seg_idx:
                self.current_segment_ordinal = i
        assert self.current_segment_ordinal is not None, f"Requested segment {seg_idx} not found in provided instruction data"
        self.current_segment_idx = seg_idx

        try:
            self.drone.set_current_seg_idx(seg_idx, self.instance_id)
        except RolloutException as e:
            raise PomdpInterface.EnvException("Retry rollout")

        end_idx = self.instruction_set[self.current_segment_ordinal]["end_idx"]
        start_idx = self.instruction_set[self.current_segment_ordinal]["start_idx"]
        full_path = load_and_convert_path(self.env_id)
        self.segment_path = full_path[start_idx:end_idx]
        self.instruction_override = None

        self.stepcount = 0
        if start_idx == end_idx:
            return False

        if self.segment_path is not None:
            self.rewards = [FollowPathFieldReward(self.segment_path), StopCorrectlyReward(self.segment_path)]
            self.reward_weights = [1.0, 1.0]
        return True

    def reset(self, seg_idx=0, landmark_pos=None, random_yaw=0):
        try:
            self.rate.reset()
            self.drone.reset_environment()
            self.stepcount = 0
            start_pos, start_angle = self.get_start_pos(seg_idx, landmark_pos)

            if self.cv_mode:
                start_rpy = start_angle
                self.drone.teleport_3d(start_pos, start_rpy, pos_in_airsim=False)

            else:
                start_yaw = start_angle
                if self.params["randomize_init_pos"]:
                    np.random.seed()
                    yaw_offset = float(np.random.normal(0, self.params["init_yaw_variance"], 1))
                    pos_offset = np.random.normal(0, self.params["init_pos_variance"], 2)
                    #print("offset:", pos_offset, yaw_offset)

                    start_pos = np.asarray(start_pos) + pos_offset
                    start_yaw = start_yaw + yaw_offset
                self.drone.teleport_to(start_pos, start_yaw)

            if self.params.get("voice"):
                cmd = self.get_current_nl_command()
                say(cmd)

            self.drone.rollout_begin(self.get_current_nl_command())
            self.rate.sleep(quiet=True)
            #time.sleep(0.2)
            drone_state, image = self.drone.get_state()

            return DroneState(image, drone_state)
        except RolloutException as e:
            raise PomdpInterface.EnvException("Retry rollout")

    def get_end_pos(self, seg_idx=0, landmark_pos=None):

        seg_ordinal = self.seg_idx_to_ordinal(seg_idx)
        if not self.cv_mode:
            if self.instruction_set:
                end_pos = convert_pos_from_config(self.instruction_set[seg_ordinal]["end_pos"])
                end_angle = convert_yaw_from_config(self.instruction_set[seg_ordinal]["end_yaw"])
            else:
                end_pos = [0, 0, 0]
                end_angle = 0
        return end_pos, end_angle

    def seg_idx_to_ordinal(self, seg_idx):
        for i, instr in enumerate(self.instruction_set):
            if instr["seg_idx"] == seg_idx:
                return i

    def get_start_pos(self, seg_idx=0, landmark_pos=None):

        seg_ordinal = self.seg_idx_to_ordinal(seg_idx)
        # If we are not in CV mode, then we have a path to follow and track reward for following it closely
        if not self.cv_mode:
            if self.instruction_set:
                start_pos = convert_pos_from_config(self.instruction_set[seg_ordinal]["start_pos"])
                start_angle = convert_yaw_from_config(self.instruction_set[seg_ordinal]["start_yaw"])
            else:
                start_pos = [0, 0, 0]
                start_angle = 0

        # If we are in CV mode, there is no path to be followed. Instead we are collecting images of landmarks.
        # Initialize the drone to face the position provided. TODO: Generalize this to other CV uses
        else:
            drone_angle = random.uniform(0, 2 * math.pi)
            drone_dist_mult = random.uniform(0, 1)
            drone_dist = 60 + drone_dist_mult * 300
            drone_pos_dir = np.asarray([math.cos(drone_angle), math.sin(drone_angle)])

            start_pos = landmark_pos + drone_pos_dir * drone_dist
            start_height = random.uniform(-1.5, -2.5)
            start_pos = [start_pos[0], start_pos[1], start_height]

            drone_dir = -drone_pos_dir
            start_yaw = vec_to_yaw(drone_dir)
            start_roll = 0
            start_pitch = 0
            start_angle = [start_roll, start_pitch, start_yaw]
        return start_pos, start_angle

    def get_current_nl_command(self):
        if self.instruction_override:
            return self.instruction_override
        if self.current_segment_ordinal < len(self.instruction_set):
            return self.instruction_set[self.current_segment_ordinal]["instruction"]
        return "FINISHED!"

    def override_instruction(self, instr_str):
        self.instruction_override = instr_str

    def act(self, action):
        # Action
        action = deepcopy(action)
        drone_action = action[0:3]
        # print("Action: ", action)

        raw_action = unnormalize_action(drone_action)
        # print(drone_action, raw_action)
        try:
            self.drone.send_local_velocity_command(raw_action)
        except RolloutException as e:
            raise PomdpInterface.EnvException("Retry rollout")

    def await_env(self):
        self.rate.sleep(quiet=True)

    def observe(self, prev_action):
        try:
            drone_state, image = self.drone.get_state()
            state = DroneState(image, drone_state)
            out_of_bounds = self._is_out_of_bounds(state)
            reward = self._get_reward(state, prev_action, out_of_bounds)

            self.stepcount += 1
            expired = self.stepcount > self.max_horizon

            drone_stop = prev_action[3]
            done = out_of_bounds or drone_stop > 0.5 or expired

            # Actually stop the drone (execute stop-action)
            if done:
                self.drone.send_local_velocity_command([0, 0, 0, 1])
                self.drone.rollout_end()

            # TODO: Respect the done
            return state, reward, done, expired, out_of_bounds
        except RolloutException as e:
            raise PomdpInterface.EnvException("Retry rollout")

    def step(self, action):
        """
        Takes an action, executes it in the simulation and returns the state, reward and done indicator
        :param action: array of length 4: [forward velocity, left velocity, yaw rate, stop probability]
        :return: DroneState object, reward (float), done (bool)
        """
        self.act(action)
        self.await_env()
        return self.observe(action)



    # -----------------------------------------------------------------------------------------------------------------
    # CRAP:

    def reset_to_random_cv_env(self, landmark_name=None):
        config, pos_x, pos_z = make_config_with_landmark(landmark_name)
        self.drone.set_current_env_from_config(config, instance_id=self.instance_id)
        time.sleep(0.2)
        landmark_pos_2d = np.asarray([pos_x, pos_z])
        self.cv_mode = True
        return self.reset(landmark_pos=landmark_pos_2d)

    def snap_birdseye(self, fast=False, small_env=False):
        self.drone.reset_environment()
        # TODO: Check environment dimensions
        if small_env:
            pos_birdseye_as = [2.25, 2.35, -4.92*2 - 0.06 - 0.12]
            rpy_birdseye_as = [-1.3089, 0, 0]   # For 15 deg camera
        else:
            pos_birdseye_as = [15, 15, -25]
            rpy_birdseye_as = [-1.3089, 0, 0]   # For 15 deg camera
        self.drone.teleport_3d(pos_birdseye_as, rpy_birdseye_as, fast=fast)
        _, image = self.drone.get_state(depth=False)
        return image

    def snap_cv(self, pos, quat):
        self.drone.reset_environment()
        pos_birdseye_as = pos
        rpy_birdseye_as = euler.quat2euler(quat)
        self.drone.teleport_3d(pos_birdseye_as, rpy_birdseye_as, pos_in_airsim=True, fast=True)
        time.sleep(0.3)
        self.drone.teleport_3d(pos_birdseye_as, rpy_birdseye_as, pos_in_airsim=True, fast=True)
        time.sleep(0.3)
        _, image = self.drone.get_state(depth=False)
        return image
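Both PomdpInterface variants pass actions through unnormalize_action before sending them to the controller. That function is not shown in these examples; a sketch under the assumption that policy outputs live in [-1, 1] and are scaled to physical velocity limits (the limit constants below are placeholders):

import numpy as np

MAX_VEL_X = 1.0     # m/s, placeholder
MAX_VEL_Y = 1.0     # m/s, placeholder
MAX_YAW_RATE = 1.0  # rad/s, placeholder

def unnormalize_action(action):
    # Scale a normalized [vx, vy, yaw_rate] command to physical units
    scale = np.array([MAX_VEL_X, MAX_VEL_Y, MAX_YAW_RATE])
    return np.asarray(action[0:3]) * scale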
Example #11
class DroneController(DroneControllerBase):
    def __init__(self, instance=0, flight_height=100.0):
        super(DroneController, self).__init__()
        self.instance = instance
        self.port = PORTS[instance]
        self.clock_speed = self._read_clock_speed()
        self.control_interval = 5.0
        self.flight_height = -flight_height

        print("DroneController Initialize")
        self.rate = Rate(0.1 / self.clock_speed)
        self.height_offset = P.get_current_parameters()["DroneController"].get("start_height_offset")

        self.samples = []

        killAirSim()
        self._write_airsim_settings()
        startAirSim(self, instance, self.port)

    def _get_config(self, name, i=None, instance_id=None):
        config = load_env_config(i)  # fixed: the parameter is i, not the builtin id
        return config

    def _set_config(self, json_data, subfolder, name, i=None, instance_id=None):
        folder = paths.get_current_config_folder(i, instance_id)
        id = "" if i is None else "_" + str(i)
        path = os.path.join(paths.get_sim_config_dir(), folder, subfolder, name + id + ".json")

        if not os.path.isdir(os.path.dirname(path)):
            try:
                os.makedirs(os.path.dirname(path))
            except Exception:
                pass

        if os.path.isfile(path):
            os.remove(path)

        # print("Dumping config in: " + path)
        with open(path, 'w') as fp:
            json.dump(json_data, fp)

        subprocess.check_call(["touch", path])

    def _load_drone_config(self, i, instance_id=None):
        conf_json = load_env_config(i)
        self._set_config(conf_json, "", "random_config", i=None, instance_id=instance_id)

    def _start_call(self):
        try:
            self.client.enableApiControl(True)
            self.client.armDisarm(True)
        except Exception as e:
            import traceback
            from utils.colors import print_error
            print_error("Error encountered during policy rollout!")
            print_error(e)
            print_error(traceback.format_exc())

            startAirSim(self, self.instance, self.port)

            raise RolloutException("Exception encountered during start_call. Rollout should be re-tried")

    def _start(self):
        self.client = MultirotorClient('127.0.0.1', self.port)
        self.client.confirmConnection()
        self._start_call()

        # go to initial height
        pos_as = self.client.getPosition()
        pos_as = [pos_as.x_val, pos_as.y_val, pos_as.z_val]
        pos_as[2] = self.flight_height - self.height_offset
        #pos = self.units.pos3d_from_as(pos_as)
        #pos[2] = self.units.height_to_as(self.flight_height)
        print ("Telporting to initial position in cfg: ", pos_as)
        self.teleport_3d(pos_as, [0, 0, 0])

    def _read_clock_speed(self):
        speed = 1.0
        if "ClockSpeed" in P.get_current_parameters()["AirSim"]:
            speed = P.get_current_parameters()["AirSim"]["ClockSpeed"]
        print("Read clock speed: " + str(speed))
        return speed

    def _write_airsim_settings(self):
        airsim_settings = P.get_current_parameters()["AirSim"]
        airsim_settings_path = P.get_current_parameters()["Environment"]["airsim_settings_path"]
        airsim_settings_path = os.path.expanduser(airsim_settings_path)
        save_json(airsim_settings, airsim_settings_path)
        print("Wrote new AirSim settings to " + str(airsim_settings_path))

    """def _action_to_global(self, action):
        drone_yaw = self.get_yaw()
        action_global = np.zeros(3)
        # TODO: Add action[1]
        action_global[0] = action[0] * math.cos(drone_yaw)
        action_global[1] = action[0] * math.sin(drone_yaw)
        action_global[2] = action[2]
        return action_global
    """

    def _try_teleport_to(self, position, yaw, pitch=None, roll=None):
        #pos_as = self.units.pos2d_to_as(position[:2])
        #yaw_as = self.units.yaw_to_as(yaw)
        #tgt_h = self.units.height_to_as(self.flight_height)

        pos_as = position[:2]
        yaw_as = yaw
        tgt_h = self.flight_height - self.height_offset

        tele_orientation = self.client.toQuaternion(0, 0, yaw_as)
        self.send_local_velocity_command([0, 0, 0])

        tele_pos = Vector3r()
        tele_pos.x_val = pos_as[0]
        tele_pos.y_val = pos_as[1]

        uphigh = True
        if uphigh:
            self.rate.sleep_n_intervals(3)
            pos1 = self.client.getPosition()
            current_z = pos1.z_val
            pos1.z_val = tgt_h - 50
            self.client.simSetPose(pos1, tele_orientation)
            self.rate.sleep_n_intervals(2)
            tele_pos.z_val = tgt_h - 50
            self.client.simSetPose(tele_pos, tele_orientation)
            self.rate.sleep_n_intervals(2)

        tele_pos.z_val = tgt_h
        self.client.simSetPose(tele_pos, tele_orientation)
        self.send_local_velocity_command([0, 0, 0])
        self.rate.sleep_n_intervals(10)
        #time.sleep(0.2)

    def _get_yaw(self):
        o = self.client.getOrientation()
        roll, pitch, yaw = euler.quat2euler([o.w_val, o.x_val, o.y_val, o.z_val])
        return yaw

    def get_real_time_rate(self):
        return self.clock_speed

    def send_local_velocity_command(self, cmd_vel):
        """
        :param cmd_vel: x in m/s and yaw rate in rad/s
        :return:
        """
        try:
            # Convert the drone reference frame command to a global command as understood by AirSim
            cmd_vel = self._action_to_global(cmd_vel)
            if len(cmd_vel) == 3:
                vel_x = cmd_vel[0]
                vel_y = cmd_vel[1]
                yaw_rate = math.degrees(cmd_vel[2])  # was * 180 / 3.14
                yaw_mode = YawMode(is_rate=True, yaw_or_rate=yaw_rate)
                tgt_h = self.flight_height - self.height_offset
                self.client.moveByVelocityZ(vel_x, vel_y, tgt_h, self.control_interval, yaw_mode=yaw_mode)
        except msgpackrpc.error.TimeoutError as e:
            startAirSim(self, self.instance, self.port)
            raise RolloutException("Simulator Died")

    def teleport_to(self, position, yaw):
        #pos_as = self.units.pos2d_to_as(position[0:2])
        #tgt_h = self.units.height_to_as(self.flight_height)
        try:
            pos_as = position[0:2]
            tgt_h = self.flight_height - self.height_offset

            teleported = False
            for i in range(10):
                self._try_teleport_to(position, yaw)
                curr_pos = self.client.getPosition()
                curr_pos_np = np.asarray([curr_pos.x_val, curr_pos.y_val])

                if np.linalg.norm(pos_as - curr_pos_np) < 1.0 and math.fabs(tgt_h - curr_pos.z_val) < 1.0:
                    teleported = True
                    break
            if not teleported:
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                print("Failed to teleport after 10 attempts!")
                print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                self.client.moveToPosition(velocity=1.0, x=0.0, y=0.0, z=-10.0)
        except msgpackrpc.error.TimeoutError as e:
            startAirSim(self, self.instance, self.port)
            raise RolloutException("Simulator Died")


    def teleport_3d(self, position, rpy, fast=False):
        try:
            pos_as = position
            tele_rot = self.client.toQuaternion(rpy[0], rpy[1], rpy[2])
            self.send_local_velocity_command([0, 0, 0])
            self.rate.sleep_n_intervals(1)
            tele_pos = Vector3r()
            tele_pos.x_val = pos_as[0]
            tele_pos.y_val = pos_as[1]
            tele_pos.z_val = pos_as[2]

            self.client.simSetPose(tele_pos, tele_rot)
            self.send_local_velocity_command([0, 0, 0])
            if not fast:
                time.sleep(0.2)
        except msgpackrpc.error.TimeoutError as e:
            startAirSim(self, self.instance, self.port)
            raise RolloutException("Simulator Died")


    def get_state(self, depth=False, segmentation=False):
        try:
            # Get images
            request = [ImageRequest(0, AirSimImageType.Scene, False, False)]
            if depth:
                request.append(ImageRequest(0, AirSimImageType.DepthPlanner, True))
            if segmentation:
                request.append(ImageRequest(0, AirSimImageType.Segmentation, False, False))

            response = self.client.simGetImages(request)

            pos_dict = response[0].camera_position
            rot_dict = response[0].camera_orientation

            cam_pos = [pos_dict[b"x_val"], pos_dict[b"y_val"], pos_dict[b"z_val"] + self.height_offset]
            cam_rot = [rot_dict[b"w_val"], rot_dict[b"x_val"], rot_dict[b"y_val"], rot_dict[b"z_val"]]

            img_data = np.frombuffer(response[0].image_data_uint8, np.uint8)
            image_raw = img_data.reshape(response[0].height, response[0].width, 4)
            image_raw = image_raw[:, :, 0:3]

            concat = [image_raw]

            if depth:
                # Depth is cast to uint8, because otherwise it takes up a lot of space and we don't need such fine-grained
                # resolution
                depth_data = np.asarray(response[1].image_data_float)
                depth_raw = depth_data.reshape(response[1].height, response[1].width, 1)
                depth_raw = np.clip(depth_raw, 0, 254 / DEPTH_SCALE)
                depth_raw *= DEPTH_SCALE
                depth_raw = depth_raw.astype(np.uint8)
                concat.append(depth_raw)

            if segmentation:
                seg_data = np.frombuffer(response[2].image_data_uint8, np.uint8)
                seg_raw = seg_data.reshape(response[2].height, response[2].width, 4)
                concat.append(seg_raw)

            img_state = np.concatenate(concat, axis=2)

            # cam_pos, cam_rot are in ned coordinate frame in the simulator. They're left as-is
            # drones position gets converted to config coordinates instead

            # Get drone's physical location, orientation and velocity
            pos = self.client.getPosition()
            vel = self.client.getVelocity()
            o = self.client.getOrientation()

            # Convert to numpys
            drn_pos = np.asarray([pos.x_val, pos.y_val, pos.z_val + self.height_offset]) # xyz
            drn_vel = np.asarray([vel.x_val, vel.y_val, vel.z_val]) # xyz
            #drn_rot = np.asarray([o.w_val, o.x_val, o.y_val, o.z_val]) #wxyz
            drn_rot = np.asarray(euler.quat2euler([o.w_val, o.x_val, o.y_val, o.z_val]))

            # Turn it into a nice 9-D vector with euler angles
            """
            roll, pitch, yaw = self.units.euler_from_as(r_as, p_as, y_as)
            pos_3d_as = np.asarray([pos.x_val, pos.y_val, pos.z_val])
            pos_state = self.units.pos3d_from_as(pos_3d_as)
            vel_state = self.units.vel3d_from_as(np.asarray([vel.x_val, vel.y_val, vel.z_val]))
            ori_state = [roll, pitch, yaw]
            """

            drone_state = np.concatenate((drn_pos, drn_rot, drn_vel, cam_pos, cam_rot), axis=0)

            return drone_state, img_state
        except msgpackrpc.error.TimeoutError as e:
            startAirSim(self, self.instance, self.port)
            raise RolloutException("Simulator Died")

    def set_current_env_from_config(self, config, instance_id=None):
        """
        Store the provided dict representation of a random_config.json into the
        correct folder for the instance_id simulator instance to read when resetting its pomdp.
        :param config: Random pomdp configuration to store
        :param instance_id: Simulator instance for which to store it.
        """
        try:
            self._set_config(config, "", "random_config", instance_id=instance_id)
        except msgpackrpc.error.TimeoutError as e:
            startAirSim(self, self.instance, self.port)
            raise RolloutException("Simulator Died")

    def set_current_env_id(self, env_id, instance_id=None):
        try:
            self._load_drone_config(env_id, instance_id)
            gc.collect()
        except msgpackrpc.error.TimeoutError as e:
            startAirSim(self, self.instance, self.port)
            raise RolloutException("Simulator Died")

    def set_current_seg_idx(self, seg_idx, instance_id=None):
        pass

    def rollout_begin(self, instruction):
        pass

    def rollout_end(self):
        pass

    def reset_environment(self):
        self._start_call()
        self.client.simResetEnv()
        self._start_call()

    def land(self):
        pass
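Example #11's get_state returns np.concatenate((drn_pos, drn_rot, drn_vel, cam_pos, cam_rot)): 3 position components, 3 Euler angles, 3 velocity components, 3 camera position components, and a 4-component camera quaternion, 16 values in total. A small helper for unpacking it (the helper name and dict keys are illustrative, not from the project):

def unpack_drone_state(drone_state):
    # Layout follows Example #11's get_state concatenation order
    return {
        "pos": drone_state[0:3],       # x, y, z (height offset applied)
        "rpy": drone_state[3:6],       # roll, pitch, yaw from quat2euler
        "vel": drone_state[6:9],       # vx, vy, vz
        "cam_pos": drone_state[9:12],  # camera x, y, z
        "cam_rot": drone_state[12:16], # camera quaternion w, x, y, z
    }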
Example #12
    def __init__(self):
        self.mon = MonitorSuper()
        self.thread = threading.Thread(target=self.run, args=())
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        self.mon.run()

    def get_command(self):
        return self.mon.current_vel

initialize_experiment("nl_datacollect_cage")

teleoper = KeyTeleop()
rate = Rate(0.1)

env = PomdpInterface()

env_ids = get_available_env_ids()

count = 0
stuck_count = 0


def show_depth(image):
    grayscale = np.mean(image[:, :, 0:3], axis=2)
    depth = image[:, :, 3]
    comb = np.stack([grayscale, grayscale, depth], axis=2)
    comb -= comb.min()
    comb /= (comb.max() + 1e-9)
Example #13
class DroneController():
    def __init__(self, instance=0, flight_height=100.0):
        self.instance = instance
        self.port = PORTS[instance]
        self.clock_speed = self._read_clock_speed()
        self.control_interval = 0.5
        self.flight_height = flight_height  # Flight height is in config units, as with everything
        self.units = UnrealUnits()
        print("DroneController Initialize")
        self.rate = Rate(0.1 / self.clock_speed)

        self.samples = []

        killAirSim()
        self._write_airsim_settings()
        startAirSim(self, instance, self.port)

    def _get_config(self, name, i=None, instance_id=None):
        print("_get_config load_env_config")
        config = load_env_config(i)
        return config

    def _set_config(self,
                    json_data,
                    subfolder,
                    name,
                    i=None,
                    instance_id=None):
        folder = paths.get_current_config_folder(i, instance_id)
        id = "" if i is None else "_" + str(i)
        path = os.path.join(paths.get_sim_config_dir(), folder, subfolder,
                            name + id + ".json")
        print("_set_config folder:", folder)
        print("_set_config subfolder:", subfolder)
        print("_set_config path:", path)
        if not os.path.isdir(os.path.dirname(path)):
            try:
                os.makedirs(os.path.dirname(path))
            except Exception:
                pass

        if os.path.isfile(path):
            os.remove(path)

        # print("Dumping config in: " + path)
        with open(path, 'w') as fp:
            json.dump(json_data, fp)

        subprocess.check_call(["touch", path])

    def _load_drone_config(self, i, instance_id=None):
        print("_load_drone_config load_env_config")
        conf_json = load_env_config(i)
        self._set_config(conf_json,
                         "",
                         "random_config",
                         i=None,
                         instance_id=instance_id)

    def _start_call(self):
        try:
            self.client.enableApiControl(True)
            self.client.armDisarm(True)
        except Exception as e:
            import traceback
            from utils.colors import print_error
            print_error("Error encountered during policy rollout!")
            print_error(e)
            print_error(traceback.format_exc())

            startAirSim(self, self.instance, self.port)

            raise RolloutException(
                "Exception encountered during start_call. Rollout should be re-tried"
            )

    def _start(self):
        self.client = MultirotorClient('127.0.0.1', self.port)
        self.client.confirmConnection()
        self._start_call()

        # go to initial height
        pos_as = self.client.getPosition()
        pos_as = [pos_as.x_val, pos_as.y_val, pos_as.z_val]
        pos = self.units.pos3d_from_as(pos_as)
        pos[2] = self.units.height_to_as(self.flight_height)
        print("Telporting to initial position in cfg: ", pos)
        self.teleport_3d(pos, [0, 0, 0])

    def _read_clock_speed(self):
        speed = 1.0
        if "ClockSpeed" in P.get_current_parameters()["AirSim"]:
            speed = P.get_current_parameters()["AirSim"]["ClockSpeed"]
        print("Read clock speed: " + str(speed))
        return speed

    def _write_airsim_settings(self):
        airsim_settings = P.get_current_parameters()["AirSim"]
        airsim_settings_path = P.get_current_parameters(
        )["Environment"]["airsim_settings_path"]
        airsim_settings_path = os.path.expanduser(airsim_settings_path)
        save_json(airsim_settings, airsim_settings_path)
        print("Wrote new AirSim settings to " + str(airsim_settings_path))

    def _action_to_global(self, action):
        drone_yaw = self.get_yaw()
        action_global = np.zeros(3)
        # TODO: Add action[1]
        action_global[0] = action[0] * math.cos(drone_yaw)
        action_global[1] = action[0] * math.sin(drone_yaw)
        action_global[2] = action[2]
        return action_global

    def _try_teleport_to(self, position, yaw, pitch=None, roll=None):
        pos_as = self.units.pos2d_to_as(position[:2])
        yaw_as = self.units.yaw_to_as(yaw)
        tgt_h = self.units.height_to_as(self.flight_height)

        tele_orientation = self.client.toQuaternion(0, 0, yaw_as)
        self.send_local_velocity_command([0, 0, 0])
        self.rate.sleep_n_intervals(3)
        pos1 = self.client.getPosition()
        current_z = pos1.z_val
        pos1.z_val = tgt_h - 50

        self.client.simSetPose(pos1, tele_orientation)
        self.rate.sleep_n_intervals(2)

        tele_pos = Vector3r()
        tele_pos.x_val = pos_as[0]
        tele_pos.y_val = pos_as[1]
        tele_pos.z_val = tgt_h - 50

        self.client.simSetPose(tele_pos, tele_orientation)
        self.rate.sleep_n_intervals(2)

        tele_pos.z_val = tgt_h

        self.client.simSetPose(tele_pos, tele_orientation)
        self.send_local_velocity_command([0, 0, 0])
        self.rate.sleep_n_intervals(3)
        time.sleep(0.1)

    def get_yaw(self):
        o = self.client.getOrientation()
        vec, theta = quaternions.quat2axangle(
            [o.w_val, o.x_val, o.y_val, o.z_val])
        new_vec = self.units.vel3d_from_as(vec)
        roll, pitch, yaw = euler.axangle2euler(new_vec, theta)
        return yaw

    def send_local_velocity_command(self, cmd_vel):
        # Convert the drone reference frame command to a global command as understood by AirSim
        cmd_vel = self._action_to_global(cmd_vel)
        #print("                  global action: ", cmd_vel)
        if len(cmd_vel) == 3:
            cmd_vel_as = self.units.vel2d_to_as(cmd_vel[:2])
            cmd_yaw_as = self.units.yaw_rate_to_as(cmd_vel[2])
            #print("                         AirSim command: ", cmd_vel_as, "yaw rate: ", cmd_yaw_as)
            vel_x = cmd_vel_as[0]
            vel_y = cmd_vel_as[1]
            yaw_rate = cmd_yaw_as
            z = self.units.height_to_as(self.flight_height)
            yaw_mode = YawMode(is_rate=True, yaw_or_rate=yaw_rate)
            self.client.moveByVelocityZ(vel_x,
                                        vel_y,
                                        z,
                                        self.control_interval,
                                        yaw_mode=yaw_mode)

    def teleport_to(self, position, yaw):
        pos_as = self.units.pos2d_to_as(position[0:2])
        tgt_h = self.units.height_to_as(self.flight_height)
        teleported = False
        for i in range(10):
            self._try_teleport_to(position, yaw)
            curr_pos = self.client.getPosition()
            curr_pos_np = np.asarray([curr_pos.x_val, curr_pos.y_val])

            if np.linalg.norm(pos_as - curr_pos_np) < 1.0 and math.fabs(
                    tgt_h - curr_pos.z_val) < 1.0:
                teleported = True
                break
        if not teleported:
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print("Failed to teleport after 10 attempts!")
            print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")

    def teleport_3d(self, position, rpy, pos_in_airsim=True, fast=False):
        pos_as = position if pos_in_airsim else self.units.pos3d_to_as(
            position)
        # TODO: This is broken. p and r should also be converted ideally
        if not pos_in_airsim:
            rpy[2] = self.units.yaw_to_as(rpy[2])

        tele_rot = self.client.toQuaternion(rpy[0], rpy[1], rpy[2])
        self.send_local_velocity_command([0, 0, 0])
        self.rate.sleep_n_intervals(1)
        tele_pos = Vector3r()
        tele_pos.x_val = pos_as[0]
        tele_pos.y_val = pos_as[1]
        tele_pos.z_val = pos_as[2]

        self.client.simSetPose(tele_pos, tele_rot)
        self.send_local_velocity_command([0, 0, 0])
        if not fast:
            time.sleep(0.2)
        self.client.simSetPose(tele_pos, tele_rot)
        self.send_local_velocity_command([0, 0, 0])
        if not fast:
            time.sleep(0.2)

    def get_state(self, depth=False, segmentation=False):
        # Get images
        request = [ImageRequest(0, AirSimImageType.Scene, False, False)]
        if depth:
            request.append(ImageRequest(0, AirSimImageType.DepthPlanner, True))
        if segmentation:
            request.append(
                ImageRequest(0, AirSimImageType.Segmentation, False, False))

        response = self.client.simGetImages(request)

        pos_dict = response[0].camera_position
        rot_dict = response[0].camera_orientation

        cam_pos = [pos_dict[b"x_val"], pos_dict[b"y_val"], pos_dict[b"z_val"]]
        cam_rot = [
            rot_dict[b"w_val"], rot_dict[b"x_val"], rot_dict[b"y_val"],
            rot_dict[b"z_val"]
        ]

        img_data = np.frombuffer(response[0].image_data_uint8, np.uint8)
        image_raw = img_data.reshape(response[0].height, response[0].width, 4)
        image_raw = image_raw[:, :, 0:3]

        concat = [image_raw]

        if depth:
            # Depth is cast to uint8, because otherwise it takes up a lot of space and we don't need such fine-grained
            # resolution
            depth_data = np.asarray(response[1].image_data_float)
            depth_raw = depth_data.reshape(response[1].height,
                                           response[1].width, 1)
            depth_raw = np.clip(depth_raw, 0, 254 / DEPTH_SCALE)
            depth_raw *= DEPTH_SCALE
            depth_raw = depth_raw.astype(np.uint8)
            concat.append(depth_raw)

        if segmentation:
            seg_data = np.frombuffer(response[2].image_data_uint8, np.uint8)
            seg_raw = seg_data.reshape(response[2].height, response[2].width,
                                       4)
            concat.append(seg_raw)

        img_state = np.concatenate(concat, axis=2)

        # Get drone's physical location, orientation and velocity
        pos = self.client.getPosition()
        vel = self.client.getVelocity()
        o = self.client.getOrientation()
        # Turn it into a nice 9-D vector with euler angles
        r_as, p_as, y_as = euler.quat2euler(
            [o.w_val, o.x_val, o.y_val, o.z_val])
        roll, pitch, yaw = self.units.euler_from_as(r_as, p_as, y_as)
        pos_3d_as = np.asarray([pos.x_val, pos.y_val, pos.z_val])
        pos_state = self.units.pos3d_from_as(pos_3d_as)
        vel_state = self.units.vel3d_from_as(
            np.asarray([vel.x_val, vel.y_val, vel.z_val]))
        ori_state = [roll, pitch, yaw]

        drone_state = np.concatenate(
            (pos_state, ori_state, vel_state, cam_pos, cam_rot), axis=0)

        return drone_state, img_state

    def set_current_env_from_config(self, config, instance_id=None):
        """
        Store the provided dict representation of a random_config.json into the
        correct folder for the instance_id simulator instance to read when resetting its pomdp.
        :param config: Random pomdp configuration to store
        :param instance_id: Simulator instance for which to store it.
        """
        self._set_config(config, "", "random_config", instance_id=instance_id)

    def set_current_env_id(self, env_id, instance_id=None):
        # TODO: This was used before to render the path in the simulator, but now that feature is unused anyways
        env_confs = ["config"]  # , "random_curve"]
        folders = ["configs"]  # , "paths"]

        #for env_conf, folder in zip(env_confs, folders):
        self._load_drone_config(env_id, instance_id)
        print("set_current_env_id env_id:", env_id)
        gc.collect()

    def reset_environment(self):
        self._start_call()
        self.client.simResetEnv()
        self._start_call()
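Example #13 (unlike Example #11, which works directly in AirSim coordinates) routes every position, height, and velocity through an UnrealUnits converter. Its implementation is not part of these examples; the sketch below only illustrates the shape of such a converter, assuming a uniform scale factor and AirSim's NED convention of negative z for altitude. The real class may differ.

import numpy as np

class UnrealUnits:
    """Illustrative config-units <-> AirSim converter (assumed semantics)."""
    def __init__(self, scale=1.0):
        self.scale = scale

    def pos2d_to_as(self, pos2d):
        return np.asarray(pos2d, dtype=float) * self.scale

    def pos3d_to_as(self, pos3d):
        pos = np.asarray(pos3d, dtype=float) * self.scale
        pos[2] = -pos[2]  # AirSim z points down; config height points up
        return pos

    def pos3d_from_as(self, pos3d_as):
        pos = np.asarray(pos3d_as, dtype=float) / self.scale
        pos[2] = -pos[2]
        return pos

    def height_to_as(self, height):
        return -height * self.scale

    def vel2d_to_as(self, vel2d):
        return np.asarray(vel2d, dtype=float) * self.scale

    def vel3d_from_as(self, vel3d_as):
        return np.asarray(vel3d_as, dtype=float) / self.scale

    def yaw_to_as(self, yaw):
        return yaw  # assume the yaw convention matches

    def yaw_rate_to_as(self, yaw_rate):
        return yaw_rate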