Example #1
def main():
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: python record_tail.py <start_floor>\n')
        sys.exit(1)
    start_floor = int(sys.argv[1])
    viewer = EnvInteractor()
    env = ObstacleTowerEnv(os.environ['OBS_TOWER_PATH'],
                           worker_id=random.randrange(11, 20))
    while True:
        seed = select_seed(floor=start_floor)
        env.seed(seed)
        env.floor(start_floor)
        obs = env.reset()
        viewer.reset()
        record_episode(seed, env, viewer, obs, max_steps=MAX_STEPS)
Example #2
def main():
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: record_improve.py <recording_path>\n')
        sys.exit(1)
    rec = Recording(sys.argv[1])
    env = ObstacleTowerEnv(os.environ['OBS_TOWER_PATH'],
                           worker_id=random.randrange(11, 20))
    try:
        env.seed(rec.seed)
        if rec.floor:
            env.floor(rec.floor)
        env.reset()
        i = 0
        for i, (action, rew) in enumerate(zip(rec.actions, rec.rewards)):
            _, real_rew, done, _ = env.step(action)
            if not np.allclose(real_rew, rew):
                print('mismatching result at step %d' % i)
                sys.exit(1)
            if done != (i == rec.num_steps - 1):
                print('invalid done result at step %d' % i)
                sys.exit(1)
        print('match succeeded')
    finally:
        env.close()
Example #3
import os

from obstacle_tower_env import ObstacleTowerEnv

env = ObstacleTowerEnv(os.environ['OBS_TOWER_PATH'], worker_id=0)

env.seed(72)
env.floor(12)
env.reset()
for action in [
        18, 18, 18, 18, 18, 18, 30, 24, 24, 21, 18, 18, 30, 18, 18, 18, 18, 18,
        18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 24, 18, 30, 18, 18,
        18, 18, 18, 18, 18, 18, 18, 18, 30, 30, 30, 30, 24, 24, 6, 6, 6, 6, 6,
        6, 6, 6, 30, 30, 30, 30, 30, 18, 24, 24, 24, 6, 6, 6, 6, 6, 6, 24, 18,
        24, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
        18, 18, 18, 18, 18, 18, 18, 18, 18, 6, 6, 6, 6, 24, 24, 24, 18, 30, 18,
        18, 30, 18, 30, 30, 18, 18, 18, 18, 18, 18, 18, 18, 30, 24, 24, 30, 30,
        24, 24, 24, 30, 30, 30, 30, 30, 18, 18, 18, 18, 30, 30, 30, 30, 30, 30,
        30, 30, 30, 30, 30, 30, 30, 30, 24, 24, 24, 24, 24, 24, 24, 18, 18, 18,
        18, 18, 18, 18, 18, 18, 18, 18, 24, 18, 18, 30, 18, 18, 18, 18, 18, 18,
        18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
        18, 18, 24, 18, 30, 18, 18, 18, 18, 30, 30, 30, 18, 18, 18, 18, 18, 18,
        18, 18, 18, 18, 18, 30, 18, 18, 30, 18, 18, 18, 18, 18, 18, 18, 18, 18,
        18, 18, 18, 18, 30, 24, 24, 24, 24, 24, 24, 24, 24, 18, 30, 18, 18, 18,
        18, 30, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 30,
        30, 30, 30, 30, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
        18, 18, 30, 24, 21, 18, 24, 24, 24, 24, 18, 18, 18, 24, 18, 18, 18, 18,
        30, 18, 18, 24, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
        24, 24, 24, 24, 24, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
        18, 18, 30, 30, 30, 18, 18, 30, 30, 30, 30, 30, 30, 12, 12, 30, 30, 30,
        30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 18, 18, 18, 18, 18, 18, 18, 18,
Example #4
class Worker(object):
    def __init__(self,
                 envpath,
                 wid,
                 retro,
                 realtime_mode,
                 env_seed=0,
                 env_floor=0):
        self.wid = wid
        self.env = ObstacleTowerEnv(environment_filename=envpath,
                                    worker_id=wid,
                                    retro=retro,
                                    realtime_mode=realtime_mode)
        self.kprun = GLOBAL_KPRUN
        self.tableAction = self.createActionTable()
        # Configure the level (seed and floor)
        self.env_seed = env_seed
        self.env_floor = env_floor
        self.step = 0
        self.summary = tf.Summary(value=[
            tf.Summary.Value(tag="Stage_reward " + str(self.wid),
                             simple_value=0)
        ])
        self.kprun.train_writer.add_summary(self.summary, 0)

    def createActionTable(self):
        tableAction = []
        for a in range(0, 3):
            for b in range(0, 3):
                for c in range(0, 2):
                    tableAction.append([a, b, c, 0])
        # print("Action option: ", tableAction[0:17])
        return tableAction

    def reward_compute(self, done, reward_total, keys, previous_keys, reward,
                       previous_reward, time_remaining,
                       previous_time_remaining, previous_stage_time_remaining):
        # Define the reward formula
        # reward is the floor-clear progress reported by the environment
        # keys is the number of keys the agent has picked up
        # time_remaining is the remaining time
        # Maximum reward for clearing a floor is 10
        # A key is worth 5
        # A time orb only gives 0.5 for now, because remaining time is settled at the end
        # and the rewards would otherwise accumulate.
        # On clearing a floor, give ten times the clear reward - (time at scene start - remaining time) / 1000
        # (A condensed restatement of these rules appears after this class.)
        # print("time_remaining ", time_remaining,
        #       " previous_time_remaining ", previous_time_remaining,
        #         " reward ", reward)
        # Passing through a green door that opens grants +0.1
        if (reward - previous_reward) > 0 and (reward - previous_reward) < 0.3:
            reward_total += 3
        elif (reward - previous_reward) > 0.9:
            # *** If the remaining time ends up larger than the scene time, this becomes a bonus,
            # which could greatly increase the agent's tendency to chase time orbs.
            # *** Another option is to simply add remaining_time / 1000, which avoids the accumulation effect.
            print("Pass ", reward, " Stage!")
            # reward_total += (reward - previous_reward) * 100 - \
            #                 (previous_stage_time_remaining - time_remaining)

            reward_total += 200
            # After clearing a floor, carry the time over to the next one; store this round's
            # time for the next floor-clear calculation.
            previous_time_remaining = time_remaining
            previous_stage_time_remaining = time_remaining
            # Lesson 1 repeat
            if reward > 6.5:
                # self.total_step +=1
                # if self.total_step >=5:
                #     done = True
                #     return reward_total, previous_stage_time_remaining, done
                self.env.seed(np.random.randint(5))
                # env.reset()
                done = True
            return reward_total, previous_stage_time_remaining, done

        # Assume an orb or key may be collected while clearing a floor, so these bonuses can stack
        if keys > previous_keys:
            # print("Get Key")
            reward_total += 5

        if previous_time_remaining < time_remaining and previous_time_remaining != 0:
            # print("Get time power up")
            reward_total += 2
        else:
            reward_total -= 0.5
        if done and previous_time_remaining > 100:
            print("Agent died")
            # The more time remaining when the agent dies, the larger the penalty
            # reward_total -= (10 + time_remaining / 100)
            reward_total -= 100
        return reward_total, previous_stage_time_remaining, done

    def work(self):
        global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
        # Configure the level (seed and floor)
        self.env.seed(self.env_seed)
        self.env.floor(self.env_floor)
        # Loop until the target number of episodes is reached
        while not COORD.should_stop():
            # Track the step count
            self.step += 1
            # Reset the level
            obs = self.env.reset()
            # Initialize episode state
            done = False
            stage_reward = 0.0
            reward = 0
            keys = 0
            # There is no time_remaining before the first step, so define it up front for the time-orb check
            time_remaining = 3000
            previous_stage_time_remaining = time_remaining
            # Preprocess the image
            # previous_preprocessed_observation_image = np.reshape(obs[0], [-1])
            previous_preprocessed_observation_image = obs[0]
            buffer_s, buffer_a, buffer_r = [], [], []
            # While the agent is still alive
            while not done:
                # If the model is being updated, wait for the update to finish
                if not ROLLING_EVENT.is_set():
                    # Wait for the update to finish
                    ROLLING_EVENT.wait()
                    # Clear the buffers and collect data with the updated policy
                    buffer_s, buffer_a, buffer_r = [], [], []

                # Save the previous state for the reward computation
                previous_keys = keys
                previous_reward = reward
                previous_time_remaining = time_remaining

                # Choose an action based on the previous observation
                action = self.kprun.choose_action(
                    previous_preprocessed_observation_image)
                action = np.clip(np.random.normal(action, 1.), *[6, 12])

                # Take the action; get the observation, floor-clear progress, done flag, and info
                observation, reward, done, info = self.env.step(
                    np.array(self.tableAction[int(action)]))

                # Preprocess the data the model needs
                observation_image, keys, time_remaining = observation
                # preprocessed_observation_image = np.reshape(
                #     observation_image, [-1])
                preprocessed_observation_image = observation_image
                stage_reward, previous_stage_time_remaining, done = self.reward_compute(
                    done=done,
                    reward_total=stage_reward,
                    keys=keys,
                    previous_keys=previous_keys,
                    reward=reward,
                    previous_reward=previous_reward,
                    time_remaining=time_remaining,
                    previous_time_remaining=previous_time_remaining,
                    previous_stage_time_remaining=previous_stage_time_remaining
                )
                # Normalize the reward
                stage_reward = (stage_reward + 8) / 8

                # Store this transition in the buffers
                buffer_s.append(np.array([preprocessed_observation_image]))
                buffer_a.append(action)
                buffer_r.append(stage_reward)

                # Save the image for the next step to use
                previous_preprocessed_observation_image = preprocessed_observation_image

                # When an update is due, do the local processing first
                GLOBAL_UPDATE_COUNTER += 1
                # If the local buffer is large enough, prepare the update data here
                if len(buffer_s) == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                    v_s_ = self.kprun.get_v(preprocessed_observation_image)
                    # Compute discounted returns
                    discounted_r = []
                    for r in buffer_r[::-1]:
                        v_s_ = r + GAMMA * v_s_
                        discounted_r.append(v_s_)
                    discounted_r.reverse()
                    # Arrange the array dimensions
                    bs, ba, br = np.vstack(buffer_s), np.vstack(
                        buffer_a), np.array(discounted_r)[:, np.newaxis]
                    # Put the data into the shared queue
                    QUEUE.put(bs)
                    QUEUE.put(ba)
                    QUEUE.put(br)
                    # print("len(buffer_s)", len(buffer_s))
                    # print("bs.shape", bs.shape)
                    # Clear the temporary buffers
                    buffer_s, buffer_a, buffer_r = [], [], []
                    # If the global step count reaches the minimum batch size, update the whole model
                    if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
                        # Stop collecting data
                        ROLLING_EVENT.clear()
                        # Update PPO
                        UPDATE_EVENT.set()
                    # Stop training once the maximum number of episodes is reached
                    if GLOBAL_EP >= EP_MAX:
                        COORD.request_stop()
                        break
            # Log the episode reward
            self.summary = tf.Summary(value=[
                tf.Summary.Value(tag="Stage_reward " + str(self.wid),
                                 simple_value=stage_reward)
            ])
            self.kprun.train_writer.add_summary(self.summary, self.step)
            GLOBAL_EP += 1
            print(
                '{0:.1f}%'.format(GLOBAL_EP / EP_MAX * 100),
                '|W%i' % self.wid,
                '|Ep_r: %.2f' % stage_reward,
            )
        self.env.close()
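
The shaping rules implemented in reward_compute above can be hard to follow inline, so here is a condensed, standalone restatement as a sketch. The function name and the sample call are made up for illustration; the constants mirror the code above (not the docstring's 10/5/0.5 figures).

def shaped_reward_delta(reward, previous_reward, keys, previous_keys,
                        time_remaining, previous_time_remaining, done):
    # Illustrative restatement of Worker.reward_compute; not part of the original example.
    env_delta = reward - previous_reward
    if env_delta > 0.9:
        return 200.0                  # cleared a floor (+1.0 from the env); no further bonuses
    delta = 0.0
    if 0 < env_delta < 0.3:
        delta += 3                    # passed through a green door (+0.1 from the env)
    if keys > previous_keys:
        delta += 5                    # picked up a key
    if previous_time_remaining < time_remaining and previous_time_remaining != 0:
        delta += 2                    # collected a time orb
    else:
        delta -= 0.5                  # small per-step penalty otherwise
    if done and previous_time_remaining > 100:
        delta -= 100                  # died with plenty of time left
    return delta

# Example: picking up a key on an ordinary step
print(shaped_reward_delta(reward=0.0, previous_reward=0.0, keys=1, previous_keys=0,
                          time_remaining=2900, previous_time_remaining=2950, done=False))  # 4.5
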
Example #5
def main():
    basicConfig(level=INFO)
    env = ObstacleTowerEnv(str(PRJ_ROOT / 'obstacletower'), retro=False, worker_id=9)
    done = False
    env.floor(1)
    env.reset()

    screen = Screen()
    random_actor = RandomRepeatActor(continue_rate=0.9)
    random_actor.reset(schedules=[
        (Action.CAMERA_RIGHT, 3),
        (Action.CAMERA_LEFT, 6),
        (Action.CAMERA_RIGHT, 3),
        (Action.NOP, 5),
        (Action.FORWARD, 8),
        (Action.RIGHT, 2),
        (Action.LEFT, 4),
        (Action.RIGHT, 2),
    ])

    frame_history = FrameHistory(env)
    moving_checker = MovingChecker(frame_history)
    position_estimator = PositionEstimator(moving_checker)
    map_observation = MapObservation(position_estimator, moving_checker)
    event_handlers: List[EventHandler] = [
        frame_history,
        moving_checker,
        position_estimator,
        map_observation,
    ]

    while not done:
        for h in event_handlers:
            h.begin_loop()

        screen.show("original", frame_history.last_frame)
        cv2.waitKey(0)

        for h in event_handlers:
            h.before_step()

        action = random_actor.decide_action(moving_checker.did_move)
        obs, reward, done, info = env.step(action)
        if reward != 0:
            logger.info(f"Get Reward={reward} Keys={obs[1]}")
        # logger.info(f"Keys={obs[1]} Time Remain={obs[2]}")

        params = EventParamsAfterStep(action, obs, reward, done, info)
        for h in event_handlers:
            h.after_step(params)

        screen.show("map", map_observation.concat_images())

        if len(frame_history.small_frame_pixel_diffs) > 0:
            f1 = frame_history.small_frame_pixel_diffs[-1]
            if len(frame_history.small_frame_pixel_diffs) > 1:
                f2 = frame_history.small_frame_pixel_diffs[-2]
                f1 = np.concatenate((f2, f1), axis=1)
            screen.show("diff", f1)

        for h in event_handlers:
            h.end_loop()
Example #6
class WrappedObstacleTowerEnv():
    def __init__(self,
                 environment_filename=None,
                 docker_training=False,
                 worker_id=0,
                 retro=False,
                 timeout_wait=30,
                 realtime_mode=False,
                 num_actions=3,
                 mobilenet=False,
                 gray_scale=False,
                 autoencoder=None,
                 floor=0):
        '''
        Arguments:
          environment_filename: The file path to the Unity executable.  Does not require the extension.
          docker_training: Whether this is running within a docker environment and should use a virtual
            frame buffer (xvfb).
          worker_id: The index of the worker in the case where multiple environments are running.  Each
            environment reserves port (5005 + worker_id) for communication with the Unity executable.
          retro: Resize visual observation to 84x84 (int8) and flattens action space.
          timeout_wait: Time for python interface to wait for environment to connect.
          realtime_mode: Whether to render the environment window image and run environment at realtime.
        '''

        self._obstacle_tower_env = ObstacleTowerEnv(environment_filename,
                                                    docker_training, worker_id,
                                                    retro, timeout_wait,
                                                    realtime_mode)
        if floor != 0:
            self._obstacle_tower_env.floor(floor)
        self._flattener = ActionFlattener([3, 3, 2, 3])
        self._action_space = self._flattener.action_space
        self.mobilenet = mobilenet
        self.gray_scale = gray_scale
        if mobilenet:
            self.image_module = WrappedKerasLayer(retro, self.mobilenet)
        self._done = False
        if autoencoder:
            print("Loading autoencoder from {}".format(autoencoder))
            self.autoencoder = build_autoencoder(autoencoder)
            print("Done.")
        else:
            self.autoencoder = None

    def action_spec(self):
        return self._action_spec

    def observation_spec(self):
        return self._observation_spec

    def gray_process_observation(self, observation):
        observation = (observation * 255).astype(np.uint8)
        obs_image = Image.fromarray(observation)
        obs_image = obs_image.resize((84, 84), Image.NEAREST)
        gray_observation = np.mean(np.array(obs_image), axis=-1, keepdims=True)
        gray_observation = (gray_observation / 255)

        # gray_observation = self.autoencoder.predict(gray_observation)
        return gray_observation

    def _preprocess_observation(self, observation):
        """
        Re-sizes visual observation to 224x224 (the MobileNet input size)
        """
        observation = (observation * 255).astype(np.uint8)
        obs_image = Image.fromarray(observation)
        obs_image = obs_image.resize((224, 224), Image.NEAREST)
        return np.array(obs_image)

    def reset(self):
        observation = self._obstacle_tower_env.reset()
        observation, key, time = observation
        self._done = False
        if self.mobilenet:
            if self.autoencoder:
                observation = self.autoencoder.predict(observation[None, :])[0]
            return self.image_module(self._preprocess_observation(
                observation)), observation, key, time
        elif self.gray_scale:
            gray_observation = self.gray_process_observation(observation)
            if self.autoencoder:
                gray_observation = self.autoencoder.predict(
                    gray_observation[None, :])[0]
            return gray_observation, observation
        else:
            return self._preprocess_observation(observation), observation

    def step(self, action):
        #if self._done:
        #    return self.reset()

        if action == 0:  # forward
            action = [1, 0, 0, 0]
        elif action == 1:  # rotate camera left
            action = [0, 1, 0, 0]
        elif action == 2:  # rotate camera right
            action = [0, 2, 0, 0]
        elif action == 3:  # jump forward
            action = [1, 0, 1, 0]
        # elif action == 5:
        #     action = [2, 0, 0, 0]
        # elif action == 6:
        #     action = [0, 0, 0, 1]
        # elif action == 7:
        #     action = [0, 0, 0, 2]

        observation, reward, done, info = self._obstacle_tower_env.step(action)
        observation, key, time = observation
        self._done = done

        if self.mobilenet:
            if self.autoencoder:
                observation = self.autoencoder.predict(observation[None, :])[0]
            return (self.image_module(
                self._preprocess_observation(observation)), reward, done,
                    info), observation, key, time
        elif self.gray_scale:
            gray_observation = self.gray_process_observation(observation)
            if self.autoencoder:
                gray_observation = self.autoencoder.predict(
                    gray_observation[None, :])[0]
            return (gray_observation, reward, done, info), observation
        else:
            return (self._preprocess_observation(observation), reward, done,
                    info), observation

    def close(self):
        self._obstacle_tower_env.close()

    def floor(self, floor):
        self._obstacle_tower_env.floor(floor)
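
A minimal usage sketch for the wrapper above, assuming the OBS_TOWER_PATH environment variable points at the Obstacle Tower binary (as in Example #1) and using the default (non-MobileNet, non-grayscale) observation path; the return shapes follow reset() and step() above, and the random action choice is only for illustration.

import os
import random

env = WrappedObstacleTowerEnv(os.environ['OBS_TOWER_PATH'], worker_id=1, floor=2)
preprocessed, raw_image = env.reset()      # default path: (224x224 array, raw frame)
done = False
while not done:
    action = random.randrange(4)           # 0: forward, 1: camera left, 2: camera right, 3: jump forward
    (preprocessed, reward, done, info), raw_image = env.step(action)
env.close()
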
Example #7
#!/usr/bin/env python3

from obstacle_tower_env import ObstacleTowerEnv
from matplotlib import pyplot as plt

ENV_PATH = './obstacle-tower-challenge/ObstacleTower/obstacletower'
env = ObstacleTowerEnv(ENV_PATH, retro=False, realtime_mode=True)

# Seeds can be chosen from range of 0-100.
env.seed(5)

# Floors can be chosen from range of 0-24.
env.floor(15)

# The environment provided has a MultiDiscrete action space, where the 4 dimensions are:

# 0. Movement (No-Op/Forward/Back)
# 1. Camera Rotation (No-Op/Counter-Clockwise/Clockwise)
# 2. Jump (No-Op/Jump)
# 3. Movement (No-Op/Right/Left)

print('action space', env.action_space)

# The observation space provided includes a 168x168 image (the camera from the simulation)
# as well as the number of keys held by the agent (0-5) and the amount of time remaining.

print('observation space', env.observation_space)

# Interacting with the environment

obs = env.reset()
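
As a short continuation sketch (not part of the original snippet), one step with a hand-built MultiDiscrete action vector shows how the four dimensions and the observation tuple described above fit together; the specific action values are arbitrary.

# [Movement, Camera Rotation, Jump, Right/Left] -- here: move forward while jumping
obs, reward, done, info = env.step([1, 0, 1, 0])
image, keys, time_remaining = obs   # 168x168 camera image, keys held (0-5), time remaining
print('reward', reward, 'keys', keys, 'time remaining', time_remaining)
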
Example #8
class WrappedObstacleTowerEnv():

    def __init__(
        self,
        environment_filename=None,
        docker_training=False,
        worker_id=0,
        retro=False,
        timeout_wait=3000,
        realtime_mode=False,
        num_actions=3,
        stack_size=4,
        mobilenet=False,
        gray_scale=False,
        floor=0,
        visual_theme=0
        ):
        '''
        Arguments:
          environment_filename: The file path to the Unity executable.  Does not require the extension.
          docker_training: Whether this is running within a docker environment and should use a virtual
            frame buffer (xvfb).
          worker_id: The index of the worker in the case where multiple environments are running.  Each
            environment reserves port (5005 + worker_id) for communication with the Unity executable.
          retro: Resize visual observation to 84x84 (int8) and flattens action space.
          timeout_wait: Time for python interface to wait for environment to connect.
          realtime_mode: Whether to render the environment window image and run environment at realtime.
        '''

        self._obstacle_tower_env = ObstacleTowerEnv(environment_filename,
                                                    docker_training,
                                                    worker_id,
                                                    retro,
                                                    timeout_wait,
                                                    realtime_mode)
        if floor != 0:
            self._obstacle_tower_env.floor(floor)
        self.start_floor = floor
        self.current_floor = floor

        self.mobilenet = mobilenet
        self.gray_scale = gray_scale
        self.retro = retro
        if mobilenet:
            self.state_size = [1280]
        elif gray_scale:
            self.state_size = [84, 84, 1]
        elif retro:
            self.state_size = [84, 84, 3]
        else:
            self.state_size = [168, 168, 3]

        self.stack_size = stack_size
        self.stack = [np.random.random(self.state_size).astype(np.float32) for _ in range(self.stack_size)]
        self.total_reward = 0
        self.current_reward = 0
        self.max_floor = 25
        self.visual_theme = visual_theme

        self.id = worker_id

    def gray_preprocess_observation(self, observation):
        '''
        Re-sizes obs to 84x84 and compresses to grayscale
        '''
        observation = (observation * 255).astype(np.uint8)
        obs_image = Image.fromarray(observation)
        obs_image = obs_image.resize((84, 84), Image.NEAREST)
        gray_observation = np.mean(np.array(obs_image),axis=-1,keepdims=True)
        return gray_observation / 255

    def mobile_preprocess_observation(self, observation):
        """
        Re-sizes obs to 224x224 for mobilenet
        """
        observation = (observation * 255).astype(np.uint8)
        obs_image = Image.fromarray(observation)
        obs_image = obs_image.resize((224, 224), Image.NEAREST)
        return self.mobilenet(np.array(obs_image))

    def reset(self):
        # Reset env, stack and floor
        # (We save state as an attribute so child objects can access it)
        config = {"total-floors": 15}
        self.state = self._obstacle_tower_env.reset(config)
        self.state, reward, done, info = self._obstacle_tower_env.step(18)
        self.current_floor = self.start_floor
        self.stack = [np.random.random(self.state_size).astype(np.float32) for _ in range(self.stack_size)]
        self.total_reward = 0
        self.current_reward = 0

        # Preprocess current obs and add to stack
        if self.retro:
            observation = (self.state / 255).astype(np.float32)
        else:
            observation, key, time = self.state

        if self.mobilenet:
            observation = self.mobile_preprocess_observation(observation)
        elif self.gray_scale:
            observation = self.gray_preprocess_observation(observation)

        self.stack = self.stack[1:] + [observation]

        # Build our state (MUST BE A TUPLE)
        #one_hot_floor = tf.one_hot(self.current_floor, self.max_floor).numpy()
        one_hot_floor = np.zeros(self.max_floor)
        one_hot_floor[self.current_floor] += 1
        floor_data = np.append(one_hot_floor, self.current_reward).astype(np.float32)
        stacked_state = np.concatenate(self.stack, axis=-1).astype(np.float32)
        if self.retro is True:
            ret_state = (stacked_state, floor_data)
        else:
            # Clip time to 2000, then normalize
            time = (2000. if time > 2000 else time) / 2000.
            key_time_data = np.array([key, time]).astype(np.float32)
            #key_time_data = np.array([key]).astype(np.float32)
            ret_state = (stacked_state, floor_data, key_time_data)

        return ret_state, info

    def step(self, action):
        # Convert int action to vector required by the env
        if self.retro:
            if action == 0: # forward
                action = 18
            elif action == 1: # rotate camera left
                action = 24
            elif action == 2: # rotate camera right
                action = 30
            elif action == 3: # jump forward
                action = 21
            elif action == 4:
                action = 6
            elif action == 5:
                action = 12
        else:
            if action == 0: # forward
                action = [1, 0, 0, 0]
            elif action == 1: # rotate camera left
                action = [1, 1, 0, 0]
            elif action == 2: # rotate camera right
                action = [1, 2, 0, 0]
            elif action == 3: # jump forward
                action = [1, 0, 1, 0]

        # Take the step and record data
        # (We save state as an attribute so child objects can access it)
        self.state, reward, done, info = self._obstacle_tower_env.step(action)

        # Keep track of current floor reward and total reward
        if reward >= 0.95:
            self.current_floor += 1
            self.current_reward = 0
            done = True
        else:
            self.current_reward += reward
        self.total_reward += reward
        
        if (done and reward < 0.95) or self.current_floor == 15:
            # Save info and reset when an episode ends
            info["episode_info"] = {"floor": self.current_floor, "total_reward": self.total_reward}
            ret_state, _ = self.reset()
        else:
            # Preprocess current obs and add to stack
            if self.retro:
                observation = (self.state / 255).astype(np.float32)
            else:
                observation, key, time = self.state

            if self.mobilenet:
                observation = self.mobile_preprocess_observation(observation)
            elif self.gray_scale:
                observation = self.gray_preprocess_observation(observation)

            self.stack = self.stack[1:] + [observation]

            # Build our state (MUST BE A TUPLE)
            #one_hot_floor = tf.one_hot(self.current_floor, self.max_floor).numpy()
            one_hot_floor = np.zeros(self.max_floor)
            one_hot_floor[self.current_floor] += 1
            floor_data = np.append(one_hot_floor, self.current_reward).astype(np.float32)
            stacked_state = np.concatenate(self.stack, axis=-1).astype(np.float32)
            if self.retro is True:
                ret_state = (stacked_state, floor_data)
            else:
                # Clip time to 2000, then normalize
                time = (2000. if time > 2000 else time) / 2000.
                key_time_data = np.array([key, time]).astype(np.float32)
                #key_time_data = np.array([key]).astype(np.float32)
                ret_state = (stacked_state, floor_data, key_time_data)

        return ret_state, reward, done, info

    def close(self):
        self._obstacle_tower_env.close()

    def floor(self, floor):
        self._obstacle_tower_env.floor(floor)
        self.start_floor = floor
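
A minimal usage sketch for this second wrapper, in retro mode, again assuming OBS_TOWER_PATH points at the Obstacle Tower binary; the tuple shapes follow reset() and step() above, and the constant forward action is only for illustration.

import os

env = WrappedObstacleTowerEnv(os.environ['OBS_TOWER_PATH'], worker_id=2, retro=True)
state, info = env.reset()              # (stacked 84x84 frames, one-hot floor + current reward)
stacked_frames, floor_data = state
done = False
while not done:
    # integer actions 0-5 map to the flattened env actions listed in step() above
    state, reward, done, info = env.step(0)
env.close()
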