# NOTE: the imports below are assumed for this MiniGrid-based code; the
# project-local helpers (MiniGridSimple, Snake) are not shown here and their
# import paths may differ in the actual repository.
import random
from enum import IntEnum
from itertools import product
from typing import List, Optional, Tuple

import numpy as np
from gym import spaces
from gym_minigrid.minigrid import Goal, Grid, Lava, MiniGridEnv, Wall


class SingleTMaze(MiniGridEnv):
    is_double = False
    reward_values = dict(goal=1, fake_goal=0.1)
    view_size: Optional[int] = None

    def __init__(self, corridor_length=3, reward_position=0, max_steps=None,
                 is_double=False, view_size=None, max_corridor_length=None):
        if max_corridor_length is None:
            max_corridor_length = corridor_length
        self.max_corridor_length = max_corridor_length
        self.view_size = view_size if view_size is not None else 7
        self.is_double = is_double
        self.reward_position = reward_position
        self.corridor_length = corridor_length
        assert corridor_length > 0

        if max_steps is None:
            max_steps = 4 + 4 * corridor_length

        super().__init__(
            grid_size=3 + 2 * self.max_corridor_length,
            max_steps=max_steps,
            see_through_walls=True,  # True for maximum performance
            agent_view_size=self.view_size,
        )
        self.reward_range = (min(self.reward_values["fake_goal"], 0),
                             self.reward_values["goal"])

    @property
    def mission(self):
        goals = ["UPPER LEFT", "UPPER RIGHT", "LOWER RIGHT", "LOWER LEFT"]
        return f'Goal is {goals[self.reward_position]}'

    def _gen_grid(self, width, height):
        # Create an empty grid
        self.grid = Grid(width, height)

        # Place the agent at the center of the maze, facing up
        self.start_pos = (int(width / 2), int(height / 2))
        self.start_dir = 3

        # Create walls
        for x in range(0, width):
            for y in range(0, height):
                self.grid.set(x, y, Wall())

        # Create paths
        if self.is_double:
            for y in range(height // 2 - self.corridor_length,
                           height // 2 + self.corridor_length + 1):
                self.grid.set(width // 2, y, None)
            for x in range(width // 2 - self.corridor_length,
                           width // 2 + self.corridor_length + 1):
                self.grid.set(x, height // 2 - self.corridor_length, None)
                self.grid.set(x, height // 2 + self.corridor_length, None)
        else:
            for y in range(height // 2 - self.corridor_length, height // 2 + 1):
                self.grid.set(width // 2, y, None)
            for x in range(width // 2 - self.corridor_length,
                           width // 2 + self.corridor_length + 1):
                self.grid.set(x, height // 2 - self.corridor_length, None)

        # Create rewards
        reward_positions = self._reward_positions(width, height)
        self._gen_rewards(reward_positions)

    def _reward_positions(self, width, height):
        reward_positions = [
            (width // 2 - self.corridor_length, height // 2 - self.corridor_length),
            (width // 2 + self.corridor_length, height // 2 - self.corridor_length),
            (width // 2 + self.corridor_length, height // 2 + self.corridor_length),
            (width // 2 - self.corridor_length, height // 2 + self.corridor_length),
        ]
        if not self.is_double:
            reward_positions = reward_positions[:2]
        return reward_positions

    def _reward(self):
        min_steps = 1 + 2 * self.corridor_length
        if self.is_double and self.reward_position > 1:
            min_steps += 2
        redundant_steps = max(0, self.step_count - min_steps)
        max_steps = self.max_steps - min_steps + 1

        cell = self.grid.get(self.agent_pos[0], self.agent_pos[1])
        max_reward = self.reward_values["fake_goal"]
        if hasattr(cell, "is_goal") and cell.is_goal:
            max_reward = self.reward_values["goal"]

        # Scale the reward down linearly with the number of redundant steps
        return min(max_reward, max_reward * (1 - min(1, redundant_steps / max_steps)))

    def _gen_rewards(self, rewards_pos: List[Tuple[int, int]]):
        for i, (x, y) in enumerate(rewards_pos):
            g = Goal()
            self.grid.set(x, y, g)
            g.is_goal = False
            if self.reward_position == i % len(rewards_pos):
                g.is_goal = True

    def render(self, mode='human', close=False, **kwargs):
        # Temporarily recolor the true goal so it stands out in the rendering
        reward_positions = self._reward_positions(width=self.width, height=self.height)
        goal = self.grid.get(*reward_positions[self.reward_position])
        assert goal.is_goal
        start_color = goal.color
        goal.color = 'blue'
        ret = super().render(mode, close, **kwargs)
        goal.color = start_color
        return ret
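# --- Usage sketch (illustrative, not part of the original source) ---
# A minimal random rollout of SingleTMaze, assuming the standard gym-style
# MiniGridEnv interface (reset() -> obs, step() -> obs, reward, done, info).
# The episode ends when a goal cell is reached or max_steps runs out.
def _demo_single_tmaze():
    env = SingleTMaze(corridor_length=3, reward_position=0)
    obs = env.reset()
    done, total_reward = False, 0.0
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        total_reward += reward
    print(env.mission, "return:", total_reward)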
class NineRoomsEnv(MiniGridSimple):
    # Only 4 actions needed: right, down, left and up
    class NineRoomsCardinalActions(IntEnum):
        # Cardinal movement
        right = 0
        down = 1
        left = 2
        up = 3

        def __len__(self):
            return 4

    def __init__(
        self,
        grid_size=20,
        passage_size=1,
        max_steps=100,
        seed=133,
        rnd_start=0,
        start_state_exclude_rooms=[],
    ):
        self.grid_size = grid_size
        self.passage_size = passage_size
        self._goal_default_pos = (1, 1)

        # Set to 1 if the agent is to be randomly spawned
        self.rnd_start = rnd_start

        # If self.rnd_start == 1, don't spawn in these rooms
        self.start_state_exclude_rooms = start_state_exclude_rooms

        super().__init__(grid_size=grid_size, max_steps=max_steps, seed=seed,
                         see_through_walls=False)

        self.nActions = len(NineRoomsEnv.NineRoomsCardinalActions)

        # Set the action and observation spaces
        self.actions = NineRoomsEnv.NineRoomsCardinalActions
        self.action_space = spaces.Discrete(self.nActions)

        self.max_cells = (grid_size - 1) * (grid_size - 1)
        # Change the observation space to return the position in the grid
        self.observation_space = spaces.Tuple(
            [spaces.Discrete(grid_size), spaces.Discrete(grid_size)])
        self.observation_size = self.grid_size * self.grid_size
        self.observation_shape = (self.observation_size, )

        self.T = max_steps

    @property
    def category(self):
        # [TODO] Make sure this doesn't break after self.agent_pos is changed to numpy.ndarray
        return self.cell_cat_map[self.agent_pos]

    def reward(self):
        # 1 when the goal state is reached, 0 otherwise
        return 1 if self.success else 0

    def _gen_grid(self, width, height, val=False, seen=True):
        # Create the grid
        self.grid = Grid(width, height)

        # Generate surrounding walls
        self.grid.horz_wall(0, 0)
        self.grid.horz_wall(0, height - 1)
        self.grid.vert_wall(0, 0)
        self.grid.vert_wall(width - 1, 0)

        # Place horizontal walls through the grid
        self.grid.horz_wall(0, height // 3)
        self.grid.horz_wall(0, (2 * height) // 3)

        # Place vertical walls through the grid
        self.grid.vert_wall(width // 3, 0)
        self.grid.vert_wall((2 * width) // 3, 0)

        # Create passages
        passage_anchors = [(width // 3, height // 3),
                           (width // 3, (2 * height) // 3),
                           ((2 * width) // 3, height // 3),
                           ((2 * width) // 3, (2 * height) // 3)]
        passage_cells = []
        for anchor in passage_anchors:
            for delta in range(-1 * self.passage_size, self.passage_size + 1):
                passage_cells.append((anchor[0] + delta, anchor[1]))
                passage_cells.append((anchor[0], anchor[1] + delta))

        for cell in passage_cells:
            self.grid.set(*cell, None)

        # Even during validation, the start state distribution
        # should be the same as during training
        if not self.rnd_start:
            self._agent_default_pos = ((width - 2) // 2, (height - 2) // 2)
        else:
            self._agent_default_pos = None

        # Place the agent at the center
        if self._agent_default_pos is not None:
            self.start_pos = self._agent_default_pos
            self.grid.set(*self._agent_default_pos, None)
            self.start_dir = self._rand_int(0, 4)  # Agent direction doesn't matter
        else:
            if len(self.start_state_exclude_rooms) == 0:
                self.place_agent()
            else:
                valid_start_pos = []
                if seen:
                    exclude_from = self.start_state_exclude_rooms
                else:
                    exclude_from = [
                        x for x in range(1, 10)
                        if x not in self.start_state_exclude_rooms
                    ]
                for room in range(1, 10):
                    if room in exclude_from:
                        continue
                    # Ignore that there are walls for now; handle that with rejection sampling

                    # Get x-coordinates of allowed cells
                    valid_x = []
                    if room % 3 == 1:
                        valid_x = list(range(1, width // 3))
                    elif room % 3 == 2:
                        valid_x = list(range(width // 3 + 1, (2 * width) // 3))
                    else:
                        valid_x = list(range((2 * width) // 3 + 1, width - 1))

                    # Get y-coordinates of allowed cells
                    valid_y = []
                    if (room - 1) // 3 == 0:
                        valid_y = list(range(1, height // 3))
                    elif (room - 1) // 3 == 1:
                        valid_y = list(range(height // 3 + 1, (2 * height) // 3))
                    else:
                        valid_y = list(range((2 * height) // 3 + 1, height - 1))

                    room_cells = list(product(valid_x, valid_y))
                    valid_start_pos += room_cells

                # Make sure the start position doesn't conflict with other cells
                while True:
                    _start_pos = valid_start_pos[np.random.choice(len(valid_start_pos))]
                    row = _start_pos[1]
                    col = _start_pos[0]
                    cell = self.grid.get(row, col)
                    if cell is None or cell.can_overlap():
                        break

                self.start_pos = (col, row)
                self.start_dir = self._rand_int(0, 4)  # Agent direction doesn't matter

        goal = Goal()
        self.grid.set(*self._goal_default_pos, goal)
        goal.init_pos = goal.curr_pos = self._goal_default_pos

        self.mission = goal.init_pos

    def reset(self, val=False, seen=True):
        obs, info = super().reset(val=val, seen=seen)

        # Add state feature to obs
        state_feat = self._encode_state(obs['agent_pos'])
        obs.update(dict(state_feat=state_feat))

        return obs, info

    def step(self, action):
        self.step_count += 1

        # The reward depends only on the resulting state, not on the action taken.
        if not self.done:
            # Check if currently at the goal state
            if self.agent_pos == self.mission:
                # No penalty, episode done
                self.done = True
                self.success = True
            else:
                # Cardinal movement
                if action in self.move_actions:
                    move_pos = self.around_pos(action)
                    fwd_cell = self.grid.get(*move_pos)
                    self.agent_dir = (action - 1) % 4
                    if fwd_cell is None or fwd_cell.can_overlap() or self.is_goal(move_pos):
                        self.agent_pos = move_pos
                else:
                    raise ValueError("Invalid Action: {}".format(action))

        reward = self.reward()

        if self.step_count >= self.max_steps - 1:
            self.done = True

        obs = self.gen_obs()

        # Add state features to the observation
        state_feat = self._encode_state(obs['agent_pos'])
        obs.update(dict(state_feat=state_feat))

        info = {
            'done': self.done,
            'agent_pos': np.array(self.agent_pos),
        }

        if self.render_rgb:
            info['rgb_grid'] = self.render(mode='rgb_array')

        if self.done:
            info.update({
                'image': self.encode_grid(),
                'success': self.success,
                'agent_pos': self.agent_pos,
            })

        return obs, reward, self.done, info

    def _encode_state(self, state):
        """
        Encode the state to generate the observation.
        """
        feat = np.ones(self.width * self.height, dtype=float)
        curr_x, curr_y = state[1], state[0]
        curr_pos = curr_y * self.width + curr_x
        feat[curr_pos:] = 0

        return feat
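# --- Usage sketch (illustrative, not part of the original source) ---
# Shows the intended train/eval split for random starts: with rnd_start=1 and
# start_state_exclude_rooms set, reset(seen=True) samples starts outside the
# excluded rooms, while reset(seen=False) samples starts only inside them. The
# MiniGridSimple base class is assumed to forward the val/seen flags to _gen_grid.
def _demo_nine_rooms():
    env = NineRoomsEnv(grid_size=20, rnd_start=1, start_state_exclude_rooms=[3, 6, 9])
    obs, info = env.reset(seen=True)   # start outside rooms 3, 6 and 9
    obs, info = env.reset(seen=False)  # start only inside rooms 3, 6 and 9
    print(obs['state_feat'].shape)     # (grid_size * grid_size,)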
class SnakeEnv(MiniGridEnv):
    """
    Empty grid environment, no obstacles, sparse reward.
    """

    # Enumeration of possible actions
    class Actions(IntEnum):
        # Turn left, turn right, move forward
        left = 0
        right = 1
        forward = 2

    def __init__(self, size=9):
        super().__init__(grid_size=size, max_steps=None, see_through_walls=True)

        self.actions = SnakeEnv.Actions
        self.action_space = spaces.Discrete(len(self.actions))

    def spawn_new_food(self):
        # Pick a random empty cell (excluding the agent's cell) and place food there
        empties = [(i, j) for i in range(self.grid.height)
                   for j in range(self.grid.width)
                   if self.grid.get(i, j) is None
                   and (i, j) != tuple(self.agent_pos)]
        self.grid.set(*random.choice(empties), Goal())

    def _gen_grid(self, width, height):
        # Create an empty grid surrounded by walls
        self.grid = Grid(width, height)
        self.grid.wall_rect(0, 0, width, height)

        # self.start_pos = (2, 2)
        yl, xl, _ = self.observation_space.spaces['image'].shape
        self.start_pos = (random.randint(2, yl - 2), random.randint(2, xl - 2))
        self.agent_pos = self.start_pos  # TODO: the env should not hold agent state like this
        self.start_dir = random.randint(0, 3)
        self.agent_dir = self.start_dir

        # The snake starts as a head plus one body cell behind it
        self.snake = Snake([self.start_pos, tuple(self.start_pos - self.dir_vec)])
        for pos in self.snake.body:
            self.grid.set(*pos, Lava())

        self.spawn_new_food()
        self.mission = None

    def reset(self):
        return super().reset()

    def step(self, action):
        self.step_count += 1
        done = False

        if action == self.actions.left:
            self.agent_dir = (self.agent_dir - 1) % 4
        elif action == self.actions.right:
            self.agent_dir = (self.agent_dir + 1) % 4
        elif action == self.actions.forward:
            pass
        else:
            assert False, "unknown action: %d" % action

        fwd_pos = self.agent_pos + self.dir_vec
        fwd_cell = self.grid.get(*fwd_pos)

        if fwd_cell is None:
            # Move into an empty cell: advance the head and drop the tail
            self.grid.set(*self.agent_pos, Lava())
            self.snake.grow_head(*fwd_pos)
            self.grid.set(*self.snake.rm_tail(), None)
            self.agent_pos = fwd_pos
            reward = -0.001
        elif fwd_cell.type == 'goal':
            # Eat the food: the snake grows and new food is spawned
            self.grid.set(*self.agent_pos, Lava())
            self.snake.grow_head(*fwd_pos)
            self.agent_pos = fwd_pos
            self.spawn_new_food()
            reward = 1.0
        elif fwd_cell.type == 'lava' or fwd_cell.type == 'wall':
            # Ran into the snake's own body or a wall
            reward = -1.0
            done = True
        else:
            assert False

        if self.step_count == 1 and done:
            assert False

        obs = self.gen_obs()

        # There must always be exactly one piece of food on the grid
        assert any(
            isinstance(self.grid.get(i, j), Goal)
            for i in range(self.grid.height)
            for j in range(self.grid.width)
        )

        return obs, reward, done, {}
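# --- Usage sketch (illustrative, not part of the original source) ---
# A short random rollout of SnakeEnv. The snake's body is drawn with Lava
# cells and the food with a Goal cell; the episode ends when the snake runs
# into a wall or itself. The Snake helper class is project-local and assumed
# to provide body, grow_head() and rm_tail().
def _demo_snake():
    env = SnakeEnv(size=9)
    obs = env.reset()
    done, ret = False, 0.0
    while not done:
        obs, reward, done, _ = env.step(env.action_space.sample())
        ret += reward
    print("return:", round(ret, 3))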
class Cluttered(MiniGridSimple):
    # Only 4 actions needed: right, down, left and up
    class ClutteredCardinalActions(IntEnum):
        # Cardinal movement
        right = 0
        down = 1
        left = 2
        up = 3

        def __len__(self):
            return 4

    def __init__(
        self,
        grid_size=20,
        num_objects=5,
        obj_size=3,
        max_steps=100,
        seed=133,
        state_encoding="thermal",
        rnd_start=0,
    ):
        self.state_encoding = state_encoding
        self.grid_size = grid_size
        self.num_objects = num_objects
        self.obj_size = obj_size

        # Set to 1 if the agent is to be randomly spawned
        self.rnd_start = rnd_start

        self.grid_seed = 12

        # These defaults only work for a 15x15 grid with 6 obstacles
        # self._goal_default_pos = (6, 10)
        # self._goal_default_pos = (self.grid_size - 2, self.grid_size - 2)
        self._goal_default_pos = (7, 12)

        # This is used for some of the experiments.
        self._agent_default_pos = (7, 6)

        super().__init__(grid_size=grid_size, max_steps=max_steps, seed=seed,
                         see_through_walls=False)

        self.nActions = len(Cluttered.ClutteredCardinalActions)

        # Set the action and observation spaces
        self.actions = Cluttered.ClutteredCardinalActions
        self.action_space = spaces.Discrete(self.nActions)

        self.max_cells = (grid_size - 1) * (grid_size - 1)
        # Change the observation space to return the position in the grid
        self.observation_space = spaces.Tuple(
            [spaces.Discrete(grid_size), spaces.Discrete(grid_size)])
        self.observation_size = self.grid_size * self.grid_size
        self.observation_shape = (self.observation_size, )

        self.T = max_steps

    def reward(self):
        # -1/T for every step until the goal state is reached
        # return 0 if self.success else -1
        return 0 if self.success else -1 / self.T

    def _gen_grid(self, width, height, val=False, seen=True):
        assert width >= 10 and height >= 10, "Environment too small to place objects"

        # Create the grid
        self.grid = Grid(width, height)

        # Generate surrounding walls
        self.grid.horz_wall(0, 0)
        self.grid.horz_wall(0, height - 1)
        self.grid.vert_wall(0, 0)
        self.grid.vert_wall(width - 1, 0)

        np.random.seed(self.grid_seed)

        for obj_idx in range(self.num_objects):
            while True:
                c_x = np.random.choice(list(range(2, self.grid_size - 3)))
                c_y = np.random.choice(list(range(2, self.grid_size - 3)))
                # obj_size = np.random.choice(list(range(1, self.obj_size + 1)))
                obj_size = self.obj_size

                if obj_size == 3:
                    cells = list(product([c_x - 1, c_x, c_x + 1],
                                         [c_y - 1, c_y, c_y + 1]))
                elif obj_size == 2:
                    cells = list(product([c_x, c_x + 1], [c_y, c_y + 1]))
                elif obj_size == 1:
                    cells = list(product([c_x], [c_y]))
                else:
                    raise ValueError

                valid = True
                for cell in cells:
                    cell = self.grid.get(cell[0], cell[1])
                    if not (cell is None or cell.can_overlap()):
                        valid = False
                        break

                if valid:
                    for cell in cells:
                        self.grid.set(*cell, Wall())
                    break

        # Set the start position and the goal position depending on where the obstacles are
        goal = Goal()
        # [NOTE]: This is a hack; add an option to set the goal location from arguments.
        self.grid.set(*self._goal_default_pos, goal)
        goal.init_pos = goal.curr_pos = self._goal_default_pos

        self.mission = goal.init_pos

        self.start_pos = self._agent_default_pos

    def reset(self, val=False, seen=True):
        obs, info = super().reset(val=val, seen=seen)

        # Add state feature to obs
        state_feat = self._encode_state(obs['agent_pos'])
        obs.update(dict(state_feat=state_feat))

        return obs, info

    def step(self, action):
        self.step_count += 1

        # The reward depends only on the resulting state, not on the action taken.
        if not self.done:
            # Check if currently at the goal state
            if self.agent_pos == self.mission:
                # No penalty, episode done
                self.done = True
                self.success = True
            else:
                # Cardinal movement
                if action in self.move_actions:
                    move_pos = self.around_pos(action)
                    fwd_cell = self.grid.get(*move_pos)
                    self.agent_dir = (action - 1) % 4
                    if fwd_cell is None or fwd_cell.can_overlap() or self.is_goal(move_pos):
                        self.agent_pos = move_pos
                else:
                    raise ValueError("Invalid Action: {}".format(action))

        reward = self.reward()

        if self.step_count >= self.max_steps - 1:
            self.done = True

        obs = self.gen_obs()

        # Add state features to the observation
        state_feat = self._encode_state(obs['agent_pos'])
        obs.update(dict(state_feat=state_feat))

        info = {
            'done': self.done,
            'agent_pos': np.array(self.agent_pos),
        }

        if self.render_rgb:
            info['rgb_grid'] = self.render(mode='rgb_array')

        if self.done:
            info.update({
                'image': self.encode_grid(),
                'success': self.success,
                'agent_pos': self.agent_pos,
            })

        return obs, reward, self.done, info

    def _encode_state(self, state):
        """
        Encode the state to generate the observation.
        """
        feat = np.ones(self.width * self.height, dtype=float)
        curr_x, curr_y = state[1], state[0]
        curr_pos = curr_y * self.width + curr_x

        if self.state_encoding == "thermal":
            feat[curr_pos:] = 0
        elif self.state_encoding == "one-hot":
            feat[:] = 0
            feat[curr_pos] = 1

        return feat
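# --- Usage sketch (illustrative, not part of the original source) ---
# Rolls out Cluttered with random cardinal actions; move_actions and
# around_pos are assumed to come from the project's MiniGridSimple base class.
# The per-step reward is -1/T until the goal is reached, so an episode that
# never reaches the goal accumulates a return of roughly -1.
def _demo_cluttered():
    env = Cluttered(grid_size=15, num_objects=6, obj_size=3, max_steps=100)
    obs, info = env.reset()
    done, ret = False, 0.0
    while not done:
        obs, reward, done, info = env.step(env.action_space.sample())
        ret += reward
    print("success:", info['success'], "return:", round(ret, 3))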
class EmptyGridWorld(MiniGridSimple):
    # Only 4 actions needed: right, down, left and up
    class CardnalActions(IntEnum):
        # Cardinal movement
        right = 0
        down = 1
        left = 2
        up = 3

        def __len__(self):
            return 4

    def __init__(
        self,
        grid_size=20,
        max_steps=100,
        state_encoding="thermal",
        seed=133,
        rnd_start=0,
    ):
        self.state_encoding = state_encoding
        self.grid_size = grid_size
        self._goal_default_pos = (self.grid_size - 2, 1)

        # Set to 1 if the agent is to be randomly spawned
        self.rnd_start = rnd_start

        super().__init__(grid_size=grid_size, max_steps=max_steps, seed=seed,
                         see_through_walls=False)

        self.nActions = len(EmptyGridWorld.CardnalActions)

        # Set the action and observation spaces
        self.actions = EmptyGridWorld.CardnalActions
        self.action_space = spaces.Discrete(self.nActions)

        self.max_cells = (grid_size - 1) * (grid_size - 1)
        # Change the observation space to return the position in the grid
        self.observation_space = spaces.Tuple(
            [spaces.Discrete(grid_size), spaces.Discrete(grid_size)])
        self.observation_size = self.grid_size * self.grid_size
        self.observation_shape = (self.observation_size, )

        self.T = max_steps

    @property
    def category(self):
        # [TODO] Make sure this doesn't break after self.agent_pos is changed to numpy.ndarray
        return self.cell_cat_map[self.agent_pos]

    def reward(self):
        # -1/T for every step; 1 when the goal state is reached
        return 1 if self.success else -1 / self.T

    def _gen_grid(self, width, height, val=False, seen=True):
        # Create the grid
        self.grid = Grid(width, height)

        # Generate surrounding walls
        self.grid.horz_wall(0, 0)
        self.grid.horz_wall(0, height - 1)
        self.grid.vert_wall(0, 0)
        self.grid.vert_wall(width - 1, 0)

        # Even during validation, the start state distribution
        # should be the same as during training
        if not self.rnd_start:
            self._agent_default_pos = (1, self.grid_size - 2)
        else:
            self._agent_default_pos = None

        # Place the agent at its default start position
        if self._agent_default_pos is not None:
            self.start_pos = self._agent_default_pos
            self.grid.set(*self._agent_default_pos, None)
            self.start_dir = self._rand_int(0, 4)  # Agent direction doesn't matter

        goal = Goal()
        self.grid.set(*self._goal_default_pos, goal)
        goal.init_pos = goal.curr_pos = self._goal_default_pos

        self.mission = goal.init_pos

    def reset(self, val=False, seen=True):
        obs, info = super().reset(val=val, seen=seen)

        # Add state feature to obs
        state_feat = self._encode_state(obs['agent_pos'])
        obs.update(dict(state_feat=state_feat))

        return obs, info

    def step(self, action):
        self.step_count += 1

        # The reward depends only on the resulting state, not on the action taken.
        if not self.done:
            # Check if currently at the goal state
            if self.agent_pos == self.mission:
                # No penalty, episode done
                self.done = True
                self.success = True
            else:
                # Cardinal movement
                if action in self.move_actions:
                    move_pos = self.around_pos(action)
                    fwd_cell = self.grid.get(*move_pos)
                    self.agent_dir = (action - 1) % 4
                    if fwd_cell is None or fwd_cell.can_overlap() or self.is_goal(move_pos):
                        self.agent_pos = move_pos
                else:
                    raise ValueError("Invalid Action: {}".format(action))

        reward = self.reward()

        if self.step_count >= self.max_steps - 1:
            self.done = True

        obs = self.gen_obs()

        # Add state features to the observation
        state_feat = self._encode_state(obs['agent_pos'])
        obs.update(dict(state_feat=state_feat))

        info = {
            'done': self.done,
            'agent_pos': np.array(self.agent_pos),
        }

        if self.render_rgb:
            info['rgb_grid'] = self.render(mode='rgb_array')

        if self.done:
            info.update({
                'image': self.encode_grid(),
                'success': self.success,
                'agent_pos': self.agent_pos,
            })

        return obs, reward, self.done, info

    def _encode_state(self, state):
        """
        Encode the state to generate the observation.
        """
        feat = np.ones(self.width * self.height, dtype=float)
        curr_x, curr_y = state[0], state[1]
        curr_pos = curr_y * self.width + curr_x

        if self.state_encoding == "thermal":
            feat[curr_pos:] = 0
        elif self.state_encoding == "one-hot":
            feat[:] = 0
            feat[curr_pos] = 1

        return feat
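# --- State-encoding illustration (not part of the original source) ---
# _encode_state flattens the agent position to a single index and emits either
# a "thermal" code (ones up to the index, zeros afterwards) or a one-hot code.
# A numpy-only sketch of the same idea for a 4x4 grid:
def _demo_state_encoding(width=4, height=4, pos=(1, 2), encoding="thermal"):
    curr_x, curr_y = pos
    idx = curr_y * width + curr_x        # (1, 2) -> index 9
    feat = np.ones(width * height)
    if encoding == "thermal":
        feat[idx:] = 0                   # [1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0]
    elif encoding == "one-hot":
        feat[:] = 0
        feat[idx] = 1                    # single 1 at index 9
    return feat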