def __init__(self,
                 viz=0,
                 field=None,
                 partial=False,
                 radius=2,
                 frame_skip=4,
                 image_shape=(84, 84),
                 mode=None,
                 team_size=1,
                 ai_frame_skip=1,
                 raw_env=soccer_environment.SoccerEnvironment):
        super(SoccerPlayer, self).__init__()

        if team_size > 1 and mode is not None:
            self.mode = mode.split(',')
        else:
            self.mode = [mode]
        self.field = field
        self.partial = partial
        self.viz = viz
        if self.viz:
            self.renderer_options = soccer_renderer.RendererOptions(
                show_display=True, max_fps=10, enable_key_events=True)
        else:
            self.renderer_options = None

        if self.field == 'large':
            map_path = file_util.resolve_path(__file__,
                                              '../data/map/soccer_large.tmx')
        else:
            map_path = None

        self.team_size = team_size
        self.env_options = soccer_environment.SoccerEnvironmentOptions(
            team_size=self.team_size,
            map_path=map_path,
            ai_frame_skip=ai_frame_skip)
        self.env = raw_env(env_options=self.env_options,
                           renderer_options=self.renderer_options)

        self.computer_team_name = self.env.team_names[1]
        self.player_team_name = self.env.team_names[0]

        # Partial observation settings
        if self.partial:
            self.radius = radius
            self.player_agent_index = self.env.get_agent_index(
                self.player_team_name, 0)

        self.actions = self.env.actions
        self.frame_skip = frame_skip
        self.image_shape = image_shape

        self.last_info = {}
        self.agent_actions = ['STAND'] * (self.team_size * 2)
        self.changing_counter = 0
        self.timestep = 0
        self.current_episode_score = StatCounter()
        self.restart_episode()
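
A hedged construction sketch based only on the signature and branches above; the field name 'large' and the comma-separated mode string come straight from this constructor, while the mode values themselves are hypothetical placeholders:

# Two agents per team on the large map, partial observation enabled,
# no on-screen rendering. With team_size > 1, "mode" is split on commas
# into a list of modes.
player = SoccerPlayer(
    viz=0,
    field='large',
    partial=True,
    radius=2,
    frame_skip=4,
    team_size=2,
    mode='DEFENSIVE,OFFENSIVE',  # hypothetical mode names
    ai_frame_skip=1)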
Example #2
    def get_overlays(self):
        """Get the overlay sprites.

        A sprite mapping file is associated with each overlay layer containing
        the sprite positions. If the property "sprite" exists, with the value of
        the file path relative to the map path, the contents of the mapping from
        sprite name to position will be read; Otherwise, an error will be
        raised.

        Returns:
            dict: A mapping from the name to the sprite.
        """
        # Get the tile dimension
        tile_dim = [self.tiled_map.tilewidth, self.tiled_map.tileheight]
        # Get the overlay layer
        overlay_layers = self.layers['overlay']
        # Get all the overlay images
        overlays = {}
        for layer in overlay_layers:
            # Add the overlay images
            if 'sprite' in layer.properties:
                # Build the table by pointing the position to the image
                pos_to_image = {}
                for (px, py, image) in layer.tiles():
                    pos_to_image[(px, py)] = image
                # Get the sprite file path relative to the map file
                path = layer.properties['sprite']
                resolved_path = file_util.resolve_path(self.filename, path)
                # Read the sprite mapping file
                sprite_mapping = file_util.read_yaml(resolved_path)
                # Map each name to its sprite
                for (name, sprite_pos) in sprite_mapping.items():
                    px = sprite_pos['x']
                    py = sprite_pos['y']
                    pos = (px, py)
                    if pos not in pos_to_image:
                        raise KeyError(
                            '{} ({}, {}) is not found in the layer'.format(
                                name, px, py))
                    # Get the image
                    image = pos_to_image[pos]
                    # Create a new overlay sprite
                    overlay_sprite = OverlaySprite(image, pos, tile_dim)
                    # Save the sprite in the overlays, rejecting duplicates
                    if name in overlays:
                        raise RuntimeError(
                            'Duplicate name {} in the sprite file'.format(
                                name))
                    overlays[name] = overlay_sprite
            else:
                raise KeyError(
                    '"sprite" property is required for the layer {} '
                    'to load the overlays'.format(layer.name))
        return overlays
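
The structure of the sprite mapping file is implied by how the positions are read above (the 'x' and 'y' keys per sprite name); a minimal runnable sketch of that structure, with made-up sprite names and coordinates (the file itself is YAML, as the read_yaml call suggests):

import yaml

# Hypothetical contents of the file referenced by the "sprite" property
sprite_yaml = """
BALL:
  x: 10
  y: 7
GOAL_LEFT:
  x: 0
  y: 6
"""
sprite_mapping = yaml.safe_load(sprite_yaml)
assert sprite_mapping['BALL'] == {'x': 10, 'y': 7}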
Example #3
    def get_tile_positions(self):
        """Get the tile positions.

        A tile mapping file can be associated with each layer containing the
        tile types. If the property "tile" exists, with the value of the file
        path relative to the map path, the contents of the mapping from tile
        name to tid (tile ID) will be read; Otherwise, the 2nd mapping will be
        an empty dict.

        Returns:
            dict: 1st mapping is from the layer name to the 2nd dict. 2nd
                mapping is from the name to the tile positions.
        """
        # Get all the layers
        layers = self.layers['all']
        # Build the mapping
        tile_pos = {}
        for layer in layers:
            # Check whether the tile mapping property exists
            if 'tile' in layer.properties:
                # Get the tile file path relative to the map file
                path = layer.properties['tile']
                resolved_path = file_util.resolve_path(self.filename, path)
                # Read the tile file
                tile_name_to_tid = file_util.read_yaml(resolved_path)
                # Build the inverse lookup of the mapping from tile name to tid
                tid_to_tile_name = {
                    v: k
                    for (k, v) in tile_name_to_tid.items()
                }
                # Create the 2nd mapping
                tile_name_to_pos = {}
                # Create the initial lists
                for name in tile_name_to_tid.keys():
                    tile_name_to_pos[name] = []
                # Add the positions
                for (px, py, gid) in layer:
                    # Ignore the empty tile
                    if gid <= 0:
                        continue
                    pos = [px, py]
                    tid = self.tiled_map.tiledgidmap[gid]
                    # Append when the mapping exists
                    if tid in tid_to_tile_name:
                        tile_name = tid_to_tile_name[tid]
                        tile_name_to_pos[tile_name].append(pos)
                tile_pos[layer.name] = tile_name_to_pos
            else:
                tile_pos[layer.name] = {}
        return tile_pos
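
Similarly, the tile mapping file is a flat YAML mapping from tile name to tid; a minimal sketch with made-up names and IDs, showing the inverse lookup built above:

import yaml

# Hypothetical contents of the file referenced by the "tile" property
tile_yaml = """
GRASS: 1
WALL: 2
GOAL: 3
"""
tile_name_to_tid = yaml.safe_load(tile_yaml)
tid_to_tile_name = {v: k for (k, v) in tile_name_to_tid.items()}
assert tid_to_tile_name[2] == 'WALL'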
Example #4
def main():
    # Initialize the random number generator to have consistent results
    random.seed(0)

    # Resolve the map path relative to this file
    map_path = file_util.resolve_path(
        __file__, '../data/map/soccer/soccer_21x14_goal_4.tmx')

    # Create the soccer environment options.
    # "map_path" is specified to use the custom map.
    # "team_size" specifies the number of agents in one team.
    # "ai_frame_skip" controls the frame skip for the AI agents only.
    env_options = soccer_environment.SoccerEnvironmentOptions(
        map_path=map_path, team_size=2, ai_frame_skip=2)

    # Create a soccer environment
    # If you want to render the environment, an optional argument
    # "renderer_options" can be used. For the sample usage, see
    # "sample/renderer.py".
    env = soccer_environment.SoccerEnvironment(env_options=env_options)

    # Run many episodes
    for episode_index in range(20):
        # Print the episode number
        print('')
        print('Episode {}:'.format(episode_index + 1))
        # Reset the environment and get the initial observation. The
        # observation is an instance of "soccer_environment.SoccerObservation".
        observation = env.reset()
        state = observation.state
        action = observation.action
        reward = observation.reward
        next_state = observation.next_state
        # Print the state, action, reward, and next state pair
        print('Initial state:\n({}, {}, {}, {})\n'.format(
            state, action, reward, next_state))
        # Run the episode
        is_running = True
        while is_running:
            # Render the environment. The renderer is lazily loaded on the
            # first call. Skip the call if you don't need the rendering.
            env.render()
            # Get the partially observable screenshot around the first agent
            # with a radius of 1. The returned value is a `numpy.ndarray` in
            # the same format as the return value of `scipy.misc.imread`. The
            # previous `env.render()` call is required for this call to work.
            agent_pos = np.array(env.state.get_agent_pos(0))
            po_screenshot = env.renderer.get_po_screenshot(agent_pos, radius=1)
            # Control only the first agent in each team
            team_agent_index = 0
            for team_name in env.team_names:
                agent_index = env.get_agent_index(team_name, team_agent_index)
                action = random.choice(env.actions)
                env.take_cached_action(agent_index, action)
            # Update the state and get the observation
            observation = env.update_state()
            # Check the terminal state
            if env.state.is_terminal():
                print('Terminal state:\n{}'.format(observation))
                print('Episode {} ends at time step {}'.format(
                    episode_index + 1, env.state.time_step + 1))
                is_running = False

    # Save the last partially observable screenshot
    env.render()
    agent_pos = np.array(env.state.get_agent_pos(0))
    po_screenshot = env.renderer.get_po_screenshot(agent_pos, radius=1)
    screenshot_relative_path = 'screenshot.png'
    screenshot_abs_path = os.path.abspath(screenshot_relative_path)
    scipy.misc.imsave(screenshot_abs_path, po_screenshot)
    print('The last partially observable screenshot is saved to {}'.format(
        screenshot_abs_path))
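
The screenshot is written with scipy.misc.imsave, which has been removed from recent SciPy releases; if the sample is run against a newer SciPy, a drop-in replacement (an addition here, not part of the original sample) is:

import imageio

# imageio accepts the same (path, array) arguments for this use case
imageio.imwrite(screenshot_abs_path, po_screenshot)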
Example #5
def main():
    global group_sizes

    # Create an environment
    env = gym.make('gridworld-v1')

    # Resolve the map path relative to this file
    map_path = file_util.resolve_path(
        __file__, '../data/map/gridworld/gridworld_9x9.tmx')

    # Set the environment options
    env.env_options = options.GridworldOptions(
        map_path=map_path,
        action_space=gym.spaces.Discrete(ACTION_SIZE),
        step_callback=step_callback,
        reset_callback=reset_callback)

    env.renderer_options = renderer.RendererOptions(show_display=False,
                                                    max_fps=60)

    # Load the environment
    env.load()

    # Set the random seed of the environment
    env.seed(0)

    # Run many episodes
    for episode_ind in range(6):
        # Print the episode number
        print('')
        print('Episode {}:'.format(episode_ind + 1))
        # Set the group names and sizes
        group_sizes = [
            1,
            episode_ind,
            1,
        ]
        env.env_options.set_group(GROUP_NAMES, group_sizes)
        # Reset the environment
        state = env.reset()
        # Print the shape of initial state
        print('Shape of initial state:{}'.format(state.shape))
        # Run the episode
        done = False
        timestep = 0
        while not done:
            # Render the environment
            screenshot = env.render()
            # Take random action
            random_action = env.action_space.sample()
            # Update the environment
            next_state, reward, done, _ = env.step(random_action)
            # Transition to the next state
            state = next_state
            timestep += 1
        print('Episode ended. Reward: {}. Timestep: {}'.format(
            reward, timestep))

    # Save the last screenshot
    screenshot_relative_path = 'screenshot.png'
    screenshot_abs_path = os.path.abspath(screenshot_relative_path)
    scipy.misc.imsave(screenshot_abs_path, screenshot)
    print('The last screenshot is saved to {}'.format(screenshot_abs_path))
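
gym.make('gridworld-v1') only works because that id is registered with Gym somewhere in the package; a hedged sketch of what such a registration looks like, where the entry_point module path is a hypothetical placeholder rather than the project's actual one:

from gym.envs.registration import register

register(
    id='gridworld-v1',
    # Hypothetical module path; point this at the real Gridworld env class
    entry_point='your_package.envs:GridworldEnv')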
Example #6
def main():
    # Initialize the random number generator to have consistent results
    random.seed(0)

    # Resolve the map path relative to this file
    map_path = file_util.resolve_path(
        __file__, '../data/map/predator_prey/predator_prey_15x15.tmx')

    # Create the environment options
    object_size = {
        'PREDATOR': 3,
        'PREY': 3,
        'OBSTACLE': 8,
    }
    env_options = predator_prey_environment.PredatorPreyEnvironmentOptions(
        map_path=map_path,
        object_size=object_size,
        po_radius=3,
        ai_frame_skip=2)

    # Create an environment
    env = predator_prey_environment.PredatorPreyEnvironment(
        env_options=env_options)

    # Get the index range of the predators
    predator_index_range = env.get_group_index_range('PREDATOR')
    first_predator_index = range(*predator_index_range)[0]

    # Run many episodes
    for episode_index in range(10):
        # Print the episode number
        print('')
        print('Episode {}:'.format(episode_index + 1))
        # Reset the environment and get the initial observation
        observation = env.reset()
        state = observation.state
        action = observation.action
        reward = observation.reward
        next_state = observation.next_state
        # Print the state
        print('Initial state:\n({}, {}, {}, {})\n'.format(
            state, action, reward, next_state))
        # Run the episode
        is_running = True
        while is_running:
            # Render the environment
            env.render()
            # Get position of the first predator
            pos = np.array(env.state.get_object_pos(first_predator_index))
            # Get the partially observable symbolic view around the first
            # predator with a radius of 2
            po_view = env.state.get_po_symbolic_view(pos, 2)
            # Get the partially observable screenshot around the first
            # predator with a radius of 2
            po_screenshot = env.renderer.get_po_screenshot(pos, 2)
            # Build the action list for the non-obstacle objects (predators
            # and preys)
            actions_wo = [None] * (env.options.object_size['PREDATOR'] +
                                   env.options.object_size['PREY'])
            # Get a random action from the action list
            action = random.choice(env.actions)
            # Set the action of the first predator
            actions_wo[0] = action
            # Update the environment and get observation
            observation = env.step_without_obstacles(actions_wo)
            # Check the terminal state
            if env.state.is_terminal():
                print('Terminal state:\n{}'.format(observation))
                print('Episode {} ends at time step {}'.format(
                    episode_index + 1, env.state.time_step + 1))
                is_running = False

    # Get position of the first predator
    pos = np.array(env.state.get_object_pos(first_predator_index))

    # Print the last partially observable symbolic view
    po_view = env.state.get_po_symbolic_view(pos, 2)
    print(po_view)

    # Save the last partially observable screenshot
    env.render()
    po_screenshot = env.renderer.get_po_screenshot(pos, 2)
    screenshot_relative_path = 'screenshot.png'
    screenshot_abs_path = os.path.abspath(screenshot_relative_path)
    scipy.misc.imsave(screenshot_abs_path, po_screenshot)
    print('The last partially observable screenshot is saved to {}'.format(
        screenshot_abs_path))
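
Near the top of this example, range(*predator_index_range)[0] relies on Python 3 range objects supporting indexing to pick the first index in the half-open range. Assuming get_group_index_range returns a (start, stop) pair, which the star-unpacking implies, the following is equivalent and a bit more direct:

predator_start, _predator_stop = env.get_group_index_range('PREDATOR')
first_predator_index = predator_start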
Example #7
    def test_resolve_path(self):
        path1 = 'dir1/file1'
        path2 = 'dir2/file2'
        expected_path = 'dir1/dir2/file2'
        resolved_path = file_util.resolve_path(path1, path2)
        assert os.path.normpath(expected_path) == resolved_path
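
Every example on this page resolves a data file with file_util.resolve_path(base, relative), and the test above pins down its behavior: the relative path is joined onto the directory containing the base path and the result is normalized. A minimal sketch of a helper with that behavior, assuming plain os.path semantics rather than the library's actual implementation:

import os

def resolve_path(base_path, relative_path):
    # Join relative_path onto the directory containing base_path and
    # normalize, e.g. ('dir1/file1', 'dir2/file2') -> 'dir1/dir2/file2'
    base_dir = os.path.dirname(base_path)
    return os.path.normpath(os.path.join(base_dir, relative_path))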