Example #1
 def test_setstate(self, seed):
     """Checks setState functionality."""
     cfg = config.Config({
         'level': 'tests.symmetric',
         'game_engine_random_seed': seed
     })
     env1 = football_env.FootballEnv(cfg)
     env2 = football_env.FootballEnv(cfg)
     initial_obs = env1.reset()
     env2.reset()
     initial_state = env1.get_state()
     random.seed(seed)
     actions = len(football_action_set.get_action_set(cfg))
     first_action = random.randint(0, actions - 1)
     first_obs, _, _, _ = env1.step(first_action)
     _, _, _, _ = env2.step(first_action)
     step = 0
     limit = 10 if fast_run else 3000
     while step < limit:
         step += 1
         action = random.randint(0, actions - 1)
         if step % 10 == 0:
             env2.set_state(initial_state)
             self.compare_observations(initial_obs, env2.observation())
             env2.step(first_action)
             self.compare_observations(first_obs, env2.observation())
             env2.set_state(env1.get_state())
         self.compare_observations(env1.observation(), env2.observation())
         _, _, done1, _ = env1.step(action)
         _, _, done2, _ = env2.step(action)
         self.assertEqual(done1, done2)
         if done1:
             break
     env1.close()
     env2.close()
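For reference, the save/restore round trip this test exercises reduces to a few calls. A minimal sketch, assuming the same gfootball modules (config, football_env) are importable and the default single-agent setup:

# Minimal get_state/set_state sketch (assumes gfootball is installed).
from gfootball.env import config, football_env

cfg = config.Config({'level': 'academy_empty_goal_close'})
env = football_env.FootballEnv(cfg)
env.reset()
snapshot = env.get_state()   # serialize the full engine state
env.step(0)                  # advance the simulation one step
env.set_state(snapshot)      # rewind to the snapshot
env.close()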
Example #2
def main(_):
    left_players = FLAGS.left_players.split(',') if FLAGS.left_players else ''
    right_players = FLAGS.right_players.split(',') if FLAGS.right_players else ''
    assert not ('agent' in left_players or 'agent' in right_players), (
        'Player type \'agent\' can not be used with play_game. Use tfhub player.')
    cfg = config.Config({
        'action_set': FLAGS.action_set,
        'right_players': right_players,
        'dump_full_episodes': True,
        'left_players': left_players,
        'real_time': FLAGS.real_time,
        'render': True
    })
    if FLAGS.level:
        cfg['level'] = FLAGS.level
    env = football_env.FootballEnv(cfg)
    env.reset()
    try:
        while True:
            _, _, done, _ = env.step(None)
            if done:
                env.reset()
    except KeyboardInterrupt:
        env.write_dump('shutdown')
        exit(1)
Example #3
def main(_):
  with open(FLAGS.trace_file, 'rb') as f:
    replay = six.moves.cPickle.load(f)
  trace = modify_trace(replay)
  fd, temp_path = tempfile.mkstemp(suffix='.dump')
  with tf.gfile.Open(temp_path, 'wb') as f:
    six.moves.cPickle.dump(trace, f)
  assert replay[0]['debug']['frame_cnt'] == 1, (
      'Trace does not start from the beginning of the episode, can not replay')
  cfg = config.Config(replay[0]['debug']['config'])
  player_type = 'replay={}'.format(temp_path)
  cfg['home_players'] = [player_type] * len(cfg['home_players'])
  cfg['away_players'] = [player_type] * len(cfg['away_players'])
  cfg.update({
      'physics_steps_per_frame': int(100 / FLAGS.fps),
      'real_time': False,
      'render': True,
      'tracesdir': '/tmp/dumps',
      'write_video': True
  })
  env = football_env.FootballEnv(cfg)
  env.reset()
  done = False
  try:
    while not done:
      _, _, done, _ = env.step(None)
  except KeyboardInterrupt:
    env.write_dump('shutdown')
    exit(1)
  os.close(fd)
Example #4
 def replay(self, dump, fps=10, config_update=None, directory=None):
     with open(dump, 'rb') as f:
         replay = six.moves.cPickle.load(f)
     trace = self.__modify_trace(replay, fps)
     fd, temp_path = tempfile.mkstemp(suffix='.dump')
     with open(temp_path, 'wb') as f:
         six.moves.cPickle.dump(trace, f)
     assert replay[0]['debug']['frame_cnt'] == 1, (
         'Trace does not start from the beginning of the episode, can not replay'
     )
     cfg = config.Config(replay[0]['debug']['config'])
     cfg['players'] = self.__build_players(temp_path, cfg['players'])
     config_update = dict(config_update or {})  # copy; avoid mutating the caller's dict
     config_update['physics_steps_per_frame'] = int(100 / fps)
     config_update['real_time'] = False
     if 'render' not in config_update:
         config_update['render'] = True
     if directory:
         config_update['tracesdir'] = directory
     config_update['write_video'] = True
     cfg.update(config_update)
     env = football_env.FootballEnv(cfg)
     env.reset()
     done = False
     try:
         while not done:
             _, _, done, _ = env.step(None)
     except KeyboardInterrupt:
         env.write_dump('shutdown')
         exit(1)
     os.close(fd)
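This method has the shape of gfootball's ScriptHelpers.replay. Assuming that context, a hypothetical invocation (the dump path and traces directory are placeholders, not real files):

# Hypothetical usage sketch; the paths below are placeholders.
from gfootball.env import script_helpers

script_helpers.ScriptHelpers().replay(
    dump='/tmp/dumps/some_episode.dump',  # placeholder trace file
    fps=10,
    config_update={'render': False},      # replay headless
    directory='/tmp/replay_traces')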
Example #5
 def test_corner(self, episode, factor, reverse):
     cfg = config.Config({
         'level': 'tests.corner_test',
         'players': ['agent:left_players=1,right_players=1'],
         'episode_number': episode,
         'reverse_team_processing': reverse,
     })
     env = football_env.FootballEnv(cfg)
     o = env.reset()
     done = False
     while not done:
         o, _, done, _ = env.step([
             football_action_set.action_left,
             football_action_set.action_left
         ])
     self.assertAlmostEqual(o[0]['ball'][0], -0.95 * factor, delta=0.1)
     self.assertAlmostEqual(o[0]['ball'][1], 0.4 * factor, delta=0.1)
     self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
     self.assertAlmostEqual(o[0]['right_team'][1][0],
                            -0.95 * factor,
                            delta=0.1)
     self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
     self.assertAlmostEqual(o[0]['left_team'][1][0],
                            -0.9 * factor,
                            delta=0.2)
     env.close()
Example #6
def main(_):
  players = FLAGS.players.split(';') if FLAGS.players else ''
  assert not any('agent' in player for player in players), (
      'Player type \'agent\' can not be used with play_game.')
  cfg = config.Config({
      'action_set': FLAGS.action_set,
      'dump_full_episodes': True,
      'players': players,
      'real_time': FLAGS.real_time,
  })
  if FLAGS.level:
    cfg['level'] = FLAGS.level
  env = football_env.FootballEnv(cfg)
  if FLAGS.render:
    env.render()
  env.reset()
  try:
    while True:
      _, _, done, _ = env.step([])
      if done:
        env.reset()
  except KeyboardInterrupt:
    logging.warning('Game stopped, writing dump...')
    env.write_dump('shutdown')
    exit(1)
Example #7
def main(_):
    cfg = config.Config({
        'action_set': FLAGS.action_set,
        'away_players': FLAGS.away_players.split(',') if FLAGS.away_players else '',
        'dump_full_episodes': True,
        'home_players': FLAGS.home_players.split(',') if FLAGS.home_players else '',
        'real_time': FLAGS.real_time,
        'render': True
    })
    if FLAGS.level:
        cfg['level'] = FLAGS.level
    env = football_env.FootballEnv(cfg)
    env.reset()
    try:
        while True:
            _, _, done, _ = env.step(None)
            if done:
                env.reset()
    except KeyboardInterrupt:
        env.write_dump('shutdown')
        exit(1)
Example #8
def run_scenario(cfg, queue, actions, render=False, validation=True):
  env = football_env.FootballEnv(cfg)
  if render:
    env.render()
  obs = env.reset()
  queue.put(obs)
  if validation:
    env.tracker_setup(0, 999999999999999)
  done = False
  step = 0
  while True:
    if isinstance(actions, Iterable):
      if step >= len(actions):
        break
      action = actions[step]
    else:
      action = actions.get()
      if action is None:
        break
    step += 1
    if isinstance(action, Iterable):
      obs, _, done, _ = env.step(action)
    else:
      obs, _, done, _ = env.step([action, action])
    queue.put(obs)
    if done:
      break
  queue.put(None)
  env.close()
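A hypothetical driver for run_scenario above: it feeds a fixed list of per-step actions through a standard queue and drains observations until the None sentinel. The level name and action value (5, used as "go right" elsewhere in these tests) are illustrative:

# Hypothetical driver; assumes gfootball's config module and the
# run_scenario helper defined above.
from queue import Queue
from gfootball.env import config

cfg = config.Config({'level': 'academy_empty_goal_close'})
q = Queue()
# Each entry is itself a list, so run_scenario forwards it to env.step() as-is.
run_scenario(cfg, q, actions=[[5]] * 10, render=False, validation=False)
while q.get() is not None:   # run_scenario terminates the stream with None
    pass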
Example #9
 def generate_replay(self):
     """Generates replay of an episode."""
     cfg = config.Config()
     left_players = 2
     cfg.update({
         'action_set': 'full',
         'level': 'tests.corner_test',
         'dump_full_episodes': True,
         'players': ['agent:left_players={}'.format(left_players),
                     'bot:right_players=1', 'lazy:right_players=1'],
         'tracesdir': test_tmpdir
     })
     env = football_env.FootballEnv(cfg)
     env.reset()
     actions_cnt = len(football_action_set.get_action_set(cfg))
     done = False
     step = 0
     while not done:
         step += 1
         actions = [(step + x) % actions_cnt for x in range(left_players)]
         _, _, done, _ = env.step(actions)
     env.close()
Example #10
 def test_different_action_formats(self):
   """Verify different action formats are accepted."""
   cfg = config.Config()
   env = football_env.FootballEnv(cfg)
   env.reset()
   env.step(football_action_set.action_right)              # bare action
   env.step([football_action_set.action_right])            # list of actions
   env.step(np.array([football_action_set.action_right]))  # numpy array of actions
   env.step(np.array(football_action_set.action_right))    # zero-dim numpy array
   env.close()
Example #11
 def test_player_order_invariant(self):
   """Checks that environment behaves the same regardless of players order."""
   players = ['agent:right_players=1', 'lazy:left_players=11']
   cfg = config.Config({
       'level': 'tests.11_vs_11_hard_deterministic',
       'players': players
   })
   env = football_env.FootballEnv(cfg)
   actions = len(football_action_set.get_action_set(cfg))
   hash_value1 = compute_hash(env, actions)
   players = [players[1], players[0]]
   cfg = config.Config({
       'level': 'tests.11_vs_11_hard_deterministic',
       'players': players
   })
   env = football_env.FootballEnv(cfg)
   hash_value2 = compute_hash(env, actions)
   self.assertEqual(hash_value1, hash_value2)
   env.close()
Example #12
  def test_multi_render(self):
    """Only one rendering instance allowed at a time."""
    if 'UNITTEST_IN_DOCKER' in os.environ:
      # Rendering is not supported.
      return
    cfg = config.Config({})
    env1 = football_env.FootballEnv(cfg)
    env1.render()
    env1.reset()

    env2 = football_env.FootballEnv(cfg)
    try:
      env2.render()
    except AssertionError:
      env1.close()
      env2.close()
      # It is still possible to render.
      env3 = football_env.FootballEnv(cfg)
      env3.reset()
      env3.close()
      return
    assert False, 'Exception expected'
Example #13
 def test_restore_after_done(self):
   cfg = config.Config({
       'level': 'academy_empty_goal_close',
   })
   env = football_env.FootballEnv(cfg)
   env.reset()
   state = env.get_state()
   # Go right until reaching the goal.
   done = False
   while not done:
     _, _, done, _ = env.step(5)
   env.set_state(state)
   env.step(0)  # verify the environment can still step after restore
Example #14
 def test_restore_after_reset(self):
   cfg = config.Config({
       'level': '11_vs_11_competition',
   })
   env = football_env.FootballEnv(cfg)
   obs = env.reset()
   state = env.get_state()
   env.reset()
   env.set_state(state)
   obs_ = env.observation()
   state_ = env.get_state()
   env.step(0)  # verify the environment can still step
   self.compare_observations(obs, obs_)
   self.assertEqual(state, state_)
Example #15
def run_scenario(cfg, seed, queue, actions, render=False, validation=True):
    env = football_env.FootballEnv(cfg)
    if render:
        env.render()
    env.reset()
    if validation:
        env.tracker_setup(0, 999999999999999)
    done = False
    for action in actions:
        obs, _, done, _ = env.step([action, action])
        queue.put(obs)
        if done:
            break
    queue.put(None)
    env.close()
Example #16
 def test__memory_usage(self):
   """Make sure memory usage is low when not recording videos."""
   # This test has to go first, so that memory usage is not affected.
   if 'UNITTEST_IN_DOCKER' in os.environ:
     # Forge doesn't support rendering.
     return
   cfg = config.Config({'write_video': False})
   env = football_env.FootballEnv(cfg)
   env.render()
   env.reset()
   initial_memory = self.memory_usage()
   for _ in range(100):
     _, _, _, _ = env.step(football_action_set.action_right)
   memory_usage = self.memory_usage() - initial_memory
   env.close()
   self.assertLessEqual(memory_usage, 10000000)  # memory growth stays under ~10MB
Example #17
 def test_second_half(self):
   """Test second half feature."""
   cfg = config.Config()
   cfg['level'] = 'tests.second_half'
   env = football_env.FootballEnv(cfg)
   env.reset()
   for _ in range(5):
     o, _, done, _ = env.step(football_action_set.action_idle)
     self.assertFalse(done)
     self.assertAlmostEqual(o[0]['left_team'][o[0]['active']][0], 0, delta=0.1)
   for _ in range(6):
     self.assertFalse(done)
     o, _, done, _ = env.step(football_action_set.action_idle)
     self.assertAlmostEqual(
         o[0]['left_team'][o[0]['active']][0], -0.5, delta=0.1)
   self.assertTrue(done)
   env.close()
Example #18
 def check_determinism(self, extensive=False):
     """Check that environment is deterministic."""
     if 'UNITTEST_IN_DOCKER' in os.environ:
         return
     cfg = config.Config({'level': 'tests.11_vs_11_hard_deterministic'})
     env = football_env.FootballEnv(cfg)
     actions = len(football_action_set.get_action_set(cfg))
     for episode in range(1 if extensive else 2):
         hash_value = compute_hash(env, actions, extensive)
         if extensive:
             self.assertEqual(hash_value, 4203104251)
         elif episode % 2 == 0:
             self.assertEqual(hash_value, 716323440)
         else:
             self.assertEqual(hash_value, 1663893701)
     env.close()
Example #19
 def test___render(self):
   """Make sure rendering is not broken."""
   if 'UNITTEST_IN_DOCKER' in os.environ:
     # Rendering is not supported.
     return
   cfg = config.Config({
       'level': 'tests.11_vs_11_hard_deterministic',
   })
   env = football_env.FootballEnv(cfg)
   env.render()
   o = env.reset()
   hash_value = observation_hash(o)
   for _ in range(10):
     o, _, _, _ = env.step(football_action_set.action_right)
     hash_value = observation_hash(o, hash_value)
   self.assertEqual(hash_value, 2763980076)
   env.close()
Example #20
 def test_goal(self, episode, reverse):
   cfg = config.Config({
       'level': 'tests.goal_test',
       'players': ['agent:left_players=1,right_players=1'],
       'episode_number': episode,
       'reverse_team_processing': reverse,
   })
   env = football_env.FootballEnv(cfg)
   o = env.reset()
   done = False
   while not done:
     o, _, done, _ = env.step(
         [football_action_set.action_right, football_action_set.action_right])
   self.assertAlmostEqual(o[0]['ball'][0], 0.0, delta=0.1)
   self.assertEqual(o[0]['score'][episode], 1)
   self.assertEqual(o[0]['score'][1 - episode], 0)
   env.close()
Example #21
 def test_penalty(self):
   cfg = config.Config({
       'level': 'tests.penalty',
       'players': ['agent:left_players=1'],
   })
   env = football_env.FootballEnv(cfg)
   o = env.reset()
   done = False
   while not done:
     o, _, done, _ = env.step([football_action_set.action_sliding])
   self.assertAlmostEqual(o[0]['ball'][0], -0.809, delta=0.01)
   self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.01)
   self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
   self.assertAlmostEqual(o[0]['right_team'][1][0], -0.75, delta=0.1)
   self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
   self.assertAlmostEqual(o[0]['left_team'][1][0], -0.70, delta=0.1)
   env.close()
Example #22
 def test_offside(self, episode, team2, reverse):
   cfg = config.Config({
       'level': 'tests.offside_test',
       'players': ['agent:{}_players=1'.format(team2)],
       'episode_number': episode,
       'reverse_team_processing': reverse,
   })
   env = football_env.FootballEnv(cfg)
   env.reset()
   o, _, done, _ = env.step(football_action_set.action_long_pass)
   done = False
   while not done and o[0]['right_team'][1][0] == 0:
     o, _, done, _ = env.step(football_action_set.action_idle)
   self.assertAlmostEqual(o[0]['ball'][0], 0.6, delta=0.4)
   self.assertAlmostEqual(o[0]['right_team'][0][0], 0.6, delta=0.4)
   self.assertAlmostEqual(o[0]['right_team'][1][0], 0.6, delta=0.4)
   self.assertAlmostEqual(o[0]['left_team'][0][0], -0.6, delta=0.4)
   self.assertAlmostEqual(o[0]['left_team'][1][0], -0.6, delta=0.4)
   env.close()
Example #23
def create_multiagent_env(iprocess):

    left_player = 'ppo2_cnn:left_players=1,policy=gfootball_impala_cnn,checkpoint=/Users/stephen/Documents/football/checkpoints/11_vs_11_easy_stochastic_v2'
    right_player = 'agent:right_players=1,policy=gfootball_impala_cnn,checkpoint=/Users/stephen/Documents/football/checkpoints/11_vs_11_easy_stochastic_v2'
    players = [left_player, right_player]

    # Flip the leading False to True to enable dumping from the first process.
    write_full_episode_dumps = False and (iprocess == 0)
    write_goal_dumps = False and (iprocess == 0)
    config_values = {
        'dump_full_episodes': write_full_episode_dumps,
        'dump_scores': write_goal_dumps,
        'players': players,
        'level': '11_vs_11_easy_stochastic',
        'tracesdir': '',  # logdir
        'write_video': False
    }

    cfg = config.Config(config_values)
    env = football_env.FootballEnv(cfg)

    render = False and (iprocess == 0)  # flip False to True to render in process 0
    if render:
        env.render()

    dump_frequency = 10 if render and iprocess == 0 else 0
    env = wrappers.PeriodicDumpWriter(env, dump_frequency)

    rewards = 'scoring,checkpoints'  # what to base rewards on
    representation = 'extracted'  # alternative: 'simple115v2'; what observations the model gets
    channel_dimensions = (observation_preprocessing.SMM_WIDTH,
                          observation_preprocessing.SMM_HEIGHT)
    apply_single_agent_wrappers = True
    stacked = True  # whether to get last 4 observations stacked or just last 1
    env = _apply_output_wrappers(env, rewards, representation,
                                 channel_dimensions,
                                 apply_single_agent_wrappers, stacked)

    env = monitor.Monitor(
        env,
        logger.get_dir() and os.path.join(logger.get_dir(), str(iprocess)))

    return env
Example #24
def main(_):

    left_player = 'ppo2_cnn:left_players=1,policy=gfootball_impala_cnn,checkpoint=/Users/stephen/Documents/football/checkpoints/11_vs_11_easy_stochastic_v2'
    right_player = 'ppo2_cnn:right_players=1,policy=gfootball_impala_cnn,checkpoint=/Users/stephen/Documents/football/checkpoints/11_vs_11_easy_stochastic_v2'
    players = [left_player, right_player]

    config_values = {'dump_full_episodes': False,
                     'dump_scores': False,
                     'players': players,
                     'level': '11_vs_11_easy_stochastic',
                     'tracesdir': '/Users/stephen/Documents/football/logs',  # logdir
                     'write_video': False}

    cfg = config.Config(config_values)
    env = football_env.FootballEnv(cfg)

    render = False
    if render:
        env.render()

    env.reset()

    dump_frequency = 3
    env = wrappers.PeriodicDumpWriter(env, dump_frequency)

    n_timesteps = int(2 * 3e3 + 1)  # 3k per episode

    right_agent_ep_scores = []
    ep_right_scores = 0.0

    for _ in range(n_timesteps):
        _, reward, done, _ = env.step([])
        ep_right_scores -= reward
        if done:
            right_agent_ep_scores.append(ep_right_scores)
            ep_right_scores = 0.0
            env.reset()

    mean_score = sum(right_agent_ep_scores) / len(right_agent_ep_scores)
    print(f'\n***\nRight agent episode scores: {right_agent_ep_scores}\n' +
          f'Right agent episode mean score: {mean_score}\n***\n')
Example #25
 def test_dynamic_render(self):
   """Verifies dynamic render support."""
   if 'UNITTEST_IN_DOCKER' in os.environ:
     # Rendering is not supported.
     return
   cfg = config.Config({
       'level': 'tests.11_vs_11_hard_deterministic',
   })
   env = football_env.FootballEnv(cfg)
   o = env.reset()
   for _ in range(10):
     o, _, _, _ = env.step(football_action_set.action_right)
     self.assertNotIn('frame', o[0])
     env.render()
     self.assertIn('frame', env.observation()[0])
     self.compare_observations(o, env.observation())
     o, _, _, _ = env.step(football_action_set.action_right)
     self.assertIn('frame', env.observation()[0])
     env.disable_render()
     self.compare_observations(o, env.observation())
   env.close()
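Distilled, the dynamic-render contract exercised above is a pair of toggles. A minimal sketch under the same API assumptions (render() needs a display, which is why these tests skip under Docker):

# Minimal sketch: toggling frame capture at runtime.
from gfootball.env import config, football_env

env = football_env.FootballEnv(config.Config())
env.reset()
env.render()            # observations now carry a 'frame' entry
env.step(0)
env.disable_render()    # stop capturing frames
env.close()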
Example #26
  def test_score_empty_goal(self):
    """Score on an empty goal."""
    cfg = config.Config()

    env = football_env.FootballEnv(cfg)
    cfg['level'] = 'academy_empty_goal'
    last_o = env.reset()[0]
    for _ in range(120):
      o, reward, done, _ = env.step(football_action_set.action_right)
      o = o[0]
      if done:
        self.assertEqual(reward, 1)
        break
      self.assertFalse(done)
      self.assertGreaterEqual(o['ball'][0], last_o['ball'][0] - 0.01)
      self.assertGreaterEqual(
          o['left_team'][o['active']][0],
          last_o['left_team'][last_o['active']][0] - 0.01)
      last_o = o
    self.assertTrue(done)
    env.close()
Example #27
    def check_determinism(self, extensive=False):
        """Check that environment is deterministic."""
        if 'UNITTEST_IN_DOCKER' in os.environ:
            return
        cfg = config.Config({'level': 'tests.11_vs_11_hard_deterministic'})
        env = football_env.FootballEnv(cfg)
        actions = len(football_action_set.get_action_set(cfg))
        for episode in range(1 if extensive else 2):
            hash_value = compute_hash(env, actions, extensive)
            if extensive:
                if hash_value != 1174966789:
                    self.assertEqual(hash_value, 2245893576)
            elif episode % 2 == 0:
                if hash_value != 2275067030:
                    self.assertEqual(hash_value, 4024823270)
            else:
                if hash_value != 2045063811:
                    self.assertEqual(hash_value, 1264083657)
        env.close()
Example #28
def create_environment(
        env_name='',
        stacked=False,
        representation='extracted',
        rewards='scoring',
        write_goal_dumps=False,
        write_full_episode_dumps=False,
        render=False,
        write_video=False,
        dump_frequency=1,
        logdir='',
        extra_players=None,
        number_of_left_players_agent_controls=1,
        number_of_right_players_agent_controls=0,
        channel_dimensions=(observation_preprocessing.SMM_WIDTH,
                            observation_preprocessing.SMM_HEIGHT),
        other_config_options={}):
    """Creates a Google Research Football environment.

  Args:
    env_name: a name of a scenario to run, e.g. "11_vs_11_stochastic".
      The list of scenarios can be found in directory "scenarios".
    stacked: If True, stack 4 observations, otherwise, only the last
      observation is returned by the environment.
      Stacking is only possible when representation is one of the following:
      "pixels", "pixels_gray" or "extracted".
      In that case, the stacking is done along the last (i.e. channel)
      dimension.
    representation: String to define the representation used to build
      the observation. It can be one of the following:
      'pixels': the observation is the rendered view of the football field
        downsampled to 'channel_dimensions'. The observation size is:
        'channel_dimensions'x3 (or 'channel_dimensions'x12 when "stacked" is
        True).
      'pixels_gray': the observation is the rendered view of the football field
        in gray scale and downsampled to 'channel_dimensions'. The observation
        size is 'channel_dimensions'x1 (or 'channel_dimensions'x4 when stacked
        is True).
      'extracted': also referred to as super minimap. The observation is
        composed of 4 planes of size 'channel_dimensions'.
        Its size is then 'channel_dimensions'x4 (or 'channel_dimensions'x16 when
        stacked is True).
        The first plane P holds the position of players on the left
        team, P[y,x] is 255 if there is a player at position (x,y), otherwise,
        its value is 0.
        The second plane holds in the same way the position of players
        on the right team.
        The third plane holds the position of the ball.
        The last plane holds the active player.
      'simple115'/'simple115v2': the observation is a vector of size 115.
        It holds:
         - the ball_position and the ball_direction as (x,y,z)
         - one hot encoding of who controls the ball.
           [1, 0, 0]: nobody, [0, 1, 0]: left team, [0, 0, 1]: right team.
         - one hot encoding of size 11 to indicate who is the active player
           in the left team.
         - 11 (x,y) positions for each player of the left team.
         - 11 (x,y) motion vectors for each player of the left team.
         - 11 (x,y) positions for each player of the right team.
         - 11 (x,y) motion vectors for each player of the right team.
         - one hot encoding of the game mode. Vector of size 7 with the
           following meaning:
           {NormalMode, KickOffMode, GoalKickMode, FreeKickMode,
            CornerMode, ThrowInMode, PenaltyMode}.
         Can only be used when the scenario is a flavor of normal game
         (i.e. 11 versus 11 players).
    rewards: Comma separated list of rewards to be added.
       Currently supported rewards are 'scoring' and 'checkpoints'.
    write_goal_dumps: whether to dump traces up to 200 frames before goals.
    write_full_episode_dumps: whether to dump traces for every episode.
    render: whether to render game frames.
       Must be enabled when rendering videos or when using pixels
       representation.
    write_video: whether to dump videos when a trace is dumped.
    dump_frequency: how often to write dumps/videos (in terms of # of episodes)
      Sub-sample the episodes for which we dump videos to save some disk space.
    logdir: directory holding the logs.
    extra_players: A list of extra players to use in the environment.
        Each player is defined by a string like:
        "$player_name:left_players=?,right_players=?,$param1=?,$param2=?...."
    number_of_left_players_agent_controls: Number of left players an agent
        controls.
    number_of_right_players_agent_controls: Number of right players an agent
        controls.
    channel_dimensions: (width, height) tuple that represents the dimensions of
       SMM or pixels representation.
    other_config_options: dict that allows directly setting other options in
       the Config
  Returns:
    Google Research Football environment.
  """
    assert env_name

    scenario_config = config.Config({'level': env_name}).ScenarioConfig()
    players = [('agent:left_players=%d,right_players=%d' %
                (number_of_left_players_agent_controls,
                 number_of_right_players_agent_controls))]

    # Enable MultiAgentToSingleAgent wrapper?
    multiagent_to_singleagent = False
    if scenario_config.control_all_players:
        if (number_of_left_players_agent_controls in [0, 1]
                and number_of_right_players_agent_controls in [0, 1]):
            multiagent_to_singleagent = True
            players = [('agent:left_players=%d,right_players=%d' %
                        (scenario_config.controllable_left_players
                         if number_of_left_players_agent_controls else 0,
                         scenario_config.controllable_right_players
                         if number_of_right_players_agent_controls else 0))]

    if extra_players is not None:
        players.extend(extra_players)
    config_values = {
        'dump_full_episodes': write_full_episode_dumps,
        'dump_scores': write_goal_dumps,
        'players': players,
        'level': env_name,
        'tracesdir': logdir,
        'write_video': write_video,
    }
    config_values.update(other_config_options)
    c = config.Config(config_values)

    env = football_env.FootballEnv(c)
    if multiagent_to_singleagent:
        env = wrappers.MultiAgentToSingleAgent(
            env, number_of_left_players_agent_controls,
            number_of_right_players_agent_controls)
    if dump_frequency > 1:
        env = wrappers.PeriodicDumpWriter(env, dump_frequency, render)
    elif render:
        env.render()
    env = _apply_output_wrappers(env, rewards, representation,
                                 channel_dimensions,
                                 (number_of_left_players_agent_controls +
                                  number_of_right_players_agent_controls == 1),
                                 stacked)
    return env
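For reference, a typical call to this factory, a sketch mirroring the defaults documented in the docstring (the level name comes from the docstring's own example):

# Sketch: build a single-agent SMM ('extracted') environment and step it.
env = create_environment(
    env_name='11_vs_11_stochastic',
    representation='extracted',
    stacked=True,
    rewards='scoring,checkpoints',
    number_of_left_players_agent_controls=1)
obs = env.reset()   # stacked SMM planes: (72, 96, 16) at the default dimensions
obs, reward, done, info = env.step(0)
env.close()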
Example #29
def create_environment(env_name='',
                       stacked=False,
                       representation='extracted',
                       with_checkpoints=False,
                       enable_goal_videos=False,
                       enable_full_episode_videos=False,
                       render=False,
                       write_video=False,
                       dump_frequency=1,
                       logdir='',
                       data_dir=None,
                       font_file=None,
                       away_player=None):
    """Creates a Google Research Football environment.

  Args:
    env_name: a name of a scenario to run, e.g. "11_vs_11_stochastic".
      The list of scenarios can be found in directory "scenarios".
    stacked: If True, stack 4 observations, otherwise, only the last
      observation is returned by the environment.
      Stacking is only possible when representation is one of the following:
      "pixels", "pixels_gray" or "extracted".
      In that case, the stacking is done along the last (i.e. channel)
      dimension.
    representation: String to define the representation used to build
      the observation. It can be one of the following:
      'pixels': the observation is the rendered view of the football field
        downsampled to w=96, h=72. The observation size is: 72x96x3
        (or 72x96x12 when "stacked" is True).
      'pixels_gray': the observation is the rendered view of the football field
        in gray scale and downsampled to w=96, h=72. The observation size is
        72x96x1 (or 72x96x4 when stacked is True).
      'extracted': also referred to as super minimap. The observation is
        composed of 4 planes of size w=96, h=72.
        Its size is then 72x96x4 (or 72x96x16 when stacked is True).
        The first plane P holds the position of the 11 players of the home
        team, P[y,x] is one if there is a player at position (x,y), otherwise,
        its value is zero.
        The second plane holds in the same way the position of the 11 players
        of the away team.
        The third plane holds the active player of the home team.
        The last plane holds the position of the ball.
      'simple115': the observation is a vector of size 115. It holds:
         - the ball_position and the ball_direction as (x,y,z)
         - one hot encoding of who controls the ball.
           [1, 0, 0]: nobody, [0, 1, 0]: home team, [0, 0, 1]: away team.
         - one hot encoding of size 11 to indicate who is the active player
           in the home team.
         - 11 (x,y) positions for each player of the home team.
         - 11 (x,y) motion vectors for each player of the home team.
         - 11 (x,y) positions for each player of the away team.
         - 11 (x,y) motion vectors for each player of the away team.
         - one hot encoding of the game mode. Vector of size 7 with the
           following meaning:
           {NormalMode, KickOffMode, GoalKickMode, FreeKickMode,
            CornerMode, ThrowInMode, PenaltyMode}.
         Can only be used when the scenario is a flavor of normal game
         (i.e. 11 versus 11 players).
    with_checkpoints: True to add intermediate checkpoint rewards to guide
       the agent to move to the opponent goal.
       If False, only scoring provides a reward.
    enable_goal_videos: whether to dump traces up to 200 frames before goals.
    enable_full_episode_videos: whether to dump traces for every episode.
    render: whether to render game frames.
       Must be enabled when rendering videos or when using pixels
       representation.
    write_video: whether to dump videos when a trace is dumped.
    dump_frequency: how often to write dumps/videos (in terms of # of episodes)
      Sub-sample the episodes for which we dump videos to save some disk space.
    logdir: directory holding the logs.
    data_dir: location of the game engine data
       Safe to leave as the default value.
    font_file: location of the game font file
       Safe to leave as the default value.
    away_player: Away player (adversary) to use in the environment.
       Reserved for future usage to provide an opponent to train against.
       (which could be used for self-play).

  Returns:
    Google Research Football environment.
  """
    assert env_name
    away_players = [away_player] if away_player else []
    c = config.Config({
        'dump_full_episodes': enable_full_episode_videos,
        'dump_scores': enable_goal_videos,
        'level': env_name,
        'render': render,
        'tracesdir': logdir,
        'write_video': write_video,
        'away_players': away_players,
    })
    if data_dir:
        c['data_dir'] = data_dir
    if font_file:
        c['font_file'] = font_file
    env = football_env.FootballEnv(c)
    if dump_frequency > 1:
        env = wrappers.PeriodicDumpWriter(env, dump_frequency)
    if with_checkpoints:
        env = wrappers.CheckpointRewardWrapper(env)
    if representation.startswith('pixels'):
        env = wrappers.PixelsStateWrapper(env, 'gray' in representation)
    elif representation == 'simple21':
        env = wrappers.Simple21StateWrapper(env)
    elif representation == 'simple115':
        env = wrappers.Simple115StateWrapper(env)
    elif representation == 'extracted':
        env = wrappers.SMMWrapper(env)
    else:
        raise ValueError(
            'Unsupported representation: {}'.format(representation))
    if stacked:
        env = FrameStack(env, 4)
    return env
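This older variant wires the wrappers directly; a hypothetical shape check based purely on its docstring (4 'extracted' planes of 72x96, stacked 4 deep along the channel axis):

import numpy as np

# Hypothetical check: stacked 'extracted' observations should be 72x96x16.
env = create_environment(env_name='11_vs_11_stochastic',
                         representation='extracted',
                         stacked=True)
obs = np.asarray(env.reset())
assert obs.shape == (72, 96, 16)
env.close()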
Example #30
def main():
    args = parse_args()
    players = args.players.split(';')
    config = Config({
        'action_set': ActionSetType.DEFAULT,
        'dump_full_episodes': False,
        'players': players,
        # 'real_time': args.real_time and args.render,
        'real_time': args.render and (not args.video),
        'pitch_scale': args.pitch_scale,
    })
    base_player_config = {
        'policy_config': PolicyConfig(
            policy_type=args.policy_type,
            checkpoint=args.checkpoint,
            random_frac=args.random_frac,
            action_set=DEFAULT_ACTION_SET,
            lr=args.lr,
            discount=args.discount,
            n_steps=args.n_steps,
            verbose=args.verbose,
        ),
        'warmstart': args.warmstart,
        'verbose': args.verbose,
        'video': args.video,
    }
    if args.level:
        config['level'] = args.level
    checkpoint = 'agents/' + args.policy_type.value.lower() + '/agent.npz'
    os.makedirs(os.path.dirname(checkpoint), exist_ok=True)
    env = football_env.FootballEnv(config=config,
                                   base_player_config=base_player_config)
    if args.render:
        env.render()
    obs_history = [
        env.reset(),  # Need this to know the initial state
    ]
    # self_play_history = History(max_size=int(1e7))
    running_score_update = 0.999
    running_score = [0, 0, 0]
    record = [0, 0, 0]
    try:
        game_num = 0
        epoch_history = []
        # cnts_by_mode = defaultdict(int)
        while True:
            obs, reward, done, info = env.step()
            # _, old_relative_obs = env.get_players_and_relative_obs_pairs(obs=obs_history[-1])
            # _, new_relative_obs = env.get_players_and_relative_obs_pairs(obs=obs)
            if env._agent.num_controlled_right_players() > 0:
                reward *= -1
            item = HistoryItem(
                old_state=obs_history[-1],
                action=info['agent_action'],
                new_state=obs,
                reward=reward.item(),
            )
            epoch_history.append(item)
            # env._agent.give_reward(item=item)
            # self_play_history.add(item=item)
            # cnts_by_mode[(obs[0]['game_mode'], obs[0]['ball_owned_team'])] += 1
            obs_history.append(obs)
            if args.verbose:
                print(reward, done, info)
            if done:
                game_num += 1
                score = obs[0]['score']
                running_score[0] = running_score_update * running_score[0] + (
                    1.0 - running_score_update) * score[0]
                running_score[1] = running_score_update * running_score[1] + (
                    1.0 - running_score_update) * score[1]
                running_score[2] = running_score[0] - running_score[1]
                if score[0] > score[1]:
                    record[0] += 1
                elif score[0] < score[1]:
                    record[2] += 1
                else:
                    record[1] += 1
                # mean_reward = self_play_history.mean_reward()
                print(
                    'Final Score:',
                    score,
                    'Running score: [%.3f, %.3f, %.3f]' % tuple([
                        x / (1 - running_score_update**game_num)
                        for x in running_score
                    ]),
                    'Record:',
                    record,
                    # 'Mean Reward in history:', mean_reward,
                )
                # for item in self_play_history.sample(n=int(1e3)):
                #     env._agent.give_reward(item=item) # ._replace(reward=item.reward - mean_reward))
                env._agent.process_epoch(items=epoch_history)
                env._agent.reset()
                obs_history.append(env.reset())
                epoch_history = []
                if (not args.render) and (game_num % 25 == 0):
                    env._agent.save(checkpoint=checkpoint)
                if game_num == args.num_games:
                    break
    except KeyboardInterrupt:
        logging.warning('Game stopped, writing dump...')
        if (not args.render):
            env._agent.save(checkpoint='agent.pkl')
        # env.write_dump('shutdown')
        # return env._agent
        print(checkpoint)
        exit(1)