Example No. 1
def print_beneficial_deviation_analysis(last_meta_game, meta_game, last_meta_prob, verbose=False):
  """
  Function to check whether players have found policy of beneficial deviation in current meta_game compared to the last_meta_game
  Args:
    last_meta_game: List of list of meta_game (One array per game player). The meta game to compare against
    meta_game: List of list of meta_game (One array per game player). Current iteration's meta game. Same length with last_meta_game, and each element in meta_game has to include all entries in last_meta_game's corresponding elements
    last_meta_prob: nash equilibrium of last g_psro_iteration. List of list. Last iteration
  Returns:
    a list of length num_players, indicating the number of beneficial deviations for each player from last_meta_prob
  """
  num_player = len(last_meta_prob)
  num_new_pol = [ meta_game[0].shape[i]-len(last_meta_prob[i]) for i in range(num_player)]
  num_pol = [ meta_game[0].shape[i] for i in range(num_player)]
  prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(last_meta_prob)
  this_meta_prob = [np.append(last_meta_prob[i],[0 for _ in range(num_new_pol[i])]) for i in range(num_player)]
  beneficial_deviation = [0 for _ in range(num_player)]
  for i in range(num_player): 
    ne_payoff = np.sum(last_meta_game[i]*prob_matrix)
    # iterate through player's new policy
    for j in range(num_new_pol[i]):
      dev_prob = this_meta_prob.copy()
      dev_prob[i] = np.zeros(num_pol[i])
      dev_prob[i][len(last_meta_prob[i])+j] = 1
      new_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
      dev_payoff = np.sum(meta_game[i]*new_prob_matrix)
      if ne_payoff < dev_payoff:
        beneficial_deviation[i] += 1

  if verbose:
    print("\n---------------------------\nBeneficial Deviation :")
    for p in range(len(beneficial_deviation)):
      print('player '+str(p)+':',beneficial_deviation[p])
  return beneficial_deviation
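
For reference, a minimal usage sketch of the function above with hand-made payoff arrays. It assumes the module's own imports (numpy as np and the PSRO meta_strategies helpers) are available; all values are illustrative only.

import numpy as np

# Hypothetical 2-player case: the old meta game had one policy per player and the
# new meta game adds a second policy for each (payoff values are made up).
last_meta_game = [np.array([[0.0]]), np.array([[0.0]])]
meta_game = [np.array([[0.0, 0.0],
                       [1.0, 0.0]]),      # player 0's new policy earns 1 vs. the old equilibrium
             np.array([[0.0, -1.0],
                       [0.0,  0.0]])]     # player 1's new policy earns -1 vs. the old equilibrium
last_meta_prob = [np.array([1.0]), np.array([1.0])]

# Expected output: [1, 0] -- only player 0 has a beneficial deviation.
print_beneficial_deviation_analysis(last_meta_game, meta_game, last_meta_prob, verbose=True)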
Example No. 2
def strategy_regret(meta_games, subgame_index, ne=None, subgame_ne=None):
    """
        Calculate the strategy regret based on a complete payoff matrix for PSRO.
        strategy_regret of player equals to nash_payoff in meta_game - fix opponent nash strategy, player deviates to subgame_nash
        Assume all players have the same number of policies.
        :param meta_games: meta_games in PSRO
        :param subgame_index: subgame to evaluate, redundant if subgame_nash supplied
        :param: nash: equilibrium vector
        :param: subgame_ne: equilibrium vector
        :return: a list of regret, one for each player.

    """
    num_players = len(meta_games)
    num_new_pol = np.shape(meta_games[0])[0] - subgame_index

    ne = nash_solver(meta_games, solver="gambit") if not ne else ne
    index = [list(np.arange(subgame_index)) for _ in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]
    subgame_ne = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    nash_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(ne)

    regrets = []
    for i in range(num_players):
        ne_payoff = np.sum(meta_games[i]*nash_prob_matrix)
        dev_prob = ne.copy()
        dev_prob[i] = list(np.append(subgame_ne[i],[0 for _ in range(num_new_pol)]))
        dev_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
        subgame_payoff = np.sum(meta_games[i]*dev_prob_matrix)
        regrets.append(ne_payoff-subgame_payoff)

    return regrets
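
The expected-payoff computation above hinges on the joint probability tensor built from the per-player marginals. A self-contained sketch of that step, assuming (as the code suggests) that general_get_joint_strategy_from_marginals returns the outer product of the marginals; payoff values are illustrative.

import numpy as np
from functools import reduce

def joint_from_marginals(marginals):
    # Outer product of the per-player marginal distributions -> joint probability tensor.
    return reduce(np.multiply.outer, marginals)

meta_game = [np.array([[1.0, 0.0], [3.0, 2.0]]),   # illustrative payoffs, player 0
             np.array([[1.0, 3.0], [0.0, 2.0]])]   # illustrative payoffs, player 1
ne = [np.array([0.5, 0.5]), np.array([0.5, 0.5])]
prob_matrix = joint_from_marginals(ne)             # 2x2 tensor, each entry 0.25
print([np.sum(meta_game[i] * prob_matrix) for i in range(2)])   # [1.5, 1.5]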
Example No. 3
def regret(meta_games, subgame_index, subgame_ne=None, start_index=0):
    """
    Calculate the regret based on a complete payoff matrix for PSRO
    In subgame, each player could have different number of strategies
    :param meta_games: meta_games in PSRO
    :param subgame_index: last policy index in subgame.
                          subgame_index-start_index+1=number of policy
                          int/list. If int, players have same num of strategies
    :param start_index: starting index for the subgame.
                          int/list. If int, assume subgame in all num_players dimension
                          have the same index
    :param: subgame_ne: subgame nash equilibrium vector.
    :return: a list of regret, one for each player.
    """
    num_policy = np.array(np.shape(meta_games[0]))
    num_players = len(meta_games)
    subgame_index = np.ones(num_players,dtype=int)*subgame_index \
        if np.isscalar(subgame_index) else subgame_index
    start_index = np.ones(num_players,dtype=int)*start_index \
        if np.isscalar(start_index) else start_index
    if np.all(num_policy == subgame_index - start_index + 1):
        print("The subgame is same as the full game. Return zero regret.")
        return np.zeros(num_players)

    num_new_pol_back = num_policy - subgame_index - 1
    index = [list(np.arange(start_index[i],subgame_index[i]+1)) for i in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]

    nash = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(nash)
    this_meta_prob = [np.concatenate(([0 for _ in range(start_index[i])], nash[i], [0 for _ in range(num_new_pol_back[i])])) for i in range(num_players)]
    nash_payoffs = []
    deviation_payoffs = []

    for i in range(num_players): 
        ne_payoff = np.sum(submeta_games[i]*prob_matrix)
        # iterate through player's new policy
        dev_payoff = []
        for j in range(start_index[i] + num_new_pol_back[i]):
            dev_prob = this_meta_prob.copy()
            dev_prob[i] = np.zeros(num_policy[i])
            if j < start_index[i]:
                dev_prob[i][j] = 1
            else:
                dev_prob[i][subgame_index[i]+j-start_index[i]+1] = 1
            new_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
            dev_payoff.append(np.sum(meta_games[i]*new_prob_matrix))
        deviation_payoffs.append(np.array(dev_payoff) - ne_payoff)
        nash_payoffs.append(ne_payoff)
    
    regret = [np.max(ele) for ele in deviation_payoffs]
    return regret
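
The subgame extraction above relies on np.ix_ to cut a rectangular block out of each player's payoff matrix. A small illustration with a made-up 4x4 payoff matrix and a hypothetical subgame window:

import numpy as np

meta_game_p0 = np.arange(16, dtype=float).reshape(4, 4)   # made-up payoff matrix for player 0
start_index = [1, 0]
subgame_index = [2, 1]
# Keep only rows 1..2 (player 0's subgame policies) and columns 0..1 (player 1's).
index = [list(np.arange(start_index[i], subgame_index[i] + 1)) for i in range(2)]
print(meta_game_p0[np.ix_(*index)])
# [[4. 5.]
#  [8. 9.]]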
Example No. 4
def regret(meta_games, subgame_index, subgame_ne=None):
    """
    Calculate the regret based on a complete payoff matrix for PSRO
    Assume all players have the same number of policies
    :param meta_games: meta_games in PSRO
    :param subgame_index: the subgame to evaluate. Redundant when subgame_ne is supplied
    :param: subgame_ne: subgame nash equilibrium vector.
    :return: a list of regret, one for each player.
    """
    num_policy = np.shape(meta_games[0])[0]
    num_players = len(meta_games)
    if num_policy == subgame_index:
        print("The subgame is same as the full game. Return zero regret.")
        return np.zeros(num_players)
    num_new_pol = num_policy - subgame_index

    index = [list(np.arange(subgame_index)) for _ in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]
    nash = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(nash)
    this_meta_prob = [np.append(nash[i],[0 for _ in range(num_new_pol)]) for i in range(num_players)]

    nash_payoffs = []
    deviation_payoffs = []

    for i in range(num_players): 
        ne_payoff = np.sum(submeta_games[i]*prob_matrix)
        # iterate through player's new policy
        dev_payoff = []
        for j in range(num_new_pol):
            dev_prob = this_meta_prob.copy()
            dev_prob[i] = np.zeros(num_policy)
            dev_prob[i][subgame_index+j] = 1
            new_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
            dev_payoff.append(np.sum(meta_games[i]*new_prob_matrix))
        deviation_payoffs.append(np.array(dev_payoff) - ne_payoff)
        nash_payoffs.append(ne_payoff)
    
    regret = np.maximum(np.max(deviation_payoffs,axis=1),0)
    return regret
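
A minimal usage sketch for the variant above; supplying subgame_ne avoids the call to nash_solver/gambit, and it assumes the module's own imports (numpy as np and meta_strategies) are available. Payoff values are illustrative only.

import numpy as np

meta_games = [np.array([[0.0, 0.0],
                        [2.0, 0.0]]),    # player 0's new policy earns 2 vs. the subgame equilibrium
              np.array([[0.0, 1.0],
                        [0.0, 0.0]])]    # player 1's new policy earns 1 vs. the subgame equilibrium
subgame_index = 1                        # the subgame is the 1x1 top-left block
subgame_ne = [np.array([1.0]), np.array([1.0])]

# Expected output: [2. 1.] -- best deviation payoff minus subgame Nash payoff, clipped at 0.
print(regret(meta_games, subgame_index, subgame_ne=subgame_ne))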
Example No. 5
def get_mixed_payoff(self, strategy_list, strategy_support):
    """
    Check whether payoffs exist for the given profile. If any entry is missing, return False.
    Params:
        strategy_list    : list of list, policy indices for each player
        strategy_support : list of list, policy support probabilities for each player
    Returns:
        payoffs          : payoff for each player in the profile, or False if any entry is missing
    """
    if np.any(np.isnan(self._meta_games[0][np.ix_(*strategy_list)])):
        return False
    meta_game = [ele[np.ix_(*strategy_list)] for ele in self._meta_games]
    prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(
        strategy_support)
    payoffs = []
    for i in range(self._num_players):
        payoffs.append(np.sum(meta_game[i] * prob_matrix))
    return payoffs
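
The existence check at the top of the method assumes unexplored meta-game entries are stored as NaN, so a single NaN inside the selected block means the mixed payoff cannot be computed yet. A small self-contained illustration of that check, with made-up values:

import numpy as np

meta_game_p0 = np.array([[1.0, np.nan],     # the (0, 1) entry has not been simulated yet
                         [0.5, 2.0]])
strategy_list = [[0, 1], [0]]               # player 0 supports policies 0 and 1, player 1 only policy 0
print(np.any(np.isnan(meta_game_p0[np.ix_(*strategy_list)])))   # False -> the mixed payoff is computable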
Example No. 6
def rollout(env, strategies, strategy_support, sims_per_entry=1000):
    """
    Evaluate player's mixed strategy with support in env.
    Params:
        env              : an open_spiel env
        strategies       : list of list, each list containing a player's strategy
        strategy_support : mixed_strategy support probability vector
        sims_per_entry   : number of episodes for each pure strategy profile to sample
    Return:
        a list of players' payoff
    """
    num_players = len(strategies)
    num_strategies = [len(ele) for ele in strategies]
    prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(strategy_support)
    payoff_tensor = np.zeros([num_players]+num_strategies)

    for ind in itertools.product(*[np.arange(ele) for ele in num_strategies]):
        strat = [strategies[i][ind[i]] for i in range(num_players)]
        pure_payoff = sample_episodes(env, strat, sims_per_entry)
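        # The Ellipsis fills the leading player axis: one payoff per player for this pure-strategy profile.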
        payoff_tensor[tuple([...]+list(ind))] = pure_payoff

    return [np.sum(payoff_tensor[i]*prob_matrix) for i in range(num_players)]
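
A short self-contained sketch of the indexing pattern used in the loop above: the tuple built from Ellipsis plus the profile index writes one payoff per player (the leading axis) for each pure-strategy profile. Payoff values here are dummies.

import itertools
import numpy as np

num_players, num_strategies = 2, [2, 3]
payoff_tensor = np.zeros([num_players] + num_strategies)
for ind in itertools.product(*[np.arange(n) for n in num_strategies]):
    # Equivalent to payoff_tensor[:, ind[0], ind[1]] = ... in this 2-player case.
    payoff_tensor[tuple([...] + list(ind))] = [sum(ind), -sum(ind)]   # dummy payoffs
print(payoff_tensor[0])   # player 0's 2x3 pure-strategy payoff matrix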