Example #1
def main():
    #sys.stdout = open(result_file,'w')
    #anomaly_meta_game_file = open(result_folder+'/anomaly_game_matrix.pkl','ab')
    for i in range(test_cases):
        params = generate_parameters()
        meta_game = generate_meta_game(**params)
        print("###################################")
        print("##############Test_" + str(i) + "_##############")
        print("###################################")
        print(params)

        print("-----------------Gambit_", end='')
        start = time.time()
        gambit_eq = gs.nash_solver(meta_game, solver="gambit", mode="all")
        end = time.time()
        print("{0:0.3f}".format(end - start), end='')
        print("--------------------")
        for eq in gambit_eq:
            print("%%%%%%%%%%%%%%%%%%% EQ %%%%%%%%%%%%%%%%%")
            for eq_q in eq:
                print(["{0:0.3f}".format(i) for i in eq_q.tolist()])

        # TODO: RD may be performing poorly because gambit-gnm fails to find all
        # equilibria. Change gambit to lca to test
        print("------------------RD_", end='')
        start = time.time()
        rd_eq = gs.nash_solver(meta_game, solver="replicator", mode='one')
        end = time.time()
        print("{0:0.3f}".format(end - start), end='')
        print("-----------------------")
        for ele in rd_eq:
            print(["{0:0.3f}".format(i) for i in ele])
        closest, min_dis = element_distance_to_set(rd_eq, gambit_eq)
        print('distance to gambit-eq', closest, "{0:0.3f}".format(min_dis))

        print("----------------QuieFul_", end='')
        quiesce_full = QuiesceTest(meta_game)
        start = time.time()
        quiesce_full_eq, _ = quiesce_full.inner_loop()
        end = time.time()
        print("{0:0.3f}".format(end - start), end='')
        print("-------------------")
        print(quiesce_full_eq)
        closest, min_dis = element_distance_to_set(quiesce_full_eq, gambit_eq)
        print('distance to gambit-eq', closest, "{0:0.3f}".format(min_dis))
        if min_dis > zero_threshold:
            #pickle.dump(meta_game, anomaly_meta_game_file)
            print(meta_game)

        print("----------------QuieSpa_", end='')
        quiesce_sparse = QuiesceSparseTest(meta_game)
        start = time.time()
        quiesce_sparse_eq, _ = quiesce_sparse.inner_loop()
        end = time.time()
        print("{0:0.3f}".format(end - start), end='')
        print("-------------------")
        print(quiesce_sparse_eq)
        closest, min_dis = element_distance_to_set(quiesce_sparse_eq,
                                                   gambit_eq)
        print('distance to gambit-eq', closest, "{0:0.3f}".format(min_dis))
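
The helper element_distance_to_set is not shown in this excerpt. From the call sites above it returns the element of gambit_eq closest to the candidate equilibrium together with a scalar distance that is compared against zero_threshold. A minimal sketch under those assumptions (the flattening of per-player marginals and the L2 metric are guesses, not the repository's implementation):

import numpy as np

def element_distance_to_set(candidate, eq_set):
    # Hypothetical sketch: `candidate` is one equilibrium (a list of per-player
    # marginal arrays); `eq_set` is a list of such equilibria. Returns the
    # closest equilibrium in `eq_set` and the corresponding L2 distance.
    flat = np.concatenate([np.asarray(p).ravel() for p in candidate])
    closest, min_dis = None, np.inf
    for eq in eq_set:
        eq_flat = np.concatenate([np.asarray(p).ravel() for p in eq])
        dist = np.linalg.norm(flat - eq_flat)
        if dist < min_dis:
            closest, min_dis = eq, dist
    return closest, min_dis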
Example #2
def strategy_regret(meta_games, subgame_index, ne=None, subgame_ne=None):
    """
        Calculate the strategy regret based on a complete payoff matrix for PSRO.
        strategy_regret of player equals to nash_payoff in meta_game - fix opponent nash strategy, player deviates to subgame_nash
        Assume all players have the same number of policies.
        :param meta_games: meta_games in PSRO
        :param subgame_index: subgame to evaluate, redundant if subgame_nash supplied
        :param: nash: equilibrium vector
        :param: subgame_ne: equilibrium vector
        :return: a list of regret, one for each player.

    """
    num_players = len(meta_games)
    num_new_pol = np.shape(meta_games[0])[0] - subgame_index

    ne = nash_solver(meta_games, solver="gambit") if not ne else ne
    index = [list(np.arange(subgame_index)) for _ in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]
    subgame_ne = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    nash_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(ne)

    regrets = []
    for i in range(num_players):
        ne_payoff = np.sum(meta_games[i]*nash_prob_matrix)
        dev_prob = ne.copy()
        dev_prob[i] = list(np.append(subgame_ne[i],[0 for _ in range(num_new_pol)]))
        dev_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
        subgame_payoff = np.sum(meta_games[i]*dev_prob_matrix)
        regrets.append(ne_payoff-subgame_payoff)

    return regrets
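
Both payoffs above are expectations of a payoff tensor under a product of per-player marginals. The helper meta_strategies.general_get_joint_strategy_from_marginals is not included in this excerpt; a minimal sketch of what such a helper presumably computes (the outer product of the marginals) and how the expectation is taken:

import numpy as np
from functools import reduce

def joint_strategy_from_marginals(marginals):
    # Assumed contract: joint probability tensor formed as the product of
    # independent per-player marginal distributions.
    return reduce(np.multiply.outer, [np.asarray(m) for m in marginals])

# Toy check: matching-pennies payoffs for player 0 under uniform marginals.
payoff_0 = np.array([[1., -1.], [-1., 1.]])
joint = joint_strategy_from_marginals([np.array([0.5, 0.5]), np.array([0.5, 0.5])])
print(np.sum(payoff_0 * joint))   # -> 0.0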
Example #3
def weighted_NE_strategy(solver,
                         return_joint=False,
                         checkpoint_dir=None,
                         gamma=0.4):
    """Mix the current meta-game NE with historical NEs, weighted by a geometric decay gamma, and return the normalized marginals."""
    meta_games = solver.get_meta_game()
    num_players = len(meta_games)
    NE_list = solver._NE_list
    if len(NE_list) == 0:
        return [np.array([1])] * num_players, None

    num_used_policies = len(NE_list[-1][0])

    if not isinstance(meta_games, list):
        meta_games = [meta_games, -meta_games]

    num_strategies = len(meta_games[0])
    equilibria = gs.nash_solver(meta_games,
                                solver="gambit",
                                mode="one",
                                checkpoint_dir=checkpoint_dir)

    # One independent accumulator per player; `[arr] * n` would alias a single array.
    result = [np.zeros(num_strategies) for _ in range(num_players)]
    for player in range(num_players):
        for i, NE in enumerate(NE_list):
            result[player][:len(NE[player])] += NE[player] * gamma**(
                num_used_policies - i)
        result[player] += equilibria[player]
        result[player] /= np.sum(result[player])

    return result, None
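
A toy, single-player illustration of the decayed mixing performed above (all numbers are made up; gamma and the exponent follow the loop in weighted_NE_strategy):

import numpy as np

gamma = 0.4
NE_list = [np.array([1.0]),              # historical NE after 1 policy
           np.array([0.5, 0.5])]         # historical NE after 2 policies
current_eq = np.array([0.2, 0.3, 0.5])   # NE of the current 3-policy meta game
num_used_policies = len(NE_list[-1])

result = np.zeros(3)
for i, ne in enumerate(NE_list):
    result[:len(ne)] += ne * gamma**(num_used_policies - i)
result += current_eq
result /= result.sum()
print(result)   # decayed mixture of past NEs and the current NE, normalized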
Example #4
def regret(meta_games, subgame_index, subgame_ne=None, start_index=0):
    """
    Calculate the regret based on a complete payoff matrix for PSRO
    In subgame, each player could have different number of strategies
    :param meta_games: meta_games in PSRO
    :param subgame_index: last policy index in subgame.
                          subgame_index-start_index+1=number of policy
                          int/list. If int, players have same num of strategies
    :param start_index: starting index for the subgame.
                          int/list. If int, assume subgame in all num_players dimension
                          have the same index
    :param: subgame_ne: subgame nash equilibrium vector.
    :return: a list of regret, one for each player.
    """
    num_policy = np.array(np.shape(meta_games[0]))
    num_players = len(meta_games)
    subgame_index = np.ones(num_players,dtype=int)*subgame_index \
        if np.isscalar(subgame_index) else subgame_index
    start_index = np.ones(num_players,dtype=int)*start_index \
        if np.isscalar(start_index) else start_index
    if np.all(num_policy == subgame_index - start_index + 1):
        print("The subgame is the same as the full game. Return zero regret.")
        return np.zeros(num_players)

    num_new_pol_back = num_policy - subgame_index - 1
    index = [list(np.arange(start_index[i],subgame_index[i]+1)) for i in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]

    nash = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(nash)
    this_meta_prob = [np.concatenate(([0 for _ in range(start_index[i])], nash[i], [0 for _ in range(num_new_pol_back[i])])) for i in range(num_players)]
    nash_payoffs = []
    deviation_payoffs = []

    for i in range(num_players): 
        ne_payoff = np.sum(submeta_games[i]*prob_matrix)
        # iterate through player's new policy
        dev_payoff = []
        for j in range(start_index[i] + num_new_pol_back[i]):
            dev_prob = this_meta_prob.copy()
            dev_prob[i] = np.zeros(num_policy[i])
            if j < start_index[i]:
                dev_prob[i][j] = 1
            else:
                dev_prob[i][subgame_index[i]+j-start_index[i]+1] = 1
            new_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
            dev_payoff.append(np.sum(meta_games[i]*new_prob_matrix))
        deviation_payoffs.append(dev_payoff-ne_payoff)
        nash_payoffs.append(ne_payoff)
    
    regret = [np.max(ele) for ele in deviation_payoffs]
    return regret
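
The subgame payoff tensors are carved out of the full meta game with np.ix_; a small self-contained illustration of that indexing on a toy matrix:

import numpy as np

payoff = np.arange(9).reshape(3, 3)                     # toy 3x3 payoff matrix
index = [list(np.arange(0, 2)), list(np.arange(0, 2))]  # keep strategies {0, 1} for both players
print(payoff[np.ix_(*index)])                           # [[0 1]
                                                        #  [3 4]]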
Example #5
def regret(meta_games, subgame_index, subgame_ne=None):
    """
    Calculate the regret based on a complete payoff matrix for PSRO
    Assume all players have the same number of policies
    :param meta_games: meta_games in PSRO
    :param subgame_index: the subgame to evaluate. Redundant when subgame_ne is supplied
    :param: subgame_ne: subgame nash equilibrium vector.
    :return: a list of regret, one for each player.
    """
    num_policy = np.shape(meta_games[0])[0]
    num_players = len(meta_games)
    if num_policy == subgame_index:
        print("The subgame is same as the full game. Return zero regret.")
        return np.zeros(num_players)
    num_new_pol = num_policy - subgame_index

    index = [list(np.arange(subgame_index)) for _ in range(num_players)]
    submeta_games = [ele[np.ix_(*index)] for ele in meta_games]
    nash = nash_solver(submeta_games, solver="gambit") if not subgame_ne else subgame_ne
    prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(nash)
    this_meta_prob = [np.append(nash[i],[0 for _ in range(num_new_pol)]) for i in range(num_players)]

    nash_payoffs = []
    deviation_payoffs = []

    for i in range(num_players): 
        ne_payoff = np.sum(submeta_games[i]*prob_matrix)
        # iterate through player's new policy
        dev_payoff = []
        for j in range(num_new_pol):
            dev_prob = this_meta_prob.copy()
            dev_prob[i] = np.zeros(num_policy)
            dev_prob[i][subgame_index+j] = 1
            new_prob_matrix = meta_strategies.general_get_joint_strategy_from_marginals(dev_prob)
            dev_payoff.append(np.sum(meta_games[i]*new_prob_matrix))
        deviation_payoffs.append(dev_payoff-ne_payoff)
        nash_payoffs.append(ne_payoff)
    
    regret = np.maximum(np.max(deviation_payoffs,axis=1),0)
    return regret
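
A self-contained check of this regret logic on rock-paper-scissors, with the subgame restricted to {Rock, Paper}. The computation below mirrors the function by hand using numpy only (the subgame NE is supplied analytically, so no call to nash_solver or meta_strategies is needed):

import numpy as np

rps = np.array([[ 0., -1.,  1.],
                [ 1.,  0., -1.],
                [-1.,  1.,  0.]])                        # player 0's payoffs; zero-sum game
meta_games = [rps, -rps]
subgame_index = 2                                        # subgame = {Rock, Paper}
subgame_ne = [np.array([0., 1.]), np.array([0., 1.])]    # Paper dominates Rock in the 2x2 subgame

# NE payoff of player 0 at the subgame NE, embedded into the full game.
joint = np.multiply.outer(*[np.append(ne, [0.]) for ne in subgame_ne])
ne_payoff_0 = np.sum(meta_games[0] * joint)              # 0.0 (Paper vs Paper)

# Player 0 deviates to the new strategy (Scissors) while player 1 keeps Paper.
dev = np.multiply.outer(np.array([0., 0., 1.]), np.append(subgame_ne[1], [0.]))
dev_payoff_0 = np.sum(meta_games[0] * dev)               # 1.0 (Scissors beats Paper)
print(max(dev_payoff_0 - ne_payoff_0, 0))                # player 0's regret = 1.0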
Example #6
def general_nash_strategy(solver,
                          return_joint=False,
                          NE_solver="gambit",
                          mode='one',
                          game=None,
                          checkpoint_dir=None):
    """Returns nash distribution on meta game matrix.

  This method works for general-sum multi-player games.

  Args:
    solver: GenPSROSolver instance.
    return_joint: If False, only returns marginals. Otherwise returns marginals
      as well as joint probabilities.
    NE_solver: Tool for finding a NE.
    mode: Return one, all, or pure NE.
    game: Overrides solver.get_meta_game() if provided.
    checkpoint_dir: Passed through to the NE solver.
  Returns:
    Nash distribution on strategies.
  """
    meta_games = solver.get_meta_game() if game is None else game
    if not isinstance(meta_games, list):
        meta_games = [meta_games, -meta_games]
    equilibria = gs.nash_solver(meta_games,
                                solver=NE_solver,
                                mode=mode,
                                checkpoint_dir=checkpoint_dir)

    if not return_joint:
        return equilibria
    else:
        if mode == 'all' and type(equilibria[0]) == list:
            # If multiple NE exist, return a list with joint strategies.
            joint_strategies_list = [
                get_joint_strategy_from_marginals([ne]) for ne in equilibria
            ]
            return equilibria, joint_strategies_list
        else:
            joint_strategies = get_joint_strategy_from_marginals(equilibria)
            return equilibria, joint_strategies
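
A hedged usage sketch: because the game argument overrides solver.get_meta_game(), no GenPSROSolver instance is needed, and a single ndarray is expanded to a two-player zero-sum game by the function itself. This assumes gs.nash_solver and gambit are available in the environment:

import numpy as np

mp = np.array([[1., -1.], [-1., 1.]])   # matching pennies, player 0's payoffs
eq, joint = general_nash_strategy(solver=None, return_joint=True,
                                  NE_solver="gambit", mode="one", game=mp)
print(eq)     # expected: uniform marginals, [array([0.5, 0.5]), array([0.5, 0.5])]
print(joint)  # expected: probability 0.25 on each pure profile (exact format
              # depends on get_joint_strategy_from_marginals)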
Example #7
game_name = 'MP'


def game_selector(game_name):
    if game_name == 'MP':
        meta_games = MP_meta_games
    elif game_name == 'BOS':
        meta_games = BOS_meta_games
    elif game_name == 'BC':
        meta_games = BC_meta_games
    elif game_name == 'RPS':
        meta_games = RPS_meta_games
    elif game_name == 'HT':
        meta_games = HT_meta_games
    elif game_name == 'RND':
        meta_games = RND_meta_games
    else:
        raise ValueError("Game does not exist.")
    return meta_games


print("****************************************")
for game in game_list:
    print("The current game is ", game)
    meta_games = game_selector(game)
    equilibria = gs.nash_solver(meta_games, solver="gambit", mode='all')
    for eq in equilibria:
        print(eq)
    print('*************************************')