forked from egtaonline/gameanalysis
/
AnalysisScript.py
executable file
·150 lines (136 loc) · 6.11 KB
/
AnalysisScript.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
#! /usr/bin/env python2.7
from GameIO import read
from Subgames import subgame, translate
from Dominance import iterated_elimination, pure_strategy_dominance, \
best_responses
from Regret import max_social_welfare, social_welfare, regret, neighbors
from Nash import pure_nash, min_regret_profile, mixed_nash
from sys import argv
from os.path import abspath, exists
from argparse import ArgumentParser
from copy import deepcopy
from math import isinf
def parse_args():
    """Build and parse the command-line arguments for the analysis script.

    Returns:
        An argparse.Namespace with fields:
            game (str): path to the game file to analyze (required).
            sg (str): optional subgames file ("" when absent).
            r (float): max allowed regret for approximate equilibria.
            d (float): L2-distance below which two equilibria are the same.
            s (float): min probability for a strategy to count as support.
            c (float): replicator dynamics convergence threshold.
            i (int): max replicator dynamics iterations.
    """
    parser = ArgumentParser()
    # Adjacent string literals concatenate automatically; no "+\" needed.
    parser.add_argument("game", type=str, help="Game file to be analyzed. "
            "Pure strategy Nash equilibrium search is performed on this "
            "game, and regret of mixed strategy Nash equilibria are computed "
            "in this game. If no subgames file is provided, mixed strategy "
            "Nash equilibrium search is performed on this game.")
    parser.add_argument("-sg", type=str, default="", help="Optional file "
            "identifying complete subgames of 'game' to be analyzed. If "
            "provided, mixed strategy Nash equilibrium search is performed "
            "in every subgame. This file is usually generated by Subgames.py.")
    parser.add_argument("-r", metavar="REGRET", type=float, default=1e-3,
            help="Max allowed regret for approximate Nash equilibria.")
    parser.add_argument("-d", metavar="DIST", type=float, default=1e-3,
            help="L2-distance threshold to consider equilibria distinct.")
    parser.add_argument("-s", metavar="SUPPORT", type=float, default=1e-3,
            help="Min probability for a strategy to be considered in support.")
    # Typo fixed in help text: "thrshold" -> "threshold".
    parser.add_argument("-c", metavar="CONVERGE", type=float, default=1e-8,
            help="Replicator dynamics convergence threshold.")
    parser.add_argument("-i", metavar="ITERS", type=int, default=10000,
            help="Max replicator dynamics iterations.")
    return parser.parse_args()
def main(args):
    """Run the full game analysis pipeline, printing results to stdout.

    Reports, in order: the input game; its max-social-welfare profile
    (plus per-role bests for multi-role games); strategies removed by
    iterated elimination of dominated strategies; pure-strategy Nash
    equilibria (or the minimum-regret pure profile when none exist);
    and approximate mixed-strategy Nash equilibria for each subgame
    (or for the dominance-reduced full game when -sg was not given).

    args: argparse.Namespace produced by parse_args().
    """
    input_game = read(args.game)
    print "input game =", abspath(args.game), "\n", input_game, "\n\n"

    #max social welfare
    soc_opt_prof, soc_opt_welf = max_social_welfare(input_game)
    print "max social welfare =", round(soc_opt_welf, 4)
    print "achieved by profile =", soc_opt_prof
    if len(input_game.roles) > 1:
        # Multi-role games: also report the best total value per role.
        for r in input_game.roles:
            role_opt_prof, role_opt_welf = max_social_welfare(input_game, r)
            print "\tbest total value for", r, "=", role_opt_welf
            print "\tachieved by profile =", role_opt_prof
    print "\n\n"

    #iterated elimination of dominated strategies
    rational_game = iterated_elimination(input_game, pure_strategy_dominance, \
            conditional=1)
    # Strategies present in the input game but absent from the reduced
    # "rational" game are exactly those that were eliminated.
    eliminated = {r:sorted(set(input_game.strategies[r]) - set( \
            rational_game.strategies[r])) for r in input_game.roles}
    if any(map(len, eliminated.values())):
        print "dominated strategies:"
        for r in rational_game.roles:
            if eliminated[r]:
                print r, ":", ", ".join(eliminated[r])
    else:
        print "no dominated strategies found"

    #pure strategy Nash equilibrium search
    pure_equilibria = pure_nash(rational_game, args.r)
    l = len(pure_equilibria)
    if l > 0:
        print "\n" + str(len(pure_equilibria)), "pure strategy Nash " +\
                "equilibri" + ("um:" if l == 1 else "a:")
        for i, eq in enumerate(pure_equilibria):
            # Regret and welfare are evaluated in the full input game,
            # not the dominance-reduced game the search ran in.
            print str(i+1) + ". regret =", round(regret(input_game, eq), 4), \
                    "; social welfare =", round(social_welfare(input_game,eq),4)
            for role in input_game.roles:
                print " " + role + ":", ", ".join(map(lambda pair: \
                        str(pair[1]) + "x " + str(pair[0]), eq[role].items()))
    else:
        # No pure equilibrium: fall back to the least-regret pure profile.
        print "\nno pure strategy Nash equilibria found."
        mrp = min_regret_profile(rational_game)
        # NOTE(review): this unrounded regret line duplicates the rounded
        # value printed just below -- looks like leftover debug output;
        # confirm before removing.
        print "regret =", regret(input_game, mrp)
        print "minimum regret pure strategy profile (regret = " + \
                str(round(regret(input_game, mrp), 4)) + "; social welfare = "+\
                str(round(social_welfare(input_game, mrp), 4)) + "):"
        for role in input_game.roles:
            print " " + role + ":", ", ".join(map(lambda pair: \
                    str(pair[1]) + "x " + str(pair[0]), mrp[role].items()))

    if args.sg != "":
        # A subgames file restricts mixed-equilibrium search to each
        # listed subgame.
        subgames = read(args.sg)
        print "\n\n" + str(len(subgames)), "subgame" + \
                ("s" if len(subgames) > 1 else "") + "\n"
    else:
        # Otherwise search the whole (dominance-reduced) strategy space.
        subgames = [rational_game.strategies]

    #mixed strategy Nash equilibrium search
    for i, sg_strat in enumerate(subgames):
        sg = subgame(rational_game, sg_strat)
        if args.sg != "":
            print "\nsubgame "+str(i+1)+":\n", "\n".join(map(lambda x: x[0] + \
                    ":\n\t\t" + "\n\t\t".join(x[1]), sorted( \
                    sg.strategies.items()))).expandtabs(4)
        mixed_equilibria = mixed_nash(sg, args.r, args.d, iters=args.i, \
                converge_thresh=args.c)
        print "\n" + str(len(mixed_equilibria)), "approximate mixed strategy"+ \
                " Nash equilibri" + ("um:" if len(mixed_equilibria) == 1 \
                else "a:")
        for j, eq in enumerate(mixed_equilibria):
            # Lift the subgame mixture back into the full input game.
            full_eq = translate(eq, sg, input_game)
            # True iff every neighboring deviation profile has data in the
            # input game; if not, reported regret is only a lower bound
            # (printed as ">=" below).
            all_data = all(map(lambda p: p in input_game, neighbors(\
                    input_game, full_eq)))
            BR = {r:(list(t[0])[0] if len(t[0]) > 0 else None) for r,t in \
                    best_responses(input_game, full_eq).items()}
            reg = max(map(lambda r: regret(input_game, full_eq, \
                    deviation=BR[r]), input_game.roles))
            print str(j+1) + ". regret ", ("=" if all_data else ">=") , round(\
                    reg,4), "; social welfare =", round(social_welfare(sg,eq),4)
            if len(sg.roles) > 1:
                for r in sg.roles:
                    print "\ttotal value for", r, "=", social_welfare(sg, eq, r)
            # Print the support of the equilibrium: strategies played with
            # probability >= args.s.
            # NOTE(review): full_eq appears to be indexed [role][strategy],
            # aligned with input_game's role/strategy ordering -- confirm
            # against translate()'s return type.
            support = {r:[] for r in input_game.roles}
            for k,role in enumerate(input_game.roles):
                print role + ":"
                for l,strategy in enumerate(input_game.strategies[role]):
                    if full_eq[k][l] >= args.s:
                        support[role].append(strategy)
                        print " " + strategy + ": " + str(round(100 * \
                                full_eq[k][l], 2)) + "%"
            if args.sg != "":
                print "best responses:"
                for role in input_game.roles:
                    # Gain from deviating to the best response; infinite
                    # regret is printed as "?" (no data for the deviation).
                    deviation_support = deepcopy(support)
                    deviation_support[role].append(BR[role])
                    r = regret(input_game, full_eq, role, deviation=BR[role])
                    print "\t" + str(role) + ": " + BR[role] + ";\tgain =", \
                            (round(r, 4) if not isinf(r) else "?")
                    print "Deviation subgame " + ("explored." if subgame( \
                            input_game, deviation_support).isComplete() else \
                            "UNEXPLORED!") + "\n"
if __name__ == "__main__":
    # Echo the exact invocation first, so captured/saved output records
    # which command produced it.
    print "command: " + " ".join(argv) + "\n"
    args = parse_args()
    main(args)