Example #1
File: Dominance.py Project: augie/egats
def PureStrategyDominance(game, conditional=True, weak=False):
	"""
	pure-strategy dominance criterion for IEDS

	conditional==0==False --> unconditional dominance
	conditional==1==True ---> conditional dominance
	conditional==2 ---------> extra-conservative conditional dominance
	"""
	undominated = {r:set(game.strategies[r]) for r in game.roles}
	for r in game.roles:
		for dominant, dominated in product(game.strategies[r], repeat=2):
			if dominant == dominated or dominated not in undominated[r]:
				continue
			dominance_proved = False
			for profile in game:
				if dominated in profile[r]:
					reg = regret(game, profile, r, dominated, dominant)
					if reg > 0 and not isinf(reg):
						dominance_proved = True
					elif (reg < 0) or (reg == 0 and not weak) or \
							(isinf(reg) and conditional):
						dominance_proved = False
						break
				elif dominant in profile[r] and conditional > 1:
					if profile.deviate(r, dominant, dominated) not in game:
						dominance_proved = False
						break
			if dominance_proved:
				undominated[r].remove(dominated)
	return Subgame(game, undominated)
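For intuition, here is a minimal, self-contained sketch of the same idea: strict pure-strategy dominance checked against every opponent configuration, as the profile loop above does. The payoff matrix and helper name below are illustrative assumptions, not part of the egats/GameAnalysis API.

# Minimal sketch (assumed example, not project code): strict dominance for
# the row player of a bimatrix game, mirroring the profile-by-profile
# comparison in PureStrategyDominance above.
import numpy as np

# Row player's payoffs in a Prisoner's Dilemma: rows = own strategy
# (0 = Cooperate, 1 = Defect), columns = opponent's strategy.
row_payoffs = np.array([[3.0, 0.0],
                        [5.0, 1.0]])

def strictly_dominates(payoffs, dominant, dominated):
	# dominant beats dominated against every opponent strategy, i.e. in
	# every "profile" the dominated strategy appears in
	return np.all(payoffs[dominant] > payoffs[dominated])

print(strictly_dominates(row_payoffs, 1, 0))  # True: Defect dominates Cooperate
print(strictly_dominates(row_payoffs, 0, 1))  # False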
Example #2
def best_responses(game, prof, role=None, strategy=None):
	"""
	If role is unspecified, bestResponses returns a dict mapping each role
	all of its strategy-level results. If strategy is unspecified,
	bestResponses returns a dict mapping strategies to the set of best
	responses to the opponent-profile without that strategy.
	"""
	if role == None:
		return {r: best_responses(game, prof, r, strategy) for r \
				in game.roles}
	if strategy == None and isinstance(prof, Profile):
		return {s: best_responses(game, prof, role, s) for s in \
				prof[role]}
	best_deviations = set()
	biggest_gain = float('-inf')
	unknown = set()
	for dev in game.strategies[role]:
		reg = regret(game, prof, role, strategy, dev)
		if isinf(reg):
			unknown.add(dev)
		elif reg > biggest_gain:
			best_deviations = {dev}
			biggest_gain = reg
		elif reg == biggest_gain:
			best_deviations.add(dev)
	return best_deviations, unknown
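For intuition, here is a minimal sketch of the quantity best_responses extracts from regret(): the strategies maximizing expected payoff against a fixed opponent mixture in a 2-player game. The matrix and names below are assumptions for illustration, not the project's API.

# Minimal sketch (assumed example): best responses to an opponent mixture.
import numpy as np

payoffs = np.array([[3.0, 0.0],   # row player's payoff matrix
                    [5.0, 1.0]])
opponent_mix = np.array([0.5, 0.5])

expected = payoffs.dot(opponent_mix)  # expected payoff of each strategy
best = np.flatnonzero(expected == expected.max())
print(best)  # indices of the best-response strategies: [1]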
Example #3
File: Dominance.py Project: augie/egats
def bestResponses(game, p, role=None, strategy=None):
	"""
	If role is unspecified, bestResponses returns a dict mapping each role
	all of its strategy-level results. If strategy is unspecified,
	bestResponses returns a dict mapping strategies to the set of best
	responses to the opponent-profile without that strategy.

	If conditional=True, bestResponses returns two sets: the known best
	responses, and the deviations whose value is unkown; otherwise it
	returns only the known best response set.
	"""
	if role == None:
		return {r: bestResponses(game, p, r, strategy) for r \
				in game.roles}
	if strategy == None and isinstance(p, Profile):
		return {s: bestResponses(game, p, role, s) for s in \
				p[role]}
	best_deviations = set()
	biggest_gain = float('-inf')
	unknown = set()
	for dev in game.strategies[role]:
		r = regret(game, p, role, strategy, dev)
		if isinf(r):
			unknown.add(dev)
		elif r > biggest_gain:
			best_deviations = {dev}
			biggest_gain = r
		elif r == biggest_gain:
			best_deviations.add(dev)
	return list(best_deviations), list(unknown)
Example #4
File: Nash.py Project: augie/egats
def EquilibriumRegrets(game, eq):
	regrets = {}
	for role in game.roles:
		regrets[role] = {}
		for strategy in game.strategies[role]:
			# note: the sign is flipped, so the stored value is the negated
			# gain from deviating to strategy
			regrets[role][strategy] = -regret(game, eq, deviation=strategy)
	return regrets
Example #5
File: Nash.py Project: oveou/GameAnalysis
def mixed_nash(game, regret_thresh=1e-3, dist_thresh=1e-3, random_restarts=0, \
		at_least_one=False, *RD_args, **RD_kwds):
	"""
	Runs replicator dynamics from multiple starting mixtures.
	"""
	equilibria = []
	all_eq = []
	for m in game.biasedMixtures() + [game.uniformMixture()] + \
			[game.randomMixture() for __ in range(random_restarts)]:
		eq = replicator_dynamics(game, m, *RD_args, **RD_kwds)
		distances = map(lambda e: norm(e-eq,2), equilibria)
		if regret(game, eq) <= regret_thresh and all([d >= dist_thresh \
				for d in distances]):
			equilibria.append(eq)
		all_eq.append(eq)
	if len(equilibria) == 0 and at_least_one:
		return [min(all_eq, key=lambda e: regret(game, e))]
	return equilibria
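The distance filter above keeps a candidate only if it is sufficiently far from every equilibrium already accepted. A minimal standalone sketch of that dedup-by-distance pattern (the mixtures below are made-up data):

# Minimal sketch (assumed example): keep only sufficiently distinct mixtures.
import numpy as np
from numpy.linalg import norm

dist_thresh = 1e-3
candidates = [np.array([0.5, 0.5]), np.array([0.5000001, 0.4999999]),
              np.array([0.9, 0.1])]
distinct = []
for c in candidates:
	# accept c only if it is at least dist_thresh from everything kept so far
	if all(norm(c - d, 2) >= dist_thresh for d in distinct):
		distinct.append(c)
print(len(distinct))  # 2: the near-duplicate mixture is dropped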
Example #6
File: Nash.py Project: augie/egats
def write_mixed_eq(game, base_game, args):
	if base_game is None:
		base_game = game
	# note: i is not defined in this excerpt; in the original file it
	# presumably comes from an enclosing loop over games
	print "\ngame "+str(i+1)+":\n", "\n".join(map(lambda x: x[0] + \
			":\n\t\t" + "\n\t\t".join(x[1]), sorted( \
			game.strategies.items()))).expandtabs(4)
	mixed_equilibria = MixedNash(game, args.r, args.d, iters=args.i, \
		converge_thresh=args.c)
	print "\n" + str(len(mixed_equilibria)), "approximate mixed strategy"+ \
			" Nash equilibri" + ("um:" if len(mixed_equilibria) == 1 \
			else "a:")
	for j, eq in enumerate(mixed_equilibria):
		full_eq = translate(eq, game, base_game)
		if all(map(lambda p: p in base_game, neighbors(base_game, \
				full_eq))):
			print str(j+1) + ". regret =", round(regret(base_game, \
					full_eq), 4)
		else:
			print str(j+1) + ". regret >=", round(regret(base_game,  \
					full_eq, bound=True), 4)

		support = {r:[] for r in base_game.roles}
		for k,role in enumerate(base_game.roles):
			print role + ":"
			for l,strategy in enumerate(base_game.strategies[role]):
				if full_eq[k][l] >= args.s:
					support[role].append(strategy)
					print "    " + strategy + ": " + str(round(100 * \
							full_eq[k][l], 2)) + "%"

		BR = bestResponses(base_game, full_eq)
		print "best responses:"
		for role in base_game.roles:
			deviation_support = deepcopy(support)
			deviation_support[role].extend(BR[role][0])
			if len(BR[role][0]) == 0:
				continue
			r = regret(base_game, full_eq, role, deviation=BR[role][0][0])
			print "\t" + str(role) + ": " + ", ".join(BR[role][0]) + \
					";\tgain =", (round(r, 4) if not isinf(r) else "?")
			if base_game != game:
				print "Deviation game " + ("explored." if Subgame( \
						base_game, deviation_support).isComplete() else \
						"UNEXPLORED!") + "\n"
Example #7
File: Nash.py Project: augie/egats
def MixedNash(game, regret_thresh=1e-4, dist_thresh=1e-2, *RD_args, **RD_kwds):
	"""
	Runs replicator dynamics from multiple starting mixtures.
	"""
	equilibria = []
	for m in game.biasedMixtures() + [game.uniformMixture()]:
		eq = ReplicatorDynamics(game, m, *RD_args, **RD_kwds)
		distances = map(lambda e: norm(e-eq,2), equilibria)
		if regret(game, eq) <= regret_thresh and all([d >= dist_thresh \
				for d in distances]):
			equilibria.append(eq)
	return equilibria
Example #8
File: Nash.py Project: augie/egats
def write_pure_eq(game, base_game, args):
	if base_game is None:
		base_game = game
	pure_equilibria = PureNash(game, args.r)
	l = len(pure_equilibria)
	if l > 0:
		print "\n" + str(len(pure_equilibria)), "pure strategy Nash equilibri" \
				+ ("um:" if l == 1 else "a:")
		for i, eq in enumerate(pure_equilibria):
			print str(i+1) + ". regret =", round(regret(base_game, eq), 4)
			for role in base_game.roles:
				print "    " + role + ":", ", ".join(map(lambda pair: \
						str(pair[1]) + "x " + str(pair[0]), eq[role].items()))
	else:
		print "\nno pure strategy Nash equilibria found."
		mrp = MinRegretProfile(game)
		print "regret =", regret(base_game, mrp)
		print "minimum regret pure strategy profile (regret = " + \
				str(round(regret(base_game, mrp), 4)) + "):"
		for role in base_game.roles:
			print "    " + role + ":", ", ".join(map(lambda pair: \
					str(pair[1]) + "x " + str(pair[0]), mrp[role].items()))
Example #9
def dominates(game, role, dominant, dominated, conditional=True, weak=False):
	dominance_observed = False
	for prof in game:
		if dominated in prof[role]:
			reg = regret(game, prof, role, dominated, dominant)
			if reg > 0 and not isinf(reg):
				dominance_observed = True
			elif (reg < 0) or (reg == 0 and not weak) or \
					(isinf(reg) and conditional):
				return False
		elif conditional > 1 and dominant in prof[role] and \
				(prof.deviate(role, dominant, dominated) not in game):
			return False
	return dominance_observed
Example #10
File: Nash.py Project: augie/egats
def ReplicatorDynamics(game, mix, iters=10000, converge_thresh=1e-8, \
		verbose=False):
	"""
	Replicator dynamics.
	"""
	for i in range(iters):
		old_mix = mix
		mix = (game.expectedValues(mix) - game.minPayoffs + tiny) * mix
		mix = mix / mix.sum(1).reshape(mix.shape[0],1)
		if norm(mix - old_mix) <= converge_thresh:
			break
	if verbose:
		print i+1, "iterations ; mix =", mix, "; regret =", regret(game, mix)
	return mix
Example #11
File: Nash.py Project: oveou/GameAnalysis
def replicator_dynamics(game, mix, iters=10000, converge_thresh=1e-8, \
		verbose=False):
	"""
	Replicator dynamics.
	"""
	for i in range(iters):
		old_mix = mix
		mix = (game.expectedValues(mix) - game.minPayoffs + tiny) * mix
		mix = mix / mix.sum(1).reshape(mix.shape[0],1)
		if norm(mix - old_mix) <= converge_thresh:
			break
	if verbose:
		print i+1, "iterations ; mix =", mix, "; regret =", regret(game, mix)
	mix[mix < 0] = 0  # occasionally one of the probabilities is barely negative
	return mix
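The update rule multiplies each strategy's probability by its shifted expected payoff and renormalizes. A minimal, self-contained sketch of the same update on a single-population symmetric 2x2 Hawk-Dove game (the payoffs and constants below are illustrative assumptions; tiny stands in for the module-level constant used above):

# Minimal sketch (assumed example): replicator dynamics without the
# project's Game class.
import numpy as np

payoffs = np.array([[-1.0, 4.0],   # Hawk vs (Hawk, Dove)
                    [ 0.0, 2.0]])  # Dove vs (Hawk, Dove)
tiny = 1e-10
mix = np.array([0.9, 0.1])
for _ in range(10000):
	old_mix = mix
	ev = payoffs.dot(mix)                    # expected value of each strategy
	mix = (ev - payoffs.min() + tiny) * mix  # shift payoffs to be non-negative
	mix = mix / mix.sum()                    # renormalize to a distribution
	if np.linalg.norm(mix - old_mix) <= 1e-10:
		break
print(mix)  # converges near the mixed equilibrium [2/3, 1/3]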
Example #12
File: Nash.py Project: oveou/GameAnalysis
def pure_nash(game, epsilon=0):
	"""
	Finds all pure-strategy epsilon-Nash equilibria.
	"""
	return filter(lambda profile: regret(game, profile, bound=False)
			<= epsilon, game)
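pure_nash simply keeps the profiles whose regret is at most epsilon. A minimal standalone sketch of that criterion on a 2-player bimatrix game (the matrices and helper below are illustrative assumptions, not project code):

# Minimal sketch (assumed example): pure Nash by the regret criterion.
import numpy as np
from itertools import product

A = np.array([[3.0, 0.0], [5.0, 1.0]])  # row player's payoffs
B = A.T                                  # symmetric game: column player's payoffs

def pure_regret(i, j):
	# largest gain either player gets by unilaterally deviating from (i, j)
	return max(A[:, j].max() - A[i, j], B[i, :].max() - B[i, j])

equilibria = [(i, j) for i, j in product(range(2), repeat=2)
              if pure_regret(i, j) <= 0]
print(equilibria)  # [(1, 1)]: mutual defection in the Prisoner's Dilemma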
Example #13
File: Nash.py Project: oveou/GameAnalysis
def min_regret_profile(game):
	"""
	Finds the profile with the confirmed lowest regret.
	"""
	return min(game.knownProfiles(), key=lambda p: regret(game, p, bound=False))
Example #14
def main(args):
	input_game = read(args.game)
	print "input game =", abspath(args.game), "\n", input_game, "\n\n"

	#max social welfare
	soc_opt_prof, soc_opt_welf = max_social_welfare(input_game)
	print "max social welfare =", round(soc_opt_welf, 4)
	print "achieved by profile =", soc_opt_prof
	if len(input_game.roles) > 1:
		for r in input_game.roles:
			role_opt_prof, role_opt_welf = max_social_welfare(input_game, r)
			print "\tbest total value for", r, "=", role_opt_welf
			print "\tachieved by profile =", role_opt_prof
	print "\n\n"

	#iterated elimination of dominated strategies
	rational_game = iterated_elimination(input_game, pure_strategy_dominance, \
										conditional=1)
	eliminated = {r:sorted(set(input_game.strategies[r]) - set( \
				rational_game.strategies[r])) for r in input_game.roles}
	if any(map(len, eliminated.values())):
		print "dominated strategies:"
		for r in rational_game.roles:
			if eliminated[r]:
				print r, ":", ", ".join(eliminated[r])
	else:
		print "no dominated strategies found"

	#pure strategy Nash equilibrium search
	pure_equilibria = pure_nash(rational_game, args.r)
	l = len(pure_equilibria)
	if l > 0:
		print "\n" + str(len(pure_equilibria)), "pure strategy Nash " +\
				"equilibri" + ("um:" if l == 1 else "a:")
		for i, eq in enumerate(pure_equilibria):
			print str(i+1) + ". regret =", round(regret(input_game, eq), 4), \
					"; social welfare =", round(social_welfare(input_game,eq),4)
			for role in input_game.roles:
				print "    " + role + ":", ", ".join(map(lambda pair: \
						str(pair[1]) + "x " + str(pair[0]), eq[role].items()))
	else:
		print "\nno pure strategy Nash equilibria found."
		mrp = min_regret_profile(rational_game)
		print "regret =", regret(input_game, mrp)
		print "minimum regret pure strategy profile (regret = " + \
				str(round(regret(input_game, mrp), 4)) + "; social welfare = "+\
				str(round(social_welfare(input_game, mrp), 4)) + "):"
		for role in input_game.roles:
			print "    " + role + ":", ", ".join(map(lambda pair: \
					str(pair[1]) + "x " + str(pair[0]), mrp[role].items()))

	if args.sg != "":
		subgames = read(args.sg)
		print "\n\n" + str(len(subgames)), "subgame" + \
					("s" if len(subgames) > 1 else "") + "\n"
	else:
		subgames = [rational_game.strategies]

	#mixed strategy Nash equilibrium search
	for i, sg_strat in enumerate(subgames):
		sg = subgame(rational_game, sg_strat)
		if args.sg != "":
			print "\nsubgame "+str(i+1)+":\n", "\n".join(map(lambda x: x[0] + \
					":\n\t\t" + "\n\t\t".join(x[1]), sorted( \
					sg.strategies.items()))).expandtabs(4)
		mixed_equilibria = mixed_nash(sg, args.r, args.d, iters=args.i, \
			converge_thresh=args.c)
		print "\n" + str(len(mixed_equilibria)), "approximate mixed strategy"+ \
				" Nash equilibri" + ("um:" if len(mixed_equilibria) == 1 \
				else "a:")
		for j, eq in enumerate(mixed_equilibria):
			full_eq = translate(eq, sg, input_game)
			all_data = all(map(lambda p: p in input_game, neighbors(\
					input_game, full_eq)))
			BR = {r:(list(t[0])[0] if len(t[0]) > 0 else None) for r,t in \
					best_responses(input_game, full_eq).items()}
			reg = max(map(lambda r: regret(input_game, full_eq, \
					deviation=BR[r]), input_game.roles))
			print str(j+1) + ". regret ", ("=" if all_data else ">=") , round(\
					reg,4), "; social welfare =", round(social_welfare(sg,eq),4)
			if len(sg.roles) > 1:
				for r in sg.roles:
					print "\ttotal value for", r, "=", social_welfare(sg, eq, r)

			support = {r:[] for r in input_game.roles}
			for k,role in enumerate(input_game.roles):
				print role + ":"
				for l,strategy in enumerate(input_game.strategies[role]):
					if full_eq[k][l] >= args.s:
						support[role].append(strategy)
						print "    " + strategy + ": " + str(round(100 * \
								full_eq[k][l], 2)) + "%"
			if args.sg != "":
				print "best responses:"
				for role in input_game.roles:
					deviation_support = deepcopy(support)
					deviation_support[role].append(BR[role])
					r = regret(input_game, full_eq, role, deviation=BR[role])
					print "\t" + str(role) + ": " + BR[role] + ";\tgain =", \
							(round(r, 4) if not isinf(r) else "?")
					print "Deviation subgame " + ("explored." if subgame( \
							input_game, deviation_support).isComplete() else \
							"UNEXPLORED!") + "\n"
Example #15
File: Nash.py Project: augie/egats
def SymmetricProfileRegrets(game):
	assert len(game.roles) == 1, "game must be symmetric"
	role = game.roles[0]
	return {s: regret(game, Profile({role:{s:game.players[role]}})) for s \
			in game.strategies[role]}
Example #16
File: Nash.py Project: augie/egats
def MinRegretProfile(game):
	"""
	Finds the profile with the confirmed lowest regret.
	"""
	return min([(regret(game, profile), profile) for profile in game])[1]