Example #1: regret computed over missing game data comes back as float('inf'), at every level of argument specificity.
	def test_missing_regret(self):
		self.assertEqual(R.regret(self.cd_bl, self.BC, "Column",
				"Center", "Left"), float('inf'))
		self.assertEqual(R.regret(self.cd_bl, self.BC, "Column",
				"Center"), float('inf'))
		self.assertEqual(R.regret(self.cd_bl, self.BC, "Column"), float('inf'))
		self.assertEqual(R.regret(self.cd_bl, self.BC), float('inf'))
Example #2: a stopping rule that keeps sampling until the set of low-regret equilibria found by replicator dynamics stabilizes.
    def continue_sampling(self, matrix):
        # An empty observation matrix always needs more samples.
        if matrix.profile_dict == {}:
            return True
        game = matrix.toGame()
        decision = False
        equilibria = []
        all_eq = []
        # Re-run replicator dynamics from each previously found equilibrium;
        # if any of them moves by more than compare_threshold, sample again.
        for old_eq in self.old_equilibria:
            new_eq = Nash.replicator_dynamics(game, old_eq, self.iters,
                    self.converge_threshold)
            decision = decision or linalg.norm(new_eq - old_eq, 2) > \
                    self.compare_threshold
            distances = map(lambda e: linalg.norm(e - new_eq, 2), equilibria)
            # Keep the candidate if its regret is low and it is not a
            # near-duplicate of an equilibrium already kept.
            if Regret.regret(game, new_eq) <= self.regret_threshold and \
                    all([d >= self.dist_threshold for d in distances]):
                equilibria.append(new_eq)
            all_eq.append(new_eq)
        # Restart replicator dynamics from biased, uniform, and random
        # mixtures to look for equilibria not seen before.
        for m in game.biasedMixtures() + [game.uniformMixture()] + \
                [game.randomMixture() for __ in range(self.random_restarts)]:
            eq = Nash.replicator_dynamics(game, m, self.iters,
                    self.converge_threshold)
            distances = map(lambda e: linalg.norm(e - eq, 2), equilibria)
            if Regret.regret(game, eq) <= self.regret_threshold and \
                    all([d >= self.dist_threshold for d in distances]):
                # A genuinely new equilibrium: keep sampling.
                equilibria.append(eq)
                decision = True
            all_eq.append(eq)
        if len(equilibria) == 0:
            # Nothing met the regret threshold; fall back to the
            # lowest-regret candidate and keep sampling.
            decision = True
            self.old_equilibria = [min(all_eq,
                    key=lambda e: Regret.regret(game, e))]
        else:
            self.old_equilibria = equilibria
        return decision
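
The stopping decision above turns on 2-norm distances between mixture vectors. A minimal, self-contained illustration of that convergence test (the threshold and mixtures are arbitrary values chosen for this sketch, not taken from the project):

import numpy as np

compare_threshold = 0.01  # arbitrary threshold, for illustration only
old_eq = np.array([0.50, 0.30, 0.20])
new_eq = np.array([0.48, 0.31, 0.21])

# Sampling should continue if replicator dynamics moved the candidate
# farther than compare_threshold in 2-norm: here ~0.0245 > 0.01.
moved = np.linalg.norm(new_eq - old_eq, 2) > compare_threshold
print(moved)  # True
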
Example #3: a single sequential-sampling experiment: find a candidate mixed Nash equilibrium, then resample its neighborhood until a confidence-interval evaluator says to stop.
def single_test(game, noise_model, samples_per_step, delta, alpha,
                best_effort="false"):
    # Sample every known profile once to build the initial observed game.
    old_matrix = ObservationMatrix()
    for prof in game.knownProfiles():
        old_matrix.addObservations(prof,
                noise_model.generate_samples(game, prof, samples_per_step))
    candidate = Nash.mixed_nash(old_matrix.toGame(), at_least_one=True)[0]
    regret = Regret.regret(game, candidate)
    # Record the candidate's true regret and its per-strategy deviation
    # regrets in the underlying game.
    data = {"candidate": candidate,
            "game_eq": regret < delta,
            "regret": regret,
            "ne-regrets": {role:
                {strategy: Regret.regret(game, candidate, role, strategy)
                 for strategy in game.strategies[role]}
                for role in game.roles}}
    # best_effort arrives as a string flag ("true"/"false").
    if best_effort == "true":
        print 'true'
        evaluator = BestEffortCIEvaluator(game, [candidate], delta, alpha,
                BootstrapConfidenceInterval())
    else:
        evaluator = ConfidenceIntervalEvaluator(game, [candidate], delta,
                alpha, BootstrapConfidenceInterval())
    count = samples_per_step
    # Only profiles in the candidate's neighborhood need further sampling.
    target_set = Regret.mixture_neighbors(game, candidate).union(
            Regret.feasible_profiles(game, candidate))
    matrix = ObservationMatrix()
    for profile in target_set:
        matrix.addObservations(profile,
                {r: [PayoffData(s, profile[r][s], data_set)
                     for s, data_set in s_hash.items()]
                 for r, s_hash in old_matrix.profile_dict[profile].items()})
    # Resample until the evaluator is satisfied or the sample cap is hit.
    while evaluator.continue_sampling(matrix) and count < 1000:
        print evaluator.confidence_interval
        for prof in target_set:
            matrix.addObservations(prof,
                    noise_model.generate_samples(game, prof, samples_per_step))
        count += samples_per_step
    data["stopping_decision"] = evaluator.get_decision(matrix, candidate)
    data["sample_count"] = matrix.toGame().max_samples
    data["final_interval"] = evaluator.confidence_interval
    print data["final_interval"]
    return data
Example #4: with bound=True, regret queries that would be infinite over missing data return a lower bound instead, float('-inf') in the worst case.
	def test_regret_bound(self):
		self.assertEqual(R.regret(self.cd_bl, self.BC, "Column",
				"Center", "Left", True), float('-inf'))
		self.assertEqual(R.regret(self.cd_bl, self.BC, "Column",
				"Center", bound=True), -9)
		self.assertEqual(R.regret(self.cd_bl, self.BC, "Column",
				bound=True), -9)
		self.assertEqual(R.regret(self.cd_bl, self.BC, bound=True), 1)
Example #5: regret of a clique equilibrium after translating it back to the full sparse game, checked deviation by deviation.
	def test_SparseRegret(self):
		clique = S.cliques(self.ss)[0]
		clique_eq = N.mixed_nash(clique)[0]
		full_candidate = S.translate(clique_eq, clique, self.ss)
		self.assertEqual(R.regret(self.ss, full_candidate, deviation="A"), 0)
		self.assertEqual(R.regret(self.ss, full_candidate, deviation="B"), 0)
		self.assertEqual(R.regret(self.ss, full_candidate, deviation="C"), 1)
		self.assertEqual(R.regret(self.ss, full_candidate, deviation="D"), -1)
		self.assertEqual(R.regret(self.ss, full_candidate), 1)
Example #6: deviation regrets for specific (role, strategy, deviation) triples; deviating from a strategy that is not played in the profile raises KeyError.
	def test_dev_regret(self):
		self.assertEqual(R.regret(self.spd, self.BC, "Row", "Bottom",
				"Middle"), 1)
		self.assertEqual(R.regret(self.spd, self.BC, "Column", "Center",
				"Right"), -9)
		self.assertEqual(R.regret(self.spd, self.BC, "Column", "Center",
				"Center"), 0)
		self.assertEqual(R.regret(self.cliques, self.AAHL, "sellers",
				"low", "high"), 2)
		self.assertEqual(R.regret(self.cliques, self.AAHL, "sellers",
				"high", "low"), 4)
		self.assertEqual(R.regret(self.cliques, self.AAHL, "buyers",
				"accept", "reject"), -4)
		self.assertRaises(KeyError, R.regret, self.cliques, self.AAHL,
				"buyers", "reject", "accept")
Example #7: driver script: for each game and noise level, sample sequentially under a stopping rule, then bootstrap the regret of each equilibrium found and write the results as JSON.
def main():
    parser = ArgumentParser(description='Sequential Bootstrap Experiments')
    parser.add_argument('input_file', metavar='input_file',
            help='a yaml file specifying the required details')
    parser.add_argument('output_file', metavar='output_file',
            help='output json suitable for use with the plotting script')
    args = parser.parse_args()
    # Named 'config' rather than 'input' to avoid shadowing the builtin.
    with open(args.input_file) as f:
        config = yaml.safe_load(f)
    results = [{s: {} for s in config['stdevs']}
               for i in range(config['num_games'])]

    for i in range(config['num_games']):
        print i
        base_game = yaml_builder.construct_game(config['game'])
        stopping_rule = yaml_builder.construct_stopping_rule(
                config['stopping_rule'], base_game)
        for stdev in config['stdevs']:
            noise_model = yaml_builder.construct_model(stdev,
                    config['noise_model'])
            matrix, equilibria = add_noise_sequentially(base_game,
                    noise_model, stopping_rule, config['samples_per_step'])
            sample_game = matrix.toGame()
            # For each equilibrium: its true regret in the base game, a
            # bootstrap distribution of that regret, and the sampling effort.
            results[i][stdev][0] = [
                    {"profile": eq,
                     "statistic": Regret.regret(base_game, eq),
                     "bootstrap": Bootstrap.bootstrap(sample_game, eq,
                             Regret.regret, "resample", ["profile"]),
                     "sample_count": sample_game.max_samples}
                    for eq in equilibria]
    with open(args.output_file, 'w') as f:
        f.write(IO.to_JSON_str(results, indent=None))
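
The script reads a handful of keys from its YAML input. A minimal sketch of a compatible file follows; only the key names come from the lookups in the code above, and every value is an invented placeholder:

import yaml

# Hypothetical input file for main(); the nested section contents depend
# on yaml_builder and are left empty here.
config = {
    "num_games": 2,
    "stdevs": [0.5, 1.0],
    "samples_per_step": 10,
    "game": {},           # consumed by yaml_builder.construct_game
    "stopping_rule": {},  # consumed by yaml_builder.construct_stopping_rule
    "noise_model": {},    # consumed by yaml_builder.construct_model
}

with open("input.yaml", "w") as f:
    yaml.safe_dump(config, f)
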
Example #8: regret of a full profile, with no role or strategy arguments.
	def test_profile_regret(self):
		self.assertEqual(R.regret(self.spd, self.BC), 1)
		self.assertEqual(R.regret(self.cliques, self.AAHL), 4)
Example #9: regret restricted to a single role.
	def test_role_regret(self):
		self.assertEqual(R.regret(self.spd, self.BC, "Row"), 1)
		self.assertEqual(R.regret(self.cliques, self.AAHL, "buyers"), -4)
		self.assertEqual(R.regret(self.cliques, self.AAHL, "sellers"), 4)
Example #10: regret restricted to a single role and strategy.
	def test_strat_regret(self):
		self.assertEqual(R.regret(self.spd, self.BC, "Row", "Bottom"), 1)
		self.assertEqual(R.regret(self.cliques, self.AAHL, "buyers",
				"accept"), -4)
Example #11: regret of a uniform mixed strategy.
	def test_mix_reg(self):
		self.assertEqual(R.regret(self.pd, self.pd.uniformMixture()), 0.5)
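
Taken together, the calls above pin down how R.regret is invoked: positional role, strategy, and deviation arguments progressively narrow the computation, and deviation or bound can also be passed by keyword. A hypothetical stub of the signature, reconstructed from these examples alone (not the library's actual source):

def regret(game, profile, role=None, strategy=None, deviation=None,
           bound=False):
    """Regret of profile in game, restricted to the given role, strategy,
    and deviation when supplied.

    Signature inferred from the example calls above; consult the original
    Regret module for the real definition. Over missing data the examples
    show it returning float('inf'), or a (possibly float('-inf')) lower
    bound when bound=True.
    """
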