def gen_setting(T, P, targets_dict, time_horizon):
    """Build a (targets, profiles) pair for a generated game setting.

    Translates the stored targets for size T, draws P stochastic attacker
    distributions, and builds one profile entry whose "attacker" is the
    Stackelberg attacker and whose "others" are the remaining adversaries.
    """
    import source.players.attackers as attackers
    import source.game as game

    targets = translate(targets_dict[T][0])
    distributions = [tuple(gen_distr(T)) for _ in range(P)]
    values = tuple((t, t) for t in targets)

    g = game.Game(values, time_horizon)
    g.attackers = [1]
    g.defenders = [0]

    adversaries = [attackers.StackelbergAttacker(g, 1)]
    adversaries.extend(
        attackers.StochasticAttacker(g, 1, 1, *d) for d in distributions
    )

    index = 0  # hardcoded for stackelberg
    chosen = adversaries[index]
    profiles = [{
        "attacker": print_adv(chosen),
        "others": [print_adv(a) for a in adversaries if a != chosen],
        "i": index,
    }]
    return targets, profiles
def main2(arguments):
    """SG with a Stackelberg attacker and a defender who can distinguish
    between a Stackelberg and a Uniform attacker."""
    values = ((1, 1), (2, 2), (3, 3))
    time_horizon = 10
    g = game.Game(values, time_horizon)
    agent = player.StUDefender(g, 0)
    attacker = player.Attacker(g, 1)
    g.set_players([agent], [attacker])
    e = environment.Environment(g, 0)

    # One expert-feedback round per time step.
    for _ in range(g.time_horizon):
        e.observe_strategy(agent.compute_strategy())
        e.observe_realization(agent.sample_strategy())
        agent.receive_feedback(e.feedback("expert"))

    print("history of the game")
    for i, h in enumerate(g.history):
        print("strategies at " + str(i) + ":")
        print("\t agent:", g.strategy_history[i][0],
              "\t attacker:", g.strategy_history[i][1])
        print("moves at :" + str(i) + ":")
        print("\t agent:", h[0], "\t attacker:", h[1])
def translate(targets):
    """Shift every target down by a random offset, preserving the support.

    Draws r ~ Uniform(0, min(targets)) (rounded to 3 decimals), subtracts it
    from every target, and accepts the shifted list only when the resulting
    one-step game's support has the same size as the original target list.
    Otherwise a fresh offset is drawn and the check repeats.

    Fix: the original retried via unbounded recursion (`return
    translate(targets)`), which could raise RecursionError after many
    unlucky draws; this version retries in a loop with identical semantics.

    :param targets: non-empty sequence of numeric target values
    :return: list of shifted target values (targets[i] - r)
    """
    import source.game as game

    while True:
        m = min(targets)
        r = round(float(np.random.uniform(high=m)), 3)
        targets2 = [x - r for x in targets]
        values = tuple((v, v) for v in targets2)
        g = game.Game(values, 1)
        g.attackers = [1]
        g.defenders = [0]
        # Accept only offsets that keep the full support; otherwise redraw.
        if len(targets) == len(support(g)):
            return targets2
def main3(arguments):
    """Run the same defender/attacker setup through the already
    implemented helpers of the Experiment class."""
    time_horizon = 10
    values = ((1, 1), (2, 2), (3, 3))
    g = game.Game(values, time_horizon)
    agent = player.StUDefender(g, 0)
    attacker = player.Attacker(g, 1)
    g.set_players([agent], [attacker])

    experiment = Experiment(g)
    experiment.run()
    # Results are written to an existing folder (current directory here).
    experiment.save_results(".")
def gen_tar_with_len(length):
    """Repeatedly sample target sets until the game support has exactly
    `length` elements, then return that support.

    Samples 50 candidate targets when length == 10, otherwise 15.
    """
    import source.game as game

    time_horizon = 10
    sample_size = 50 if length == 10 else 15
    len_s = 0
    while len_s != length:
        targets = [round(x, 3) for x in gen_norm_targets(sample_size)]
        values = tuple((v, v) for v in targets)
        g = game.Game(values, time_horizon)
        g.attackers = [1]
        g.defenders = [0]
        s = support(g)
        len_s = len(s)
    return s
def setUp(self):
    """Prepare a two-target game with one defender, two attackers, and a
    fixed four-round history covering every move combination."""
    time_horizon = 10
    self.targets = [1, 2]
    values = tuple((t, t) for t in self.targets)
    self.game = game.Game(values, time_horizon)

    defender = player.Defender(self.game, 0)
    attacker = player.Attacker(self.game, 1)
    other = player.Attacker(self.game, 1)
    # NOTE(review): `attacker` appears in both the active-attacker list and
    # the profiles list — presumably intentional; confirm against Game API.
    self.game.set_players([defender], [attacker], [attacker, other])

    # One dict per round: {player index: [chosen target]}.
    self.game.history = [
        {0: [0], 1: [0]},
        {0: [1], 1: [0]},
        {0: [0], 1: [1]},
        {0: [1], 1: [1]},
    ]
def main(arguments):
    """SG with a Stackelberg attacker and a defender who play a uniform
    strategy."""
    values = ((1, 1), (2, 2), (3, 3))
    time_horizon = 10
    g = game.Game(values, time_horizon)
    agent = player.Defender(g, 0, 1)
    attacker = player.StackelbergAttacker(g, 1, 1)
    g.set_players([agent], [attacker])
    env = environment.Environment(g, 0)

    # One expert-feedback round per time step.
    for _ in range(g.time_horizon):
        env.observe_strategy(agent.compute_strategy())
        env.observe_realization(agent.sample_strategy())
        agent.receive_feedback(env.feedback("expert"))

    for entry in g.history:
        print(entry)
#! /usr/bin/python2
# NOTE(review): python2 shebang looks outdated for this codebase — confirm.
import source.game as game

if __name__ == "__main__":
    # Fix: the original did `game = game.Game()`, shadowing the imported
    # `game` module with the instance; use a distinct name instead.
    g = game.Game()
    g.on_loop()