def test_random_score(self, r, p, s, t):
    """Test score method with random scores using the hypothesis library."""
    game = Game(r, s, t, p)
    self.assertEqual(game.score((C, C)), (r, r))
    self.assertEqual(game.score((D, D)), (p, p))
    self.assertEqual(game.score((C, D)), (s, t))
    self.assertEqual(game.score((D, C)), (t, s))
def play_intent(Rounds, Strategy):
    you = Googliness(name='you')
    strategy = which_strategy(Strategy)
    if strategy == "ERROR":
        return err('play')
    opp = axl.strategies[[s.name for s in axl.strategies].index(strategy)]()

    global ROUNDS
    try:
        ROUNDS = int(Rounds)
    except (ValueError, TypeError):
        return err('round')
    if ROUNDS == 0:
        return err('round')

    global PLAYERS
    PLAYERS = []
    PLAYERS.append(opp)
    PLAYERS.append(you)
    for p in PLAYERS:
        p.reset()
    for player in PLAYERS:
        player.set_match_attributes(length=ROUNDS, game=Game(), noise=0)
    return Match().talk()
@composite
def games(draw, prisoners_dilemma=True, max_value=100):
    """
    A hypothesis composite strategy that returns a random game.

    Parameters
    ----------
    prisoners_dilemma : bool
        If False, the R, P, S, T values are uniformly random. Defaults to
        True, which ensures T > R > P > S and 2R > T + S.
    max_value : int
        The maximal payoff value.
    """
    if prisoners_dilemma:
        s_upper_bound = max_value - 4  # Ensures there is enough room
        s = draw(integers(max_value=s_upper_bound))

        t_lower_bound = s + 3  # Ensures there is enough room
        t = draw(integers(min_value=t_lower_bound, max_value=max_value))

        r_upper_bound = t - 1
        r_lower_bound = min(max(int((t + s) / 2), s) + 2, r_upper_bound)
        r = draw(integers(min_value=r_lower_bound, max_value=r_upper_bound))

        p_lower_bound = s + 1
        p_upper_bound = r - 1
        p = draw(integers(min_value=p_lower_bound, max_value=p_upper_bound))
    else:
        s = draw(integers(max_value=max_value))
        t = draw(integers(max_value=max_value))
        r = draw(integers(max_value=max_value))
        p = draw(integers(max_value=max_value))

    game = Game(r=r, s=s, t=t, p=p)
    return game
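# A minimal usage sketch (not from the original sources): the composite
# strategy above can be consumed with hypothesis's @given decorator. The
# test name and the property checked here are illustrative assumptions;
# Game.RPST and Game.score are used as they are elsewhere in this repository.
from hypothesis import given

@given(game=games())
def test_mutual_payoffs_are_symmetric(game):
    (r, p, s, t) = game.RPST()
    assert game.score((C, C)) == (r, r)
    assert game.score((D, D)) == (p, p)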
def __init__(self):
    global ROUNDS
    global PLAYERS
    self.result = []
    self.game = Game()
    self.turns = ROUNDS
    self.players = list(PLAYERS)
    self._cache = DeterministicCache()
def test_matches_with_different_game():
    for strategy in all_strategies:
        for opponent in (Alternator, Cooperator, Defector):
            game = Game(r=4, s=0, p=2, t=6)
            players = (Player(strategy), opponent())
            match = Match(players, turns=200, game=game)
            assert all(action in (C, D)
                       for interaction in match.play()
                       for action in interaction)
def test_default_scores(self):
    expected_scores = {
        (C, D): (0, 5),
        (D, C): (5, 0),
        (D, D): (1, 1),
        (C, C): (3, 3),
    }
    self.assertEqual(Game().scores, expected_scores)
def test_random_init(self, r, p, s, t):
    """Test init with random scores using the hypothesis library."""
    expected_scores = {
        (C, D): (s, t),
        (D, C): (t, s),
        (D, D): (p, p),
        (C, C): (r, r),
    }
    game = Game(r, s, t, p)
    self.assertEqual(game.scores, expected_scores)
def __init__(self, chi=2):
    chi = float(chi)
    (R, P, S, T) = Game().RPST()
    phi_max = float(P - S) / ((P - S) + chi * (T - P))
    phi = phi_max / 2.

    p1 = 1. - phi * (chi - 1) * float(R - P) / (P - S)
    p2 = 1 - phi * (1 + chi * float(T - P) / (P - S))
    p3 = phi * (chi + float(T - P) / (P - S))
    p4 = 0

    four_vector = (p1, p2, p3, p4)
    super(self.__class__, self).__init__(four_vector)
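# A quick numeric check of the formulas above (not part of the original
# module), assuming the default payoffs (R, P, S, T) = (3, 1, 0, 5) and
# chi = 2: phi_max = 1/9, phi = 1/18, giving the four-vector (8/9, 1/2, 1/3, 0).
R, P, S, T = 3, 1, 0, 5
chi = 2.0
phi = (P - S) / ((P - S) + chi * (T - P)) / 2.0   # phi_max / 2
p1 = 1 - phi * (chi - 1) * (R - P) / (P - S)      # 8/9
p2 = 1 - phi * (1 + chi * (T - P) / (P - S))      # 1/2
p3 = phi * (chi + (T - P) / (P - S))              # 1/3
p4 = 0.0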
def __init__(self, players, turns, game=None, deterministic_cache=None,
             noise=0, match_attributes=None):
    """
    Parameters
    ----------
    players : tuple
        A pair of axelrod.Player objects
    turns : integer
        The number of turns per match
    game : axelrod.Game
        The game object used to score the match
    deterministic_cache : axelrod.DeterministicCache
        A cache of resulting actions for deterministic matches
    noise : float
        The probability that a player's intended action should be flipped
    match_attributes : dict
        Mapping attribute names to values which should be passed to players.
        The default is to use the correct values for turns, game and noise
        but these can be overridden if desired.
    """
    self.result = []
    self.turns = turns
    self._cache_key = (players[0].__class__, players[1].__class__, turns)
    self.noise = noise

    if game is None:
        self.game = Game()
    else:
        self.game = game

    if deterministic_cache is None:
        self._cache = DeterministicCache()
    else:
        self._cache = deterministic_cache

    if match_attributes is None:
        self.match_attributes = {
            'length': self.turns,
            'game': self.game,
            'noise': self.noise
        }
    else:
        self.match_attributes = match_attributes

    self.players = list(players)
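# A minimal usage sketch (not from the original sources) showing how a Match
# might be constructed with a non-default Game; the strategy choices and
# payoff values here are illustrative assumptions, while Match.play() and the
# Game keyword arguments appear elsewhere in this repository.
import axelrod as axl

players = (axl.TitForTat(), axl.Cooperator())
match = Match(players, turns=5, game=Game(r=4, s=0, t=6, p=2))
interactions = match.play()  # one (action, action) pair per turn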
def __init__(self):
    """Initialises the player by picking a random strategy."""
    super(RiskyQLearner, self).__init__()
    # Set this explicitly, since the constructor of super will not pick it up
    # for any subclasses that do not override methods using random calls.
    self.stochastic = True
    self.prev_action = random_choice()
    self.history = []
    self.score = 0
    self.Qs = OrderedDict({'': OrderedDict(zip(['C', 'D'], [0, 0]))})
    self.Vs = OrderedDict({'': 0})
    self.prev_state = ''
    (R, P, S, T) = Game().RPST()
    self.payoff_matrix = {'C': {'C': R, 'D': S},
                          'D': {'C': T, 'D': P}}
def __init__(self, phi=0., s=None, l=None):
    (R, P, S, T) = Game().RPST()
    if s is None:
        s = 1
    if l is None:
        l = R
    # Check parameters
    s_min = -min((T - l) / (l - S), (l - S) / (T - l))
    if (l < P) or (l > R) or (s > 1) or (s < s_min):
        raise ValueError

    p1 = 1 - phi * (1 - s) * (R - l)
    p2 = 1 - phi * (s * (l - S) + (T - l))
    p3 = phi * ((l - S) + s * (T - l))
    p4 = phi * (1 - s) * (l - P)

    four_vector = [p1, p2, p3, p4]
    MemoryOnePlayer.__init__(self, four_vector)
def look_ahead(self, opponent, rounds=10):
    """Plays a number of rounds to determine the best strategy."""
    results = []
    game = Game()
    round_robin = RoundRobin(players=[self, opponent], game=game,
                             turns=rounds)
    strategies = ['C', 'D']

    dummy_history_self = copy.copy(self.history)
    dummy_history_opponent = copy.copy(opponent.history)

    for strategy in strategies:
        self.simulate_match(opponent, strategy, rounds)
        results.append(round_robin.calculate_scores(self, opponent)[0])
        self.history = copy.copy(dummy_history_self)
        opponent.history = copy.copy(dummy_history_opponent)

    return strategies[results.index(max(results))]
def test_payoff_matrix(self):
    (R, P, S, T) = Game().RPST()
    payoff_matrix = {'C': {'C': R, 'D': S}, 'D': {'C': T, 'D': P}}
    p1 = self.player()
    self.assertEqual(p1.payoff_matrix, payoff_matrix)
def test_random_RPST(self, r, p, s, t):
    """Test RPST method with random scores using the hypothesis library."""
    game = Game(r, s, t, p)
    self.assertEqual(game.RPST(), (r, p, s, t))
def test_payoff_matrix(self):
    payoff_matrix = ap.payoff_matrix(self.interactions, Game())
    self.assertEqual(payoff_matrix, self.expected_payoff_matrix)
def test_wrong_class_equality(self):
    self.assertNotEqual(Game(), "wrong class")
def test_not_default_equality(self):
    self.assertEqual(Game(1, 2, 3, 4), Game(1, 2, 3, 4))
    self.assertNotEqual(Game(1, 2, 3, 4), Game(1, 2, 3, 5))
    self.assertNotEqual(Game(1, 2, 3, 4), Game())
def __init__(self):
    (R, P, S, T) = Game().RPST()
    ZeroDeterminantPlayer.__init__(self, phi=1. / 9, s=0.5, l=P)
def test_four_vector(self):
    player = self.player()
    (R, P, S, T) = Game().RPST()
    p = min(1 - float(T - R) / (R - S), float(R - P) / (T - P))
    expected_dictionary = {(C, C): 1., (C, D): p, (D, C): 1., (D, D): p}
    test_four_vector(self, expected_dictionary)
def test_default_RPST(self):
    expected_values = (3, 1, 0, 5)
    self.assertEqual(Game().RPST(), expected_values)
def test_interaction_payoff(self):
    payoff = ap.interaction_payoff(self.interactions[(2, 2)], Game())
    self.assertEqual(payoff, (13, 3))
def test_four_vector(self):
    (R, P, S, T) = Game().RPST()
    p = min(1 - (T - R) / (R - S), (R - P) / (T - P))
    expected_dictionary = {(C, C): 1., (C, D): p, (D, C): 1., (D, D): p}
    test_four_vector(self, expected_dictionary)
def __init__(self, phi=0., chi=2.):
    (R, P, S, T) = Game().RPST()
    ZeroDeterminantPlayer.__init__(self, phi=0.25, s=0.5, l=R)
def setUp(self):
    self.inspector = Player()
    self.game = Game()
def test_default_score(self):
    game = Game()
    self.assertEqual(game.score((C, C)), (3, 3))
    self.assertEqual(game.score((D, D)), (1, 1))
    self.assertEqual(game.score((C, D)), (0, 5))
    self.assertEqual(game.score((D, C)), (5, 0))
def test_payoff_matrix(self):
    (R, P, S, T) = Game().RPST()
    payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}}
    player = self.player()
    self.assertEqual(player.payoff_matrix, payoff_matrix)
def test_default_equality(self):
    self.assertEqual(Game(), Game())
import inspect

from axelrod import Player, Game

(R, P, S, T) = Game().RPST()


class Darwin(Player):
    """
    A strategy which accumulates a record (the 'genome') of what the most
    favourable response in the previous round should have been, and naively
    assumes that this will remain the correct response at the same round of
    future trials.

    This 'genome' is preserved between opponents, rounds and repetitions of
    the tournament. It becomes a characteristic of the type, so a single copy
    is shared by all instances for each loading of the class. As this results
    in information being preserved between tournaments, this is classified as
    a cheating strategy!

    If no record yet exists, the opponent's response from the previous round
    is returned.
    """

    name = "Darwin"
    memory_depth = float('inf')
    genome = ['C']
    valid_callers = ["play"]  # What functions may invoke our strategy.
    outcomes = {
        ('C', 'C'): R,
        ('C', 'D'): S,
        ('D', 'C'): T,
        ('D', 'D'): P,
    }
def __init__(self, p=None):
    (R, P, S, T) = Game().RPST()
    if not p:
        p = min(1 - float(T - R) / (R - S), float(R - P) / (T - P))
    four_vector = [1, p, 1, p]
    super(self.__class__, self).__init__(four_vector)
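# A quick numeric check of the default forgiveness probability above (not
# part of the original module), assuming the default payoffs
# (R, P, S, T) = (3, 1, 0, 5): p = min(1 - 2/3, 2/4) = 1/3.
R, P, S, T = 3, 1, 0, 5
p = min(1 - float(T - R) / (R - S), float(R - P) / (T - P))  # 1/3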