def __init__(self, four_vector=None, initial=C):
    """
    Parameters
    ----------
    fourvector, list or tuple of floats of length 4
        The response probabilities to the preceding round of play
        ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) )
    initial, C or D
        The initial move

    Special Cases
    -------------
    Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C)
    Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C)
    Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), C)
    Random is equivalent to MemoryOnePlayer((0.5, 0.5, 0.5, 0.5))
    (with a random choice for the initial state)
    TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C)
    WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C)

    See also: The remaining strategies in this file
              Multiple strategies in titfortat.py
              Grofman, Joss in axelrod_tournaments.py
    """
    Player.__init__(self)
    self._initial = initial
    # A falsy four_vector (e.g. None) leaves the vector unset; subclasses
    # may call set_four_vector themselves.
    if four_vector:
        self.set_four_vector(four_vector)
def __init__(self):
    """Initialise the player with all calm/punishment bookkeeping cleared."""
    Player.__init__(self)
    # Flags for the two behavioural modes.
    self.calming = False
    self.punishing = False
    # Progress and length of the current punishment streak.
    self.punishment_count = 0
    self.punishment_limit = 0
def __init__(self, memory_depth=float('inf'), soft=True):
    """
    Parameters
    ----------
    memory_depth, int >= 0
        How many recent rounds feed the opponent's cooperation and
        defection counts.
    soft, bool
        Whether to cooperate when the two counts are tied.
    """
    Player.__init__(self)
    self.soft = soft
    self.classifier['memory_depth'] = memory_depth
    # A finite depth is kept directly; infinite depth is encoded as 0.
    if self.classifier['memory_depth'] < float('inf'):
        self.memory = self.classifier['memory_depth']
    else:
        self.memory = 0
    # Append the depth to the name only when it is finite (non-zero).
    base_name = 'Go By Majority'
    if self.memory > 0:
        base_name = base_name + (": %i" % self.memory)
    self.name = ("Soft " if self.soft else "Hard ") + base_name
def __init__(self):
    """Use the basic Player init, embedding the class retaliation
    threshold in the player name."""
    Player.__init__(self)
    self.name = 'Retaliate ({})'.format(self.retaliation_threshold)
def __init__(self, lookup_table=None, value_length=1):
    """Build the player from a lookup table; defaults to a TFT table.

    Table dimensions (number of own plays remembered, number of opponent
    opening moves considered) are inferred from the keys and validated.
    """
    Player.__init__(self)
    if not lookup_table:
        # Default table: plain Tit For Tat.
        lookup_table = {
            ('', 'C', 'D'): D,
            ('', 'D', 'D'): D,
            ('', 'C', 'C'): C,
            ('', 'D', 'C'): C,
        }
    self.lookup_table = lookup_table
    # Infer dimensions from any one key rather than taking them as
    # parameters: plays from the second element, opponent opening moves
    # from the first.
    sample_key = list(self.lookup_table.keys())[0]
    self.plays = len(sample_key[1])
    self.opponent_start_plays = len(sample_key[0])
    # When opponent opening moves are ignored, memory depth is bounded by
    # the number of plays remembered.
    if self.opponent_start_plays == 0:
        self.classifier['memory_depth'] = self.plays
    # Validate that every key matches the inferred dimensions.
    for k, v in lookup_table.items():
        if (len(k[1]) != self.plays) or (len(k[0]) != self.opponent_start_plays):
            raise ValueError("All table elements must have the same size")
        if value_length is not None:
            if len(v) > value_length:
                raise ValueError("Table values should be of length one, C or D")
def __init__(self, four_vector=None, initial=C):
    """
    Parameters
    ----------
    fourvector, list or tuple of floats of length 4
        Probabilities of cooperating given the previous round of play:
        ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) )
    initial, C or D
        The opening move

    Special Cases
    -------------
    Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C)
    Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C)
    Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), C)
    Random is equivalent to MemoryOnePlayer((0.5, 0.5, 0.5, 0.5))
    (with a random choice for the initial state)
    TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C)
    WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C)

    See also: the remaining strategies in this file, several strategies
    in titfortat.py, and Grofman, Joss in axelrod_tournaments.py.
    """
    Player.__init__(self)
    self._initial = initial
    # Only set the vector when one was actually supplied.
    if four_vector:
        self.set_four_vector(four_vector)
def reset(self):
    """Reset scores and history, and clear any held grudge."""
    Player.reset(self)
    self.grudged = False
    self.grudge_memory = 0
def __init__(self, memory_depth=3, exploreProb=0.2, learnerType=1):
    """
    Parameters
    ----------
    memory_depth, int >= 0
        The number of rounds to use for the calculation of the cooperation
        and defection probabilities of the opponent.
    exploreProb, float >= 0
        The probability of exploration while ignoring the best option.
    learnerType, int
        Selects the learner variant used by the strategy.
    """
    Player.__init__(self)
    self.type = learnerType
    # Size of the Q-table: one entry for each possible game state.
    self.qTabSize = 4 ** memory_depth
    # Q-values (updated during play) and per-state visit counts, keyed by
    # action (True/False).
    self.qTab = [{True: 2, False: 2} for _ in range(self.qTabSize)]
    self.turns = [{True: 1, False: 1} for _ in range(self.qTabSize)]
    self.totTurns = 0
    # The memory depth to consider while playing the game.
    self.classifier['memory_depth'] = memory_depth
    self.memory = self.classifier['memory_depth']
    self.explore = exploreProb
    # Initialize the payoff matrix for the game (R, P, S, T).
    (R, P, S, T) = self.tournament_attributes["game"].RPST()
    self.payoff = {C: {C: R, D: S}, D: {C: T, D: P}}
    self.prevState = 0
    self.prevAction = False
def __init__(self, lookup_table=None):
    """Build the player from a lookup table; without one, use the TFT
    table.

    Dimensions are inferred from the keys: own plays from the second
    element, opponent opening moves from the first.
    """
    Player.__init__(self)
    if not lookup_table:
        # Default: Tit For Tat expressed as a one-turn table.
        lookup_table = {
            ('', 'C', 'D'): D,
            ('', 'D', 'D'): D,
            ('', 'C', 'C'): C,
            ('', 'D', 'C'): C,
        }
    self.lookup_table = lookup_table
    # Infer the table dimensions from an arbitrary key instead of taking
    # them as separate constructor arguments.
    any_key = list(self.lookup_table.keys())[0]
    self.plays = len(any_key[1])
    self.opponent_start_plays = len(any_key[0])
    # Ignoring the opponent's opening moves bounds the memory depth.
    if self.opponent_start_plays == 0:
        self.classifier['memory_depth'] = self.plays
    # Ensure the table is well-formed.
    for k, v in lookup_table.items():
        if (len(k[1]) != self.plays) or (len(k[0]) != self.opponent_start_plays):
            raise ValueError("All table elements must have the same size")
        if len(v) > 1:
            raise ValueError("Table values should be of length one, C or D")
def __init__(self, memory_depth=float('inf'), soft=True):
    """
    Parameters
    ----------
    memory_depth, int >= 0
        Number of recent rounds used when tallying the opponent's
        cooperations and defections.
    soft, bool
        Cooperate (True) or defect (False) when the tallies are equal.
    """
    Player.__init__(self)
    self.soft = soft
    self.classifier['memory_depth'] = memory_depth
    # Infinite memory is stored as 0 so the name carries no depth suffix.
    self.memory = (self.classifier['memory_depth']
                   if self.classifier['memory_depth'] < float('inf')
                   else 0)
    suffix = (": %i" % self.memory) if self.memory > 0 else ""
    prefix = "Soft " if self.soft else "Hard "
    self.name = prefix + 'Go By Majority' + suffix
def __init__(self, start_coop_prob=1.0, end_coop_prob=0.5,
             rounds_of_decay=200):
    """Record the decay schedule for the cooperation probability.

    The probability moves from start_coop_prob to end_coop_prob over
    rounds_of_decay rounds.
    """
    Player.__init__(self)
    self._start_coop_prob = start_coop_prob
    self._end_coop_prob = end_coop_prob
    self._rounds_of_decay = rounds_of_decay
def __init__(self, cycle="CCD"):
    """Repeat the parameter `cycle` endlessly, e.g. "CCD" plays
    C C D C C D C C D ..."""
    Player.__init__(self)
    self.cycle = cycle
    self.name = self.name + " " + cycle
    # One full cycle minus the current move is enough history to know
    # the position in the cycle.
    self.classifier['memory_depth'] = len(cycle) - 1
    self.init_args = (cycle,)
def __init__(self):
    """Set the fixed opening sequence and defection bookkeeping."""
    Player.__init__(self)
    # Fixed 20-move opening sequence.
    self.init_sequence = [
        C, C, D, C, D, D, D, C, C, D,
        C, D, C, C, D, C, D, D, C, D,
    ]
    # Tallies of justified and unjustified defections observed.
    self.just_Ds = 0
    self.unjust_Ds = 0
    self.turned_defector = False
def reset(self):
    """Reset history/scores together with the internal estimates."""
    Player.reset(self)
    self.good, self.bad = 1.0, 0.0
    self.nice1 = 0
    self.nice2 = 0
    # Separate running tallies; not the same as self.cooperations /
    # self.defections.
    self.total_C = 0
    self.total_D = 0
def __init__(self, player, move):
    """Mimic `player`'s visible record while always playing `move`."""
    Player.__init__(self)
    # Retain a deep copy of the history so opponents that inspect our
    # past see the mimicked player's record without sharing its list.
    self.history = copy.deepcopy(player.history)
    self.cooperations = player.cooperations
    self.defections = player.defections
    self.move = move
def __init__(self, four_vector, initial='C'):
    """Store the four conditional cooperation probabilities and opener."""
    Player.__init__(self)
    outcomes = [('C', 'C'), ('C', 'D'), ('D', 'C'), ('D', 'D')]
    self._four_vector = dict(zip(outcomes, map(float, four_vector)))
    self._initial = initial
    # Stochastic iff any probability is neither exactly 0 nor exactly 1.
    self.stochastic = any(p not in (0, 1) for p in four_vector)
def __init__(self, retaliation_threshold=0.1):
    """
    Parameters
    ----------
    retaliation_threshold, float
        Stored on the instance and embedded in the player name.
    """
    Player.__init__(self)
    self.retaliation_threshold = retaliation_threshold
    self.name = 'Retaliate ({})'.format(self.retaliation_threshold)
    # Play tally; unseen keys default to zero.
    self.play_counts = defaultdict(int)
def __init__(self):
    """Basic Player init, with the class retaliation threshold baked
    into the player name."""
    Player.__init__(self)
    self.name = "Retaliate (%s)" % self.retaliation_threshold
def __init__(self, revised=True):
    """
    Parameters
    ----------
    revised, bool
        Whether to use the revised variant of the strategy.
    """
    Player.__init__(self)
    self.revised = revised
    # Internal estimates used by the strategy.
    self.good = 1.0
    self.bad = 0.0
    self.nice1 = 0
    self.nice2 = 0
    self.total_C = 0  # not the same as self.cooperations
    self.total_D = 0  # not the same as self.defections
def __init__(self, rounds_to_cooperate=10):
    """
    Parameters
    ----------
    rounds_to_cooperate: int, 10
        How many opening rounds are played cooperatively.
    """
    Player.__init__(self)
    self._rounds_to_cooperate = rounds_to_cooperate
def __init__(self, start_coop_prob=1.0, end_coop_prob=0.5,
             rounds_of_decay=200):
    """Record the linear-decay parameters for the cooperation
    probability, from start_coop_prob down to end_coop_prob over
    rounds_of_decay rounds."""
    Player.__init__(self)
    self._start_coop_prob = start_coop_prob
    self._end_coop_prob = end_coop_prob
    self._rounds_of_decay = rounds_of_decay
    # Remembered so the player can be re-created with identical arguments.
    self.init_args = (start_coop_prob, end_coop_prob, rounds_of_decay)
def __init__(self, transitions=None, initial_state=None, initial_action=None):
    """
    Parameters
    ----------
    transitions, list of (state, input_action, next_state, output_action)
        The finite state machine transition table; defaults to Tit For Tat.
    initial_state
        The FSM's starting state.
    initial_action
        The move played on the first round.
    """
    if not transitions:
        # Tit For Tat
        transitions = [(1, C, 1, C), (1, D, 1, D)]
        initial_state = 1
        initial_action = C
    Player.__init__(self)
    # Store the initial state as well (matching the sibling FSM player)
    # so the configured machine can be inspected or reset later.
    self.initial_state = initial_state
    self.initial_action = initial_action
    self.fsm = SimpleFSM(transitions, initial_state)
def __init__(self, transitions=None, initial_state=None, initial_action=None):
    """Configure the underlying finite state machine.

    With no transition table supplied, the machine defaults to
    Tit For Tat.
    """
    if not transitions:
        # Default machine: Tit For Tat.
        transitions = [(1, C, 1, C), (1, D, 1, D)]
        initial_state, initial_action = 1, C
    Player.__init__(self)
    self.initial_state = initial_state
    self.initial_action = initial_action
    self.fsm = SimpleFSM(transitions, initial_state)
def __init__(self, rounds_to_cooperate=11):
    """
    Parameters
    ----------
    rounds_to_cooperate: int, 11
        The number of rounds to cooperate initially
    """
    Player.__init__(self)
    self._rounds_to_cooperate = rounds_to_cooperate
    # NOTE(review): this rewrites the *class* attribute, so the recorded
    # memory depth of every instance tracks the most recent value passed
    # in — confirm this sharing is intended.
    self.__class__.memory_depth = rounds_to_cooperate
def __init__(self, initial='C'):
    """Build the fixed response table and store the opening move."""
    Player.__init__(self)
    # Deterministic response to the previous (own, opponent) move pair.
    self.response_dict = {
        ('C', 'C'): 'C',
        ('C', 'D'): 'D',
        ('D', 'C'): 'D',
        ('D', 'D'): 'C',
    }
    self._initial = initial
    self.stochastic = False
def reset(self):
    """Reset history/scores along with the learner's Q and V tables."""
    Player.reset(self)
    # Fresh value tables, keyed by the empty starting state.
    self.Qs = {'': {C: 0, D: 0}}
    self.Vs = {'': 0}
    self.prev_state = ''
    # The remembered previous action restarts as a random move.
    self.prev_action = random_choice()
def __init__(self, p=0.5):
    """
    Parameters
    ----------
    p, float
        Probability of cooperating on any given round.
    """
    Player.__init__(self)
    self.p = p
    self.init_args = (p,)
def __init__(self, four_vector=(1, 0, 0, 1), initial='C'):
    """
    Parameters
    ----------
    four_vector, sequence of four floats
        ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) ): probabilities of
        cooperating after each outcome of the previous round.
    initial, 'C' or 'D'
        The opening move.
    """
    Player.__init__(self)
    # Tuple default avoids the shared-mutable-default pitfall; the vector
    # is only read below, so existing callers are unaffected.
    self._four_vector = dict(
        zip([('C', 'C'), ('C', 'D'), ('D', 'C'), ('D', 'D')],
            map(float, four_vector)))
    self._initial = initial
    # Stochastic iff any probability lies strictly between 0 and 1.
    self.stochastic = False
    for x in set(four_vector):
        if x != 0 and x != 1:
            self.stochastic = True
def __init__(self, retaliation_threshold=0.1):
    """Store the retaliation threshold and embed it in the player name."""
    Player.__init__(self)
    self.retaliation_threshold = retaliation_threshold
    self.name = "Retaliate (%s)" % retaliation_threshold
    # Tally of observed plays; missing keys default to zero.
    self.play_counts = defaultdict(int)
def __init__(self, forget_probability=0.05):
    """
    Parameters
    ----------
    forget_probability, float
        Chance of forgetting the tally of opponent defections.
    """
    Player.__init__(self)
    # Running count of the opponent's defections (subject to forgetting).
    self.D_count = 0
    self._initial = C
    self.forget_probability = forget_probability
def __init__(self, p=0.1):
    """
    Parameters
    ----------
    p, float
        Probability of defecting at random.
    """
    Player.__init__(self)
    self.p = p
    # With a degenerate probability the behaviour is deterministic.
    if self.p in (0, 1):
        self.classifier['stochastic'] = False
def test_strategy(self):
    """MockPlayer replays its scripted actions regardless of opponent."""
    # A one-action script repeats that action.
    for action in [C, D]:
        mock = MockPlayer(actions=[action])
        opponent = Player()
        self.assertEqual(action, mock.strategy(opponent))
    # A longer script is replayed move by move.
    actions = [C, C, D, D, C, C]
    mock = MockPlayer(actions=actions)
    opponent = Player()
    for action in actions:
        self.assertEqual(action, mock.strategy(opponent))
def reset(self): """ Resets scores and history """ Player.reset(self) self.Q = OrderedDict() self.Nsa = OrderedDict() self.prev_state = None self.prev_action = None self.n = 1 self.prob = []
def reset(self):
    """Reset scores, history, and zero the Q-table state."""
    Player.reset(self)
    # Zeroed Q-values and visit counts for every state.
    self.qTab = [{True: 0, False: 0} for _ in range(self.qTabSize)]
    self.turns = [{True: 0, False: 0} for _ in range(self.qTabSize)]
    self.totTurns = 0
    self.prevState = 0
    self.prevAction = False
def __init__(self, initial='C'):
    """Fixed-table responder with a configurable opening move."""
    Player.__init__(self)
    # Deterministic response keyed on the previous (own, opponent) moves.
    self.response_dict = {
        ('C', 'C'): 'C',
        ('C', 'D'): 'D',
        ('D', 'C'): 'D',
        ('D', 'D'): 'C',
    }
    self.classifier['stochastic'] = False
    self._initial = initial
    self.init_args = (initial,)
def __init__(self, forget_probability=0.05):
    """
    Parameters
    ----------
    forget_probability, float
        The probability of forgetting the count of opponent defections.
    """
    Player.__init__(self)
    # Count of opponent defections seen so far (may be "forgotten").
    self.D_count = 0
    self._initial = C
    self.forget_probability = forget_probability
def responses_test(self, history_1, history_2, responses, random_seed=None,
                   tournament_length=200):
    """Check a player's responses to an arbitrary pair of histories.

    history_1 / history_2 are the prior moves of the player under test
    and its opponent; responses are the expected subsequent moves of
    player one.
    """
    player_one = self.player()
    player_two = Player()
    # Both players need a tournament length to reason about the match.
    for p in (player_one, player_two):
        p.tournament_attributes["length"] = tournament_length
    test_responses(self, player_one, player_two, history_1, history_2,
                   responses, random_seed=random_seed)
def __init__(self):
    """Assemble the team of sub-players this meta player delegates to."""
    Player.__init__(self)
    # Make sure we don't use any meta players to avoid infinite recursion.
    self.team = [t for t in self.team if not issubclass(t, MetaPlayer)]
    self.nteam = len(self.team)
    # Instantiate all the players in our team.
    self.team = [t() for t in self.team]
    # If any team member is stochastic, this meta player is stochastic too.
    self.stochastic = any(t.stochastic for t in self.team)
def __init__(self, p=0.5):
    """
    Parameters
    ----------
    p, float
        Per-round probability of cooperating.

    Special Cases
    -------------
    Random(0) always defects (equivalent to Defector);
    Random(1) always cooperates (equivalent to Cooperator).
    """
    Player.__init__(self)
    self.p = p
def __init__(self, memory_depth=3, action_codes=None):
    """
    Parameters
    ----------
    memory_depth, int >= 0
        The number of rounds to use for the calculation of the cooperation
        and defection probabilities of the opponent.
    action_codes, bool list
        Indicates the exact action to perform based on the other players'
        history. Defaults to a list of 70 False values.
    """
    Player.__init__(self)
    # Build the default inside the body: a mutable list default would be
    # shared by every instance that omits the argument.
    if action_codes is None:
        action_codes = [False] * 70
    self.actions = action_codes
    self.classifier['memory_depth'] = memory_depth
    self.memory = self.classifier['memory_depth']
def __init__(self, cycle="CCD"):
    """Endlessly repeat the moves in `cycle`, e.g. "CCD" plays
    C C D C C D C C D ...

    Special Cases
    -------------
    Cycler("C") == Cooperator, Cycler("D") == Defector,
    Cycler("CD") == Alternator.
    """
    Player.__init__(self)
    self.cycle = cycle
    self.name = self.name + " " + cycle
    # Knowing the last len(cycle)-1 moves fixes the position in the cycle.
    self.classifier['memory_depth'] = len(cycle) - 1
def __init__(self, cycle="CCD"):
    """Repeat the move string `cycle` forever, e.g. C C D C C D C C D ...

    Cycler("C"), Cycler("D") and Cycler("CD") reproduce Cooperator,
    Defector and Alternator respectively.
    """
    Player.__init__(self)
    self.cycle = cycle
    self.name = "Cycler {}".format(cycle)
    # One cycle minus the current move pins down the cycle position.
    self.classifier['memory_depth'] = len(cycle) - 1
def __init__(self, memory_depth=3, action_codes=None):
    """
    Parameters
    ----------
    memory_depth, int >= 0
        The number of rounds to use for the calculation of the cooperation
        and defection probabilities of the opponent.
    action_codes, bool list
        Indicates the exact action to perform based on the other players'
        history. Defaults to a list of 70 False values.
    """
    Player.__init__(self)
    # Avoid a shared mutable default: construct the list per instance.
    if action_codes is None:
        action_codes = [False] * 70
    self.actions = action_codes
    self.classifier['memory_depth'] = memory_depth
    self.memory = self.classifier['memory_depth']
def __init__(self, four_vector=None, initial='C'):
    """
    Parameters
    ----------
    fourvector, list or tuple of floats of length 4
        Conditional cooperation probabilities
        ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) ).
    initial, 'C' or 'D'
        The opening move.
    """
    Player.__init__(self)
    self._initial = initial
    # Only install the vector when one was actually supplied.
    if four_vector:
        self.set_four_vector(four_vector)
    self.init_args = (four_vector, initial)
def __init__(self, p=0.5):
    """
    Parameters
    ----------
    p, float
        Probability of cooperating each round.

    Special Cases
    -------------
    Random(0) is equivalent to Defector;
    Random(1) is equivalent to Cooperator.
    """
    Player.__init__(self)
    self.p = p
    # A probability of exactly 0 or 1 makes the behaviour deterministic.
    if p in [0, 1]:
        self.classifier['stochastic'] = False
def __init__(self, name='Human', c_symbol='C', d_symbol='D'):
    """
    Parameters
    ----------
    name: string
        The human player's display name.
    c_symbol: string
        Symbol shown for cooperation in the history toolbar and prompt.
    d_symbol: string
        Symbol shown for defection in the history toolbar and prompt.
    """
    Player.__init__(self)
    self.name = name
    # Map each action to its display symbol.
    self.symbols = {C: c_symbol, D: d_symbol}
    self.opponent_history = []