Example #1
 def strategy(self, opponent: Player) -> Action:
     if len(opponent.history) == 0:
         return self._initial
     # Determine which probability to use
     p = self._four_vector[(self.history[-1], opponent.history[-1])]
     # Draw a random number in [0, 1] to decide
     return random_choice(p)
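
Every snippet on this page comes from a strategy in the Axelrod library and delegates its final randomised decision to random_choice. For reference, here is a minimal sketch of that helper, assuming the modern axelrod.action layout where C and D are the two Action values (the library's own implementation may differ in detail):

 import random
 from axelrod.action import Action  # assumption: modern Axelrod layout
 C, D = Action.C, Action.D

 def random_choice(p: float = 0.5) -> Action:
     """Return C with probability p, otherwise D.

     The edge cases return immediately so that no randomness is
     consumed when the outcome is already certain.
     """
     if p == 0:
         return D
     if p == 1:
         return C
     if random.random() < p:
         return C
     return D

Called with no argument, random_choice() is a fair coin between C and D, which is how several of the examples below use it.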
Example #2
 def strategy(self, opponent: Player) -> Action:
     if not opponent.history:
         return C
     if opponent.history[-1] == D:
         return D
     p = self._cooperation_probability()
     return random_choice(p)
Example #3
 def strategy(self, opponent: Player) -> Action:
     """This is the actual strategy"""
     if not self.history:
         return C
     if len(opponent.history) % 2 == 0:
         return random_choice(self.p)
     return opponent.history[-1]
Example #4
 def strategy(self, opponent: Player) -> Action:
     if len(opponent.history) <= 1:
         return self._initial
     # Determine which probability to use
     p = self._sixteen_vector[(tuple(self.history[-2:]),
                               tuple(opponent.history[-2:]))]
     # Draw a random number in [0, 1] to decide
     return random_choice(p)
Example #5
 def strategy(self, opponent: Player) -> Action:
     rounds = self._rounds_to_cooperate
     if len(self.history) < rounds:
         return C
     cooperate_count = opponent.history[-rounds:].count(C)
     prop_cooperate = cooperate_count / rounds
     prob_cooperate = max(0, prop_cooperate - 0.10)
     return random_choice(prob_cooperate)
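
The last three lines convert the opponent's recent record into a cooperation probability with a fixed 10% margin: for example, with rounds = 10 and an opponent who cooperated in 7 of the last 10 rounds, the player cooperates with probability max(0, 0.7 - 0.10) = 0.60.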
Example #6
    def strategy(self, opponent: Player) -> Action:
        current_round = len(self.history) + 1

        if current_round == 1:
            return C
        else:
            probability = 1 - opponent.defections / (current_round - 1)
            return random_choice(probability)
Example #7
 def select_action(self, state: str) -> Action:
     """
     Selects the action based on the epsilon-soft policy
     """
     rnd_num = random.random()
     p = 1.0 - self.action_selection_parameter
     if rnd_num < p:
         return max(self.Qs[state], key=lambda x: self.Qs[state][x])
     return random_choice()
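
Here action_selection_parameter plays the role of epsilon in an epsilon-soft policy: with probability 1 - epsilon the agent exploits the highest-valued action in Qs[state], and otherwise it falls back to random_choice(), i.e. a fair coin between C and D under the default p = 0.5.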
Example #8
 def strategy(self, opponent: Player) -> Action:
     round_number = len(self.history) + 1
     if round_number < 3:
         return C
     if round_number < 8:
         return opponent.history[-1]
     if self.history[-1] == opponent.history[-1]:
         return C
     return random_choice(2 / 7)
Example #9
 def strategy(self, opponent):
     if not hasattr(self, "_four_vector"):
         raise ValueError("_four_vector not yet set")
     if len(opponent.history) == 0:
         return self._initial
     # Determine which probability to use
     p = self._four_vector[(self.history[-1], opponent.history[-1])]
     # Draw a random number in [0, 1] to decide
     return random_choice(p)
Example #10
    def strategy(self, opponent: Player) -> Action:
        current_round = len(self.history) + 1

        if current_round == 1:
            return C
        elif current_round <= 20:
            return opponent.history[-1]
        else:
            probability = 20 / current_round
            return random_choice(probability)
Example #11
 def strategy(opponent):
     # First move
     if not opponent.history:
         # Make sure we cooperate first turn
         return C
     if D in opponent.history[-2:]:
         # Probability of cooperating regardless
         return random_choice(opponent.cooperations / len(opponent.history))
     else:
         return C
Example #12
 def strategy(self, opponent: Player) -> Action:
     # First move
     if len(self.history) == 0:
         return C
     # React to the opponent's last move
     if opponent.history[-1] == D:
         return D
     # Otherwise cooperate, defect with probability 1 - self.p
     choice = random_choice(1 - self.p)
     return choice
Example #13
    def find_reward(self, opponent: Player) -> Dict[Action, Dict[Action, Score]]:
        """
        Finds the reward gained on the last iteration
        """

        if len(opponent.history) == 0:
            opp_prev_action = random_choice()
        else:
            opp_prev_action = opponent.history[-1]
        return self.payoff_matrix[self.prev_action][opp_prev_action]
Example #14
    def reset(self):
        """
        Resets scores and history
        """
        super().reset()

        self.Qs = {'': {C: 0, D: 0}}
        self.Vs = {'': 0}
        self.prev_state = ''
        self.prev_action = random_choice()
Example #15
    def strategy(self, opponent: Player) -> Action:
        """This is the actual strategy"""
        if not self.history:
            return C

        if self.act_random:
            self.act_random = False
            return random_choice(self.p)

        self.act_random = True
        return opponent.history[-1]
Example #16
 def strategy(opponent: Player) -> Action:
     # Cooperate on the first move
     if not len(opponent.history):
         return C
     # Reciprocate cooperation
     if opponent.history[-1] == C:
         return C
     # Respond to defections with probability equal to opponent's total
     # proportion of defections
     defection_prop = opponent.defections / len(opponent.history)
     return random_choice(1 - defection_prop)
Example #17
    def strategy(self, opponent: Player) -> Action:
        if self.history:
            # Update internal state from the last play
            last_round = (self.history[-1], opponent.history[-1])
            self.s += self.delta[last_round]

        # Compute probability of Cooperation
        p = self.perr + (1.0 - 2 * self.perr) * (heaviside(self.s + 1, 1) -
                                                 heaviside(self.s - 1, 1))
        # Draw action
        action = random_choice(p)
        return action
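
The heaviside call here is numpy.heaviside(x1, x2), which returns 0 for x1 < 0, x2 at x1 == 0, and 1 otherwise; with x2 = 1 the bracketed difference equals 1 exactly when -1 <= s < 1. A small check of the resulting probabilities, with an illustrative perr of 0.05:

 from numpy import heaviside

 perr = 0.05  # illustrative value; the strategy sets its own
 for s in (-2, 0, 2):
     p = perr + (1.0 - 2 * perr) * (heaviside(s + 1, 1) - heaviside(s - 1, 1))
     print(s, p)  # -2 -> 0.05, 0 -> 0.95, 2 -> 0.05

So the player cooperates with probability 1 - perr while its internal state stays inside the band, and with probability perr outside it.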
Example #18
    def strategy(self, opponent: Player) -> Action:
        turns_number = len(self.history)
        sine_value = sin(2 * pi * turns_number / 10)

        if sine_value > 0.95:
            return D

        if abs(sine_value) < 0.95 and abs(sine_value) > 0.3:
            return opponent.history[-1]

        if sine_value < 0.3 and sine_value > -0.3:
            return random_choice()

        return C
Example #19
    def strategy(self, opponent: Player) -> Action:
        if not self.history:
            return C
        if self.is_defector:
            return D
        if self.history[-1] == D and opponent.history[-1] == C:
            decision = random_choice()
            if decision == C:
                return C
            else:
                self.is_defector = True
                return D

        return opponent.history[-1]
Example #20
 def strategy(self, opponent: Player) -> Action:
     """Runs a qlearn algorithm while the tournament is running."""
     if len(self.history) == 0:
         self.prev_action = random_choice()
         self.original_prev_action = self.prev_action
     state = self.find_state(opponent)
     reward = self.find_reward(opponent)
     if state not in self.Qs:
         self.Qs[state] = OrderedDict(zip([C, D], [0, 0]))
         self.Vs[state] = 0
     self.perform_q_learning(self.prev_state, state, self.prev_action, reward)
     action = self.select_action(state)
     self.prev_state = state
     self.prev_action = action
     return action
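
For context, perform_q_learning presumably applies a standard one-step Q-update; a minimal sketch under that assumption (the attribute names learning_rate and discount_rate are illustrative, not necessarily the library's):

 def perform_q_learning(self, prev_state, state, action, reward):
     # Blend the old estimate with the bootstrapped target r + gamma * V(s').
     old = self.Qs[prev_state][action]
     target = reward + self.discount_rate * self.Vs[state]
     self.Qs[prev_state][action] = (1 - self.learning_rate) * old + self.learning_rate * target
     # Track the state value as the best available action value.
     self.Vs[prev_state] = max(self.Qs[prev_state].values())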
Example #21
    def __init__(self) -> None:
        """Initialises the player by picking a random strategy."""

        super().__init__()

        # Set this explicitly, since the constructor of super will not pick it up
        # for any subclasses that do not override methods using random calls.
        self.classifier['stochastic'] = True

        self.prev_action = random_choice()
        self.history = []  # type: List[Action]
        self.score = 0
        self.Qs = OrderedDict({'': OrderedDict(zip([C, D], [0, 0]))})
        self.Vs = OrderedDict({'': 0})
        self.prev_state = ''
Example #22
class Geller(Player):
    """Observes what the player will do in the next round and adjust.

    If unable to do this: will play randomly.

    Geller - by Martin Chorley (@martinjc), heavily inspired by Matthew Williams (@voxmjw)

    This code is inspired by Matthew Williams' talk
    "Cheating at rock-paper-scissors — meta-programming in Python"
    given at Django Weekend Cardiff in February 2014.

    His code is here: https://github.com/mattjw/rps_metaprogramming
    and there's some more info here: http://www.mattjw.net/2014/02/rps-metaprogramming/

    This code is **way** simpler than Matt's, as in this exercise we already
    have access to the opponent instance, so don't need to go
    hunting for it in the stack. Instead we can just call it to
    see what it's going to play, and return a result based on that.

    This is almost certainly cheating, and more than likely against the
    spirit of the 'competition' :-)
    """

    name = 'Geller'
    default = lambda self: random_choice(0.5)
    classifier = {
        'memory_depth': -1,
        'stochastic': True,
        'makes_use_of': set(),
        'long_run_time': False,
        'inspects_source': True,  # Finds out what opponent will do
        'manipulates_source': False,
        'manipulates_state': False
    }

    def strategy(self, opponent: Player) -> Action:
        """
        Look at what the opponent will play in the next round and choose a strategy
        that gives the least jail time, which is equivalent to playing the same
        strategy as that which the opponent will play.
        """
        curframe = inspect.currentframe()
        calframe = inspect.getouterframes(curframe, 2)
        calname = calframe[1][3]
        if calname == 'strategy':
            return self.default()
        else:
            return opponent.strategy(self)
Example #23
File: hmm.py Project: Jona12/Axelrod
    def move(self, opponent_action: Action) -> Action:
        """Changes state and computes the response action.

        Parameters
            opponent_action: Axelrod.Action
                The opponent's last action.
        """
        num_states = len(self.emission_probabilities)
        if opponent_action == C:
            next_state = choice(num_states, 1, p=self.transitions_C[self.state])
        else:
            next_state = choice(num_states, 1, p=self.transitions_D[self.state])
        self.state = next_state[0]
        p = self.emission_probabilities[self.state]
        action = random_choice(p)
        return action
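
The choice call here appears to be numpy.random.choice, sampling the next hidden state from the row of the relevant transition matrix, so each row must sum to 1. A minimal illustration with a hypothetical two-state chain:

 import numpy as np
 from numpy.random import choice

 transitions_C = np.array([[0.9, 0.1],   # hypothetical row-stochastic matrix
                           [0.2, 0.8]])
 state = 0
 next_state = choice(2, 1, p=transitions_C[state])  # e.g. array([0])
 state = next_state[0]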
Example #24
    def strategy(self, opponent: Player) -> Action:

        if len(self.history) == 0:
            return C

        self.score_last_round(opponent)

        current_average_score = self.current_score / len(self.history)

        if current_average_score > self.very_good_score:
            return D
        if (current_average_score > self.wish_score) and (
                current_average_score < self.very_good_score):
            return C
        if current_average_score > 2:
            return C
        if (current_average_score < 2) and (current_average_score > 1):
            return D
        return random_choice()
Example #25
    def strategy(opponent: Player) -> Action:
        """Looks at opponent history to see if they have defected.

        If so, player defection is inversely proportional to when this occurred.
        """

        # calculate how many turns ago the opponent defected
        index = next(
            (
                index
                for index, value in enumerate(opponent.history[::-1], start=1)
                if value == D
            ),
            None,
        )

        if index is None:
            return C

        return random_choice(1 - 1 / abs(index))
Example #26
 def strategy(opponent: Player) -> Action:
     r = random.uniform(3, 7) / 10
     return random_choice(r)
Example #27
 def strategy(self, opponent: Player) -> Action:
     current_round = len(self.history) + 1
     expected_length = self.match_attributes["length"]
     probability = 1 - current_round / expected_length
     return random_choice(probability)
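
Note that probability decreases linearly and reaches zero or below once current_round reaches the expected match length; with a helper like the random_choice sketch above, any p <= 0 simply yields D from then on.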
Example #28
 def strategy(self, opponent: Player) -> Action:
     current_round = len(self.history) + 1
     probability = current_round / 1000
     return random_choice(probability)
Example #29
 def strategy(self, opponent: Player) -> Action:
     if not opponent.history:
         return random_choice()
     if self.history[-1] == D and opponent.history[-1] == D:
         return D
     return C
Example #30
 def test_strategy(self):
     """
     Test that the first move matches a seeded random_choice().
     """
     self.first_play_test(random_choice())
Example #31
 def strategy(self, opponent: Player) -> Action:
     if len(self.history) == 0:
         return random_choice(0.6)
     return self.history[-1]
Example #32
 def strategy(self, opponent: Player) -> Action:
     if len(opponent.history) == 0:
         return C
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example #33
 def strategy(self, opponent: Player) -> Action:
     # Random first move
     if not self.history:
         return random_choice()
     # Act opposite of opponent otherwise
     return flip_action(opponent.history[-1])
Example #34
 def foil_strategy_inspection() -> Action:
     """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
     return random_choice(0.5)
Example #35
 def strategy(self, opponent: Player) -> Action:
     if len(opponent.history) == 0:
         # Randomly picks a strategy (not affected by history).
         return random_choice(0.5)
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example #36
 def strategy(self, opponent: Player) -> Action:
     return random_choice(self.p)