Example 1
 def test_return_values(self):
     self.assertEqual(random_choice(1), C)
     self.assertEqual(random_choice(0), D)
     random.seed(1)
     self.assertEqual(random_choice(), C)
     random.seed(2)
     self.assertEqual(random_choice(), D)
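All of these snippets exercise the same helper. Judging by the assertions above (and assuming this is the Axelrod library's random_choice), its behaviour can be sketched as follows; the C and D constants stand in for the library's action objects:

    import random

    C, D = 'C', 'D'  # stand-ins for the library's action constants

    def random_choice(p=0.5):
        # Return C with probability p, otherwise D (a sketch, not the
        # library's exact implementation)
        if random.random() < p:
            return C
        return D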
Example 2
 def test_seed_not_offset_by_deterministic_call(self):
     """Test that when called with p = 0 or 1, the random seed is not
     affected."""
     for p in [0, 1]:
         seed(0)
         r = random.random()
         seed(0)
         random_choice(p)
         self.assertEqual(r, random.random())
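This test implies that random_choice consumes no random number when the outcome is certain. A guard like the following would give that property (an assumption consistent with the test, not the verified implementation):

    import random

    C, D = 'C', 'D'  # stand-ins for the action constants

    def random_choice(p=0.5):
        # Deterministic cases return early without touching the RNG,
        # so the seed state is unchanged, the property tested above
        if p == 0:
            return D
        if p == 1:
            return C
        return C if random.random() < p else D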
Example 3
    def strategy(self, opponent: Player) -> Action:

        # First turn
        if len(self.history) == 0:
            return random_choice(self._c_prob / (self._c_prob + self._d_prob))

        # Update the stimulus depending on the player's own latest choice
        self.stimulus_update(opponent)

        return random_choice(self._c_prob / (self._c_prob + self._d_prob))
Example 4
 def strategy(self, opponent):
     if not opponent.history:
         return C
     if opponent.history[-1] == D:
         return D
     p = self._cooperation_probability()
     return random_choice(p)
Example 5
 def strategy(self, opponent):
     # Random first move
     if not self.history:
     return random_choice()

     # Act opposite of opponent otherwise
     return flip_action(opponent.history[-1])
Example 6
 def strategy(self, opponent):
     if not opponent.history:
         return C
     if opponent.history[-1] == D:
         return D
     p = self._cooperation_probability()
     return random_choice(p)
Example 7
class Geller(Player):
    """Observes what the player will do in the next round and adjust.

    If unable to do this: will play randomly.
    """

    name = 'Geller'
    default = lambda self: random_choice(0.5)
    classifier = {
        'memory_depth': -1,
        'stochastic': True,
        'makes_use_of': set(),
        'inspects_source': True,  # Finds out what opponent will do
        'manipulates_source': False,
        'manipulates_state': False
    }

    def strategy(self, opponent):
        """
        Look at what the opponent will play in the next round and choose a strategy
        that gives the least jail time, which is equivalent to playing the same
        strategy as that which the opponent will play.
        """
        curframe = inspect.currentframe()
        calframe = inspect.getouterframes(curframe, 2)
        calname = calframe[1][3]
        if calname == 'strategy':
            return self.default()
        else:
            return opponent.strategy(self)
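The caller-name check above is what keeps two Geller instances from recursing forever: each one's strategy calls the other's, so when the caller is itself a function named strategy, Geller falls back to the random default instead. A minimal standalone illustration of the inspect trick:

    import inspect

    def strategy():
        # Report the name of whoever called us, as Geller does
        curframe = inspect.currentframe()
        return inspect.getouterframes(curframe, 2)[1][3]

    def probe():
        return strategy()

    print(strategy())  # '<module>' when called from the top level
    print(probe())     # 'probe' when called from inside another function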
Example 8
    def strategy(self, opponent):
        # Random first move
        if not self.history:
            return random_choice()

        # Act opposite of opponent otherwise
        return flip_action(opponent.history[-1])
Example 9
    def strategy(self, opponent):
        """
        A player chooses the best action based on qTab, a predefined array
        of choices. It considers the mem previous moves and decides which
        move to make based on the computed Q table.
        The length of the state list is 4^mem.
        """
        mem = self.memory
        opHistory = opponent.history[-mem:]
        myHistory = self.history[-mem:]

        # If there is not yet enough history to form a state, play a random move
        if len(opHistory) < mem or len(myHistory) < mem:
            return random_choice()

        # Update the Q table when results of the previous turn are available
        self.qTabUpdate(self.prevState, self.prevAction,
                        self.payoff[decodeMove(self.prevAction)][opponent.history[-1]])

        # Encode the joint (own, opponent) history into a single state id
        choice = [encode(mine, theirs) for mine, theirs in zip(myHistory, opHistory)]
        ids = decode(choice, mem)

        if self.type > 0:
            self.totTurns += 1

        self.prevState = ids
        if random.random() < self.explore * correction(self.totTurns):
            # Explore: play a random move
            self.prevAction = encodeMove(random_choice())
        elif self.type == 1:
            # Exploit: pick the action with the highest Q value
            self.prevAction = max(self.qTab[ids].items(), key=operator.itemgetter(1))[0]
        else:
            # Sample an action with probabilities derived from the Q values
            self.prevAction = numpy.random.choice(list(self.qTab[ids]),
                                                  p=sigmoid(list(self.qTab[ids].values())))

        return decodeMove(self.prevAction)
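The encode and decode helpers are not shown in this snippet. The docstring's 4^mem state count suggests each remembered round contributes one of four joint outcomes, which a base-4 encoding captures; the convention below is hypothetical, not the snippet's actual code:

    def encode(my_move, opp_move):
        # Map a (my, opponent) outcome to a digit in 0..3 (hypothetical convention)
        return 2 * (my_move == 'D') + (opp_move == 'D')

    def decode(digits, mem):
        # Read the mem digits as a base-4 number: 4 ** mem possible states
        ids = 0
        for d in digits:
            ids = 4 * ids + d
        return ids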
Example 10
 def strategy(self, opponent):
     rounds = self._rounds_to_cooperate
     if len(self.history) < rounds:
         return C
     cooperate_count = opponent.history[-rounds:].count(C)
     prop_cooperate = cooperate_count / float(rounds)
     prob_cooperate = max(0, prop_cooperate - 0.10)
     return random_choice(prob_cooperate)
Example 11
 def strategy(self, opponent):
     rounds = self._rounds_to_cooperate
     if len(self.history) < rounds:
         return C
     cooperate_count = opponent.history[-rounds:].count(C)
     prop_cooperate = cooperate_count / float(rounds)
     prob_cooperate = max(0, prop_cooperate - 0.10)
     return random_choice(prob_cooperate)
Example 12
    def strategy(self, opponent):
        current_round = len(self.history) + 1

        if current_round == 1:
            return C
        else:
            probability = 1 - opponent.defections / (current_round - 1)
            return random_choice(probability)
Example 13
 def strategy(self, opponent):
     round_number = len(self.history) + 1
     if round_number < 3:
         return C
     if round_number < 8:
         return opponent.history[-1]
     if self.history[-1] == opponent.history[-1]:
         return C
     return random_choice(2. / 7)
Example 14
 def strategy(self, opponent):
     if not hasattr(self, "_four_vector"):
         raise ValueError("_four_vector not yet set")
     if len(opponent.history) == 0:
         return self._initial
     # Determine which probability to use
     p = self._four_vector[(self.history[-1], opponent.history[-1])]
     # Draw a random number in [0,1] to decide
     return random_choice(p)
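For context, the _four_vector maps each joint outcome of the previous round to a cooperation probability; this is the standard way to specify a memory-one strategy. Reading the keys as (own last move, opponent last move), familiar strategies reduce to four numbers:

    C, D = 'C', 'D'  # stand-ins for the action constants

    # Tit For Tat: cooperate exactly when the opponent cooperated last round
    tit_for_tat = {(C, C): 1, (C, D): 0, (D, C): 1, (D, D): 0}

    # Win-Stay Lose-Shift: repeat after a good outcome, switch after a bad one
    win_stay_lose_shift = {(C, C): 1, (C, D): 0, (D, C): 0, (D, D): 1}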
Example 15
 def select_action(self, state):
     """
     Selects the action based on the epsilon-soft policy
     """
     rnd_num = random.random()
     p = 1. - self.action_selection_parameter
     if rnd_num < p:
         return max(self.Qs[state], key=lambda x: self.Qs[state][x])
     return random_choice()
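An epsilon-soft policy plays the greedy action with probability 1 - epsilon and otherwise picks at random, so every action keeps some probability mass. A self-contained sketch of the same selection rule (names hypothetical):

    import random

    def epsilon_soft(q_values, epsilon=0.1):
        # q_values: dict mapping action -> estimated value
        if random.random() < 1 - epsilon:
            return max(q_values, key=q_values.get)  # exploit the best-known action
        return random.choice(list(q_values))        # explore uniformly at random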
Example 16
 def strategy(self, opponent):
     if not hasattr(self, "_four_vector"):
         raise ValueError("_four_vector not yet set")
     if len(opponent.history) == 0:
         return self._initial
     # Determine which probability to use
     p = self._four_vector[(self.history[-1], opponent.history[-1])]
     # Draw a random number in [0,1] to decide
     return random_choice(p)
Example 17
 def strategy(self, opponent):
     round_number = len(self.history) + 1
     if round_number < 3:
         return C
     if round_number < 8:
         return opponent.history[-1]
     if self.history[-1] == opponent.history[-1]:
         return C
     return random_choice(2. / 7)
Example 18
 def select_action(self, state):
     """
     Selects the action based on the epsilon-soft policy
     """
     rnd_num = random.random()
     p = 1. - self.action_selection_parameter
     if rnd_num < p:
         return max(self.Qs[state], key=lambda x: self.Qs[state][x])
     return random_choice()
Example 19
 def strategy(opponent):
     """Picks randomly on the first move; afterwards cooperates with
     probability equal to the opponent's cooperation proportion."""
     if len(opponent.history) == 0:
         return random_choice(0.5)
     p = opponent.cooperations / len(opponent.history)
     rnd_num = random.random()
     if rnd_num < p:
         return 'C'
     return 'D'
Example 20
    def select_action(self, state):
        if state == '':
            return random_choice()

        if self.Q[state][D] > self.Q[state][C]:
            action = D
        else:
            action = C

        rnd_num = random.random()
        p = self.boltzman(state, action)
        self.prob.append(p)
        if rnd_num < p:
            return action
        else:
            return random_choice()
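The boltzman method is not shown here; the usual Boltzmann (softmax) rule assigns the chosen action a probability that grows with its Q-value advantage and is controlled by a temperature. A sketch under that assumption, for the two-action case:

    import math

    def boltzmann(q_chosen, q_other, temperature=1.0):
        # Softmax probability of the chosen action out of two options
        e_chosen = math.exp(q_chosen / temperature)
        e_other = math.exp(q_other / temperature)
        return e_chosen / (e_chosen + e_other)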
Example 21
 def strategy(self, opponent):
     # First move
     if len(self.history) == 0:
         return C
     # React to the opponent's last move
     if opponent.history[-1] == D:
         return D
     # Otherwise cooperate, but defect with a small probability
     choice = random_choice(1 - self.p)
     return choice
Example 22
    def reset(self):
        """
        Resets scores and history
        """
        Player.reset(self)

        self.Qs = {'': {C: 0, D: 0}}
        self.Vs = {'': 0}
        self.prev_state = ''
        self.prev_action = random_choice()
Example 23
    def find_reward(self, opponent):
        """
        Finds the reward gained on the last iteration
        """

        if len(opponent.history) == 0:
            opp_prev_action = random_choice()
        else:
            opp_prev_action = opponent.history[-1]
        return self.payoff_matrix[self.prev_action][opp_prev_action]
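The payoff_matrix consulted here is built elsewhere; Example 36 below constructs it from the game's (R, P, S, T) values, so with the conventional prisoner's dilemma numbers it is:

    C, D = 'C', 'D'
    R, P, S, T = 3, 1, 0, 5  # reward, punishment, sucker, temptation

    # payoff_matrix[my_action][opponent_action] -> my score for the round
    payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}}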
Example 24
    def find_reward(self, opponent):
        """
        Finds the reward gained on the last iteration
        """

        if len(opponent.history) == 0:
            opp_prev_action = random_choice()
        else:
            opp_prev_action = opponent.history[-1]
        return self.payoff_matrix[self.prev_action][opp_prev_action]
Example 25
    def reset(self):
        """
        Resets scores and history
        """
        Player.reset(self)

        self.Qs = {'': {C: 0, D: 0}}
        self.Vs = {'': 0}
        self.prev_state = ''
        self.prev_action = random_choice()
Example 26
    def strategy(self, opponent):
        current_round = len(self.history) + 1

        if current_round == 1:
            return C
        elif current_round <= 20:
            return opponent.history[-1]
        else:
            probability = 20 / current_round
            return random_choice(probability)
Example 27
 def strategy(opponent):
     # Cooperate on the first move
     if not len(opponent.history):
         return C
     # Reciprocate cooperation
     if opponent.history[-1] == C:
         return C
     # Respond to defections with probability equal to opponent's total
     # proportion of defections
     defection_prop = float(opponent.defections) / len(opponent.history)
     return random_choice(1 - defection_prop)
Example 28
 def strategy(opponent):
     # Cooperate on the first move
     if not len(opponent.history):
         return C
     # Reciprocate cooperation
     if opponent.history[-1] == C:
         return C
     # Respond to defections with probability equal to opponent's total
     # proportion of defections
     defection_prop = float(opponent.defections) / len(opponent.history)
     return random_choice(1 - defection_prop)
Example 29
    def strategy(opponent):
        """Looks at opponent history to see if they have defected.

        If so, player defection is inversely proportional to when this occurred.
        """

        index = next((index for index, value in enumerate(opponent.history, start=1) if value == D), None)

        if index is None:
            return C

        return random_choice(1 - 1 / float(abs(index)))
Example 30
    def strategy(opponent):
        """Looks at opponent history to see if they have defected.

        If so, player defection is inversely proportional to when this occurred.
        """

        index = next((index
                      for index, value in enumerate(opponent.history, start=1)
                      if value == D), None)

        if index is None:
            return C

        return random_choice(1 - 1 / float(abs(index)))
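To make the formula concrete: the later the opponent's first defection, the more forgiving the strategy becomes.

    for index in [1, 2, 4, 10]:
        print(index, 1 - 1 / float(index))
    # 1 0.0   an immediate defector is never cooperated with
    # 2 0.5
    # 4 0.75
    # 10 0.9  a late defector is usually forgiven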
Example 31
 def strategy(self, opponent):
     """Runs a qlearn algorithm while the tournament is running."""
     state = self.find_state(opponent)
     reward = self.find_reward(opponent)
     if state not in self.Qs:
         self.Qs[state] = OrderedDict(zip([C, D], [0, 0]))
         self.Vs[state] = 0
     self.perform_q_learning(self.prev_state, state, self.prev_action, reward)
     if state not in self.Qs:
         action = random_choice()
     else:
         action = self.select_action(state)
     self.prev_state = state
     self.prev_action = action
     return action
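perform_q_learning is not shown in this snippet; presumably it is the standard one-step update that moves Q(prev_state, action) toward the reward plus the discounted value of the new state. A hedged sketch, assuming learning_rate and discount_rate attributes:

    def perform_q_learning(self, prev_state, state, action, reward):
        # Blend the old estimate with the new target (method sketch)
        old = self.Qs[prev_state][action]
        target = reward + self.discount_rate * self.Vs[state]
        self.Qs[prev_state][action] = (1 - self.learning_rate) * old \
            + self.learning_rate * target
        # Track the best achievable value from prev_state
        self.Vs[prev_state] = max(self.Qs[prev_state].values())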
Example 32
 def strategy(self, opponent):
     """Runs a qlearn algorithm while the tournament is running."""
     state = self.find_state(opponent)
     reward = self.find_reward(opponent)
     if state not in self.Qs:
         self.Qs[state] = OrderedDict(zip(['C', 'D'], [0, 0]))
         self.Vs[state] = 0
     self.perform_q_learning(self.prev_state, state, self.prev_action, reward)
     if state not in self.Qs:
         action = random_choice()
     else:
         action = self.select_action(state)
     self.prev_state = state
     self.prev_action = action
     return action
Example 33
    def __init__(self):
        """Initialises the player by picking a random strategy."""

        super(RiskyQLearner, self).__init__()

        # Set this explicitly, since the constructor of super will not pick it up
        # for any subclasses that do not override methods using random calls.
        self.classifier['stochastic'] = True

        self.prev_action = random_choice()
        self.history = []
        self.score = 0
        self.Qs = OrderedDict({'': OrderedDict(zip([C, D], [0, 0]))})
        self.Vs = OrderedDict({'': 0})
        self.prev_state = ''
Example 34
    def __init__(self):
        """Initialises the player by picking a random strategy."""

        super(RiskyQLearner, self).__init__()

        # Set this explicitly, since the constructor of super will not pick it up
        # for any subclasses that do not override methods using random calls.
        self.classifier['stochastic'] = True

        self.prev_action = random_choice()
        self.history = []
        self.score = 0
        self.Qs = OrderedDict({'': OrderedDict(zip([C, D], [0, 0]))})
        self.Vs = OrderedDict({'': 0})
        self.prev_state = ''
Example 35
class Geller(Player):
    """Observes what the player will do in the next round and adjust.

    If unable to do this: will play randomly.

    Geller - by Martin Chorley (@martinjc), heavily inspired by Matthew Williams (@voxmjw)

    This code is inspired by Matthew Williams' talk
    "Cheating at rock-paper-scissors — meta-programming in Python"
    given at Django Weekend Cardiff in February 2014.

    His code is here: https://github.com/mattjw/rps_metaprogramming
    and there's some more info here: http://www.mattjw.net/2014/02/rps-metaprogramming/

    This code is **way** simpler than Matt's, as in this exercise we already
    have access to the opponent instance, so don't need to go
    hunting for it in the stack. Instead we can just call it to
    see what it's going to play, and return a result based on that.

    This is almost certainly cheating, and more than likely against the
    spirit of the 'competition' :-)
    """

    name = 'Geller'
    default = lambda self: random_choice(0.5)
    classifier = {
        'memory_depth': -1,
        'stochastic': True,
        'makes_use_of': set(),
        'long_run_time': False,
        'inspects_source': True,  # Finds out what opponent will do
        'manipulates_source': False,
        'manipulates_state': False
    }

    def strategy(self, opponent):
        """
        Look at what the opponent will play in the next round and choose a strategy
        that gives the least jail time, which is equivalent to playing the same
        strategy as that which the opponent will play.
        """
        curframe = inspect.currentframe()
        calframe = inspect.getouterframes(curframe, 2)
        calname = calframe[1][3]
        if calname == 'strategy':
            return self.default()
        else:
            return opponent.strategy(self)
Example 36
    def __init__(self):
        """Initialises the player by picking a random strategy."""

        super(RiskyQLearner, self).__init__()

        # Set this explicitly, since the constructor of super will not pick it up
        # for any subclasses that do not override methods using random calls.
        self.stochastic = True

        self.prev_action = random_choice()
        self.history = []
        self.score = 0
        self.Qs = OrderedDict({'': OrderedDict(zip(['C', 'D'], [0, 0]))})
        self.Vs = OrderedDict({'': 0})
        self.prev_state = ''
        (R, P, S, T) = Game().RPST()
        self.payoff_matrix = {'C': {'C': R, 'D': S}, 'D': {'C': T, 'D': P}}
Example 37
 def strategy(self, opponent):
     current_round = len(self.history) + 1
     probability = 1 - float(current_round) / 1000
     return random_choice(probability)
Example 38
 def strategy(opponent):
     r = random.uniform(3, 7) / float(10)
     return random_choice(r)
Example 39
 def strategy(self, opponent):
     action = LookerUp.strategy(self, opponent)
     # action could be C, D, or a float
     if action in [C, D]:
         return action
     return random_choice(action)
Example 40
 def strategy(opponent):
     if len(opponent.history) == 0:
         return C
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example 41
 def strategy(opponent):
     if len(opponent.history) == 0:
         return Actions.C
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example 42
 def strategy(opponent):
     if len(opponent.history) == 0:
         # Randomly picks a strategy (not affected by history).
         return random_choice(0.5)
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example 43
 def strategy(self, opponent):
     current_round = len(self.history) + 1
     probability = current_round / 1000
     return random_choice(probability)
Example 44
 def strategy(self, opponent):
     current_round = len(self.history) + 1
     expected_length = self.match_attributes["length"]
     probability = 1 - float(current_round) / expected_length
     return random_choice(probability)
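Note the linear decay: the cooperation probability hits 0 exactly on the expected final round and goes negative past it, at which point random_choice(p) presumably always defects, since random.random() < p never holds for p <= 0.

    expected_length = 100
    for current_round in [1, 50, 100, 120]:
        print(1 - float(current_round) / expected_length)
    # 0.99, 0.5, 0.0, -0.2  (anything <= 0 means certain defection)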
Example 45
 def strategy(self, opponent):
     current_round = len(self.history) + 1
     expected_length = self.match_attributes['length']
     probability = 1 - float(current_round) / expected_length
     return random_choice(probability)
Example 46
 def strategy(opponent):
     """Cooperates on the first move; afterwards cooperates with probability
     equal to the opponent's cooperation proportion."""
     if len(opponent.history) == 0:
         return 'C'
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example 47
 def strategy(self, opponent):
     if len(self.history) == 0:
         return random_choice(0.6)
     return self.history[-1]
Example 48
 def strategy(self, opponent):
     if len(self.history) == 0:
         return random_choice(0.6)
     return self.history[-1]
Example 49
 def strategy(opponent):
     if len(opponent.history) == 0:
         # Randomly picks a strategy (not affected by history).
         return random_choice(0.5)
     p = opponent.cooperations / len(opponent.history)
     return random_choice(p)
Example 50
 def strategy(self, opponent):
     return random_choice(self.p)
Example 51
 def strategy(opponent):
     r = random.uniform(3, 7) / float(10)
     return random_choice(r)
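Finally, Example 50 above is the plain random player. Assuming these snippets come from the Axelrod library, it can be exercised end to end like this (a usage sketch; the API is assumed, not taken from the examples):

    import axelrod as axl

    # axl.Random(p) cooperates with probability p each round via random_choice(p)
    players = (axl.Random(0.3), axl.TitForTat())
    match = axl.Match(players, turns=5)
    print(match.play())  # e.g. [(D, C), (C, C), (C, D), (D, D), (C, C)]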