def __init__(self):
        # AI contains the Game Logic
        self.ai_logic = GameLogic()
        # discount factor in the Q-learning algorithm
        self.discount = 0.4
        # k value for exploration
        self.k = 2
        self.adult_last_move = "first"
        self.change = 0

        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0

        # setup and initialize ordinal matrix
        self.ord_matrix = []
        self.ord_t = 0
        self.ord_u = 0
        self.ord_v = 0
        self.ord_w = 0
        self.update_ord_matrix()
        self.save_ord_matrix()

        # setup and initialize Q-value matrix
        self.q_t = self.ai_logic.t[1]
        self.q_u = self.ai_logic.u[1]
        self.q_v = self.ai_logic.v[1]
        self.q_w = self.ai_logic.w[1]
        self.q_matrix = []
        self.update_q_matrix()

        # setup and initialize probability matrix
        self.probability_matrix = []
        self.update_probability_matrix()
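# Note: the probability matrix built at the end of the constructor above
# follows a Boltzmann-style exploration rule, P(a) = k**Q(a) / sum_b k**Q(b),
# as the update_probability_matrix bodies later in this listing show.
# A minimal standalone sketch of that rule:
def exploration_probabilities(q_values, k=2):
    """Boltzmann-style exploration with base k: P(a) = k**Q(a) / total."""
    total = sum(k ** q for q in q_values)
    return [(k ** q) / total for q in q_values]

# e.g. four actions with Q-values [1, 0, 0, 2] and k=2:
# exploration_probabilities([1, 0, 0, 2]) -> [0.25, 0.125, 0.125, 0.5]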
Example 2
    def _mock_input(self, *args, **kwargs):
        prompt = args[0]
        if prompt.startswith("Wanna play?"):
            return "y"
        elif prompt.startswith("Enter dice to keep (no spaces), or (q)uit:"):
            scorers = GameLogic.get_scorers(self.roll)
            keepers = "".join([str(ch) for ch in scorers])
            return keepers
        elif prompt.startswith("(r)oll again, (b)ank your points or (q)uit "):
            # Bank once the scoring dice are worth at least 200 points,
            # otherwise keep rolling (this strategy averages ~9175 points).
            scorers = GameLogic.get_scorers(self.roll)
            score = GameLogic.calculate_score(scorers)
            if score < 200:
                return "r"
            else:
                return "b"
        else:
            raise ValueError(f"Unrecognized prompt {prompt}")
Example 3
    def __init__(self):
        self.game_map = GameMap()
        self.game_logic = GameLogic(self.game_map)
        self.view = View()

        self.level_start()
        self.enemy_actions()
        self.player_actions()
        self.check_game_status()
Example 4
    def _mock_input(self, *args, **kwargs):
        prompt = args[0]
        if prompt.startswith("Wanna play?"):
            return "y"
        elif prompt.startswith("Enter dice to keep (no spaces), or (q)uit:"):
            # The kept dice only score when the roll contains 5(s) or 1(s).
            scorers = GameLogic.get_scorers(self.roll)
            keepers = "".join([str(ch) for ch in scorers])
            return keepers
        elif prompt.startswith("(r)oll again, (b)ank your points or (q)uit "):
            self.old_print(prompt, 'b')
            if 5 in self.roll:
                return "b"
            else:
                return "r"
        else:
            raise ValueError(f"Unrecognized prompt {prompt}")
Example 6
class Main:
    def __init__(self):
        self._rand_sel = ComputerSelection()
        self._game_logic = GameLogic()
        self.mainFunction()

    def mainFunction(self):
        """ Initiates the program asking the user to select an option.
            Calls for the random_selection function on computer_selection.
            Compares both choices and gets a winner.
        """
        print("")
        print("WELCOME TO ROCK-PAPER-SCISSORS")
        print("------------------------------")
        question = [
            inquirer.List(
                "action",
                "Make your choice",
                ["rock", "paper", "scissors", "lizard", "spock"],
            )
        ]
        user_choice = inquirer.prompt(question)["action"]
        computer_choice = self._rand_sel.random_selection()
        print("YOU:  " + user_choice + " vs COMPUTER:  " + computer_choice)
        rps_winner = self._game_logic.winner(user_choice=user_choice,
                                             machine_choice=computer_choice)
        print(rps_winner)
Example 7
class Main:
    def __init__(self):
        self.instances = []
        # create instances
        self.game_logic = GameLogic()
        self.instances.append(self.game_logic)
        self.instances.append(PlayerView(self.game_logic))

    def go(self):
        self.game_logic.load_world("level_1.txt")

        while True:
            for instance in self.instances:
                instance.tick()

            if self.game_logic.get_property("quit"):
                break
Example 8
 def test_getplayer(self, mock_input):
     captured_output = StringIO()
     sys.stdout = captured_output
     game = GameLogic()
     self.assertIsInstance(game.player_1, Player)
     self.assertIsInstance(game.player_2, Player)
     self.assertEqual(game.player_1.name, 'ScaryTerry')
     self.assertEqual(game.player_2.name, 'Holt')
     sys.stdout = sys.__stdout__
Example 9
 def setUp(self):
     pygame.font.init()
     self.display = pygame.display.set_mode((1024, 640))
     self.gamelogic = GameLogic()
     self.colors = Colors()
     self.startview = StartView(self.display, self.colors)
     self.save = SaveGame(self.gamelogic)
     self.menu = Menu(self.display, self.colors)
     self.shop = Shop(self.display, self.colors, self.gamelogic)
     pygame.init()
Example 10
 def _mock_input(self, *args):
     prompt = args[0]
     if prompt.startswith("Wanna play?"):
         return "y"
     elif prompt.startswith("Enter dice to keep (no spaces), or (q)uit:"):
         scorers = GameLogic.get_scorers(self.roll)
         keepers = "".join([str(ch) for ch in scorers])
         return keepers
     elif prompt.startswith("(r)oll again, (b)ank your points or (q)uit "):
         return "b"
     else:
         raise ValueError(f"Unrecognized prompt {prompt}")
Example 11
    def start_new_game(self):
        next_lvl = self.game_state.advance_level()

        # if we finished all the levels
        restart_game = False
        if not next_lvl:
            self.game_state.reset()
            next_lvl = self.game_state.advance_level()
            restart_game = True

        # create new game
        game_logic = GameLogic()
        game_logic.game_state = self.game_state
        game_logic.event_manager = self.event_manager
        self.add_graphic_state(game_logic)
        game_logic.build_level(next_lvl)
        self.game_logic = game_logic

        # Display a message at the beginning of each level
        msg = 'Level %d' % self.game_state.current_level
        self.add_graphic_state(TemporaryMessage(msg))

        if restart_game:
            self.add_graphic_state(StartMenu())
            msg = 'Credit goes here'
            self.add_graphic_state(TemporaryMessage(msg))
        else:
            self.add_graphic_state(EndLevel(True))
Example 12
    def gather_keepers(self, roll, keeper_string):

        keepers = [int(ch) for ch in keeper_string]

        while not GameLogic.validate_keepers(roll, keepers):
            print("Cheater!!! Or possibly made a typo...")
            print(",".join([str(i) for i in roll]))
            keeper_string = input("Enter dice to keep (no spaces), or (q)uit: ")
            if keeper_string.startswith("q"):
                self.quit_game()

            keepers = [int(ch) for ch in keeper_string]

        return keepers
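# Note: GameLogic.validate_keepers is only called here, not shown in this
# listing. A plausible sub-multiset check (hypothetical sketch, not the
# project's actual implementation):
from collections import Counter

def validate_keepers(roll, keepers):
    # every kept die must actually appear in the roll, counting
    # multiplicity (keepers must be a sub-multiset of roll)
    return not (Counter(keepers) - Counter(roll))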
Example 13
 def __init__(self, username):
     """Initiates the game and takes as a parameter the users name"""
     pygame.init()
     self._screen = pygame.display.set_mode((1380, 800))
     self._screen.fill((54, 48, 33))
     self._username = username
     self._board_font = pygame.font.SysFont('Arial', 15, bold=True)
     # What stage of the game we are
     self._game_status = 'PRE-SETUP'
     # Board setup files
     self._running = True
     # Creates map and board graphics
     self._create_sea_map()
     self._vertical_horizontal_lines()
     self._game_logic = GameLogic(self._username)
     self.mark_active_boats([1100, 30])
     self.mark_active_boats([1100, 200], 'human')
     self._game_logic.setup_game()
     while self._running:
         for event in pygame.event.get():
             # Populate ai boat dictionary
             self._event_handler(event)
         pygame.display.update()
Example 14
 def test_playerturn(self, mock_input):
     captured_output = StringIO()
     sys.stdout = captured_output
     game = GameLogic()
     game.player_turn()
     self.assertEqual(game.moves.state['mid'], {'l': 0, 'm': 1, 'r': 0})
     game.player_turn()
     self.assertEqual(game.moves.state['mid'], {'l': 2, 'm': 1, 'r': 0})
     sys.stdout = sys.__stdout__
Example 15
    def make_game_easier(self, request):
        """Given a number, automatically match same count of pairs and return match histories"""
        game = get_by_urlsafe(request.urlsafe_game_key, Game)
        if not game:
            raise endpoints.NotFoundException('Game not found!')
        if game.game_over:
            raise endpoints.ForbiddenException('Illegal action: Game is already over.')

        hint_num = request.hint_num
        if hint_num <= 0:
            raise endpoints.ForbiddenException('Illegal action: Can not receive a negative number.')
        if hint_num * 2 >= 52 - game.matched:
            raise endpoints.ForbiddenException('Illegal action: Can not use hint to win, try a smaller number.')

        hint_histories = GameLogic.make_game_easier(game=game, hint_num=hint_num)
        return HistoryForms(items=[h.to_form() for h in hint_histories])
Example 16
def main():
    """Main function of the game. Creates all the classes that GameLoop-class needs to run
    the game, and then starts the game by calling gameloop.start().
    """
    display = pygame.display.set_mode((1024, 640))
    pygame.display.set_caption("Clicker")
    pygame.font.init()
    colors = Colors()
    event_queue = EventQueue()
    startview = StartView(display, colors)
    menu = Menu(display, colors)
    logic = GameLogic()
    shop = Shop(display, colors, logic)
    save = SaveGame(logic)
    game_loop = GameLoop(logic, display, colors, startview, event_queue, menu,
                         shop, save)
    pygame.init()
    game_loop.start()
Example 17
def play_game(player_one, player_two):
    player_mapping = {player_one: "A", player_two: "B"}
    players_turn = player_one
    next_player = player_two
    playing = True
    game_logic = GameLogic(player_mapping)
    while playing:
        game_logic.show_board()
        choosen_pit = input(f"{players_turn}, choose a pit: ")
        if choosen_pit and game_logic.valid_pit(players_turn, choosen_pit):
            go_again = game_logic.move(players_turn, choosen_pit)
            if game_logic.game_ended(player_one, player_two):
                playing = False
            elif not go_again:
                players_turn, next_player = next_player, players_turn
        else:
            print("Invalid choice, try again.")
Example 18
    def make_match(self, request):
        """Makes a match. Returns matched cards' state with message"""
        game = get_by_urlsafe(request.urlsafe_game_key, Game)
        if not game:
            raise endpoints.NotFoundException('Game not found!')
        if game.game_over:
            raise endpoints.ForbiddenException('Illegal action: Game is already over.')

        pair_1 = request.guess_pair_1
        pair_2 = request.guess_pair_2
        if pair_1 == pair_2:
            raise endpoints.ForbiddenException('Illegal action: The two guess indices must be different!')
        if pair_1 < 0 or pair_2 < 0 or pair_1 > 51 or pair_2 > 51:
            raise endpoints.ForbiddenException('Illegal action: Guess index must be between 0 and 51!')

        try:
            return GameLogic.match_pair(game=game, pair_1=pair_1, pair_2=pair_2)
        except RuntimeError:
            raise endpoints.ForbiddenException('Illegal action: Could not rematch a matched card')
Example 19
 def test_checkwinner(self, mock_input):
     captured_output = StringIO()
     sys.stdout = captured_output
     game = GameLogic()
     self.assertEqual(game.check_winner(), None)
     game.moves.state['mid']['l'] = 1
     game.moves.state['mid']['m'] = 1
     game.moves.state['mid']['r'] = 1
     self.assertEqual(game.check_winner(), 1)
     game.moves.state['mid']['m'] = 2
     self.assertEqual(game.check_winner(), None)
     game.moves.state['top']['r'] = 2
     game.moves.state['bot']['l'] = 2
     self.assertEqual(game.check_winner(), 2)
     game.moves.state['top']['l'] = 1
     game.moves.state['bot']['l'] = 1
     self.assertEqual(game.check_winner(), 1)
     sys.stdout = sys.__stdout__
Example 20
 def take_action(self, roll, dice_to_keep):
     remaining_dice = get_remaining_dice(roll, dice_to_keep)
     dice_to_keep = str_to_tuple(dice_to_keep)
     to_bank = self.banker.shelf(GameLogic.calculate_score(dice_to_keep))
     print(
         f"You have {to_bank} unbanked points and {remaining_dice} dice remaining"
     )
     roll_bank_quit = input("(r)oll again, (b)ank your points or (q)uit ")
     if (roll_bank_quit == 'b'):
         self.banking(to_bank)
         self.round += 1
         if self.round > self.num_rounds:
             print(
                 f'Thanks for playing. You earned {self.banker.balance} points'
             )
             sys.exit()
         self.new_round()
     elif (roll_bank_quit == 'r'):
         if remaining_dice == 0:
             remaining_dice = 6
         self.repeat_round(remaining_dice)
     elif (roll_bank_quit == 'q'):
         self.quit_game()
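# Note: str_to_tuple and get_remaining_dice are referenced above but not
# shown in this listing. Hypothetical helpers consistent with those calls
# (illustrative only, not the project's actual code):
def str_to_tuple(dice_string):
    # e.g. "456" -> (4, 5, 6)
    return tuple(int(ch) for ch in dice_string)

def get_remaining_dice(roll, dice_to_keep):
    # dice left to re-roll after setting aside the kept ones
    return len(roll) - len(dice_to_keep)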
Example 21
    def __init__(self, manager, white_player='human', black_player='human'):

        # game state members
        self.game_manager = manager
        self.game_running = False
        self.needs_redraw = True
        self.screen = None
        self.clock = pygame.time.Clock()

        # game components
        self.game_logic = GameLogic(self)
        self.game_grid = GameGrid(self)
        self.game_board = None

        self.white_player = None
        self.black_player = None
        self.initialize_players(white_player, black_player)

        self.turn_manager = TurnManager(self)
        self.highlighter = Highlighter(self)
        self.logger = Logger(self)

        self.buttons = {}
Example 22
 def is_zilch(self, roll):
     # a roll is a "zilch" when it scores nothing
     return GameLogic.calculate_score(roll) == 0
class IntelligentAgent2(store.object):
    
    # initialize all the member variables
    def __init__(self):
        # AI contains the Game Logic
        self.ai_logic = GameLogic()
        # discount factor in the Q-learning algorithm
        self.discount = 0.4
        # k value for exploration
        self.k = 2
        
        # Set up for Parent
        self.p_change = 0
        
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0
        
        # setup and initialize ordinal matrix
        self.p_ord_matrix = []
        self.ord_t = 0
        self.ord_u = 0
        self.ord_v = 0
        self.ord_w = 0
        self.update_p_ord_matrix()
        self.save_p_ord_matrix()
        
        # setup and initialize Q-value matrix
        self.q_t = self.ai_logic.t[1]
        self.q_u = self.ai_logic.u[1]
        self.q_v = self.ai_logic.v[1]
        self.q_w = self.ai_logic.w[1]
        self.p_q_matrix = []
        self.update_p_q_matrix()
        
        # setup and initialize probability matrix
        self.p_probability_matrix = []
        self.update_p_probability_matrix()
        
        # Set up for Child
        self.c_change = 0
        
        self.o_count = 0
        self.p_count = 0
        self.q_count = 0
        self.y_count = 0
        
        # setup and initialize ordinal matrix
        self.c_ord_matrix = []
        self.ord_o = 0
        self.ord_p = 0
        self.ord_q = 0
        self.ord_y = 0
        self.update_c_ord_matrix()
        self.save_c_ord_matrix()
        
        # setup and initialize Q-value matrix
        self.q_o = self.ai_logic.t[0]
        self.q_p = self.ai_logic.u[0]
        self.q_q = self.ai_logic.v[0]
        self.q_y = self.ai_logic.w[0]
        self.c_q_matrix = []
        self.update_c_q_matrix()
        
        # setup and initialize probability matrix
        self.c_probability_matrix = []
        self.update_c_probability_matrix()
        
    def reset_p_learning_rate(self):
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0
        
    def reset_c_learning_rate(self):
        self.o_count = 0
        self.p_count = 0
        self.q_count = 0
        self.y_count = 0
        
    # function to calculate new ordinal matrix with new state and store the ordinal matrix    
    def update_ord_matrix(self):
        self.update_p_ord_matrix()
        self.update_c_ord_matrix()
    
    def update_p_ord_matrix(self):
        self.ai_logic.update_matrix()
        self.p_ord_matrix = []
        temp_matrix = [self.ai_logic.t[1], self.ai_logic.u[1], self.ai_logic.v[1], self.ai_logic.w[1]]
        
        # for each action, rank its reward against the others, giving a matrix representing 41 states
        for i in range(4):
            count = 1
            for j in range(4):
                if i == j:
                    continue
                if temp_matrix[i] > temp_matrix[j]:
                    count += 1
            self.p_ord_matrix.append(count)

    def update_c_ord_matrix(self):
        self.ai_logic.update_matrix()
        self.c_ord_matrix = []
        temp_matrix = [self.ai_logic.t[0], self.ai_logic.u[0], self.ai_logic.v[0], self.ai_logic.w[0]]
        
        for i in range(4):
            count = 1
            for j in range(4):
                if i == j:
                    continue
                if temp_matrix[i] > temp_matrix[j]:
                    count += 1
            self.c_ord_matrix.append(count)
            
    # store ordinal values in the class to remember    
    def save_p_ord_matrix(self):
        self.ord_t = self.p_ord_matrix[0]
        self.ord_u = self.p_ord_matrix[1]
        self.ord_v = self.p_ord_matrix[2]
        self.ord_w = self.p_ord_matrix[3]
          
    def save_c_ord_matrix(self):
        self.ord_o = self.c_ord_matrix[0]
        self.ord_p = self.c_ord_matrix[1]
        self.ord_q = self.c_ord_matrix[2]
        self.ord_y = self.c_ord_matrix[3]
        
    # checks if the new calculated ordinal matrix is different to the old one, representing a change in state    
    def ord_matrix_change(self):
        parent = self.p_ord_matrix_change()
        child = self.c_ord_matrix_change()
        return parent or child
    
    def p_ord_matrix_change(self):
        if (self.ord_t != self.p_ord_matrix[0] or self.ord_u != self.p_ord_matrix[1] or self.ord_v != self.p_ord_matrix[2] or self.ord_w != self.p_ord_matrix[3]):
            self.save_p_ord_matrix()
            self.p_change += 1
            self.reset_p_learning_rate()
            return True
        return False
        
    def c_ord_matrix_change(self):
        if (self.ord_o != self.c_ord_matrix[0] or self.ord_p != self.c_ord_matrix[1] or self.ord_q != self.c_ord_matrix[2] or self.ord_y != self.c_ord_matrix[3]):
            self.save_c_ord_matrix()
            self.c_change += 1
            self.reset_c_learning_rate()
            return True
        return False
    
    # updates the q matrix with new values of Q for each state change 
    def update_q_matrix(self):
        self.update_p_q_matrix()
        self.update_c_q_matrix()
    
    def update_p_q_matrix(self):
        self.p_q_matrix = []
        self.p_q_matrix.append(self.q_t)
        self.p_q_matrix.append(self.q_u)
        self.p_q_matrix.append(self.q_v)
        self.p_q_matrix.append(self.q_w)
        
    def update_c_q_matrix(self):
        self.c_q_matrix = []
        self.c_q_matrix.append(self.q_o)
        self.c_q_matrix.append(self.q_p)
        self.c_q_matrix.append(self.q_q)
        self.c_q_matrix.append(self.q_y)
        
    # updates the Q value for action t    
    def update_q_t(self):
        if self.t_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.t_count
        self.q_t = self.q_t + learning_rate*(self.ai_logic.t[1] + (self.discount * max(self.p_q_matrix)) - self.q_t)
    
    # updates the Q value for action u    
    def update_q_u(self):
        if self.u_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.u_count
        self.q_u = self.q_u + learning_rate*(self.ai_logic.u[1] + (self.discount * max(self.p_q_matrix)) - self.q_u)
    
    # updates the Q value for action v    
    def update_q_v(self):
        if self.v_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.v_count
        self.q_v = self.q_v + learning_rate*(self.ai_logic.v[1] + (self.discount * max(self.p_q_matrix)) - self.q_v)
    
    # updates the Q value for action w    
    def update_q_w(self):
        if self.w_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.w_count
        self.q_w = self.q_w + learning_rate*(self.ai_logic.w[1] + (self.discount * max(self.p_q_matrix)) - self.q_w)
        
    # updates the Q value for action o   
    def update_q_o(self):
        if self.o_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.o_count
        self.q_o = self.q_o + learning_rate*(self.ai_logic.t[0] + (self.discount * max(self.c_q_matrix)) - self.q_o)
    
    # updates the Q value for action p    
    def update_q_p(self):
        if self.p_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.p_count
        self.q_p = self.q_p + learning_rate*(self.ai_logic.u[0] + (self.discount * max(self.c_q_matrix)) - self.q_p)
    
    # updates the Q value for action q    
    def update_q_q(self):
        if self.q_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.q_count
        self.q_q = self.q_q + learning_rate*(self.ai_logic.v[0] + (self.discount * max(self.c_q_matrix)) - self.q_q)
    
    # updates the Q value for action y    
    def update_q_y(self):
        if self.y_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.y_count
        self.q_y = self.q_y + learning_rate*(self.ai_logic.w[0] + (self.discount * max(self.c_q_matrix)) - self.q_y)
        
    # updates the probability matrix after a change in Q values    
    def update_probability_matrix(self):
        self.update_p_probability_matrix()
        self.update_c_probability_matrix()
    
    def update_p_probability_matrix(self):
        self.p_probability_matrix = []
        self.update_p_q_matrix()
        total = 0
        for i in range(4):
            total += self.k ** self.p_q_matrix[i]
        self.p_probability_matrix.append((self.k**self.q_t)/total)
        self.p_probability_matrix.append((self.k**self.q_u)/total)
        self.p_probability_matrix.append((self.k**self.q_v)/total)
        self.p_probability_matrix.append((self.k**self.q_w)/total)
        
    def update_c_probability_matrix(self):
        self.c_probability_matrix = []
        self.update_c_q_matrix()
        total = 0
        for i in range(4):
            total += self.k ** self.c_q_matrix[i]
        self.c_probability_matrix.append((self.k**self.q_o)/total)
        self.c_probability_matrix.append((self.k**self.q_p)/total)
        self.c_probability_matrix.append((self.k**self.q_q)/total)
        self.c_probability_matrix.append((self.k**self.q_y)/total)
        
    # checks if the AI won the game given an action pair    
    def check_win(self, adult, child):
        return self.ai_logic.check_win(adult, child)
    
    # returns the number of rounds the AI has played    
    def get_round(self):
        return self.ai_logic.round
    
    def parent_move(self):
        # random number from 0 to 1
        choice = random.random()
        # determine the probabilistic range of each action
        prob_t = self.p_probability_matrix[0]
        prob_u = self.p_probability_matrix[0]+self.p_probability_matrix[1]
        prob_v = self.p_probability_matrix[0]+self.p_probability_matrix[1]+self.p_probability_matrix[2]
        prob_w = 1
        
        # if t is chosen, adult attends
        if choice < prob_t:
            adult = "attend"
            
        # if u is chosen, adult ignores
        if prob_t <= choice and choice < prob_u:
            adult = "ignore"
            
        # if v is chosen, adult attends
        if prob_u <= choice and choice < prob_v:
            adult = "attend"
            
        # if w is chosen, adult ignores
        if prob_v <= choice and choice < prob_w:
            adult = "ignore"
            
        return adult
    
    def child_move(self):
        # random number from 0 to 1
        choice = random.random()
        # determine the probabilistic range of each action
        prob_o = self.c_probability_matrix[0]
        prob_p = self.c_probability_matrix[0]+self.c_probability_matrix[1]
        prob_q = self.c_probability_matrix[0]+self.c_probability_matrix[1]+self.c_probability_matrix[2]
        prob_y = 1
        
        # if o is chosen, child goes
        if choice < prob_o:
            child = "go"

        # if p is chosen, child goes
        if prob_o <= choice and choice < prob_p:
            child = "go"

        # if q is chosen, child doesn't go
        if prob_p <= choice and choice < prob_q:
            child = "dontgo"

        # if y is chosen, child doesn't go
        if prob_q <= choice and choice < prob_y:
            child = "dontgo"
            
        return child
    
    def move(self):
        adult = self.parent_move()
        child = self.child_move()
        
        # find which action was chosen and update the specific Q value and probability matrix
        if adult == "attend" and child == "go":
            self.ai_logic.attend_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            # checks if the action caused a change in the ordinal matrix
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.t_count += 1
                self.update_q_t()
            if not self.check_final_action("child"):
                self.o_count += 1
                self.update_q_o()
            self.update_q_matrix()
            self.update_probability_matrix()
            
        if adult == "attend" and child == "dontgo":
            self.ai_logic.attend_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.v_count += 1
                self.update_q_v()
            if not self.check_final_action("child"):
                self.q_count += 1
                self.update_q_q()
            self.update_q_matrix()
            self.update_probability_matrix()
            
        if adult == "ignore" and child == "go":
            self.ai_logic.ignore_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.u_count += 1
                self.update_q_u()
            if not self.check_final_action("child"):
                self.update_q_p()
                self.p_count += 1
            self.update_q_matrix()
            self.update_probability_matrix()
                   
        if adult == "ignore" and child == "dontgo":
            self.ai_logic.ignore_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.w_count += 1
                self.update_q_w()
            if not self.check_final_action("child"):
                self.y_count += 1
                self.update_q_y()
            self.update_q_matrix()
            self.update_probability_matrix()
                
        return [adult, child]
        
    def check_final_action(self, player):
        if player == "adult":
            for i in range(4):
                if round(self.p_probability_matrix[i], 2) == 1.00:
                    return True
            return False
        elif player == "child":
            for i in range(4):
                if round(self.c_probability_matrix[i], 2) == 1.00:
                    return True
            return False
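# Note: the eight update_q_* methods in the class above all apply the same
# tabular Q-learning step with a 1/count learning rate; a generic sketch of
# that shared pattern:
def q_update(q, reward, q_row, count, discount=0.4):
    learning_rate = 1.0 if count == 0 else 1.0 / count
    return q + learning_rate * (reward + discount * max(q_row) - q)

# Likewise, update_p_ord_matrix and update_c_ord_matrix both compute
# competition-style ranks (1 + the number of strictly smaller rewards):
def ordinal_ranks(rewards):
    return [1 + sum(ri > rj for j, rj in enumerate(rewards) if j != i)
            for i, ri in enumerate(rewards)]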
Example 24
 def keep_scorers(self, roll):
     return GameLogic.get_scorers(roll)
Example 25
 def calculate_score(self, roll):
     return GameLogic.calculate_score(roll)
Example 26
    # game setup

    # init pygame
    pygame.init()

    # init display
    screen = pygame.display.set_mode(cfg['game']['screen_size'])

    # level creation
    levels = [LevelMic(screen, cfg['game']['screen_size'], mic)]

    # choose level
    level = levels[0]

    # game logic with dependencies
    game_logic = GameLogic()

    # add clock
    clock = pygame.time.Clock()

    # mic stream and update
    with mic.stream:

        # game loop
        while game_logic.run_loop:
            for event in pygame.event.get():

                # input handling
                game_logic.event_update(event)
                level.event_update(event)
Example 28
class App(object):
    def __init__(self):
        self.game_map = GameMap()
        self.game_logic = GameLogic(self.game_map)
        self.view = View()

        self.level_start()
        self.enemy_actions()
        self.player_actions()
        self.check_game_status()

    def start_render(self):
        self.render_entities()
        self.render_hud()
        self.render = self.view.root.after(60, self.start_render)

    def stop_render(self):
        self.view.root.after_cancel(self.render)

    def render_entities(self):
        self.view.delete_entities()
        self.view.draw_entities(self.game_logic.entities)

    def render_hud(self):
        self.view.delete_hud()
        self.view.draw_HUD(self.game_logic.hero)

    def enemy_actions(self):
        for enemy in self.game_logic.entities[1:]:
            self.game_logic.move_entity(enemy, enemy.get_move_direction())
            self.game_logic.enemy_strike(enemy)
            self.game_logic.remove_dead_entity(enemy)
        self.view.root.after(500, self.enemy_actions)

    def player_actions(self):
        self.move_player()
        self.player_strike()
        self.view.root.after(60, self.player_actions)

    def move_player(self):
        if self.view.player_move_direction is not None:
            self.game_logic.move_entity(self.game_logic.hero,
                                        self.view.player_move_direction)
            self.view.player_move_direction = None

    def player_strike(self):
        if self.view.player_strike and self.game_logic.hero.can_strike:
            self.game_logic.hero.strike()
        self.view.player_strike = False

    def check_game_status(self):
        if self.game_logic.is_level_over():
            self.stop_render()
            self.level_start()
        elif self.game_logic.hero.current_hp <= 0:
            self.stop_render()
            self.view.clear_view()
            self.view.game_over_screen()
            self.view.root.after(1500, exit)
        self.view.root.after(2000, self.check_game_status)

    def show_game_level(self):
        self.view.clear_view()
        self.view.write_level_info(self.game_logic.current_level + 1)
        self.view.root.after(1000, self.view.clear_view)

    def level_start(self):
        self.show_game_level()
        self.view.root.after(1100, self.game_logic.start_level)
        self.view.root.after(
            1200, lambda: self.view.draw_map(self.game_map.game_map))
        self.view.root.after(1200, self.start_render)
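# Note: App above drives its render and game loops with Tk's after()
# scheduler. A minimal self-contained sketch of that re-arming pattern
# (the names here are illustrative only):
import tkinter as tk

def demo_after_loop():
    root = tk.Tk()
    label = tk.Label(root, text="tick 0")
    label.pack()
    state = {"ticks": 0}

    def tick():
        # each callback re-arms itself, like start_render above
        state["ticks"] += 1
        label.config(text=f"tick {state['ticks']}")
        root.after(60, tick)

    tick()
    root.mainloop()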
class IntelligentAgent(store.object):
    
    # initialize all the member variables
    def __init__(self):
        # AI contains the Game Logic
        self.ai_logic = GameLogic()
        # discount factor in the Q-learning algorithm
        self.discount = 0.4
        # k value for exploration
        self.k = 2
        self.adult_last_move = "first"
        self.change = 0
        
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0
        
        # setup and initialize ordinal matrix
        self.ord_matrix = []
        self.ord_t = 0
        self.ord_u = 0
        self.ord_v = 0
        self.ord_w = 0
        self.update_ord_matrix()
        self.save_ord_matrix()
        
        # setup and initialize Q-value matrix
        self.q_t = self.ai_logic.t[1]
        self.q_u = self.ai_logic.u[1]
        self.q_v = self.ai_logic.v[1]
        self.q_w = self.ai_logic.w[1]
        self.q_matrix = []
        self.update_q_matrix()
        
        # setup and initialize probability matrix
        self.probability_matrix = []
        self.update_probability_matrix()
        
    def reset_learning_rate(self):
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0
    
    # function to calculate new ordinal matrix with new state and store the ordinal matrix    
    def update_ord_matrix(self):
        self.ai_logic.update_matrix()
        self.ord_matrix = []
        temp_matrix = [self.ai_logic.t[1], self.ai_logic.u[1], self.ai_logic.v[1], self.ai_logic.w[1]]
        
        # for each action, rank its reward against the others, giving a matrix representing 41 states
        for i in range(4):
            count = 1
            for j in range(4):
                if i == j:
                    continue
                if temp_matrix[i] > temp_matrix[j]:
                    count += 1
            self.ord_matrix.append(count)
    
    # store ordinal values in the class to remember    
    def save_ord_matrix(self):
        self.ord_t = self.ord_matrix[0]
        self.ord_u = self.ord_matrix[1]
        self.ord_v = self.ord_matrix[2]
        self.ord_w = self.ord_matrix[3]
    
    # checks if the new calculated ordinal matrix is different to the old one, representing a change in state    
    def ord_matrix_change(self):
        if (self.ord_t != self.ord_matrix[0] or self.ord_u != self.ord_matrix[1] or self.ord_v != self.ord_matrix[2] or self.ord_w != self.ord_matrix[3]):
            self.save_ord_matrix()
            return True
        return False
    
    # updates the q matrix with new values of Q for each state change 
    def update_q_matrix(self):
        self.q_matrix = []
        self.q_matrix.append(self.q_t)
        self.q_matrix.append(self.q_u)
        self.q_matrix.append(self.q_v)
        self.q_matrix.append(self.q_w)
    
    # updates the Q value for action t.    
    def update_q_t(self):
        if self.t_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.t_count
        self.q_t = self.q_t + learning_rate*(self.ai_logic.t[1] + (self.discount * max(self.q_matrix)) - self.q_t)
    
    # updates the Q value for action u    
    def update_q_u(self):
        if self.u_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.u_count
        self.q_u = self.q_u + learning_rate*(self.ai_logic.u[1] + (self.discount * max(self.q_matrix)) - self.q_u)
    
    # updates the Q value for action v    
    def update_q_v(self):
        if self.v_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.v_count
        self.q_v = self.q_v + learning_rate*(self.ai_logic.v[1] + (self.discount * max(self.q_matrix)) - self.q_v)
    
    # updates the Q value for action w    
    def update_q_w(self):
        if self.w_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0/self.w_count
        self.q_w = self.q_w + learning_rate*(self.ai_logic.w[1] + (self.discount * max(self.q_matrix)) - self.q_w)
    
    # updates the probability matrix after a change in Q values    
    def update_probability_matrix(self):
        self.probability_matrix = []
        self.update_q_matrix()
        total = 0
        for i in range(4):
            total += self.k ** self.q_matrix[i]
        self.probability_matrix.append((self.k**self.q_t)/total)
        self.probability_matrix.append((self.k**self.q_u)/total)
        self.probability_matrix.append((self.k**self.q_v)/total)
        self.probability_matrix.append((self.k**self.q_w)/total)
    
    # function called to inform the AI to make a move and returns the action in the form of [adult, child]    
    def move(self):
        # random number from 0 to 1
        choice = random.random()
        # determine the probabilistic range of each action
        prob_t = self.probability_matrix[0]
        prob_u = self.probability_matrix[0]+self.probability_matrix[1]
        prob_v = self.probability_matrix[0]+self.probability_matrix[1]+self.probability_matrix[2]
        prob_w = 1
        
        # if t is chosen, adult attends
        if choice < prob_t:
            adult = "attend"
            
        # if u is chosen, adult ignores
        if prob_t <= choice and choice < prob_u:
            adult = "ignore"
            
        # if v is chosen, adult attends
        if prob_u <= choice and choice < prob_v:
            adult = "attend"
            
        # if w is chosen, adult ignores
        if prob_v <= choice and choice < prob_w:
            adult = "ignore"
        
        # find the child's action choice given the adult's action choice
        child = self.child_move(adult)
        
        # find which action was chosen and update the specific Q value and probability matrix
        if adult == "attend" and child == "go":
            self.ai_logic.attend_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            # checks if the action caused a change in the ordinal matrix
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.t_count += 1
            self.update_q_t()
            self.update_q_matrix()
            self.update_probability_matrix()
            
        if adult == "attend" and child == "dontgo":
            self.ai_logic.attend_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.v_count += 1
            self.update_q_v()
            self.update_q_matrix()
            self.update_probability_matrix()
            
        if adult == "ignore" and child == "go":
            self.ai_logic.ignore_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.u_count += 1
            self.update_q_u()
            self.update_q_matrix()
            self.update_probability_matrix()
                   
        if adult == "ignore" and child == "dontgo":
            self.ai_logic.ignore_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            if self.ord_matrix_change():
                self.change += 1
                self.reset_learning_rate()
            self.w_count += 1
            self.update_q_w()
            self.update_q_matrix()
            self.update_probability_matrix()
                
        return [adult, child]
    
    # this function prompts the child to choose an action given the adult's choice:
    # it picks the action with the greater reward; if the rewards are equal, it chooses randomly between go and dontgo
    def child_move(self, adult):
        if self.adult_last_move == "first":
            self.adult_last_move = adult
            child_action = random.random()
            if child_action <= 0.5:
                return "go"
            else:
                return "dontgo"

        if self.adult_last_move == "attend":
            self.adult_last_move = adult
            if self.ai_logic.t[0] > self.ai_logic.v[0]:
                return "go"
            elif self.ai_logic.t[0] < self.ai_logic.v[0]:
                return "dontgo"
            elif self.ai_logic.t[0] == self.ai_logic.v[0]:
                child_action = random.random()
                if child_action < 0.5:
                    return "go"
                else:
                    return "dontgo"
        if self.adult_last_move == "ignore":
            self.adult_last_move = adult
            if self.ai_logic.u[0] > self.ai_logic.w[0]:
                return "go"
            elif self.ai_logic.u[0] < self.ai_logic.w[0]:
                return "dontgo"
            elif self.ai_logic.u[0] == self.ai_logic.w[0]:
                child_action = random.random()
                if child_action < 0.5:
                    return "go"
                else:
                    return "dontgo"
    
    # checks if the AI won the game given an action pair    
    def check_win(self, adult, child):
        return self.ai_logic.check_win(adult, child)
    
    # returns the number of rounds the AI has played    
    def get_round(self):
        return self.ai_logic.round
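# Note: move() above samples an action by walking a cumulative probability
# ladder (prob_t, prob_u, prob_v, prob_w). The same idea as a standalone
# helper:
import random

def sample_action(actions, probabilities):
    # walk the cumulative distribution until it passes a uniform draw
    choice = random.random()
    cumulative = 0.0
    for action, prob in zip(actions, probabilities):
        cumulative += prob
        if choice < cumulative:
            return action
    return actions[-1]  # guard against floating-point shortfall

# e.g. sample_action(["attend", "ignore", "attend", "ignore"],
#                    agent.probability_matrix)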
Example 30
 def __init__(self):
     self._rand_sel = ComputerSelection()
     self._game_logic = GameLogic()
     self.mainFunction()
Example 31
 def setUp(self):
     self.logic = GameLogic()
Example 32
class TestGameLogic(unittest.TestCase):
    def setUp(self):
        self.logic = GameLogic()

    def test_clicking_produces_score(self):
        self.logic.click()
        self.assertEqual(self.logic.score, 1)

    def test_cannot_purchase_click_upgrade_without_score(self):
        self.logic.score = 30
        self.logic.click_upgrade()
        self.assertEqual(self.logic.score, 30)
        self.assertEqual(self.logic.click_upgrades_bought, 0)

    def test_can_purchase_click_upgrade_with_sufficent_score(self):
        self.logic.score = 50
        self.logic.click_upgrade()
        self.assertEqual(self.logic.score, 0)
        self.assertEqual(self.logic.click_upgrades_bought, 1)

    def test_click_upgrade_is_working(self):
        self.logic.score = 50
        self.logic.click_upgrade()
        self.assertEqual(self.logic.score_per_click, 2)
        self.logic.click()
        self.assertEqual(self.logic.score, 2)
        self.logic.score += 148
        self.logic.click_upgrade()
        self.assertEqual(self.logic.score_per_click, 4)
        self.logic.click()
        self.assertEqual(self.logic.score, 4)

    def test_cannot_purchase_autoclicker_without_score(self):
        self.logic.score = 50
        self.logic.autoclicker()
        self.assertEqual(self.logic.score, 50)
        self.assertEqual(self.logic.autoclickers_bought, 0)

    def test_can_purchase_autoclicker_with_sufficent_score(self):
        self.logic.score = 100
        self.logic.autoclicker()
        self.assertEqual(self.logic.score, 0)
        self.assertEqual(self.logic.autoclickers_bought, 1)

    def test_autoclicker_upgrade_is_working(self):
        self.logic.score = 400
        self.logic.autoclickers_bought = 1
        self.logic.autoclicker_upgrade()
        self.assertEqual(self.logic.score, 0)
        self.assertEqual(self.logic.autoclicker_upgrades_bought, 1)
        self.assertEqual(self.logic.autoclicker_power, 2)
        self.assertEqual(self.logic.autoclicker_upgrade_cost, 1200)

    def test_cannot_buy_autoclicker_upgrade_without_autoclickers(self):
        self.logic.score = 400
        self.logic.autoclickers_bought = 0
        self.logic.autoclicker_upgrade()
        self.assertEqual(self.logic.score, 400)
        self.assertEqual(self.logic.autoclicker_upgrades_bought, 0)

    def test_autoclickers_are_working(self):
        self.logic.autoclickers_bought = 2
        self.logic.autoclicker_click()
        self.assertEqual(self.logic.score, 2)
        self.logic.score = 400
        self.logic.autoclicker_upgrade()
        self.logic.autoclicker_click()
        self.assertEqual(self.logic.score, 4)
class IntelligentAgent2(store.object):

    # initialize all the member variables
    def __init__(self):
        # AI contains the Game Logic
        self.ai_logic = GameLogic()
        # discount factor in the Q-learning algorithm
        self.discount = 0.4
        # k value for exploration
        self.k = 2

        # Set up for Parent
        self.p_change = 0

        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0

        # setup and initialize ordinal matrix
        self.p_ord_matrix = []
        self.ord_t = 0
        self.ord_u = 0
        self.ord_v = 0
        self.ord_w = 0
        self.update_p_ord_matrix()
        self.save_p_ord_matrix()

        # setup and initialize Q-value matrix
        self.q_t = self.ai_logic.t[1]
        self.q_u = self.ai_logic.u[1]
        self.q_v = self.ai_logic.v[1]
        self.q_w = self.ai_logic.w[1]
        self.p_q_matrix = []
        self.update_p_q_matrix()

        # setup and initialize probability matrix
        self.p_probability_matrix = []
        self.update_p_probability_matrix()

        # Set up for Child
        self.c_change = 0

        self.o_count = 0
        self.p_count = 0
        self.q_count = 0
        self.y_count = 0

        # setup and initialize ordinal matrix
        self.c_ord_matrix = []
        self.ord_o = 0
        self.ord_p = 0
        self.ord_q = 0
        self.ord_y = 0
        self.update_c_ord_matrix()
        self.save_c_ord_matrix()

        # setup and initialize Q-value matrix
        self.q_o = self.ai_logic.t[0]
        self.q_p = self.ai_logic.u[0]
        self.q_q = self.ai_logic.v[0]
        self.q_y = self.ai_logic.w[0]
        self.c_q_matrix = []
        self.update_c_q_matrix()

        # setup and initialize probability matrix
        self.c_probability_matrix = []
        self.update_c_probability_matrix()

    def reset_p_learning_rate(self):
        self.t_count = 0
        self.u_count = 0
        self.v_count = 0
        self.w_count = 0

    def reset_c_learning_rate(self):
        self.o_count = 0
        self.p_count = 0
        self.q_count = 0
        self.y_count = 0

    # function to calculate new ordinal matrix with new state and store the ordinal matrix
    def update_ord_matrix(self):
        self.update_p_ord_matrix()
        self.update_c_ord_matrix()

    def update_p_ord_matrix(self):
        self.ai_logic.update_matrix()
        self.p_ord_matrix = []
        temp_matrix = [
            self.ai_logic.t[1], self.ai_logic.u[1], self.ai_logic.v[1],
            self.ai_logic.w[1]
        ]

        # for each action, rank its reward against the others, giving a matrix representing 41 states
        for i in range(4):
            count = 1
            for j in range(4):
                if i == j:
                    continue
                if temp_matrix[i] > temp_matrix[j]:
                    count += 1
            self.p_ord_matrix.append(count)

    def update_c_ord_matrix(self):
        self.ai_logic.update_matrix()
        self.c_ord_matrix = []
        temp_matrix = [
            self.ai_logic.t[0], self.ai_logic.u[0], self.ai_logic.v[0],
            self.ai_logic.w[0]
        ]

        for i in range(4):
            count = 1
            for j in range(4):
                if i == j:
                    continue
                if temp_matrix[i] > temp_matrix[j]:
                    count += 1
            self.c_ord_matrix.append(count)

    # store ordinal values in the class to remember
    def save_p_ord_matrix(self):
        self.ord_t = self.p_ord_matrix[0]
        self.ord_u = self.p_ord_matrix[1]
        self.ord_v = self.p_ord_matrix[2]
        self.ord_w = self.p_ord_matrix[3]

    def save_c_ord_matrix(self):
        self.ord_o = self.c_ord_matrix[0]
        self.ord_p = self.c_ord_matrix[1]
        self.ord_q = self.c_ord_matrix[2]
        self.ord_y = self.c_ord_matrix[3]

    # checks if the new calculated ordinal matrix is different to the old one, representing a change in state
    def ord_matrix_change(self):
        parent = self.p_ord_matrix_change()
        child = self.c_ord_matrix_change()
        return parent or child

    def p_ord_matrix_change(self):
        if (self.ord_t != self.p_ord_matrix[0]
                or self.ord_u != self.p_ord_matrix[1]
                or self.ord_v != self.p_ord_matrix[2]
                or self.ord_w != self.p_ord_matrix[3]):
            self.save_p_ord_matrix()
            self.p_change += 1
            self.reset_p_learning_rate()
            return True
        return False

    def c_ord_matrix_change(self):
        if (self.ord_o != self.c_ord_matrix[0]
                or self.ord_p != self.c_ord_matrix[1]
                or self.ord_q != self.c_ord_matrix[2]
                or self.ord_y != self.c_ord_matrix[3]):
            self.save_c_ord_matrix()
            self.c_change += 1
            self.reset_c_learning_rate()
            return True
        return False

    # updates the q matrix with new values of Q for each state change
    def update_q_matrix(self):
        self.update_p_q_matrix()
        self.update_c_q_matrix()

    def update_p_q_matrix(self):
        self.p_q_matrix = []
        self.p_q_matrix.append(self.q_t)
        self.p_q_matrix.append(self.q_u)
        self.p_q_matrix.append(self.q_v)
        self.p_q_matrix.append(self.q_w)

    def update_c_q_matrix(self):
        self.c_q_matrix = []
        self.c_q_matrix.append(self.q_o)
        self.c_q_matrix.append(self.q_p)
        self.c_q_matrix.append(self.q_q)
        self.c_q_matrix.append(self.q_y)

    # updates the Q value for action t
    def update_q_t(self):
        if self.t_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.t_count
        self.q_t = self.q_t + learning_rate * (
            self.ai_logic.t[1] +
            (self.discount * max(self.p_q_matrix)) - self.q_t)

    # updates the Q value for action u
    def update_q_u(self):
        if self.u_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.u_count
        self.q_u = self.q_u + learning_rate * (
            self.ai_logic.u[1] +
            (self.discount * max(self.p_q_matrix)) - self.q_u)

    # updates the Q value for action v
    def update_q_v(self):
        if self.v_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.v_count
        self.q_v = self.q_v + learning_rate * (
            self.ai_logic.v[1] +
            (self.discount * max(self.p_q_matrix)) - self.q_v)

    # updates the Q value for action w
    def update_q_w(self):
        if self.w_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.w_count
        self.q_w = self.q_w + learning_rate * (
            self.ai_logic.w[1] +
            (self.discount * max(self.p_q_matrix)) - self.q_w)

    # updates the Q value for action o
    def update_q_o(self):
        if self.o_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.o_count
        self.q_o = self.q_o + learning_rate * (
            self.ai_logic.t[0] +
            (self.discount * max(self.c_q_matrix)) - self.q_o)

    # updates the Q value for action p
    def update_q_p(self):
        if self.p_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.p_count
        self.q_p = self.q_p + learning_rate * (
            self.ai_logic.u[0] +
            (self.discount * max(self.c_q_matrix)) - self.q_p)

    # updates the Q value for action q
    def update_q_q(self):
        if self.q_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.q_count
        self.q_q = self.q_q + learning_rate * (
            self.ai_logic.v[0] +
            (self.discount * max(self.c_q_matrix)) - self.q_q)

    # updates the Q value for action y
    def update_q_y(self):
        if self.y_count == 0:
            learning_rate = 1
        else:
            learning_rate = 1.0 / self.y_count
        self.q_y = self.q_y + learning_rate * (
            self.ai_logic.w[0] +
            (self.discount * max(self.c_q_matrix)) - self.q_y)
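
    # The eight update_q_* methods above differ only in which counter, reward
    # entry, and Q matrix they read. A minimal sketch of a shared helper
    # (illustrative only; not wired into the class):
    @staticmethod
    def _q_update(q_value, count, reward, q_matrix, discount):
        # visit-count learning rate: 1 on the first visit, 1/n afterwards
        learning_rate = 1.0 if count == 0 else 1.0 / count
        return q_value + learning_rate * (
            reward + (discount * max(q_matrix)) - q_value)
    # e.g. update_q_t would then reduce to:
    #     self.q_t = self._q_update(self.q_t, self.t_count,
    #                               self.ai_logic.t[1], self.p_q_matrix,
    #                               self.discount)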

    # updates the probability matrices after a change in Q values; each
    # action's probability is k**Q(action) divided by the sum of k**Q over
    # all four actions
    def update_probability_matrix(self):
        self.update_p_probability_matrix()
        self.update_c_probability_matrix()

    def update_p_probability_matrix(self):
        self.p_probability_matrix = []
        self.update_p_q_matrix()
        total = 0
        for i in range(4):
            total += self.k**self.p_q_matrix[i]
        self.p_probability_matrix.append((self.k**self.q_t) / total)
        self.p_probability_matrix.append((self.k**self.q_u) / total)
        self.p_probability_matrix.append((self.k**self.q_v) / total)
        self.p_probability_matrix.append((self.k**self.q_w) / total)

    def update_c_probability_matrix(self):
        self.c_probability_matrix = []
        self.update_c_q_matrix()
        total = 0
        for i in range(4):
            total += self.k**self.c_q_matrix[i]
        self.c_probability_matrix.append((self.k**self.q_o) / total)
        self.c_probability_matrix.append((self.k**self.q_p) / total)
        self.c_probability_matrix.append((self.k**self.q_q) / total)
        self.c_probability_matrix.append((self.k**self.q_y) / total)
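
    # Worked example of the rule above: with k = 2 and Q values [1, 0, 0, 0],
    # the weights are [2, 1, 1, 1], so the probabilities come out as
    # [0.4, 0.2, 0.2, 0.2]; higher Q values are chosen more often, but every
    # action keeps a nonzero chance of being explored. A minimal sketch of
    # the shared computation (illustrative only; the two methods above keep
    # their explicit form):
    @staticmethod
    def _boltzmann_probabilities(q_values, k):
        total = sum(k**q for q in q_values)
        return [(k**q) / total for q in q_values]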

    # checks if the AI won the game given an action pair
    def check_win(self, adult, child):
        return self.ai_logic.check_win(adult, child)

    # returns the number of rounds the AI has played
    def get_round(self):
        return self.ai_logic.round

    def parent_move(self):
        # random number from 0 to 1
        choice = random.random()
        # determine the probabilistic range of each action as cumulative
        # thresholds
        prob_t = self.p_probability_matrix[0]
        prob_u = prob_t + self.p_probability_matrix[1]
        prob_v = prob_u + self.p_probability_matrix[2]

        # if t is chosen, adult attends
        if choice < prob_t:
            adult = "attend"
        # if u is chosen, adult ignores
        elif choice < prob_u:
            adult = "ignore"
        # if v is chosen, adult attends
        elif choice < prob_v:
            adult = "attend"
        # if w is chosen, adult ignores
        else:
            adult = "ignore"

        return adult

    def child_move(self):
        # random number from 0 to 1
        choice = random.random()
        # determine the probabilistic range of each action as cumulative
        # thresholds
        prob_o = self.c_probability_matrix[0]
        prob_p = prob_o + self.c_probability_matrix[1]
        prob_q = prob_p + self.c_probability_matrix[2]

        # if o is chosen, child goes
        if choice < prob_o:
            child = "go"
        # if p is chosen, child goes
        elif choice < prob_p:
            child = "go"
        # if q is chosen, child does not go
        elif choice < prob_q:
            child = "dontgo"
        # if y is chosen, child does not go
        else:
            child = "dontgo"

        return child
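
    # Both *_move methods implement roulette-wheel selection over cumulative
    # probability thresholds. The same draw can be expressed with the
    # standard library; a minimal sketch (illustrative only, not used by the
    # class):
    @staticmethod
    def _sample_index(probabilities):
        # random.choices draws one index according to the given weights
        return random.choices(range(len(probabilities)),
                              weights=probabilities, k=1)[0]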

    def move(self):
        adult = self.parent_move()
        child = self.child_move()

        # find which action was chosen and update the specific Q value and probability matrix
        if adult == "attend" and child == "go":
            self.ai_logic.attend_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            # checks if the action caused a change in the ordinal matrix
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.t_count += 1
                self.update_q_t()
            if not self.check_final_action("child"):
                self.o_count += 1
                self.update_q_o()
            self.update_q_matrix()
            self.update_probability_matrix()

        if adult == "attend" and child == "dontgo":
            self.ai_logic.attend_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.v_count += 1
                self.update_q_v()
            if not self.check_final_action("child"):
                self.q_count += 1
                self.update_q_q()
            self.update_q_matrix()
            self.update_probability_matrix()

        if adult == "ignore" and child == "go":
            self.ai_logic.ignore_go()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.u_count += 1
                self.update_q_u()
            if not self.check_final_action("child"):
                self.update_q_p()
                self.p_count += 1
            self.update_q_matrix()
            self.update_probability_matrix()

        if adult == "ignore" and child == "dontgo":
            self.ai_logic.ignore_dontgo()
            self.ai_logic.update_matrix()
            self.update_ord_matrix()
            self.ord_matrix_change()
            if not self.check_final_action("adult"):
                self.w_count += 1
                self.update_q_w()
            if not self.check_final_action("child"):
                self.y_count += 1
                self.update_q_y()
            self.update_q_matrix()
            self.update_probability_matrix()

        return [adult, child]

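    # an action counts as "final" once its selection probability rounds to
    # 1.00; move() skips counting and Q updates for final actions so the
    # converged choice is no longer perturbed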
    def check_final_action(self, player):
        if player == "adult":
            matrix = self.p_probability_matrix
        elif player == "child":
            matrix = self.c_probability_matrix
        else:
            raise ValueError(f"Unrecognized player {player}")
        return any(round(prob, 2) == 1.00 for prob in matrix)
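
    # The four branches of move() above are structurally identical; only the
    # GameLogic call, the counters, and the Q updates differ. A table-driven
    # sketch of the same dispatch (assuming the attribute names used above;
    # illustrative only, not wired into move()):
    _MOVE_TABLE = {
        ("attend", "go"): ("attend_go", "t_count", "update_q_t",
                           "o_count", "update_q_o"),
        ("attend", "dontgo"): ("attend_dontgo", "v_count", "update_q_v",
                               "q_count", "update_q_q"),
        ("ignore", "go"): ("ignore_go", "u_count", "update_q_u",
                           "p_count", "update_q_p"),
        ("ignore", "dontgo"): ("ignore_dontgo", "w_count", "update_q_w",
                               "y_count", "update_q_y"),
    }

    def _move_via_table(self, adult, child):
        logic_name, a_count, a_update, c_count, c_update = \
            self._MOVE_TABLE[(adult, child)]
        getattr(self.ai_logic, logic_name)()
        self.ai_logic.update_matrix()
        self.update_ord_matrix()
        self.ord_matrix_change()
        if not self.check_final_action("adult"):
            setattr(self, a_count, getattr(self, a_count) + 1)
            getattr(self, a_update)()
        if not self.check_final_action("child"):
            setattr(self, c_count, getattr(self, c_count) + 1)
            getattr(self, c_update)()
        self.update_q_matrix()
        self.update_probability_matrix()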