def test_recall_subtree_not_found(recall_tree, some_state3):
    # Plant a state that does not match some_state3, so recalling the
    # subtree for some_state3 should yield a fresh node with value 0.
    plant_state = mcts.State()
    plant_state.ally_starting = True
    plant_state.ally_team = [1]
    plant_state.enemy_team = [2]

    node = mcts.recall_subtree(some_state3, recall_tree(plant_state), {1})

    assert 0 == node.value

def test_recall_subtree_found_tree(recall_tree):
    # Recall the subtree for the same state that was planted; the matching
    # node should be returned with its stored value of 5.
    plant_state = mcts.State()
    plant_state.ally_starting = True
    plant_state.ally_team = [1]
    plant_state.enemy_team = [2]

    node = mcts.recall_subtree(plant_state, recall_tree(plant_state), {1})

    assert 5 == node.value

def evaluate_MCTS_against_real_matches(data):
    # data is packed as ((enemy_team, banned_champs), ally_starting, exploration_term).
    ally_wins = 0
    enemy_wins = 0
    total_win_pct = 0
    enemy_team = data[0][0]
    banned_champs = data[0][1]
    ally_starting = data[1]
    exploration_term = data[2]

    state = MCTS.State()
    state.ally_starting = ally_starting
    tree = None

    # Draft until both teams have five champions.
    while len(state.enemy_team) < 5 or len(state.ally_team) < 5:
        if not ally_starting:
            pick_for_enemy_team(enemy_team, state, ally_starting)

        tree = MCTS.recall_subtree(state, tree, set(banned_champs))
        allowed_champions = list.copy(tree.possible_actions)
        suggestions, tree = MCTS.run_mcts(10, tree, True, allowed_champions,
                                          exploration_term=exploration_term)
        pick_for_ally_team(suggestions, enemy_team, state)

        if ally_starting:
            pick_for_enemy_team(enemy_team, state, ally_starting)

    # Score the finished draft with the neural network win predictor.
    input_vector = list.copy(state.ally_team)
    input_vector.extend(list.copy(state.enemy_team))
    result = NN.predictTeamComp(input_vector)
    total_win_pct += result

    if result > 0.5:
        ally_wins += 1
    else:
        enemy_wins += 1

    return ally_wins, enemy_wins, result

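# Hedged usage sketch (not part of the original module): the argument packing
# below mirrors how evaluate_MCTS_against_real_matches unpacks `data`, i.e.
# ((enemy_team, banned_champs), ally_starting, exploration_term). The concrete
# champion ids and the exploration term are illustrative assumptions.
def _example_real_match_evaluation():
    example_enemy_team = [12, 34, 56, 78, 90]            # champion ids from a logged match (assumed)
    example_bans = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]       # ten banned champion ids (assumed)
    data = ((example_enemy_team, example_bans), True, 1.41)
    ally_wins, enemy_wins, result = evaluate_MCTS_against_real_matches(data)
    print(ally_wins, enemy_wins, result)
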
def evaluate_MCTS_against_winpct(data):
    # data is packed as (ally_starting, exploration_term).
    ally_wins = 0
    enemy_wins = 0
    total_win_pct = 0
    ally_starting = data[0]
    exploration_term = data[1]

    state = MCTS.State()
    state.ally_starting = ally_starting
    tree = None

    # Ban ten random champions and restrict the draft to the rest.
    banned_champs = set(random.sample(range(0, 141), 10))
    allowed_champions = MCTS.get_allowed_champions(banned_champs)

    # Draft until both teams have five champions.
    while len(state.enemy_team) < 5 or len(state.ally_team) < 5:
        if not ally_starting:
            pick_champ_enemy_team_winpct(allowed_champions, state)

        tree = MCTS.recall_subtree(state, tree, set(banned_champs))
        allowed_champions = list.copy(tree.possible_actions)
        suggestions, tree = MCTS.run_mcts(10, tree, True, allowed_champions,
                                          exploration_term=exploration_term)

        # Apply the best suggestion: always pick the first champion, and pick
        # the second one as well when the suggestion is a double pick.
        state.ally_team.append(suggestions[0].champ)
        allowed_champions.remove(suggestions[0].champ)
        if suggestions[0].champ2 is not None:
            state.ally_team.append(suggestions[0].champ2)
            allowed_champions.remove(suggestions[0].champ2)

        if ally_starting:
            pick_champ_enemy_team_winpct(allowed_champions, state)

    # Score the finished draft with the neural network win predictor.
    input_vector = list.copy(state.ally_team)
    input_vector.extend(list.copy(state.enemy_team))
    result = NN.predictTeamComp(input_vector)
    total_win_pct += result

    if result > 0.5:
        ally_wins += 1
    elif result < 0.5:
        enemy_wins += 1

    return ally_wins, enemy_wins, result

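# Hedged usage sketch (not part of the original module): evaluate_MCTS_against_winpct
# unpacks `data` as (ally_starting, exploration_term); bans are sampled inside the
# function itself. The exploration term of 1.41 is an illustrative assumption.
def _example_winpct_evaluation():
    ally_wins, enemy_wins, result = evaluate_MCTS_against_winpct((True, 1.41))
    print(ally_wins, enemy_wins, result)
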
def evaluate_MCTS_VS_MCTS(data):
    # data is packed as (number_of_matches, exploration_term_one, exploration_term_two).
    number_of_matches = data[0]
    exploration_term_one = data[1]
    exploration_term_two = data[2]

    ally_starting = True
    ally_state = MCTS.State()
    ally_state.ally_starting = ally_starting
    enemy_state = MCTS.State()
    enemy_state.ally_starting = not ally_starting
    ally_tree = None
    enemy_tree = None

    total_win_pct = 0
    ally_wins = 0
    enemy_wins = 0

    for iteration in range(0, number_of_matches):
        banned_champs = set(random.sample(range(0, 141), 10))

        while len(ally_state.enemy_team) < 5 or len(ally_state.ally_team) < 5:
            # Ally turn
            ally_tree = MCTS.recall_subtree(ally_state, ally_tree, set(banned_champs))
            allowed_champions = list.copy(ally_tree.possible_actions)
            suggestions, reduced_root = MCTS.run_mcts(10, ally_tree, True, allowed_champions,
                                                      10, exploration_term_one)
            ally_tree = reduced_root

            # Mirror the ally pick(s) into both players' views of the draft.
            ally_state.ally_team.append(suggestions[0].champ)
            enemy_state.enemy_team.append(suggestions[0].champ)
            if suggestions[0].champ2 is not None:
                ally_state.ally_team.append(suggestions[0].champ2)
                enemy_state.enemy_team.append(suggestions[0].champ2)

            # Enemy turn
            enemy_tree = MCTS.recall_subtree(enemy_state, enemy_tree, set(banned_champs))
            allowed_champions = list.copy(enemy_tree.possible_actions)
            suggestions, reduced_root = MCTS.run_mcts(10, enemy_tree, True, allowed_champions,
                                                      10, exploration_term_two)
            enemy_tree = reduced_root

            # Mirror the enemy pick(s) into both players' views of the draft.
            enemy_state.ally_team.append(suggestions[0].champ)
            ally_state.enemy_team.append(suggestions[0].champ)
            if suggestions[0].champ2 is not None:
                enemy_state.ally_team.append(suggestions[0].champ2)
                ally_state.enemy_team.append(suggestions[0].champ2)

        # Score the finished draft with the neural network win predictor.
        input_vector = list.copy(ally_state.ally_team)
        input_vector.extend(list.copy(ally_state.enemy_team))
        result_from_nn = NN.predictTeamComp(input_vector)
        total_win_pct += result_from_nn

        if result_from_nn > 0.5:
            ally_wins += 1
        else:
            enemy_wins += 1

        # Reset both players for the next match.
        ally_tree = None
        ally_state = MCTS.State()
        ally_state.ally_starting = ally_starting
        enemy_tree = None
        enemy_state = MCTS.State()
        enemy_state.ally_starting = not ally_starting

    avg_pct = total_win_pct / number_of_matches
    return ally_wins, enemy_wins, avg_pct

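# Hedged usage sketch (not part of the original module): evaluate_MCTS_VS_MCTS
# unpacks `data` as (number_of_matches, exploration_term_one, exploration_term_two)
# and averages the predicted win percentage over the matches internally. The
# match count and exploration terms below are illustrative assumptions, e.g.
# pitting two different exploration terms against each other.
def _example_mcts_vs_mcts_evaluation():
    ally_wins, enemy_wins, avg_pct = evaluate_MCTS_VS_MCTS((100, 1.41, 2.0))
    print("ally wins:", ally_wins, "enemy wins:", enemy_wins, "avg win pct:", avg_pct)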