    def test_kuhn_action_minus_tilted_agent(self):
        kuhn_equilibrium, _ = read_strategy_from_file(
            KUHN_POKER_GAME_FILE_PATH,
            'strategies/kuhn.limit.2p-equilibrium.strategy')

        game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
        exploitability = Exploitability(game)

        tilted_agent_strategy = create_agent_strategy_from_trained_strategy(
            KUHN_POKER_GAME_FILE_PATH, kuhn_equilibrium, Action.CALL,
            TiltType.ADD, -0.5)
        self.assertTrue(is_correct_strategy(tilted_agent_strategy))
        self.assertFalse(
            is_strategies_equal(kuhn_equilibrium, tilted_agent_strategy))

        equilibrium_exploitability = exploitability.evaluate(kuhn_equilibrium)
        raise_add_tilted_exploitability = exploitability.evaluate(
            tilted_agent_strategy)
        self.assertTrue(
            raise_add_tilted_exploitability > equilibrium_exploitability)
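        # For illustration, a two-argument call (the form used later on this
        # page): evaluate(opponent, strategy) returns strategy's utility when
        # playing against opponent.
        tilted_vs_equilibrium = exploitability.evaluate(kuhn_equilibrium,
                                                        tilted_agent_strategy)
        print('Tilted agent value vs equilibrium: %s' % tilted_vs_equilibrium)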
    def train_and_show_results(self, test_spec):
        game_file_path = test_spec['game_file_path']
        game = acpc.read_game_file(game_file_path)

        base_strategy, _ = read_strategy_from_file(
            game_file_path, test_spec['base_strategy_path'])

        agents = test_spec['opponent_tilt_types']
        num_agents = len(agents)

        game_name = game_file_path.split('/')[1][:-5]
        overwrite_figure = test_spec.get('overwrite_figure', False)
        figure_path = get_new_path(
            '%s/%s(it:%s-st:%s)' %
            (FIGURES_FOLDER, game_name, test_spec['training_iterations'],
             test_spec['checkpoint_iterations']), '.png', overwrite_figure)
        create_path_dirs(figure_path)

        exp = Exploitability(game)

        checkpoints_count = math.ceil(
            (test_spec['training_iterations'] - 700) /
            test_spec['checkpoint_iterations'])
        iteration_counts = np.zeros(checkpoints_count)
        exploitability_values = np.zeros([num_agents, checkpoints_count])
        vs_opponent_utility_values = np.zeros([num_agents, checkpoints_count])
        opponent_exploitability_values = np.zeros(num_agents)
        for i, agent in enumerate(agents):
            print('%s/%s' % (i + 1, num_agents))

            opponent_strategy = create_agent_strategy_from_trained_strategy(
                game_file_path, base_strategy, agent[0], agent[1], agent[2])

            self.assertTrue(is_correct_strategy(opponent_strategy))

            if test_spec.get('print_opponent_strategies', False):
                write_strategy_to_file(
                    opponent_strategy, '%s/%s.strategy' %
                    (os.path.dirname(figure_path), get_agent_name(agent)))

            if test_spec.get('print_best_responses', False):
                opponent_best_response = BestResponse(game).solve(
                    opponent_strategy)
                write_strategy_to_file(
                    opponent_best_response, '%s/%s-best_response.strategy' %
                    (os.path.dirname(figure_path), get_agent_name(agent)))

            if PLOT_OPPONENT_EXPLOITABILITY:
                opponent_exploitability = exp.evaluate(opponent_strategy)
                opponent_exploitability_values[i] = opponent_exploitability
                print('%s exploitability: %s' %
                      (get_agent_name(agent), opponent_exploitability))

            def checkpoint_callback(game_tree, checkpoint_index, iterations):
                if i == 0:
                    iteration_counts[checkpoint_index] = iterations
                self.assertTrue(is_correct_strategy(game_tree))
                exploitability_values[i, checkpoint_index] = exp.evaluate(
                    game_tree)
                vs_opponent_utility_values[i, checkpoint_index] = exp.evaluate(
                    opponent_strategy, game_tree)

            rnr = RestrictedNashResponse(game, opponent_strategy, agent[3])
            rnr.train(test_spec['training_iterations'],
                      checkpoint_iterations=test_spec['checkpoint_iterations'],
                      checkpoint_callback=checkpoint_callback)

            if test_spec.get('print_response_strategies', False):
                write_strategy_to_file(
                    rnr.game_tree,
                    '%s-%s-p=%s.strategy' % (figure_path[:-len('.png')],
                                             get_agent_name(agent), agent[3]))

            print('Vs opponent value: %s' %
                  exp.evaluate(opponent_strategy, rnr.game_tree))
            print('Exploitability: %s' % exp.evaluate(rnr.game_tree))

            plt.figure(dpi=300)
            ax = plt.subplot(111)
            for j in range(i + 1):
                p = plt.plot(iteration_counts,
                             exploitability_values[j],
                             label='%s-p=%s exploitability' %
                             (get_agent_name(agents[j]), agents[j][3]),
                             linewidth=LINE_WIDTH)
                plt.plot(iteration_counts,
                         vs_opponent_utility_values[j],
                         '--',
                         label='Utility against opponent strategy',
                         color=p[0].get_color(),
                         linewidth=LINE_WIDTH)
                if PLOT_OPPONENT_EXPLOITABILITY:
                    plt.plot(iteration_counts,
                             np.ones(checkpoints_count) *
                             opponent_exploitability_values[j],
                             ':',
                             label='Opponent exploitability',
                             color=p[0].get_color(),
                             linewidth=LINE_WIDTH)

            plt.title(test_spec['title'])
            plt.xlabel('Training iterations')
            plt.ylabel('Strategy exploitability [mbb/g]')
            plt.grid()
            handles, labels = ax.get_legend_handles_labels()
            new_handles = []
            new_labels = []
            for i in range(PLOT_COUNT_PER_AGENT):
                for j in range(i, len(handles), PLOT_COUNT_PER_AGENT):
                    new_handles += [handles[j]]
                    new_labels += [labels[j]]
            lgd = plt.legend(new_handles,
                             new_labels,
                             loc='upper center',
                             bbox_to_anchor=(0.5, -0.1),
                             ncol=PLOT_COUNT_PER_AGENT)

            plt.savefig(figure_path,
                        bbox_extra_artists=(lgd, ),
                        bbox_inches='tight')

        print('Figure written to %s' % figure_path)
class RnrParameterOptimizer():
    def __init__(self,
                 game,
                 iterations=1500,
                 checkpoint_iterations=10,
                 weight_delay=None,
                 show_progress=True):
        self.game = game
        self.iterations = iterations
        self.checkpoint_iterations = checkpoint_iterations
        self.weight_delay = weight_delay
        self.show_progress = show_progress
        self.exp = Exploitability(game)

    def train(self, opponent_strategy, exploitability,
              max_exploitability_delta):

        result_strategy = GameTreeBuilder(
            self.game, StrategyTreeNodeProvider()).build_tree()
        best_exploitability = float('inf')
        best_exploitability_delta = float('inf')

        def checkpoint_callback(game_tree, checkpoint_index, iterations):
            if iterations <= ((3 / 4) * self.iterations):
                # Make sure the strategy at least partially converged
                return

            nonlocal result_strategy
            nonlocal best_exploitability_delta
            nonlocal best_exploitability

            current_exploitability = self.exp.evaluate(game_tree)
            current_exploitability_delta = abs(current_exploitability -
                                               exploitability)
            if current_exploitability_delta < best_exploitability_delta:
                if current_exploitability_delta <= max_exploitability_delta:
                    copy_strategy(result_strategy, game_tree)
                best_exploitability_delta = current_exploitability_delta
                best_exploitability = current_exploitability

        iteration = 0
        p_low = 0
        p_high = 1

        if self.show_progress:
            print()
            print('Exploitability: %s +- %s' %
                  (exploitability, max_exploitability_delta))

        while True:
            if self.show_progress:
                iteration += 1
                print('Run %s' % iteration)
                print('Interval: %s - %s' % (p_low, p_high))
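            # Bisect the RNR mixing parameter p: when the trained response ends
            # up more exploitable than the target, lower p; otherwise raise it.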
            p_current = p_low + (p_high - p_low) / 2
            rnr = RestrictedNashResponse(self.game,
                                         opponent_strategy,
                                         p_current,
                                         show_progress=self.show_progress)
            if self.weight_delay:
                rnr.train(self.iterations,
                          checkpoint_iterations=self.checkpoint_iterations,
                          checkpoint_callback=checkpoint_callback,
                          weight_delay=self.weight_delay)
            else:
                rnr.train(self.iterations,
                          checkpoint_iterations=self.checkpoint_iterations,
                          checkpoint_callback=checkpoint_callback)

            if best_exploitability_delta < max_exploitability_delta:
                print('Result exploitability: %s, p=%s' %
                      (best_exploitability, p_current))
                return result_strategy, best_exploitability, p_current

            if self.show_progress:
                print('Exploitability: %s, p=%s, current_delta=%s' %
                      (best_exploitability, p_current,
                       best_exploitability_delta))

            if best_exploitability > exploitability:
                p_high = p_current
            else:
                p_low = p_current
            best_exploitability = float('inf')
            best_exploitability_delta = float('inf')
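A minimal usage sketch of the optimizer above; the game file and opponent strategy paths are placeholders. train returns the response strategy, its achieved exploitability, and the p value found by the search.

# Hypothetical usage of RnrParameterOptimizer (placeholder paths):
game = acpc.read_game_file('games/kuhn.limit.2p.game')
opponent_strategy, _ = read_strategy_from_file(
    'games/kuhn.limit.2p.game', 'strategies/some_opponent.strategy')
optimizer = RnrParameterOptimizer(game, iterations=2000, checkpoint_iterations=50)
response, achieved_exploitability, p = optimizer.train(
    opponent_strategy, exploitability=250, max_exploitability_delta=5)
print('Found p=%s with exploitability %s mbb/g' % (p, achieved_exploitability))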
    def train_and_show_results(self, test_spec):
        game = acpc.read_game_file(test_spec['game_file_path'])

        weak_opponent_samples_tree = GameTreeBuilder(
            game, SamplesTreeNodeProvider()).build_tree()
        weak_opponent_strategy_tree = GameTreeBuilder(
            game, StrategyTreeNodeProvider()).build_tree()

        def on_node(samples_node, strategy_node):
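            # Fill the samples tree with random action counts and derive the
            # matching weak opponent strategy (uniform where a node has no samples).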
            if isinstance(samples_node, ActionNode):
                child_count = len(samples_node.children)
                samples_count = random.randrange(15)
                for i, a in enumerate(samples_node.children):
                    if i < (child_count - 1) and samples_count > 0:
                        action_samples_count = random.randrange(samples_count + 1)
                        samples_count -= action_samples_count
                        samples_node.action_decision_counts[a] = action_samples_count
                    else:
                        samples_node.action_decision_counts[a] = samples_count
                samples_sum = np.sum(samples_node.action_decision_counts)
                if samples_sum > 0:
                    strategy_node.strategy = samples_node.action_decision_counts / samples_sum
                else:
                    for a in strategy_node.children:
                        strategy_node.strategy[a] = 1 / len(
                            strategy_node.children)

        walk_trees(on_node, weak_opponent_samples_tree,
                   weak_opponent_strategy_tree)

        self.assertTrue(is_correct_strategy(weak_opponent_strategy_tree))

        exploitability = Exploitability(game)
        num_test_counts = test_spec['test_counts']
        data = np.zeros([num_test_counts, 2, len(P_MAX_VALUES)])
        for i in range(num_test_counts):
            print('%s/%s' % (i + 1, num_test_counts))

            for j, p_max in enumerate(P_MAX_VALUES):
                print('Pmax: %s - %s/%s' % (p_max, j + 1, len(P_MAX_VALUES)))

                dbr = DataBiasedResponse(game,
                                         weak_opponent_samples_tree,
                                         p_max=p_max)
                dbr.train(test_spec['training_iterations'])

                data[i, 0, j] = exploitability.evaluate(dbr.game_tree)
                data[i, 1, j] = exploitability.evaluate(
                    weak_opponent_strategy_tree, dbr.game_tree)

                plt.figure(dpi=160)
                for k in range(i + 1):
                    run_index = math.floor(k / 2)
                    if k < i or j == (len(P_MAX_VALUES) - 1):
                        xdata = data[k, 0, :]
                        ydata = data[k, 1, :]
                    else:
                        xdata = data[k, 0, 0:j + 1]
                        ydata = data[k, 1, 0:j + 1]
                    plt.plot(xdata,
                             ydata,
                             label='Run %s' % (run_index + 1),
                             marker='o',
                             linewidth=0.8)

                if 'title' in test_spec:
                    plt.title(test_spec['title'])
                plt.xlabel('DBR trained strategy exploitability [mbb/g]')
                plt.ylabel(
                    'Random opponent exploitation by DBR strategy [mbb/g]')
                plt.grid()
                if num_test_counts > 1:
                    plt.legend()

                game_name = test_spec['game_file_path'].split('/')[1][:-5]
                figure_output_path = '%s/%s(it:%s).png' % (
                    FIGURES_FOLDER, game_name,
                    test_spec['training_iterations'])

                figures_directory = os.path.dirname(figure_output_path)
                if not os.path.exists(figures_directory):
                    os.makedirs(figures_directory)

                plt.savefig(figure_output_path)

        print('\033[91mThis test needs your assistance! ' +
              'Check the generated graph %s!\033[0m' % figure_output_path)
Example #6
    def train_and_show_results(self, test_spec):
        game = acpc.read_game_file(test_spec['game_file_path'])

        exploitability = Exploitability(game)

        iteration_counts = np.zeros(0)
        exploitability_values = np.zeros([1, 0])
        best_exploitability = float("inf")
        best_exploitability_strategy = GameTreeBuilder(
            game, StrategyTreeNodeProvider()).build_tree()

        def checkpoint_callback(game_tree, checkpoint_index, iterations):
            nonlocal iteration_counts
            nonlocal exploitability_values
            nonlocal best_exploitability
            nonlocal best_exploitability_strategy

            iteration_counts = np.append(iteration_counts, iterations)

            if CHECK_STRATEGY_CORRECTNESS:
                self.assertTrue(is_correct_strategy(game_tree))

            exploitability_value = exploitability.evaluate(game_tree)
            exploitability_values = np.append(exploitability_values,
                                              exploitability_value)
            if COLLECT_MIN_EXPLOITABILITY and exploitability_value < best_exploitability:
                best_exploitability = exploitability_value
                copy_strategy(best_exploitability_strategy, game_tree)

        cfr = Cfr(game)
        cfr.train(test_spec['training_iterations'],
                  weight_delay=test_spec['weight_delay'],
                  checkpoint_iterations=test_spec['checkpoint_iterations'],
                  checkpoint_callback=checkpoint_callback,
                  minimal_action_probability=0.00006)

        best_response = BestResponse(game).solve(cfr.game_tree)
        player_utilities, _ = PlayerUtility(game).evaluate(
            cfr.game_tree, best_response)
        print(player_utilities.tolist())
        print('Exploitability: %s' % exploitability.evaluate(cfr.game_tree))

        if COLLECT_MIN_EXPLOITABILITY:
            min_exploitability = exploitability.evaluate(
                best_exploitability_strategy)
            min_exploitability_best_response = BestResponse(game).solve(
                best_exploitability_strategy)
            min_exploitability_player_utilities, _ = PlayerUtility(
                game).evaluate(best_exploitability_strategy,
                               min_exploitability_best_response)
            self.assertEqual(min_exploitability, exploitability_values.min())
            print('Minimum exploitability: %s' % min_exploitability)
            print('Minimum exploitability player utilities: %s' %
                  min_exploitability_player_utilities.tolist())
        else:
            print('Minimum exploitability: %s' % exploitability_values.min())

        plt.figure(dpi=160)
        plt.plot(iteration_counts, exploitability_values, linewidth=0.8)

        plt.title(test_spec['title'])
        plt.xlabel('Training iterations')
        plt.ylabel('Strategy exploitability [mbb/g]')
        plt.grid()

        game_name = test_spec['game_file_path'].split('/')[1][:-5]
        figure_output_path = '%s/%s(it:%s-st:%s).png' % (
            FIGURES_FOLDER, game_name, test_spec['training_iterations'],
            test_spec['checkpoint_iterations'])

        figures_directory = os.path.dirname(figure_output_path)
        if not os.path.exists(figures_directory):
            os.makedirs(figures_directory)

        plt.savefig(figure_output_path)

        write_strategy_to_file(
            cfr.game_tree, '%s/%s(it:%s).strategy' %
            (FIGURES_FOLDER, game_name, test_spec['training_iterations']), [
                '# Game utility against best response: %s' %
                player_utilities.tolist()
            ])
Example #7
def optimize_portfolio(game_file_path,
                       opponent_strategies,
                       response_strategies,
                       portfolio_size=-1,
                       portfolio_cut_improvement_threshold=0.05,
                       log=False,
                       output_directory=None):

    num_opponents = len(opponent_strategies)

    if portfolio_size == num_opponents or portfolio_cut_improvement_threshold == 0:
        return response_strategies, range(num_opponents)

    game = acpc.read_game_file(game_file_path)
    exp = Exploitability(game)

    if log:
        print()

    utilities = np.zeros([num_opponents, num_opponents])
    for i in range(num_opponents):
        for j in range(num_opponents):
            utilities[i, j] = exp.evaluate(opponent_strategies[j],
                                           response_strategies[i])

    portfolio_utilities = np.zeros(num_opponents)
    response_added = np.ones(num_opponents, dtype=np.intp) * -1

    response_total_utility = np.mean(utilities, axis=1)
    best_response_index = np.argmax(response_total_utility)

    portfolio_utilities[0] = response_total_utility[best_response_index]
    response_added[0] = best_response_index

    max_utilities = np.zeros(num_opponents)
    np.copyto(max_utilities, utilities[best_response_index])

    response_available = [True] * num_opponents
    response_available[best_response_index] = False
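    # Greedily grow the portfolio: at each step add the response that maximizes the
    # mean, over all opponents, of the best utility any selected response achieves.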
    for i in range(1, num_opponents):
        best_portfolio_utility = None
        best_max_utilities = None
        best_response_to_add = None
        for j in range(num_opponents):
            if response_available[j]:
                new_max_utilities = np.maximum(max_utilities, utilities[j])
                new_portfolio_utility = np.mean(new_max_utilities)
                if best_portfolio_utility is None or new_portfolio_utility > best_portfolio_utility:
                    best_portfolio_utility = new_portfolio_utility
                    best_max_utilities = new_max_utilities
                    best_response_to_add = j
        response_available[best_response_to_add] = False
        max_utilities = best_max_utilities
        portfolio_utilities[i] = best_portfolio_utility
        response_added[i] = best_response_to_add

    final_portfolio_size = None

    if portfolio_size > 0:
        final_portfolio_size = portfolio_size
    else:
        min_portfolio_utility = portfolio_utilities[0]
        max_portfolio_utility = portfolio_utilities[-1]
        total_utility_improvement = max_portfolio_utility - min_portfolio_utility
        minimal_improvement = total_utility_improvement * portfolio_cut_improvement_threshold
        final_portfolio_size = 1
        for i in range(1, num_opponents):
            if (portfolio_utilities[i] -
                    portfolio_utilities[i - 1]) >= minimal_improvement:
                final_portfolio_size += 1
            else:
                break

    if log:
        print('Utilities table:')
        for i in range(num_opponents):
            print('\t'.join([str(u) for u in utilities[i]]))
        print('Response added: %s' % response_added)
        print('Final portfolio size: %s' % final_portfolio_size)

        plt.figure(dpi=160)
        plt.plot(
            np.arange(num_opponents, dtype=np.intp) + 1, portfolio_utilities)
        plt.plot(final_portfolio_size,
                 portfolio_utilities[final_portfolio_size - 1],
                 marker='o',
                 color='r')
        plt.title('Portfolio utility')
        plt.xlabel('Portfolio size')
        plt.ylabel('Portfolio value [mbb/g]')
        plt.grid()

        if output_directory:
            plt.savefig('%s/portfolio_size_utility.png' % output_directory)
        else:
            plt.show()

    portfolio_response_indices = response_added[:final_portfolio_size]
    return np.take(response_strategies,
                   portfolio_response_indices), portfolio_response_indices
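A sketch of how optimize_portfolio might be called, assuming opponent_strategies and response_strategies were built as in the surrounding examples; the game file path and output directory are placeholders.

# Hypothetical call: by default responses are added while the marginal gain stays
# above 5% of the total achievable improvement (portfolio_cut_improvement_threshold).
portfolio, kept_indices = optimize_portfolio(
    'games/leduc.limit.2p.game',
    opponent_strategies,
    response_strategies,
    log=True,
    output_directory='verification_results')
print('Kept response indices: %s' % list(kept_indices))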
    def create_agents_and_plot_exploitabilities(self, test_spec):
        base_strategy, _ = read_strategy_from_file(
            test_spec['game_file_path'],
            test_spec['base_strategy_path'])

        game = acpc.read_game_file(test_spec['game_file_path'])
        exploitability = Exploitability(game)

        plot_equilibrium = test_spec.get('plot_equilibrium', True)
        if plot_equilibrium:
            equilibrium_exploitability = exploitability.evaluate(base_strategy)

        tilt_probabilities = test_spec['tilt_probabilities']
        exploitability_values = np.zeros([len(TILT_TYPES), len(tilt_probabilities)])

        plot_exploitabilities = test_spec.get('plot_exploitabilities', True)
        if plot_exploitabilities:
            for i, tilt_type in enumerate(TILT_TYPES):
                for j, tilt_probability in enumerate(tilt_probabilities):
                    tilted_agent = create_agent_strategy_from_trained_strategy(
                        test_spec['game_file_path'],
                        base_strategy,
                        tilt_type[1],
                        tilt_type[2],
                        tilt_probability)
                    exploitability_values[i, j] = exploitability.evaluate(tilted_agent)

                plt.figure(dpi=160)
                for j in range(i + 1):
                    plt.plot(
                        tilt_probabilities,
                        exploitability_values[j],
                        label=TILT_TYPES[j][0],
                        linewidth=0.8)

                if plot_equilibrium:
                    plt.plot(
                        tilt_probabilities,
                        [equilibrium_exploitability] * len(tilt_probabilities),
                        'r--',
                        label='Equilibrium',
                        linewidth=1.5)

                # plt.title(test_spec['title'])
                plt.xlabel('Tilt amount')
                plt.ylabel('Agent exploitability [mbb/g]')
                plt.grid()
                plt.legend()

                figure_output_path = '%s/%s.png' % (FIGURES_FOLDER, test_spec['figure_filename'])

                figures_directory = os.path.dirname(figure_output_path)
                if not os.path.exists(figures_directory):
                    os.makedirs(figures_directory)

                plt.savefig(figure_output_path)

        plot_agent_comparison = test_spec.get('plot_agent_comparison', False)
        if plot_agent_comparison:
            agents_strategies = []
            agent_names = []
            for i, tilt_type in enumerate(TILT_TYPES):
                for j, tilt_probability in enumerate(tilt_probabilities):
                    agent_names += ['%s %s %s' % (str(tilt_type[1]).split('.')[1],
                                                  str(tilt_type[2]).split('.')[1],
                                                  tilt_probability)]
                    agents_strategies += [create_agent_strategy_from_trained_strategy(
                        test_spec['game_file_path'],
                        base_strategy,
                        tilt_type[1],
                        tilt_type[2],
                        tilt_probability)]

            num_agents = len(agent_names)
            scores_table = np.zeros([num_agents, num_agents])

            num_comparisons = 0
            for i in range(num_agents):
                for j in range(i, num_agents):
                    num_comparisons += 1

            with tqdm(total=num_comparisons) as pbar:
                for i in range(num_agents):
                    for j in range(i, num_agents):
                        scores_table[i, j] = exploitability.evaluate(agents_strategies[j], agents_strategies[i])
                        scores_table[j, i] = -scores_table[i, j]
                        pbar.update(1)

            max_score = scores_table.max()
            min_score = scores_table.min()

            # plt.figure(dpi=160)
            fig, ax = plt.subplots()

            cax = plt.imshow(scores_table, cmap=plt.cm.RdYlGn)
            plt.xticks(np.arange(num_agents), agent_names)
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45, ha="right", rotation_mode="anchor")
            plt.yticks(np.arange(num_agents), agent_names)
            # plt.yticks(rotation=35)

            # plt.tick_params(
            #     axis='x',
            #     which='both',
            #     bottom=False,
            #     top=False,
            #     labelbottom=False)

            cbar = fig.colorbar(cax, ticks=[min_score, 0, max_score])
            cbar.ax.set_yticklabels([round(min_score), '0', round(max_score)])

            plt.tight_layout()
            plt.gcf().subplots_adjust(left=0.1)

            figure_output_path = '%s/%s-comparison.png' % (FIGURES_FOLDER, test_spec['figure_filename'])

            figures_directory = os.path.dirname(figure_output_path)
            if not os.path.exists(figures_directory):
                os.makedirs(figures_directory)

            plt.savefig(figure_output_path, dpi=160)
Example #9
    def train_and_show_results(self, test_spec):
        game_file_path = test_spec['game_file_path']
        portfolio_name = test_spec['portfolio_name']
        agent_specs = test_spec['opponent_tilt_types']

        if not _check_agent_names_unique(agent_specs):
            raise AttributeError(
                'Agents must be unique so that they have unique names')

        strategies_directory_base = '%s/%s' % (TEST_OUTPUT_DIRECTORY,
                                               portfolio_name)
        strategies_directory = strategies_directory_base
        if not test_spec.get('overwrite_portfolio_path', False):
            counter = 1
            while os.path.exists(strategies_directory):
                strategies_directory = '%s(%s)' % (strategies_directory_base,
                                                   counter)
                counter += 1
        if not os.path.exists(strategies_directory):
            os.makedirs(strategies_directory)

        game = acpc.read_game_file(game_file_path)
        exp = Exploitability(game)

        # Delete results since they will be generated again
        for file in os.listdir(strategies_directory):
            absolute_path = '/'.join([strategies_directory, file])
            if os.path.isfile(absolute_path):
                os.remove(absolute_path)

        base_strategy, _ = read_strategy_from_file(
            game_file_path, test_spec['base_strategy_path'])

        num_opponents = len(agent_specs)
        opponents = []
        for agent in agent_specs:
            opponent_strategy = create_agent_strategy_from_trained_strategy(
                game_file_path, base_strategy, agent[0], agent[1], agent[2])
            opponents += [opponent_strategy]

        parallel = test_spec.get('parallel', False)

        response_paths = [
            '%s/responses/%s-response.strategy' %
            (strategies_directory, _get_agent_name(agent))
            for agent in agent_specs
        ]

        opponent_responses = [None] * num_opponents
        responses_to_train_indices = []
        responses_to_train_opponents = []
        responses_to_train_params = []
        for i in range(num_opponents):
            if os.path.exists(response_paths[i]):
                response_strategy, _ = read_strategy_from_file(
                    game_file_path, response_paths[i])
                opponent_responses[i] = response_strategy
            else:
                responses_to_train_indices += [i]
                responses_to_train_opponents += [opponents[i]]
                responses_to_train_params += [agent_specs[i][3]]

        def on_response_trained(response_index, response_strategy):
            output_file_path = response_paths[
                responses_to_train_indices[response_index]]
            output_file_dir = os.path.dirname(output_file_path)
            if not os.path.exists(output_file_dir):
                os.makedirs(output_file_dir)

            opponent_strategy = opponents[response_index]
            opponent_exploitability = exp.evaluate(opponent_strategy)
            response_exploitability = exp.evaluate(response_strategy)
            response_utility_vs_opponent = exp.evaluate(
                opponent_strategy, response_strategy)

            write_strategy_to_file(response_strategy, output_file_path, [
                'Opponent exploitability: %s' % opponent_exploitability,
                'Response exploitability: %s' % response_exploitability,
                'Response value vs opponent: %s' %
                response_utility_vs_opponent,
            ])

        print('%s responses need to be trained' %
              len(responses_to_train_opponents))

        responses_to_train_strategies = train_portfolio_responses(
            game_file_path,
            responses_to_train_opponents,
            responses_to_train_params,
            log=True,
            parallel=parallel,
            callback=on_response_trained)

        for i, j in enumerate(responses_to_train_indices):
            opponent_responses[j] = responses_to_train_strategies[i]

        if 'portfolio_cut_improvement_threshold' in test_spec:
            portfolio_strategies, response_indices = optimize_portfolio(
                game_file_path,
                opponents,
                opponent_responses,
                portfolio_cut_improvement_threshold=test_spec[
                    'portfolio_cut_improvement_threshold'],
                log=True,
                output_directory=strategies_directory)
        else:
            portfolio_strategies, response_indices = optimize_portfolio(
                game_file_path,
                opponents,
                opponent_responses,
                log=True,
                output_directory=strategies_directory)

        portfolio_size = len(portfolio_strategies)

        agent_names = [
            _get_agent_name(agent)
            for agent in np.take(agent_specs, response_indices, axis=0)
        ]

        print()
        for a in agent_specs:
            print(_get_agent_name(a))

        response_strategy_file_names = []
        for i, strategy in enumerate(portfolio_strategies):
            agent_name = agent_names[i]

            opponent_strategy = opponents[response_indices[i]]
            opponent_exploitability = exp.evaluate(opponent_strategy)
            response_exploitability = exp.evaluate(strategy)
            response_utility_vs_opponent = exp.evaluate(
                opponent_strategy, strategy)

            # Save portfolio response strategy
            response_strategy_output_file_path = '%s/%s-response.strategy' % (
                strategies_directory, agent_name)
            response_strategy_file_names += [
                response_strategy_output_file_path.split('/')[-1]
            ]
            write_strategy_to_file(
                strategy, response_strategy_output_file_path, [
                    'Opponent exploitability: %s' % opponent_exploitability,
                    'Response exploitability: %s' % response_exploitability,
                    'Response value vs opponent: %s' %
                    response_utility_vs_opponent,
                ])

            # Save opponent strategy
            opponent_strategy_file_name = '%s-opponent.strategy' % agent_name
            opponent_strategy_output_file_path = '%s/%s' % (
                strategies_directory, opponent_strategy_file_name)
            write_strategy_to_file(opponent_strategy,
                                   opponent_strategy_output_file_path)

            # Generate opponent ACPC script
            opponent_script_path = '%s/%s.sh' % (strategies_directory,
                                                 agent_name)
            shutil.copy(BASE_OPPONENT_SCRIPT_PATH, opponent_script_path)
            _replace_in_file(
                opponent_script_path, OPPONENT_SCRIPT_REPLACE_STRINGS, [
                    WARNING_COMMENT, game_file_path,
                    opponent_strategy_output_file_path.split('/')[-1]
                ])

        for utility_estimation_method in UTILITY_ESTIMATION_METHODS:
            agent_name_method_name = ''
            if utility_estimation_method != UTILITY_ESTIMATION_METHODS[0]:
                agent_name_method_name = '-%s' % utility_estimation_method
            agent_script_path = '%s/%s%s.sh' % (
                strategies_directory, portfolio_name, agent_name_method_name)
            shutil.copy(BASE_AGENT_SCRIPT_PATH, agent_script_path)

            strategies_replacement = ''
            for i in range(portfolio_size):
                strategies_replacement += '        "${SCRIPT_DIR}/%s"' % response_strategy_file_names[
                    i]
                if i < (portfolio_size - 1):
                    strategies_replacement += ' \\\n'
            _replace_in_file(agent_script_path, AGENT_SCRIPT_REPLACE_STRINGS, [
                WARNING_COMMENT, game_file_path,
                '"%s"' % utility_estimation_method, strategies_replacement
            ])