Example #1
    def train_and_show_results(self, test_spec):
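        """Train a restricted Nash response with RnrParameterOptimizer and
        verify the result.

        Expected test_spec keys (inferred from usage below): 'game_file_path',
        'base_strategy_path', 'opponent' (a spec whose elements 1-3 describe
        the opponent tilt), 'exploitability' (target value) and 'max_delta'.
        """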
        game_file_path = test_spec['game_file_path']
        game = acpc.read_game_file(game_file_path)

        base_strategy, _ = read_strategy_from_file(
            game_file_path, test_spec['base_strategy_path'])

        opponent = test_spec['opponent']
        opponent_strategy = create_agent_strategy_from_trained_strategy(
            game_file_path, base_strategy, opponent[1], opponent[2],
            opponent[3])

        strategy, exploitability, p = RnrParameterOptimizer(game).train(
            opponent_strategy, test_spec['exploitability'],
            test_spec['max_delta'])

        self.assertIsNotNone(strategy)
        self.assertTrue(is_correct_strategy(strategy))
        print('Final exploitability is %s with p of %s' % (exploitability, p))

    def test_kuhn_action_minus_tilted_agent(self):
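        """A CALL/ADD -0.5 tilted Kuhn agent must differ from equilibrium
        and be strictly more exploitable than it."""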
        kuhn_equilibrium, _ = read_strategy_from_file(
            KUHN_POKER_GAME_FILE_PATH,
            'strategies/kuhn.limit.2p-equilibrium.strategy')

        game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
        exploitability = Exploitability(game)

        tilted_agent_strategy = create_agent_strategy_from_trained_strategy(
            KUHN_POKER_GAME_FILE_PATH, kuhn_equilibrium, Action.CALL,
            TiltType.ADD, -0.5)
        self.assertTrue(is_correct_strategy(tilted_agent_strategy))
        self.assertFalse(
            is_strategies_equal(kuhn_equilibrium, tilted_agent_strategy))

        equilibrium_exploitability = exploitability.evaluate(kuhn_equilibrium)
        call_add_tilted_exploitability = exploitability.evaluate(
            tilted_agent_strategy)
        self.assertGreater(call_add_tilted_exploitability,
                           equilibrium_exploitability)

    def train_and_show_results(self, test_spec):
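        """Train RNR responses against several tilted opponents and plot
        exploitability and utility-vs-opponent at training checkpoints.

        The figure is re-saved after every opponent, so partial results are
        available while later opponents are still training.
        """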
        game_file_path = test_spec['game_file_path']
        game = acpc.read_game_file(game_file_path)

        base_strategy, _ = read_strategy_from_file(
            game_file_path, test_spec['base_strategy_path'])

        agents = test_spec['opponent_tilt_types']
        num_agents = len(agents)

        game_name = game_file_path.split('/')[1][:-len('.game')]
        overwrite_figure = test_spec.get('overwrite_figure', False)
        figure_path = get_new_path(
            '%s/%s(it:%s-st:%s)' %
            (FIGURES_FOLDER, game_name, test_spec['training_iterations'],
             test_spec['checkpoint_iterations']), '.png', overwrite_figure)
        create_path_dirs(figure_path)

        exp = Exploitability(game)

        checkpoints_count = math.ceil(
            (test_spec['training_iterations'] - 700) /
            test_spec['checkpoint_iterations'])
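        # The 700 subtracted above appears to offset warm-up iterations run
        # before the first checkpoint (inferred from usage; not documented
        # in this file).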
        iteration_counts = np.zeros(checkpoints_count)
        exploitability_values = np.zeros([num_agents, checkpoints_count])
        vs_opponent_utility_values = np.zeros([num_agents, checkpoints_count])
        opponent_exploitability_values = np.zeros(num_agents)
        for i, agent in enumerate(agents):
            print('%s/%s' % (i + 1, num_agents))

            opponent_strategy = create_agent_strategy_from_trained_strategy(
                game_file_path, base_strategy, agent[0], agent[1], agent[2])

            self.assertTrue(is_correct_strategy(opponent_strategy))

            if test_spec.get('print_opponent_strategies', False):
                write_strategy_to_file(
                    opponent_strategy, '%s/%s.strategy' %
                    (os.path.dirname(figure_path), get_agent_name(agent)))

            if test_spec.get('print_best_responses', False):
                opponent_best_response = BestResponse(game).solve(
                    opponent_strategy)
                write_strategy_to_file(
                    opponent_best_response, '%s/%s-best_response.strategy' %
                    (os.path.dirname(figure_path), get_agent_name(agent)))

            if PLOT_OPPONENT_EXPLOITABILITY:
                opponent_exploitability = exp.evaluate(opponent_strategy)
                opponent_exploitability_values[i] = opponent_exploitability
                print('%s exploitability: %s' %
                      (get_agent_name(agent), opponent_exploitability))

            def checkpoint_callback(game_tree, checkpoint_index, iterations):
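                # Record the checkpoint's iteration count once (for the first
                # agent), then this strategy's exploitability and its utility
                # against the current opponent.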
                if i == 0:
                    iteration_counts[checkpoint_index] = iterations
                self.assertTrue(is_correct_strategy(game_tree))
                exploitability_values[i, checkpoint_index] = exp.evaluate(
                    game_tree)
                vs_opponent_utility_values[i, checkpoint_index] = exp.evaluate(
                    opponent_strategy, game_tree)

            rnr = RestrictedNashResponse(game, opponent_strategy, agent[3])
            rnr.train(test_spec['training_iterations'],
                      checkpoint_iterations=test_spec['checkpoint_iterations'],
                      checkpoint_callback=checkpoint_callback)

            if test_spec.get('print_response_strategies', False):
                write_strategy_to_file(
                    rnr.game_tree,
                    '%s-%s-p=%s.strategy' % (figure_path[:-len('.png')],
                                             get_agent_name(agent), agent[3]))

            print('Vs opponent value: %s' %
                  exp.evaluate(opponent_strategy, rnr.game_tree))
            print('Exploitability: %s' % exp.evaluate(rnr.game_tree))

            plt.figure(dpi=300)
            ax = plt.subplot(111)
            for j in range(i + 1):
                p = plt.plot(iteration_counts,
                             exploitability_values[j],
                             label='%s-p=%s exploitability' %
                             (get_agent_name(agents[j]), agents[j][3]),
                             linewidth=LINE_WIDTH)
                plt.plot(iteration_counts,
                         vs_opponent_utility_values[j],
                         '--',
                         label='Utility against opponent strategy',
                         color=p[0].get_color(),
                         linewidth=LINE_WIDTH)
                if PLOT_OPPONENT_EXPLOITABILITY:
                    plt.plot(iteration_counts,
                             np.ones(checkpoints_count) *
                             opponent_exploitability_values[j],
                             ':',
                             label='Opponent exploitability',
                             color=p[0].get_color(),
                             linewidth=LINE_WIDTH)

            plt.title(test_spec['title'])
            plt.xlabel('Training iterations')
            plt.ylabel('Strategy exploitability [mbb/g]')
            plt.grid()
            handles, labels = ax.get_legend_handles_labels()
            new_handles = []
            new_labels = []
            # Reorder legend entries so each legend column holds one plot
            # type across agents; fresh loop variables avoid shadowing the
            # outer agent index `i`.
            for plot_type_index in range(PLOT_COUNT_PER_AGENT):
                for handle_index in range(plot_type_index, len(handles),
                                           PLOT_COUNT_PER_AGENT):
                    new_handles += [handles[handle_index]]
                    new_labels += [labels[handle_index]]
            lgd = plt.legend(new_handles,
                             new_labels,
                             loc='upper center',
                             bbox_to_anchor=(0.5, -0.1),
                             ncol=PLOT_COUNT_PER_AGENT)

            plt.savefig(figure_path,
                        bbox_extra_artists=(lgd, ),
                        bbox_inches='tight')
            # Close the figure so repeated iterations do not accumulate open
            # matplotlib figures.
            plt.close()

        print('Figure written to %s' % figure_path)

    def run_evaluation(self, test_spec):
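        """Play ACPC matches between the base agent and each tilted
        validation agent, then tabulate utilities under each configured
        utility estimator.

        Match logs are cached on disk and regenerated only when missing or
        when test_spec['force_recreate_data'] is set.
        """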
        print()

        workspace_dir = os.getcwd()

        game_file_path = workspace_dir + '/' + test_spec['game_file_path']
        game = acpc.read_game_file(game_file_path)

        if game.get_num_players() != 2:
            raise AttributeError('Only games with 2 players are supported')

        test_name = test_spec['test_name']
        base_agent = test_spec['base_agent']
        validation_agents = test_spec['validation_agents']
        num_matches = test_spec['num_matches']
        num_match_hands = test_spec['num_match_hands']

        game_name = game_file_path.split('/')[-1][:-len('.game')]

        validation_agent_names = [
            _get_agent_name(agent) for agent in validation_agents
        ]

        test_directory = '%s/%s/%s' % (workspace_dir, FILES_PATH, test_name)
        agents_data_directories = []
        for validation_agent in validation_agents:
            agent_data_dir = '%s/%s-[%s;%s]-%sx%s' % (
                test_directory, game_name, base_agent[0],
                _get_agent_name(validation_agent), num_matches,
                num_match_hands)
            agents_data_directories += [agent_data_dir]

        force_recreate_data = test_spec.get('force_recreate_data', False)

        base_validation_agent_strategy = None

        validation_agent_strategies = []

        for agent_data_directory, validation_agent in zip(
                agents_data_directories, validation_agents):
            data_created = True
            if not force_recreate_data:
                if os.path.exists(agent_data_directory):
                    for i in range(num_matches):
                        match_dir = '%s/match_%s' % (agent_data_directory, i)
                        if not os.path.exists(match_dir) or len(
                                os.listdir(match_dir)) == 0:
                            data_created = False
                            break
                else:
                    data_created = False

            if base_validation_agent_strategy is None:
                base_validation_agent_strategy, _ = read_strategy_from_file(
                    game_file_path,
                    test_spec['base_validation_agents_strategy_path'])

            validation_agent_strategy = create_agent_strategy_from_trained_strategy(
                game_file_path, base_validation_agent_strategy,
                validation_agent[0], validation_agent[1], validation_agent[2])

            validation_agent_strategies += [validation_agent_strategy]

            if not data_created or force_recreate_data:
                if os.path.exists(agent_data_directory):
                    shutil.rmtree(agent_data_directory)

                validation_agent_strategy_path = '%s/%s.strategy' % (
                    test_directory, _get_agent_name(validation_agent))

                write_strategy_to_file(validation_agent_strategy,
                                       validation_agent_strategy_path)

                for i in range(num_matches):
                    match_data_dir = '%s/match_%s' % (agent_data_directory, i)
                    os.makedirs(match_data_dir, exist_ok=True)

                    seed = int(datetime.now().timestamp())
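                    # Reusing this seed for both the normal and the reversed
                    # match duplicates the card sequence across seatings,
                    # which reduces variance in the utility estimates.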

                    env = os.environ.copy()
                    env['PATH'] = os.path.dirname(
                        sys.executable) + ':' + env['PATH']

                    proc = subprocess.Popen([
                        MATCH_SCRIPT,
                        '%s/normal' % match_data_dir,
                        game_file_path,
                        str(num_match_hands),
                        str(seed),
                        base_agent[0],
                        _get_agent_name(validation_agent),
                    ],
                                            cwd=ACPC_INFRASTRUCTURE_DIR,
                                            env=env,
                                            stdout=subprocess.PIPE)
                    ports_string = proc.stdout.readline().decode(
                        'utf-8').strip()
                    ports = ports_string.split(' ')

                    args = [
                        (game_file_path, ports[0], base_agent[1]),
                        (game_file_path, ports[1],
                         validation_agent_strategy_path),
                    ]

                    with multiprocessing.Pool(2) as p:
                        p.map(_run_agent, args)

                    proc = subprocess.Popen([
                        MATCH_SCRIPT,
                        '%s/reversed' % match_data_dir,
                        game_file_path,
                        str(num_match_hands),
                        str(seed),
                        _get_agent_name(validation_agent),
                        base_agent[0],
                    ],
                                            cwd=ACPC_INFRASTRUCTURE_DIR,
                                            env=env,
                                            stdout=subprocess.PIPE)
                    ports_string = proc.stdout.readline().decode(
                        'utf-8').strip()
                    ports = ports_string.split(' ')

                    args = [
                        (game_file_path, ports[0],
                         validation_agent_strategy_path),
                        (game_file_path, ports[1], base_agent[1]),
                    ]

                    with multiprocessing.Pool(2) as p:
                        p.map(_run_agent, args)

                print('Data created')

        output = []

        def prin(string=''):
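            # Echo the line to stdout and buffer it so the full report can
            # be written to the output log file at the end.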
            nonlocal output
            output += [string]
            print(string)

        utility_estimators = test_spec['utility_estimators']

        agents_log_files_paths = []
        for agents_data_directory in agents_data_directories:
            log_file_paths = []
            for i in range(num_matches):
                log_file_paths += [
                    '%s/match_%s/normal.log' % (agents_data_directory, i),
                    '%s/match_%s/reversed.log' % (agents_data_directory, i),
                ]
            agents_log_files_paths += [log_file_paths]

        agent_strategies = dict(
            zip(validation_agent_names, validation_agent_strategies))

        prin(
            'Cell contains utility of row player based on observation of column player'
        )
        for utility_estimator_spec in utility_estimators:
            utility_estimator_name = utility_estimator_spec[0]
            utility_estimator_class = utility_estimator_spec[1]
            utility_estimator_instance = None
            if utility_estimator_class is not None:
                # An optional third element of the spec carries extra
                # constructor keyword arguments.
                utility_estimator_args = (utility_estimator_spec[2]
                                          if len(utility_estimator_spec) > 2
                                          else {})
                utility_estimator_instance = utility_estimator_class(
                    game, False, **utility_estimator_args)

            prin()
            prin('%s (mean | SD)' % utility_estimator_name)

            output_table = [[None] * (len(validation_agents) + 1)
                            for _ in range(len(validation_agents))]
            for i, agent_name in enumerate(validation_agent_names):
                output_table[i][0] = agent_name
            for x in range(len(validation_agents)):
                log_readings = [
                    get_player_utilities_from_log_file(
                        log_file_path,
                        game_file_path=game_file_path,
                        utility_estimator=utility_estimator_instance,
                        player_strategies=agent_strategies,
                        evaluated_strategies=validation_agent_strategies)
                    for log_file_path in agents_log_files_paths[x]
                ]

                data, player_names = get_logs_data(*log_readings)
                means = np.mean(data, axis=0)
                stds = np.std(data, axis=0)

                player_index = player_names.index(validation_agent_names[x])
                for y in range(len(validation_agents)):
                    output_table[y][x + 1] = '%s | %s' % (
                        means[player_index][y], stds[player_index][y])

            prin(
                tabulate(output_table,
                         headers=validation_agent_names,
                         tablefmt='grid'))

        prin()
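        # `data` still holds the readings from the last agent processed
        # above; every agent is expected to play the same number of hands.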
        prin('Total num hands: %s' % data.shape[0])

        output_log_path = get_new_path(
            '%s/output-%sx%s' % (test_directory, num_matches, num_match_hands),
            '.log')
        with open(output_log_path, 'w') as file:
            for line in output:
                file.write(line + '\n')

    def create_agents_and_plot_exploitabilities(self, test_spec):
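        """Plot tilted-agent exploitability against tilt probability and,
        optionally, a pairwise agent-vs-agent utility heatmap."""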
        base_strategy, _ = read_strategy_from_file(
            test_spec['game_file_path'],
            test_spec['base_strategy_path'])

        game = acpc.read_game_file(test_spec['game_file_path'])
        exploitability = Exploitability(game)

        plot_equilibrium = test_spec.get('plot_equilibrium', True)
        if plot_equilibrium:
            equilibrium_exploitability = exploitability.evaluate(base_strategy)

        tilt_probabilities = test_spec['tilt_probabilities']
        exploitability_values = np.zeros([len(TILT_TYPES), len(tilt_probabilities)])

        plot_exploitabilities = test_spec.get('plot_exploitabilities', True)
        if plot_exploitabilities:
            for i, tilt_type in enumerate(TILT_TYPES):
                for j, tilt_probability in enumerate(tilt_probabilities):
                    tilted_agent = create_agent_strategy_from_trained_strategy(
                        test_spec['game_file_path'],
                        base_strategy,
                        tilt_type[1],
                        tilt_type[2],
                        tilt_probability)
                    exploitability_values[i, j] = exploitability.evaluate(tilted_agent)

                plt.figure(dpi=160)
                for j in range(i + 1):
                    plt.plot(
                        tilt_probabilities,
                        exploitability_values[j],
                        label=TILT_TYPES[j][0],
                        linewidth=0.8)

                if plot_equilibrium:
                    plt.plot(
                        tilt_probabilities,
                        [equilibrium_exploitability] * len(tilt_probabilities),
                        'r--',
                        label='Equilibrium',
                        linewidth=1.5)

                # plt.title(test_spec['title'])
                plt.xlabel('Tilt amount')
                plt.ylabel('Agent exploitability [mbb/g]')
                plt.grid()
                plt.legend()

                figure_output_path = '%s/%s.png' % (FIGURES_FOLDER, test_spec['figure_filename'])

                figures_directory = os.path.dirname(figure_output_path)
                os.makedirs(figures_directory, exist_ok=True)

                plt.savefig(figure_output_path)
                # Close so successive tilt types do not accumulate open
                # figures.
                plt.close()

        plot_agent_comparison = test_spec.get('plot_agent_comparison', False)
        if plot_agent_comparison:
            agents_strategies = []
            agent_names = []
            for i, tilt_type in enumerate(TILT_TYPES):
                for j, tilt_probability in enumerate(tilt_probabilities):
                    agent_names += ['%s %s %s' %
                                    (str(tilt_type[1]).split('.')[1],
                                     str(tilt_type[2]).split('.')[1],
                                     tilt_probability)]
                    agents_strategies += [create_agent_strategy_from_trained_strategy(
                        test_spec['game_file_path'],
                        base_strategy,
                        tilt_type[1],
                        tilt_type[2],
                        tilt_probability)]

            num_agents = len(agent_names)
            scores_table = np.zeros([num_agents, num_agents])

            # Number of (i, j) pairs with j >= i.
            num_comparisons = num_agents * (num_agents + 1) // 2

            with tqdm(total=num_comparisons) as pbar:
                for i in range(num_agents):
                    for j in range(i, num_agents):
                        scores_table[i, j] = exploitability.evaluate(agents_strategies[j], agents_strategies[i])
                        scores_table[j, i] = -scores_table[i, j]
                        pbar.update(1)

            max_score = scores_table.max()
            min_score = scores_table.min()

            # plt.figure(dpi=160)
            fig, ax = plt.subplots()

            cax = plt.imshow(scores_table, cmap=plt.cm.RdYlGn)
            plt.xticks(np.arange(num_agents), agent_names)
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45, ha="right", rotation_mode="anchor")
            plt.yticks(np.arange(num_agents), agent_names)
            # plt.yticks(rotation=35)

            # plt.tick_params(
            #     axis='x',
            #     which='both',
            #     bottom=False,
            #     top=False,
            #     labelbottom=False)

            cbar = fig.colorbar(cax, ticks=[min_score, 0, max_score])
            cbar.ax.set_yticklabels([round(min_score), '0', round(max_score)])

            plt.tight_layout()
            plt.gcf().subplots_adjust(left=0.1)

            figure_output_path = '%s/%s-comparison.png' % (FIGURES_FOLDER, test_spec['figure_filename'])

            figures_directory = os.path.dirname(figure_output_path)
            os.makedirs(figures_directory, exist_ok=True)

            plt.savefig(figure_output_path, dpi=160)

Example #6
    def train_and_show_results(self, test_spec):
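        """Build a portfolio of restricted Nash responses to tilted opponents.

        Responses are trained (or loaded from cached .strategy files), the
        portfolio is pruned with optimize_portfolio, and strategy files plus
        ACPC shell scripts are written for the surviving responses.
        """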
        game_file_path = test_spec['game_file_path']
        portfolio_name = test_spec['portfolio_name']
        agent_specs = test_spec['opponent_tilt_types']

        if not _check_agent_names_unique(agent_specs):
            raise AttributeError(
                'Agents must be unique so that they have unique names')

        strategies_directory_base = '%s/%s' % (TEST_OUTPUT_DIRECTORY,
                                               portfolio_name)
        strategies_directory = strategies_directory_base
        if not test_spec.get('overwrite_portfolio_path', False):
            counter = 1
            while os.path.exists(strategies_directory):
                strategies_directory = '%s(%s)' % (strategies_directory_base,
                                                   counter)
                counter += 1
        os.makedirs(strategies_directory, exist_ok=True)

        game = acpc.read_game_file(game_file_path)
        exp = Exploitability(game)

        # Delete results since they will be generated again
        for file in os.listdir(strategies_directory):
            absolute_path = '/'.join([strategies_directory, file])
            if os.path.isfile(absolute_path):
                os.remove(absolute_path)

        base_strategy, _ = read_strategy_from_file(
            game_file_path, test_spec['base_strategy_path'])

        num_opponents = len(agent_specs)
        opponents = []
        for agent in agent_specs:
            opponent_strategy = create_agent_strategy_from_trained_strategy(
                game_file_path, base_strategy, agent[0], agent[1], agent[2])
            opponents += [opponent_strategy]

        parallel = test_spec.get('parallel', False)

        response_paths = [
            '%s/responses/%s-response.strategy' %
            (strategies_directory, _get_agent_name(agent))
            for agent in agent_specs
        ]

        opponent_responses = [None] * num_opponents
        responses_to_train_indices = []
        responses_to_train_opponents = []
        responses_to_train_params = []
        for i in range(num_opponents):
            if os.path.exists(response_paths[i]):
                response_strategy, _ = read_strategy_from_file(
                    game_file_path, response_paths[i])
                opponent_responses[i] = response_strategy
            else:
                responses_to_train_indices += [i]
                responses_to_train_opponents += [opponents[i]]
                responses_to_train_params += [agent_specs[i][3]]

        def on_response_trained(response_index, response_strategy):
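            # Persist each response as soon as it finishes training, with
            # exploitability statistics recorded in the strategy file.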
            output_file_path = response_paths[
                responses_to_train_indices[response_index]]
            output_file_dir = os.path.dirname(output_file_path)
            if not os.path.exists(output_file_dir):
                os.makedirs(output_file_dir)

            # Map the callback index back to the corresponding opponent, the
            # same way output_file_path is resolved above.
            opponent_strategy = responses_to_train_opponents[response_index]
            opponent_exploitability = exp.evaluate(opponent_strategy)
            response_exploitability = exp.evaluate(response_strategy)
            response_utility_vs_opponent = exp.evaluate(
                opponent_strategy, response_strategy)

            write_strategy_to_file(response_strategy, output_file_path, [
                'Opponent exploitability: %s' % opponent_exploitability,
                'Response exploitability: %s' % response_exploitability,
                'Response value vs opponent: %s' %
                response_utility_vs_opponent,
            ])

        print('%s responses need to be trained' %
              len(responses_to_train_opponents))

        responses_to_train_strategies = train_portfolio_responses(
            game_file_path,
            responses_to_train_opponents,
            responses_to_train_params,
            log=True,
            parallel=parallel,
            callback=on_response_trained)

        for i, j in enumerate(responses_to_train_indices):
            opponent_responses[j] = responses_to_train_strategies[i]

        # Forward the optional cut-improvement threshold only when present.
        optimize_kwargs = {}
        if 'portfolio_cut_improvement_threshold' in test_spec:
            optimize_kwargs['portfolio_cut_improvement_threshold'] = test_spec[
                'portfolio_cut_improvement_threshold']
        portfolio_strategies, response_indices = optimize_portfolio(
            game_file_path,
            opponents,
            opponent_responses,
            log=True,
            output_directory=strategies_directory,
            **optimize_kwargs)

        portfolio_size = len(portfolio_strategies)

        # Index the original spec tuples directly; np.take would coerce the
        # mixed-type tuples into a numpy array.
        agent_names = [
            _get_agent_name(agent_specs[index]) for index in response_indices
        ]

        print()
        for a in agent_specs:
            print(_get_agent_name(a))

        response_strategy_file_names = []
        for i, strategy in enumerate(portfolio_strategies):
            agent_name = agent_names[i]

            opponent_strategy = opponents[response_indices[i]]
            opponent_exploitability = exp.evaluate(opponent_strategy)
            response_exploitability = exp.evaluate(strategy)
            response_utility_vs_opponent = exp.evaluate(
                opponent_strategy, strategy)

            # Save portfolio response strategy
            response_strategy_output_file_path = '%s/%s-response.strategy' % (
                strategies_directory, agent_name)
            response_strategy_file_names += [
                response_strategy_output_file_path.split('/')[-1]
            ]
            write_strategy_to_file(
                strategy, response_strategy_output_file_path, [
                    'Opponent exploitability: %s' % opponent_exploitability,
                    'Response exploitability: %s' % response_exploitability,
                    'Response value vs opponent: %s' %
                    response_utility_vs_opponent,
                ])

            # Save opponent strategy
            opponent_strategy_file_name = '%s-opponent.strategy' % agent_name
            opponent_strategy_output_file_path = '%s/%s' % (
                strategies_directory, opponent_strategy_file_name)
            write_strategy_to_file(opponent_strategy,
                                   opponent_strategy_output_file_path)

            # Generate opponent ACPC script
            opponent_script_path = '%s/%s.sh' % (strategies_directory,
                                                 agent_name)
            shutil.copy(BASE_OPPONENT_SCRIPT_PATH, opponent_script_path)
            _replace_in_file(
                opponent_script_path, OPPONENT_SCRIPT_REPLACE_STRINGS, [
                    WARNING_COMMENT, game_file_path,
                    opponent_strategy_output_file_path.split('/')[-1]
                ])

        for utility_estimation_method in UTILITY_ESTIMATION_METHODS:
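            # One agent script is generated per utility estimation method;
            # the default (first) method keeps the plain portfolio name.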
            agent_name_method_name = (
                '' if utility_estimation_method == UTILITY_ESTIMATION_METHODS[0]
                else '-%s' % utility_estimation_method)
            agent_script_path = '%s/%s%s.sh' % (
                strategies_directory, portfolio_name, agent_name_method_name)
            shutil.copy(BASE_AGENT_SCRIPT_PATH, agent_script_path)

            strategies_replacement = ''
            for i in range(portfolio_size):
                strategies_replacement += ('        "${SCRIPT_DIR}/%s"' %
                                           response_strategy_file_names[i])
                if i < (portfolio_size - 1):
                    strategies_replacement += ' \\\n'
            _replace_in_file(agent_script_path, AGENT_SCRIPT_REPLACE_STRINGS, [
                WARNING_COMMENT, game_file_path,
                '"%s"' % utility_estimation_method, strategies_replacement
            ])