Example #1
def _writeout_game_results(self, path: str,
                           match_results: List[MatchResult]):
    data = AutoMatchResult()
    data.rule = self.rule
    data.engine_config_list = self.engine_config_list
    data.match_results = match_results
    yaml_dump(data, path)

def run_evaluation(self, log_prefix: str, kifus: List[str],
                   cleanup_moves: int) -> Tuple[List[str], float]:
    self.engine_handles = []
    bestmoves = []
    self._log_file = open(log_prefix + ".log", "a")
    cleanup_ctr = 0
    count = 0
    correct = 0
    try:
        for i, ec in enumerate(self.engine_config_list):
            self._log(f"Initializing engine {i}")
            self.engine_handles.append(self._exec_engine(ec))
            self._init_engine(i, ec)
            self._isready_engine(i)
            self._engine_write(i, "usinewgame")
        for kifu in kifus:
            pred_move, gt_move = self._run_single_evaluation(kifu)
            bestmoves.append(pred_move)
            count += 1
            if pred_move == gt_move:
                correct += 1
            cleanup_ctr += 1
            if cleanup_ctr >= cleanup_moves:
                # Reset after thinking for cleanup_moves moves so the
                # transposition table does not fill up.
                self._engine_write(0, "gameover win")
                self._isready_engine(0)
                self._engine_write(0, "usinewgame")
                cleanup_ctr = 0
        for i in range(len(self.engine_config_list)):
            self._log(f"Closing engine {i}")
            self._quit_engine(i)
        self._log("Finished task")
    except Exception as ex:
        self._log(f"Exception: {ex}")
        raise
    finally:
        self._log_file.close()
        self._log_file = None
        # Guard against division by zero when no kifu was evaluated.
        accuracy = correct / count if count else 0.0
        yaml_dump(
            {
                "bestmoves": bestmoves,
                "correct": correct,
                "accuracy": accuracy
            }, log_prefix + ".yaml")
    return bestmoves, accuracy
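
run_evaluation writes a summary with bestmoves, correct, and accuracy to log_prefix + ".yaml". For reference, a minimal sketch (not part of the original project) of reading that summary back, assuming yaml_dump produces plain YAML that PyYAML's safe_load can parse; the prefix used is hypothetical:

import yaml

def load_evaluation_summary(log_prefix: str) -> dict:
    # Returns the {"bestmoves", "correct", "accuracy"} mapping written above.
    with open(log_prefix + ".yaml", encoding="utf-8") as f:
        return yaml.safe_load(f)

# e.g. load_evaluation_summary("next_move_evaluation")["accuracy"]
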
def call_evaluation(engine_config, out_dir, kifu_path):
    # Write the config file and logs into a subdirectory
    trial_dir = os.path.join(out_dir, str(uuid.uuid4()))
    os.makedirs(trial_dir)
    trial_engine_config_path = os.path.join(trial_dir, "engine.yaml")
    yaml_dump(engine_config, trial_engine_config_path)
    stdout = subprocess.check_output([
        "python", "-m", "neneshogi.auto_match.next_move_evaluation", kifu_path,
        trial_engine_config_path, "--log_prefix",
        os.path.join(trial_dir, "next_move_evaluation")
    ])
    accuracy = None
    for line in stdout.decode("utf-8").splitlines():
        if line.startswith("accuracy:"):
            accuracy_str = line[len("accuracy:"):].strip()
            accuracy = float(accuracy_str)
    assert accuracy is not None
    return accuracy
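
call_evaluation spawns next_move_evaluation in a subprocess and scrapes the accuracy from its stdout. As an illustration only, a sketch of how several candidate engine configs could be scored with it and the best one kept; it relies on call_evaluation as defined above, and the argument names are hypothetical:

def pick_best_config(engine_configs, out_dir, kifu_path):
    # Evaluate each candidate config and keep the one with the best accuracy.
    best_config, best_accuracy = None, -1.0
    for config in engine_configs:
        accuracy = call_evaluation(config, out_dir, kifu_path)
        if accuracy > best_accuracy:
            best_config, best_accuracy = config, accuracy
    return best_config, best_accuracy
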
Example #4
def iter_match(rule: Rule, base_engines_rates, target_config, count: int, dst: str):
    os.makedirs(dst)
    cur_base = len(base_engines_rates) // 2  # start from the middle of the rating ladder
    vs_results = []
    for i in range(count):
        print(f"iter {i}: match to rate {base_engines_rates[cur_base]['rate']}")
        wins = match(rule, base_engines_rates[cur_base]["config"], target_config, os.path.join(dst, f"am_{i}"))
        print(f"base:target={wins[0]}:{wins[1]}")
        # Note that wins is ordered base:target
        vs_results.append({"base_idx": cur_base, "base_rate": base_engines_rates[cur_base]['rate'], "wins": wins})

        # Step to an adjacent base engine depending on which side won,
        # clamping to the ends of the ladder.
        if wins[0] > wins[1]:
            cur_base -= 1
        elif wins[0] < wins[1]:
            cur_base += 1
        cur_base = max(0, min(len(base_engines_rates) - 1, cur_base))
        # Save after every iteration in case of a crash
        yaml_dump(vs_results, os.path.join(dst, "multi_measure_result.yaml"))
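
iter_match walks up and down a ladder of base engines with known rates, so the rungs it visits hint at the target's strength. As an illustration only (not the project's method), a sketch that averages the base rates visited after the first few iterations, assuming multi_measure_result.yaml is plain YAML loadable with PyYAML:

import os
import yaml

def estimate_rate(dst: str, skip: int = 3) -> float:
    # Average the base rates visited after the first few iterations, on the
    # assumption that the ladder has roughly settled around the target's level.
    with open(os.path.join(dst, "multi_measure_result.yaml"), encoding="utf-8") as f:
        vs_results = yaml.safe_load(f)
    visited = vs_results[skip:] or vs_results
    return sum(r["base_rate"] for r in visited) / len(visited)
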
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("data_dir")
    parser.add_argument("--n_configs", type=int, default=20)
    args = parser.parse_args()
    data_dir = args.data_dir
    assert os.path.exists(f"{data_dir}/config/rule.yaml")
    # Evaluate n_configs randomly generated target engine configurations.
    for i in range(args.n_configs):
        run_dir = generate_run_dir(args.data_dir)
        print(run_dir)
        rule_file_path = f"{run_dir}/rule.yaml"
        shutil.copy(f"{data_dir}/config/rule.yaml", rule_file_path)
        config_target_path = f"{run_dir}/engine_target.yaml"
        config_base = util.yaml_load(f"{data_dir}/config/engine_base.yaml")
        config_target = util.yaml_load(f"{data_dir}/config/engine_target.yaml")
        generate_random_options(config_target)
        print(config_target)
        util.yaml_dump(config_target, config_target_path)

        # Match the target against progressively stronger base engines
        # until it drops below a 50% win rate.
        for strength in range(4):
            print("strength", strength)
            config_base_path = f"{run_dir}/engine_base_{strength}.yaml"
            util.yaml_dump(config_base, config_base_path)
            output_prefix = f"{run_dir}/result_{strength}"
            cmd = [
                "python", "-m", "neneshogi.auto_match", rule_file_path,
                config_base_path, config_target_path, "--log_prefix",
                output_prefix
            ]
            subprocess.check_call(cmd)

            win_rate = get_win_rate(output_prefix + ".yaml")
            print("win_rate", win_rate)
            if win_rate >= 0.5:
                increment_base_strength(config_base)
            else:
                break
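
After the strength loop ends, the result_{strength}.yaml files record how far the target climbed. As an illustration only, a sketch that replays the same >= 0.5 check to list the strengths the target beat; it assumes os and get_win_rate are in scope, exactly as they are in main() above:

def beaten_strengths(run_dir: str, max_strength: int = 4):
    # Walk the result files in order and stop at the first strength the
    # target failed to beat, mirroring the loop in main() above.
    beaten = []
    for strength in range(max_strength):
        result_path = f"{run_dir}/result_{strength}.yaml"
        if not os.path.exists(result_path):
            break
        if get_win_rate(result_path) >= 0.5:
            beaten.append(strength)
        else:
            break
    return beaten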