# NOTE(review): tail of a plotting routine -- `dataframe`, `file_template`,
# `plt`, `np`, `main_wrapper` and `get_overlap_for_multiple_game_states` are
# defined above, outside this chunk.

# Print summary statistics for the collected data.
print(dataframe.describe())

# Generate plots: a scatter plot with a linear-regression overlay on top and
# a rank histogram below.
fig, axes = plt.subplots(nrows=2, ncols=1, squeeze=False)
scatter_ax = axes[0, 0]
hist_ax = axes[1, 0]
dataframe.plot.scatter(x="max_visits", y="heuristic_visits", alpha=0.02,
                       ax=scatter_ax)
# Fit a degree-1 polynomial and overlay the regression line in red.
trend = np.poly1d(np.polyfit(dataframe["max_visits"],
                             dataframe["heuristic_visits"], 1))
dataframe["reg"] = trend(dataframe["max_visits"])
scatter_ax.plot(dataframe.max_visits, dataframe.reg, color="r")
hist_ax.hist(dataframe.heuristic_rank, bins=list(range(8)), density=True,
             rwidth=0.9, align="left")
hist_ax.set_xlabel("Rank of the action deemed best by the HeuristicPlayer")
hist_ax.set_ylabel("Fraction of nodes")
fig.set_size_inches(5, 5)
plt.suptitle("Overlap between Heuristic and Mcts")
plt.tight_layout()
plt.savefig(f"{file_template}.png")


if __name__ == "__main__":
  main_wrapper(get_overlap_for_multiple_game_states)
# NOTE(review): up to plt.savefig() this is the tail of a per-scenario
# plotting helper -- `i`, `scenario`, `scenario_df`, `axes` and `csv_path`
# come from an enclosing loop/function outside this chunk.
axes[i, 0].legend(loc=0)
# Dashed horizontal reference lines at each third of the [-1, 1] score range.
axes[i, 0].hlines([1, 0.66, 0.33, 0, -0.33, -0.66, -1], xmin=0,
                  xmax=max(scenario_df.permutations), color="k", alpha=0.25,
                  linestyles="--")
axes[i, 0].set_ylim(min(scenario_df.score) - 0.05,
                    max(scenario_df.score) + 0.05)
axes[i, 1].set_title(scenario)
axes[i, 0].set_xscale("log")
axes[i, 1].set_xscale("log")
png_path = csv_path.replace(".csv", ".png")
plt.tight_layout()
plt.savefig(png_path)


def main():
  """Generates the data, then plots it (see _generate_data/_plot_results)."""
  options = MctsPlayerOptions(
    max_iterations=667, max_permutations=150, save_rewards=True,
    merge_scoring_info_func=lower_ci_bound_on_raw_rewards)
  _generate_data(options)
  _plot_results(options)


if __name__ == "__main__":
  main_wrapper(main)
def test_success(self):
  """main_wrapper() must return 0 when the wrapped callable does not raise."""
  exit_code = main_wrapper(lambda: print("Success"))
  self.assertEqual(0, exit_code)
def test_exception(self):
  """main_wrapper() must return 1 when the wrapped callable raises.

  10 / 0 raises ZeroDivisionError before print() is ever called.
  """
  exit_code = main_wrapper(lambda: print(10 / 0))
  self.assertEqual(1, exit_code)
# NOTE(review): the block below is the tail of a reporting function --
# `num_actions` and `sum_scores` are per-player accumulators defined above,
# outside this chunk.
print("Overall performance:")
# Guard against division by zero for a player that performed no actions.
if num_actions.one != 0:
  print(f"\tONE:\t{sum_scores.one / num_actions.one:.5f}")
if num_actions.two != 0:
  print(f"\tTWO:\t{sum_scores.two / num_actions.two:.5f}")


def _main():
  """Loads an autosaved game from disk and prints its evaluation results."""
  # Alternative entry point kept for debugging: evaluate a whole pickled
  # bummerl instead of a single game.
  # filename = "../autosave_bummerl.pickle"
  # with open(filename, "rb") as input_file:
  #   bummerl = pickle.load(input_file)
  # results = evaluate_bummerl(bummerl)
  # print_eval_results(results, None)
  options = MctsPlayerOptions(
    num_processes=1, max_permutations=150, max_iterations=667,
    merge_scoring_info_func=average_score_with_tiebreakers)
  players = PlayerPair(CythonMctsPlayer(PlayerId.ONE, False, options),
                       CythonMctsPlayer(PlayerId.TWO, False, options))
  bummerl_score = PlayerPair(0, 0)
  # NOTE: pickle.load() is only safe on trusted, locally produced files.
  with open("../autosave_game.pickle", "rb") as input_file:
    game = pickle.load(input_file)
  print_eval_results(evaluate_game(game, players, bummerl_score, "0", "0"),
                     None)


if __name__ == "__main__":
  main_wrapper(_main)
r"kivy.*setter", r"ui\..*(Widget|Layout|ScoreView)\.bind", r"ui\..*(Widget|Layout)\.dispatch", r"ui\..*(Widget|Layout)\.fbind", r"ui\..*(Widget|Layout)\.register_event_type", r"ui\..*(Widget|Layout)\.setter", r"ui\.game_options\.GameOptions\..*", ] extension_pkgs = [ "ai.cython_mcts_player.mcts_debug", "ai.cython_mcts_player.player", ] pylint_opts = [ "--indent-string=' '", "--ignore-imports=yes", "--generated-members=" + ",".join(generated_members), "-j 0", # Run in parallel on all available processors "--disable=" + ",".join(disabled_checks), "--good-names=i,j,k,q,n,ex,Run,_,ax", "--extension-pkg-allow-list=" + ",".join(extension_pkgs) ] # Call pylint in a subprocess since it's licensed under GPL. Do not import it. cmd = [sys.executable, "-m", "pylint" ] + pylint_opts + get_all_python_files() subprocess.run(cmd, check=True) if __name__ == "__main__": sys.exit(main_wrapper(run_pylint))
[action for action in game.actions if action.player_id == PlayerId.ONE]) action_counter = 0 fig, ax = plt.subplots(nrows=num_actions, ncols=2, squeeze=False) for action in game.actions: if action.player_id == PlayerId.ONE: # cheater = False dataframe = run_mcts_player_step_by_step(game_state.next_player_view(), options, iterations_step=100, game_points=game_points) _plot_data(dataframe, "score", ax[action_counter, 0], _hlines_for_scores) # cheater = True dataframe = run_mcts_player_step_by_step(game_state, options, iterations_step=100, game_points=game_points) _plot_data(dataframe, "score", ax[action_counter, 1], _hlines_for_scores) action_counter += 1 game_state = action.execute(game_state) fig.set_size_inches(20, 5 * num_actions) fig.suptitle(f"Debug game: dealer={game.dealer}, seed={game.seed}") plt.tight_layout() plt.savefig("debug_game.png") if __name__ == "__main__": main_wrapper( lambda: debug_game("bummerl_4180_2_done.pickle", 5, MctsPlayerOptions(max_iterations=667 * 4, max_permutations=150, save_rewards=False)))
# NOTE(review): tail of iterations_for_closing_the_talon() -- the function
# header and _min_iteration_to_fully_simulate_closing_the_talon are defined
# above, outside this chunk.
options = MctsPlayerOptions(num_processes=1, max_iterations=10000)
num_seeds = 1000
# Run the per-seed simulations on 4 worker processes.
worker = functools.partial(
  _min_iteration_to_fully_simulate_closing_the_talon, options=options,
  after_n_tricks=5)
with multiprocessing.Pool(processes=4) as pool:
  data = pool.map(worker, list(range(num_seeds)))
dataframe = DataFrame(data, columns=["seed", "iteration"])
filename_template = os.path.join(os.path.dirname(__file__), "data",
                                 "iterations_for_closing_the_talon")
# noinspection PyTypeChecker
dataframe.to_csv(f"{filename_template}.csv", index=False)
dataframe.iteration.hist()
print(dataframe.iteration.describe())
# A null iteration means closing the talon was never fully simulated for
# that seed.
num_not_fully_simulated = len(dataframe[dataframe.iteration.isnull()])
not_fully_simulated_pct = 100.0 * num_not_fully_simulated / num_seeds
plt.title("In %.0f%% of the cases (%s out of %s),\n"
          "closing the talon was not fully simulated." %
          (not_fully_simulated_pct, num_not_fully_simulated, num_seeds))
plt.suptitle("Iterations required to fully simulate closing the talon")
plt.xlabel("Iterations")
plt.tight_layout()
plt.savefig(f"{filename_template}.png")


if __name__ == "__main__":
  main_wrapper(iterations_for_closing_the_talon)
# NOTE(review): tail of a coverage-enabled test runner -- `folders`,
# `tests_file_pattern` and `cov` (presumably a coverage.Coverage instance --
# verify above) are defined outside this chunk.
# Discover and run all tests; bail out before reporting if any test fails.
for folder in folders:
  test_suite = unittest.TestLoader().discover(folder,
                                              pattern=tests_file_pattern)
  result = unittest.runner.TextTestRunner().run(test_suite)
  if not result.wasSuccessful():
    print(result.errors)
    print(result.failures)
    print("\nTests failed. Coverage report will not be generated.")
    sys.exit(-1)
cov.stop()
cov.save()

# Generate the html report.
html_report_dir = "htmlcov"
cov.html_report(directory=html_report_dir)
# Build a file:// URL; split/join converts Windows backslashes to slashes.
html_path = "file:///%s/index.html" % "/".join(
  os.path.abspath(html_report_dir).split("\\"))

# Print a text report to stdout.
print()
cov.report(skip_empty=True, skip_covered=True, show_missing=True)
print("\nOutput saved to: %s" % html_path)


if __name__ == "__main__":
  sys.exit(main_wrapper(lambda: run_all_tests_with_coverage(sys.argv[1:])))