# Excerpt from the main simulation script: `evaluation`, `configuration`,
# `interactive` and `debug_memory` are defined earlier in that script.
for envId, env in enumerate(evaluation.envs):
    # # Plot histogram for rewards for that env
    # if do_plots and interactive:
    #     env.plotHistogram(evaluation.horizon * evaluation.repetitions)

    # (almost) unique hash from the configuration
    hashvalue = abs(hash((
        tuple(configuration.keys()),
        tuple([(len(k) if isinstance(k, (dict, tuple, list)) else k)
               for k in configuration.values()]),
    )))

    if debug_memory:
        start_tracemalloc()  # DEBUG

    # --- Also plotting the history of means
    if interactive:
        evaluation.plotHistoryOfMeans(envId)  # XXX To plot without saving

    # Evaluate just that env
    evaluation.startOneEnv(envId, env)

    # Display the final regrets and rankings for that env
    evaluation.printLastRegrets(envId)
    evaluation.printFinalRanking(envId)
    evaluation.printRunningTimes(envId)
    evaluation.printMemoryConsumption(envId)
    evaluation.printNumberOfCPDetections(envId)

    if debug_memory:
        display_top_tracemalloc()  # DEBUG

    # Sub folder with a useful name
    subfolder = "SP__K{}_T{}_N{}__{}_algos".format(
        env.nbArms, configuration['horizon'], configuration['repetitions'],
        # The original line was truncated after the third argument; the
        # number of compared algorithms is an assumed completion.
        len(configuration['policies']),
    )
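# NOTE: start_tracemalloc() and display_top_tracemalloc() are not defined in
# this excerpt. Below is a minimal sketch of what such debug helpers could
# look like, built on the standard-library `tracemalloc` module; the function
# bodies here are assumptions for illustration, not the project's actual
# implementation.

import tracemalloc

def start_tracemalloc():
    """Start tracing Python memory allocations (assumed helper)."""
    tracemalloc.start()
    print("Started tracing memory allocations with tracemalloc ...")

def display_top_tracemalloc(limit=10):
    """Snapshot current allocations and print the `limit` source lines
    that allocated the most memory (assumed helper)."""
    snapshot = tracemalloc.take_snapshot()
    top_stats = snapshot.statistics('lineno')
    print("Top {} lines by memory allocation:".format(limit))
    for stat in top_stats[:limit]:
        print(stat)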
# Excerpt from an example main script for single-player simulations.
import sys
from SMPyBandits.Environment import Evaluator

# Choose a configuration file. The excerpt started in the middle of a
# conditional import; this guard is a reconstruction.
if 'very_simple_configuration' in sys.argv:
    from very_simple_configuration import configuration
else:
    from example_of_configuration_singleplayer import configuration

configuration['showplot'] = True

evaluation = Evaluator(configuration)

# Start the evaluation, for each environment
for envId, env in enumerate(evaluation.envs):
    # Evaluate just that env
    evaluation.startOneEnv(envId, env)

# Then compare the policies, for each environment
for envId, env in enumerate(evaluation.envs):
    evaluation.plotHistoryOfMeans(envId)  # XXX To plot without saving

    print("\nGiving the full vector of final regrets ...")
    evaluation.printLastRegrets(envId)
    print("\nGiving the final ranking ...")
    evaluation.printFinalRanking(envId)

    print("\n\n- Plotting the last regrets ...")
    evaluation.plotLastRegrets(envId, boxplot=True)

    print("\nGiving the mean and std running times ...")
    evaluation.printRunningTimes(envId)
    evaluation.plotRunningTimes(envId)

    print("\nGiving the mean and std memory consumption ...")
    evaluation.printMemoryConsumption(envId)
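# For reference, the `configuration` object imported above is a plain dict.
# A minimal sketch of what a configuration file such as
# very_simple_configuration.py could contain is shown below; the arm means,
# policy choices and parameter values are illustrative assumptions, not the
# actual file's contents.

from SMPyBandits.Arms import Bernoulli
from SMPyBandits.Policies import UCB, Thompson

configuration = {
    "horizon": 1000,      # Finite horizon of each simulation
    "repetitions": 10,    # Number of independent repetitions to average over
    "n_jobs": 1,          # Number of parallel jobs (-1 to use all CPU cores)
    "verbosity": 6,       # Verbosity of the Evaluator's output
    # One Bernoulli bandit problem, with 3 arms of these means
    "environment": [{
        "arm_type": Bernoulli,
        "params": [0.1, 0.5, 0.9],
    }],
    # The policies to compare on that problem
    "policies": [
        {"archtype": UCB, "params": {}},
        {"archtype": Thompson, "params": {}},
    ],
}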