#
# Stats analysis file, named after the run directory.
#
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = analysis_dir + "/measure-densities-" + basename + ".tsv"
# Buffering of 0 makes writes hit disk immediately, so partial results
# survive a forced exit.  NOTE(review): Python-2-only for text mode.
analysis_handle = open(analysis_path, "w", 0)
#
# Parameters from model_parameters.py
#
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
num_trials = mparam.num_trials
#
# Header: title, then the pickles analyzed, then the parameter settings.
#
mfunc.show_message(g, analysis_handle, "\n\nSeed Densities\n\n")
#
for run_num in range(num_runs):
    mfunc.show_message(g, analysis_handle, sorted_pickle_names[run_num] + "\n")
#
mfunc.show_message(g, analysis_handle, "\n")
#
for label, value, tail in (
    ("\nwidth_factor", width_factor, "\n"),
    ("height_factor", height_factor, "\n"),
    ("time_factor", time_factor, "\n"),
    ("num_trials", num_trials, "\n\n"),
):
    mfunc.show_message(g, analysis_handle, label + " = " + str(value) + tail)
# Initialize some parameters locally
#
num_trials = 2  # each pair of seeds will have this many contests
num_top = 10  # num_top Gen i seeds will compete with num_top Gen n seeds
#
# Stats analysis file, named after the run directory plus the two
# local parameters above.
#
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = (analysis_dir + "/compare-past-winners-"
                 + "top" + str(num_top) + "-try" + str(num_trials)
                 + "-" + basename + ".tsv")
# Buffering of 0 makes writes hit disk immediately, so partial results
# survive a forced exit.  NOTE(review): Python-2-only for text mode.
analysis_handle = open(analysis_path, "w", 0)
#
# Print out a header for the result file
#
mfunc.show_message(g, analysis_handle, "\n\nCompare Past Winners\n\n")
for label, value in (
    ("width_factor", width_factor),
    ("height_factor", height_factor),
    ("time_factor", time_factor),
    ("num_trials", num_trials),
    ("num_top", num_top),
):
    mfunc.show_message(g, analysis_handle, label + " = " + str(value) + "\n")
mfunc.show_message(g, analysis_handle, "path = " + str(pickle_dir) + "\n\n")
mfunc.show_message(g, analysis_handle,
    "Note the results will change slightly each time this runs.\n\n")
#
# Initialize some parameters from model_parameters
#
pickle_dir = mparam.log_directory
analysis_dir = mparam.log_directory
num_generations = mparam.num_generations
#
# Report file for fusion events, named after the run directory.
#
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = analysis_dir + "/report-fusion-" + basename + ".tsv"
# Buffering of 0 makes writes hit disk immediately, so partial results
# survive a forced exit.  NOTE(review): Python-2-only for text mode.
analysis_handle = open(analysis_path, "w", 0)
#
# Print out a header for the result file
#
mfunc.show_message(g, analysis_handle, "\n\nReport Fusion\n\n")
mfunc.show_message(g, analysis_handle, "Format: " + \
  "<seed 0 fitness> <tab> " + \
  "<seed 1 fitness> <tab> " + \
  "<fusion fitness> <new line>\n\n")
#
# Make a list of the pickles in pickle_dir
#
# We assume that the directory pickle_dir contains the pickles from
# only one single run of Model-T. That is, all of the pickles will
# have the same date and time stamp as part of their file names.
#
# Fix: the original loop variable was named "file", which shadows the
# Python 2 builtin of the same name; use a comprehension with a
# neutral name instead.
pickle_list = [file_name for file_name in os.listdir(pickle_dir)
               if file_name.endswith(".bin")]
#
# Stats analysis file, named after the run directory.
#
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = analysis_dir + "/compare-random-" + basename + ".tsv"
# Buffering of 0 makes writes hit disk immediately, so partial results
# survive a forced exit.  NOTE(review): Python-2-only for text mode.
analysis_handle = open(analysis_path, "w", 0)
#
# Parameters from model_parameters.py
#
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
num_trials = mparam.num_trials
#
mfunc.show_message(g, analysis_handle, "\n\nCompare Random\n\n")
#
# List the pickle files that feed this analysis.
for run_num in range(num_runs):
    mfunc.show_message(g, analysis_handle, sorted_pickle_names[run_num] + "\n")
#
mfunc.show_message(g, analysis_handle, "\n")
#
# Echo the parameter settings into the report header.
for label, value, tail in (
    ("\nwidth_factor", width_factor, "\n"),
    ("height_factor", height_factor, "\n"),
    ("time_factor", time_factor, "\n"),
    ("num_trials", num_trials, "\n\n"),
):
    mfunc.show_message(g, analysis_handle, label + " = " + str(value) + tail)
import pickle
#
# -----------------------------------------------------------------
# Open a file for logging the results.  The file name embeds the
# current date and time, so log files sort chronologically by name.
# -----------------------------------------------------------------
#
log_name = time.strftime("log-20%y-%m-%d-%Hh-%Mm-%Ss",
                         time.localtime())
log_path = mparam.log_directory + "/" + log_name + ".txt"
# Buffering of 0 makes every write hit the disk immediately, so the
# log survives a forced exit.  NOTE(review): Python-2-only for text mode.
log_handle = open(log_path, "w", 0)
start_time = time.strftime("Start time: 20%y-%m-%d %Hh:%Mm:%Ss\n",
                           time.localtime())
mfunc.show_message(g, log_handle, start_time)
# Record every parameter setting at the top of the log.
parameter_settings = mfunc.show_parameters()
mfunc.show_message(g, log_handle, "\nParameter Settings\n\n")
for setting in parameter_settings:
    mfunc.show_message(g, log_handle, setting + "\n")
mfunc.show_message(g, log_handle, "\n")
#
# -----------------------------------------------------------------
# Seed the random number generator here.  A negative random_seed
# makes Python choose its own seed automatically -- in that case the
# experiment cannot be exactly repeated.
# -----------------------------------------------------------------
#
random_seed = mparam.random_seed
# each pair of seeds will have this many contests
num_trials = 50
# this many wins is significant at 95% level
# (p = 0.0325, Binomial Exact Test)
num_wins = 32
#
# Stats analysis file, named after the run directory.
#
basename = os.path.basename(os.path.normpath(analysis_dir))
analysis_path = analysis_dir + "/compare-win-count-" + basename + ".tsv"
# Buffering of 0 makes writes hit disk immediately, so partial results
# survive a forced exit.  NOTE(review): Python-2-only for text mode.
analysis_handle = open(analysis_path, "w", 0)
#
# Print out a header for the result file
#
mfunc.show_message(g, analysis_handle, "\n\nCompare Winners\n\n")
header_fields = [
    ("width_factor", width_factor),
    ("height_factor", height_factor),
    ("time_factor", time_factor),
    ("num_trials", num_trials),
    ("num_wins", num_wins),
]
for label, value in header_fields:
    mfunc.show_message(g, analysis_handle, label + " = " + str(value) + "\n")
mfunc.show_message(g, analysis_handle, "path = " + str(pickle_dir) + "\n\n")
mfunc.show_message(g, analysis_handle,
    "Note the numbers will change slightly each time this runs.\n\n")
#
final_num = min(smallest_pickle_size1, smallest_pickle_size2)
step_size = 1
#
# Analysis file
#
analysis_path = analysis_dir + "/compare-types.tsv"
# Buffering of 0 makes writes hit disk immediately, so partial results
# survive a forced exit.  NOTE(review): Python-2-only for text mode.
analysis_handle = open(analysis_path, "w", 0)
#
# Parameters from model_parameters.py
#
width_factor = mparam.width_factor
height_factor = mparam.height_factor
time_factor = mparam.time_factor
num_trials = mparam.num_trials
#
mfunc.show_message(g, analysis_handle, "\n\nCompare Types\n\n")
#
# List the runs found under each of the two paths being compared.
for path_label, run_path, run_names, run_count in (
    ("path 1 = ", path1, long_names1, num_runs1),
    ("path 2 = ", path2, long_names2, num_runs2),
):
    mfunc.show_message(g, analysis_handle, path_label + run_path + "\n\n")
    for run_num in range(run_count):
        mfunc.show_message(g, analysis_handle, run_names[run_num] + "\n")
    mfunc.show_message(g, analysis_handle, "\n")
#
mfunc.show_message(g, analysis_handle, "width_factor = " + str(width_factor) + "\n")