def create_dist_batch_set(default_parameters, eps):
    def clean_str(s):
        return str("%-2.2f" % s).replace(".", "")

    dist_data_beta60_analysis = copy.deepcopy(default_parameters)

    # Ensures that the distribution runs folder exists
    dist_data_beta60_analysis["batch_folder"] = (
        "../data/distribution_tests/distribution_runs")
    if not os.path.isdir(dist_data_beta60_analysis["batch_folder"]):
        dist_data_beta60_analysis["batch_folder"] = \
            os.path.join("..", dist_data_beta60_analysis["batch_folder"])

    dist_data_beta60_analysis["batch_name"] = \
        "distribution_test_eps{0:s}".format(clean_str(eps))
    dist_data_beta60_analysis["beta"] = 6.0
    dist_data_beta60_analysis["num_bins_per_int"] = 16
    dist_data_beta60_analysis["bin_range"] = [-2.1, 2.1]
    dist_data_beta60_analysis["hist_flow_times"] = [0, 250, 600]
    dist_data_beta60_analysis["NCfgs"] = get_num_observables(
        dist_data_beta60_analysis["batch_folder"],
        dist_data_beta60_analysis["batch_name"])
    dist_data_beta60_analysis["obs_file"] = "6_6.00"  # 6^3x12, beta=6.0
    dist_data_beta60_analysis["N"] = 6
    dist_data_beta60_analysis["NT"] = 12
    dist_data_beta60_analysis["color"] = "#377eb8"

    return dist_data_beta60_analysis
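# Usage sketch for create_dist_batch_set: one parameter set per flow epsilon,
# each submitted to pre-analysis. The helper name and the eps values below are
# illustrative only; use the epsilons actually generated under
# ../data/distribution_tests/distribution_runs.
def run_distribution_batches_example(default_parameters,
                                     eps_values=(0.01, 0.02, 0.05)):
    """Illustrative driver for the distribution test batches."""
    from pre_analysis.pre_analyser import pre_analysis

    for eps in eps_values:
        # clean_str(eps) gives each batch a unique "distribution_test_eps..."
        # batch name, so the runs do not overwrite each other.
        pre_analysis(create_dist_batch_set(default_parameters, eps))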
def beta645_L32_analysis():
    from pre_analysis.pre_analyser import pre_analysis
    from post_analysis.post_analyser import post_analysis
    from default_analysis_params import get_default_parameters
    from tools.folderreadingtools import get_num_observables
    import copy
    import os

    # TODO: load b645 32^4 data
    # TODO: pre-analyse b645 32^4 data
    # TODO: compare b645 32^4 and b645 48^3*96 data in post analysis

    #### Different batches
    data_batch_folder = "../GluonAction/data9"
    data_batch_folder = "../GluonAction/data10"
    default_params = get_default_parameters(
        data_batch_folder=data_batch_folder)

    # obs_exlusions = ["plaq", "energy", "topc", "topc2", "topc4", "topcr", "topcMC", "topsus"]
    # obs_exlusions = ["energy", "topsus", "topsust", "topsuste", "topsusMC", "topsusqtq0"]

    # observables = observables_euclidean_time
    # observables = ["topsus", "topsust", "topsuste", "topsusMC", "topsusqtq0"]
    # observables = ["topsusMC"]
    # observables = ["topcr", "qtq0eff"]
    # observables = ["topcte"]
    # observables = observables_euclidean_time
    # observables = ["topcr", "topsus"]
    # observables = ["topsust", "topsuste", "topsusqtq0"]
    # observables = ["topcrMC"]
    # observables = ["qtq0eff", "qtq0e"] + ["topsus", "topsust", "topsuste", "topsusMC", "topsusqtq0"]
    # observables = ["qtq0eff", "qtq0e"] + ["topsust", "topsuste", "topsusMC", "topsusqtq0"]
    # observables = ["topsus", "topsust", "topsuste", "topsusMC", "topsusqtq0"]
    # observables = ["topcr"]
    # exit("CHECK TOPCR!! @ 34 in main_analysis.py")
    # observables = ["topsuste"]
    # observables = ["qtq0effMC"]
    # observables = ["energy"]
    # observables = ["w_t_energy"]
    # observables = ["plaq", "energy", "topc", "topct"]
    # observables += ["energy"]
    # default_params["observables"] = observables

    #### Post analysis parameters
    line_fit_interval_points = 20
    # topsus_fit_targets = [0.3,0.4,0.5,0.58]
    # topsus_fit_targets = [0.3, 0.4, 0.5, 0.6]  # tf = sqrt(8*t0)
    topsus_fit_targets = [0.6]
    energy_fit_target = 0.3

    #### Different batches
    # data_batch_folder = "../GluonAction/data8"
    data_batch_folder = "../GluonAction/data10"
    # data_batch_folder = "../GluonAction/DataGiovanni"
    # data_batch_folder = "../data/topc_modes_8x16"

    # Method of continuum extrapolation.
    # Options: plateau, plateau_mean, nearest, interpolate, bootstrap
    extrapolation_methods = [
        "plateau", "plateau_mean", "nearest", "interpolate", "bootstrap"
    ]
    extrapolation_methods = ["plateau"]
    extrapolation_methods = ["bootstrap"]
    plot_continuum_fit = False

    # Topcr reference value. Options: [float], t0beta, article, t0
    topcr_t0 = "t0beta"

    # Number of different sectors we will analyse in euclidean time
    default_params["numsplits_eucl"] = 4
    intervals_eucl = [None, None, None, None]

    # Number of different sectors we will analyse in monte carlo time
    default_params["MC_time_splits"] = 4
    # MC_intervals = [[0, 1000], [500, 1000], [500, 1000], [175, 250]]
    MC_intervals = [None, None, None, None]

    # Extraction point in flow time a*t_f for q0 in qtq0
    q0_flow_times = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # [fermi]

    # Flow time indexes in percent to plot qtq0 in euclidean time at
    euclidean_time_percents = [0, 0.25, 0.50, 0.75, 1.00]
    # euclidean_time_percents = [0]

    # Data types to be looked at in the post-analysis.
    post_analysis_data_type = ["bootstrap", "jackknife", "unanalyzed"]
    post_analysis_data_type = ["bootstrap"]

    ########## Main analysis ##########
    databeta60 = copy.deepcopy(default_params)
    databeta60["batch_name"] = "beta60"
    databeta60["beta"] = 6.0
    databeta60["topc_y_limits"] = [-9, 9]
    databeta60["topc2_y_limits"] = [-81, 81]
    databeta60["NCfgs"] = get_num_observables(databeta60["batch_folder"],
                                              databeta60["batch_name"])
    databeta60["obs_file"] = "24_6.00"
    databeta60["MCInt"] = MC_intervals[0]
    databeta60["N"] = 24
    databeta60["NT"] = 2 * databeta60["N"]
    databeta60["color"] = "#e41a1c"

    databeta61 = copy.deepcopy(default_params)
    databeta61["batch_name"] = "beta61"
    databeta61["beta"] = 6.1
    databeta61["topc_y_limits"] = [-12, 12]
    databeta61["topc2_y_limits"] = [-144, 144]
    databeta61["NCfgs"] = get_num_observables(databeta61["batch_folder"],
                                              databeta61["batch_name"])
    databeta61["obs_file"] = "28_6.10"
    databeta61["MCInt"] = MC_intervals[1]
    databeta61["N"] = 28
    databeta61["NT"] = 2 * databeta61["N"]
    databeta61["color"] = "#377eb8"

    databeta62 = copy.deepcopy(default_params)
    databeta62["batch_name"] = "beta62"
    databeta62["beta"] = 6.2
    databeta62["topc_y_limits"] = [-12, 12]
    databeta62["topc2_y_limits"] = [-196, 196]
    databeta62["NCfgs"] = get_num_observables(databeta62["batch_folder"],
                                              databeta62["batch_name"])
    databeta62["obs_file"] = "32_6.20"
    databeta62["MCInt"] = MC_intervals[2]
    databeta62["N"] = 32
    databeta62["NT"] = 2 * databeta62["N"]
    databeta62["color"] = "#4daf4a"

    databeta645 = copy.deepcopy(default_params)
    databeta645["flow_epsilon"] = 0.02
    databeta645["batch_name"] = "beta645"
    databeta645["beta"] = 6.45
    databeta645["topc_y_limits"] = [-15, 15]
    databeta645["topc2_y_limits"] = [-300, 300]
    databeta645["NCfgs"] = get_num_observables(databeta645["batch_folder"],
                                               databeta645["batch_name"])
    databeta645["obs_file"] = "48_6.45"
    databeta645["MCInt"] = MC_intervals[3]
    databeta645["N"] = 48
    databeta645["NT"] = 2 * databeta645["N"]
    databeta645["color"] = "#984ea3"

    # Adding relevant batches to args
    analysis_parameter_list = [databeta60, databeta61, databeta62, databeta645]
    # analysis_parameter_list = [databeta60, databeta61, databeta62]
    # analysis_parameter_list = [databeta61, databeta62]
    # analysis_parameter_list = [databeta62]
    # analysis_parameter_list = [databeta645]

    section_separator = "=" * 160
    print section_separator
    print "Observables to be analysed: %s" % ", ".join(
        default_params["observables"])
    print section_separator + "\n"

    #### Submitting main analysis
    for analysis_parameters in analysis_parameter_list:
        pre_analysis(analysis_parameters)

    if analysis_parameter_list[0]["MCInt"] is not None:
        assert sum(
            [len(plist["MCInt"]) - len(analysis_parameter_list[0]["MCInt"])
             for plist in analysis_parameter_list]) == 0, \
            "unequal amount of MC intervals"

    #### Submitting post-analysis data
    if len(analysis_parameter_list) >= 3:
        post_analysis(analysis_parameter_list,
                      default_params["observables"],
                      topsus_fit_targets, line_fit_interval_points,
                      energy_fit_target, q0_flow_times,
                      euclidean_time_percents,
                      extrapolation_methods=extrapolation_methods,
                      plot_continuum_fit=plot_continuum_fit,
                      post_analysis_data_type=post_analysis_data_type,
                      figures_folder=default_params["figures_folder"],
                      gif_params=default_params["gif"],
                      verbose=default_params["verbose"])
    else:
        msg = "Need at least 3 different beta values to run post analysis "
        msg += "(%d given)." % len(analysis_parameter_list)
        print msg
def topc_modes_analysis():
    """Analysis for different lattice sizes and their topological charges."""
    default_params = get_default_parameters(data_batch_folder="temp")
    run_pre_analysis = False
    verbose = True

    data_path = "../data/"
    if not os.path.isdir(data_path):
        data_path = "../" + data_path

    ########## Smaug data 8x16 analysis ##########
    smaug8x16_data_beta60_analysis = copy.deepcopy(default_params)
    smaug8x16_data_beta60_analysis["batch_folder"] = data_path
    smaug8x16_data_beta60_analysis["batch_name"] = "beta60_8x16_run"
    smaug8x16_data_beta60_analysis["beta"] = 6.0
    smaug8x16_data_beta60_analysis["topc_y_limits"] = [-2, 2]
    smaug8x16_data_beta60_analysis["num_bins_per_int"] = 32
    smaug8x16_data_beta60_analysis["bin_range"] = [-2.5, 2.5]
    smaug8x16_data_beta60_analysis["hist_flow_times"] = [0, 250, 600]
    smaug8x16_data_beta60_analysis["NCfgs"] = get_num_observables(
        smaug8x16_data_beta60_analysis["batch_folder"],
        smaug8x16_data_beta60_analysis["batch_name"])
    smaug8x16_data_beta60_analysis["obs_file"] = "8_6.00"
    smaug8x16_data_beta60_analysis["N"] = 8
    smaug8x16_data_beta60_analysis["NT"] = 16
    smaug8x16_data_beta60_analysis["color"] = "#377eb8"

    ########## Smaug data 12x24 analysis ##########
    smaug12x24_data_beta60_analysis = copy.deepcopy(default_params)
    smaug12x24_data_beta60_analysis["batch_folder"] = data_path
    smaug12x24_data_beta60_analysis["batch_name"] = "beta60_12x24_run"
    smaug12x24_data_beta60_analysis["beta"] = 6.0
    smaug12x24_data_beta60_analysis["topc_y_limits"] = [-4, 4]
    smaug12x24_data_beta60_analysis["num_bins_per_int"] = 16
    smaug12x24_data_beta60_analysis["bin_range"] = [-4.5, 4.5]
    smaug12x24_data_beta60_analysis["hist_flow_times"] = [0, 250, 600]
    smaug12x24_data_beta60_analysis["NCfgs"] = get_num_observables(
        smaug12x24_data_beta60_analysis["batch_folder"],
        smaug12x24_data_beta60_analysis["batch_name"])
    smaug12x24_data_beta60_analysis["obs_file"] = "12_6.00"
    smaug12x24_data_beta60_analysis["N"] = 12
    smaug12x24_data_beta60_analysis["NT"] = 24
    smaug12x24_data_beta60_analysis["color"] = "#377eb8"

    ########## Smaug data 16x32 analysis ##########
    smaug16x32_data_beta61_analysis = copy.deepcopy(default_params)
    smaug16x32_data_beta61_analysis["batch_folder"] = data_path
    smaug16x32_data_beta61_analysis["batch_name"] = "beta61_16x32_run"
    smaug16x32_data_beta61_analysis["beta"] = 6.1
    smaug16x32_data_beta61_analysis["topc_y_limits"] = [-8, 8]
    smaug16x32_data_beta61_analysis["num_bins_per_int"] = 16
    smaug16x32_data_beta61_analysis["bin_range"] = [-7.5, 7.5]
    smaug16x32_data_beta61_analysis["hist_flow_times"] = [0, 250, 600]
    smaug16x32_data_beta61_analysis["NCfgs"] = get_num_observables(
        smaug16x32_data_beta61_analysis["batch_folder"],
        smaug16x32_data_beta61_analysis["batch_name"])
    smaug16x32_data_beta61_analysis["obs_file"] = "16_6.10"
    smaug16x32_data_beta61_analysis["N"] = 16
    smaug16x32_data_beta61_analysis["NT"] = 32
    smaug16x32_data_beta61_analysis["color"] = "#377eb8"

    param_list = [
        smaug8x16_data_beta60_analysis,
        smaug12x24_data_beta60_analysis,
        smaug16x32_data_beta61_analysis
    ]

    if run_pre_analysis:
        # Submitting analysis
        for analysis_parameters in param_list:
            pre_analysis(analysis_parameters)

    # Loads topc data
    data = []
    # N_val = [24, 24, 28]
    for i, param in enumerate(param_list):
        print "Loading data for: {}".format(param["batch_name"])
        # fpath = os.path.join(param["batch_folder"], param["batch_name"],
        #                      "{0:d}_{1:.2f}.npy".format(N_val[i],
        #                                                 param["beta"]))
        data_, p = get_data_parameters(param)
        data.append({
            "data": data_("topc")["obs"].T,
            "beta": param["beta"],
            "N": param["N"]
        })
        # print data_("topc")["obs"].shape
    # Flow time to plots
    flow_times = [0, 250, 600]

    # Histogram plotting
    xlim = 7.5
    NBins = np.arange(-xlim, xlim, 0.05)
    for t_f in flow_times:
        # Adds unanalyzed data
        fig, axes = plt.subplots(3, 1, sharey=False, sharex=True)
        for i, ax in enumerate(axes):
            lab = r"${0:d}^3\times{1:d}$, $\beta={2:.2f}$".format(
                data[i]["N"], data[i]["N"] * 2, data[i]["beta"])
            weights = np.ones_like(data[i]["data"][t_f])
            weights /= len(data[i]["data"][t_f])
            ax.hist(data[i]["data"][t_f], bins=NBins, label=lab,
                    weights=weights)
            ax.legend(loc="upper right")
            ax.grid(True)
            ax.set_xlim(-xlim, xlim)
            if i == 1:
                ax.set_ylabel(r"$Hits$")
            elif i == 2:
                ax.set_xlabel(r"$Q$")

        # Sets up figure
        figpath = "figures/topc_modes_analysis"
        if not os.path.isdir(figpath):
            figpath = "../" + figpath
        check_folder(figpath, verbose=verbose)
        figpath = os.path.join(figpath, "topc_modes_tf{}.pdf".format(t_f))
        fig.savefig(figpath)
        print "Figure saved at {0:s}".format(figpath)
        plt.close(fig)
def lattice_updates_analysis():
    run_pre_analysis = False
    verbose = False
    N_corr = [200, 400, 600]
    N_updates = [10, 20, 30]
    param_list = []
    beta = 6.0
    figure_folder = "../figures/lattice_updates"
    output_folder = "../data/lattice_update_data"

    ############ Sets up the different N_up/N_corr analyses ##########

    # Sets up Slurm output files
    if not os.path.isdir(output_folder):
        output_folder = os.path.join("..", output_folder)
    output_file_path = os.path.join(output_folder, "output_files")

    # Sets up empty nested dictionary
    output_files = {icorr: {iup: {} for iup in N_updates} for icorr in N_corr}

    # Retrieves standard parameters
    default_params = get_default_parameters(data_batch_folder="temp",
                                            verbose=verbose)

    # Loops through different corr lengths and link update sizes
    for icorr in N_corr:
        for iup in N_updates:
            # Loops over files in directory
            for of in os.listdir(output_file_path):
                _tmp = re.findall(r"NUP([\d]+)_NCORR([\d]+)", of)[0]
                _NUp, _NCorr = list(map(int, _tmp))

                # If icorr and iup match the file, add it to the dictionary
                # created earlier.
                if icorr == _NCorr and iup == _NUp:
                    output_files[icorr][iup] = {
                        "NUp": iup,
                        "NCorr": icorr,
                        "output_path": os.path.join(output_file_path, of)
                    }
                    break

    # Sets up parameter list for analysis
    for icorr in N_corr:
        for iup in N_updates:
            _params = copy.deepcopy(default_params)
            _params["batch_folder"] = output_folder
            _params["batch_name"] = \
                "B60_NUP{0:d}_NCORR{1:d}".format(iup, icorr)
            _params["NCfgs"] = get_num_observables(_params["batch_folder"],
                                                   _params["batch_name"])
            _params["beta"] = beta
            _params["color"] = "#377eb8"
            _params["N"] = 16
            _params["NT"] = _params["N"] * 2
            _params["observables"] = ["plaq", "energy", "topc"]
            _params.update(output_files[icorr][iup])

            _times = read_run_time(_params["output_path"])
            _params["total_runtime"] = _times[-1][-1]
            _params["update_time"] = _times[0][0]

            param_list.append(_params)

    if run_pre_analysis:
        # Submitting pre-analysis for each N_up/N_corr combination
        for analysis_parameters in param_list:
            pre_analysis(analysis_parameters)
        print("Success: pre analysis done.")

    # Use post_analysis data for further analysis.
    data = {icorr: {iup: {} for iup in N_updates} for icorr in N_corr}
    for i, _params in enumerate(param_list):
        print "Loading data for NCorr={0:d} NUp={1:d}".format(
            _params["NCorr"], _params["NUp"])
        data[_params["NCorr"]][_params["NUp"]] = {
            "data": post_analysis.PostAnalysisDataReader(
                [_params],
                observables_to_load=_params["observables"],
                verbose=verbose),
            "params": _params,
        }
    print("Success: post analysis data retrieved.")

    X_corr, Y_up = np.meshgrid(N_corr, N_updates)

    # Sets up time grid
    Z_total_runtimes = np.zeros((3, 3))
    Z_update_times = np.zeros((3, 3))
    X_flow = np.zeros((3, 3, 251))
    Z_autocorr = np.zeros((3, 3, 251))
    Z_autocorr_error = np.zeros((3, 3, 251))

    for i, icorr in enumerate(N_corr):
        for j, iup in enumerate(N_updates):
            Z_total_runtimes[i, j] = \
                data[icorr][iup]["params"]["total_runtime"]
            Z_update_times[i, j] = \
                data[icorr][iup]["params"]["update_time"]

            _tmp = data[icorr][iup]["data"]["topc"][beta]
            Z_autocorr[i, j] = \
                _tmp["with_autocorr"]["autocorr"]["tau_int"]
            Z_autocorr_error[i, j] = \
                _tmp["with_autocorr"]["autocorr"]["tau_int_err"]

            X_flow[i, j] = _tmp["with_autocorr"]["unanalyzed"]["x"]
            X_flow[i, j] *= data[icorr][iup]["data"].flow_epsilon[6.0]
            X_flow[i, j] = np.sqrt(8 * X_flow[i, j])
            X_flow[i, j] *= data[icorr][iup]["data"].lattice_sizes[6.0][0]

    # Plots update and total run-times on grid
    heatmap_plotter(N_corr, N_updates, Z_total_runtimes,
                    "topc_total_runtime.pdf",
                    xlabel=r"$N_\mathrm{corr}$",
                    ylabel=r"$N_\mathrm{up}$",
                    cbartitle=r"$t_\mathrm{total}$",
                    figure_folder=figure_folder)
    heatmap_plotter(N_corr, N_updates, Z_update_times,
                    "topc_update_runtime.pdf",
                    xlabel=r"$N_\mathrm{corr}$",
                    ylabel=r"$N_\mathrm{up}$",
                    cbartitle=r"$t_\mathrm{update}$",
                    figure_folder=figure_folder)

    # Plots final autocorrelations on grid
    flow_times = [0, 100, 250]
    for tf in flow_times:
        heatmap_plotter(N_corr, N_updates, Z_autocorr[:, :, tf],
                        "topc_autocorr_tau{0:d}.pdf".format(tf),
                        xlabel=r"$N_\mathrm{corr}$",
                        ylabel=r"$N_\mathrm{up}$",
                        cbartitle=r"$\tau_\mathrm{int}$",
                        figure_folder=figure_folder)
        heatmap_plotter(N_corr, N_updates, Z_autocorr_error[:, :, tf],
                        "topc_autocorr_err_tau{0:d}.pdf".format(tf),
                        xlabel=r"$N_\mathrm{corr}$",
                        ylabel=r"$N_\mathrm{up}$",
                        cbartitle=r"$\tau_\mathrm{int}$",
                        figure_folder=figure_folder)

    # Plots all of the 9 autocorrs in single figure
    plot9_figures(X_flow, X_corr, Y_up, Z_autocorr, Z_autocorr_error,
                  "topc_autocorr.pdf",
                  xlabel=r"$\sqrt{8t_{f}}$",
                  ylabel=r"$\tau_\mathrm{int}$",
                  figure_folder=figure_folder,
                  mark_interval=10)
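# Note on the Slurm output lookup in lattice_updates_analysis: the files under
# output_files/ are assumed to carry the run parameters in their names as
# "NUP<n>_NCORR<m>", which is exactly what the regex there extracts.
# Illustration (the file name itself is hypothetical):
#
#     >>> re.findall(r"NUP([\d]+)_NCORR([\d]+)", "B60_NUP10_NCORR200.out")
#     [('10', '200')]   # -> NUp=10, NCorr=200 after map(int, ...)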
def main_analysis(run_pre_analysis=True, run_post_analysis=True,
                  only_generate_data=False, observables=None,
                  post_analysis_data_type=["bootstrap"]):
    from pre_analysis.pre_analyser import pre_analysis
    from post_analysis.post_analyser import post_analysis
    from default_analysis_params import get_default_parameters
    from tools.folderreadingtools import get_num_observables
    import copy
    import os

    # Different batches
    # data_batch_folder = "../GluonAction/data8"
    data_batch_folder = "../GluonAction/data11"
    # data_batch_folder = "../GluonAction/DataGiovanni"
    # data_batch_folder = "../data/topc_modes_8x16"
    data_batch_folder = "../data/data11"

    obs_exlusions = ["w_t_energy", "energy", "topcMC", "topsusMC", "qtq0effMC"]
    default_params = get_default_parameters(
        data_batch_folder=data_batch_folder, obs_exlusions=obs_exlusions)

    if observables is not None:
        default_params["observables"] = observables

    # Post analysis parameters
    line_fit_interval_points = 20
    # topsus_fit_targets = [0.3,0.4,0.5,0.58]
    # topsus_fit_targets = [0.3, 0.4, 0.5, 0.6]  # tf = sqrt(8*t0)
    topsus_fit_targets = [0.6, "t0", "w0", "t0cont", "w0cont"]
    energy_fit_target = 0.3

    # Will try to use pickled reference scale instead
    use_pickled_reference_scale = True

    # Method of continuum extrapolation.
    # Options: plateau, plateau_mean, nearest, interpolate, bootstrap
    extrapolation_methods = [
        "plateau", "plateau_mean", "nearest", "interpolate", "bootstrap"
    ]
    # extrapolation_methods = ["plateau"]
    extrapolation_methods = ["bootstrap"]
    plot_continuum_fit = False

    # Number of different sectors we will analyse in euclidean time
    default_params["numsplits_eucl"] = 4
    intervals_eucl = [None, None, None, None]

    # Number of different sectors we will analyse in monte carlo time
    default_params["MC_time_splits"] = 4
    # MC_intervals = [[0, 1000], [500, 1000], [500, 1000], [175, 250]]
    # MC_intervals = [[0, 1000], [0, 1000], [0, 2000], [0, 125]]
    MC_intervals = [None, None, None, None]

    # Extraction point in flow time a*t_f for q0 in qtq0
    q0_flow_times = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # [fermi]

    # Flow time indexes in percent to plot qtq0 in euclidean time at
    euclidean_time_percents = [0, 0.25, 0.50, 0.75, 1.00]
    # euclidean_time_percents = [0]

    # Blocking
    default_params["blocking_analysis"] = True

    # Check to only generate data for post-analysis
    default_params["only_generate_data"] = only_generate_data

    ########## Main analysis ##########
    databeta60 = copy.deepcopy(default_params)
    databeta60["batch_name"] = "beta60"
    databeta60["ensemble_name"] = r"$A$"
    databeta60["beta"] = 6.0
    databeta60["block_size"] = 10  # None
    databeta60["topc_y_limits"] = [-9, 9]
    databeta60["topc2_y_limits"] = [-81, 81]
    databeta60["NCfgs"] = get_num_observables(databeta60["batch_folder"],
                                              databeta60["batch_name"])
    databeta60["obs_file"] = "24_6.00"
    databeta60["MCInt"] = MC_intervals[0]
    databeta60["N"] = 24
    databeta60["NT"] = 2 * databeta60["N"]
    databeta60["color"] = "#e41a1c"

    databeta61 = copy.deepcopy(default_params)
    databeta61["batch_name"] = "beta61"
    databeta61["ensemble_name"] = r"$B$"
    databeta61["beta"] = 6.1
    databeta61["block_size"] = 10  # None
    databeta61["topc_y_limits"] = [-12, 12]
    databeta61["topc2_y_limits"] = [-144, 144]
    databeta61["NCfgs"] = get_num_observables(databeta61["batch_folder"],
                                              databeta61["batch_name"])
    databeta61["obs_file"] = "28_6.10"
    databeta61["MCInt"] = MC_intervals[1]
    databeta61["N"] = 28
    databeta61["NT"] = 2 * databeta61["N"]
    databeta61["color"] = "#377eb8"

    databeta62 = copy.deepcopy(default_params)
    databeta62["batch_name"] = "beta62"
    databeta62["ensemble_name"] = r"$C$"
    databeta62["beta"] = 6.2
    databeta62["block_size"] = 10  # None
    databeta62["topc_y_limits"] = [-12, 12]
    databeta62["topc2_y_limits"] = [-196, 196]
    databeta62["NCfgs"] = get_num_observables(databeta62["batch_folder"],
                                              databeta62["batch_name"])
    databeta62["obs_file"] = "32_6.20"
    databeta62["MCInt"] = MC_intervals[2]
    databeta62["N"] = 32
    databeta62["NT"] = 2 * databeta62["N"]
    databeta62["color"] = "#4daf4a"

    databeta645 = copy.deepcopy(default_params)
    databeta645["flow_epsilon"] = 0.02
    databeta645["batch_name"] = "beta645"
    databeta645["ensemble_name"] = r"$D_2$"
    databeta645["beta"] = 6.45
    databeta645["block_size"] = 25  # None
    databeta645["topc_y_limits"] = [-15, 15]
    databeta645["topc2_y_limits"] = [-300, 300]
    databeta645["NCfgs"] = get_num_observables(databeta645["batch_folder"],
                                               databeta645["batch_name"])
    databeta645["obs_file"] = "48_6.45"
    databeta645["MCInt"] = MC_intervals[3]
    databeta645["N"] = 48
    databeta645["NT"] = 2 * databeta645["N"]
    databeta645["color"] = "#984ea3"

    # Adding relevant batches to args
    analysis_parameter_list = [databeta60, databeta61, databeta62, databeta645]

    section_separator = "=" * 160
    print section_separator
    print "Observables to be analysed: %s" % ", ".join(
        default_params["observables"])
    print section_separator + "\n"

    # Submitting main analysis
    if run_pre_analysis:
        for analysis_parameters in analysis_parameter_list:
            pre_analysis(analysis_parameters)

    if analysis_parameter_list[0]["MCInt"] is not None:
        assert sum(
            [len(plist["MCInt"]) - len(analysis_parameter_list[0]["MCInt"])
             for plist in analysis_parameter_list]) == 0, \
            "unequal amount of MC intervals"

    # Submitting post-analysis data
    if run_post_analysis:
        if len(analysis_parameter_list) >= 3:
            post_analysis(analysis_parameter_list,
                          default_params["observables"],
                          topsus_fit_targets, line_fit_interval_points,
                          energy_fit_target, q0_flow_times,
                          euclidean_time_percents,
                          extrapolation_methods=extrapolation_methods,
                          plot_continuum_fit=plot_continuum_fit,
                          post_analysis_data_type=post_analysis_data_type,
                          figures_folder=default_params["figures_folder"],
                          gif_params=default_params["gif"],
                          verbose=default_params["verbose"])
        else:
            msg = "Need at least 3 different beta values to run post analysis "
            msg += "(%d given)." % len(analysis_parameter_list)
            print msg
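# Example invocation of main_analysis (illustrative values; the observables
# list and flags below are not the original run configuration):
#
#     main_analysis(run_pre_analysis=True, run_post_analysis=False,
#                   only_generate_data=True,
#                   observables=["plaq", "topc"])
#
# This pre-analyses the four ensembles for the listed observables only, with
# only_generate_data forwarded through default_params so the run just produces
# data for a later post-analysis pass.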
def thermalization_analysis():
    """Runs the thermalization analysis."""
    verbose = True
    run_pre_analysis = True
    mark_every = 50
    mc_cutoff = -1  # Skip every 100 points with 2000 therm-steps!!

    batch_folder = check_relative_path("data/thermalization_data")
    base_figure_folder = check_relative_path("figures/")
    base_figure_folder = os.path.join(base_figure_folder,
                                      "thermalization_analysis")
    check_folder(base_figure_folder, verbose=verbose)

    default_params = get_default_parameters(
        data_batch_folder="temp", include_euclidean_time_obs=False)

    ############ COLD START #############
    cold_beta60_params = copy.deepcopy(default_params)
    cold_beta60_params["batch_folder"] = batch_folder
    cold_beta60_params["batch_name"] = "B60_THERM_COLD"
    cold_beta60_params["load_binary_file"] = False
    cold_beta60_params["beta"] = 6.0
    cold_beta60_params["topc_y_limits"] = [-2, 2]
    cold_beta60_params["num_bins_per_int"] = 32
    cold_beta60_params["bin_range"] = [-2.5, 2.5]
    cold_beta60_params["hist_flow_times"] = [0, 250, 600]
    cold_beta60_params["NCfgs"] = get_num_observables(
        cold_beta60_params["batch_folder"],
        cold_beta60_params["batch_name"])
    cold_beta60_params["obs_file"] = "8_6.00"
    cold_beta60_params["N"] = 8
    cold_beta60_params["NT"] = 16
    cold_beta60_params["color"] = "#377eb8"

    ########## HOT RND START ############
    hot_rnd_beta60_params = copy.deepcopy(default_params)
    hot_rnd_beta60_params["batch_folder"] = batch_folder
    hot_rnd_beta60_params["batch_name"] = "B60_THERM_HOT_RND"

    ########## HOT RST START ############
    hot_rst_beta60_params = copy.deepcopy(default_params)
    hot_rst_beta60_params["batch_folder"] = batch_folder
    hot_rst_beta60_params["batch_name"] = "B60_THERM_HOT_RST"

    if run_pre_analysis:
        # Loads the raw observables for the three start conditions
        cold_data = load_observable(cold_beta60_params)
        hot_rnd_data = load_observable(hot_rnd_beta60_params)
        hot_rst_data = load_observable(hot_rst_beta60_params)

    # # Loads post analysis data
    # cold_data = post_analysis.PostAnalysisDataReader(
    #     [cold_beta60_params],
    #     observables_to_load=cold_beta60_params["observables"],
    #     verbose=verbose)
    # hot_rnd_data = post_analysis.PostAnalysisDataReader(
    #     [hot_rnd_beta60_params],
    #     observables_to_load=hot_rnd_beta60_params["observables"],
    #     verbose=verbose)
    # hot_rst_data = post_analysis.PostAnalysisDataReader(
    #     [hot_rst_beta60_params],
    #     observables_to_load=hot_rst_beta60_params["observables"],
    #     verbose=verbose)

    # TODO: plot thermalizations for the 3 different observables

    plot_types = ["default", "loglog", "logx", "logy"]

    y_labels = [
        [r"$P$", r"$Q$", r"$E$"],
        [r"$\frac{|P - \langle P \rangle|}{\langle P \rangle}$",
         r"$\frac{|Q - \langle Q \rangle|}{\langle Q \rangle}$",
         r"$\frac{|E - \langle E \rangle|}{\langle E \rangle}$"],
        [r"$|P - \langle P \rangle|$",
         r"$|Q - \langle Q \rangle|$",
         r"$|E - \langle E \rangle|$"]]
    # y_labels[i_dr] = [r"$\langle P \rangle$", r"$\langle P \rangle$",
    #                   r"$\langle P \rangle$"]

    subplot_rows = [1, 3]

    # Limits to be put on plot
    x_limits = [[] for i in range(3)]
    y_limits = [[], [], []]

    data_representations = ["default", "relerr", "abserr"]

    obs_list = cold_data["obs"].keys()

    x_label = r"$t_\mathrm{MC}$"

    # The energy series needs a constant -1/64 normalisation factor. It is
    # applied once, up front, so the in-place scaling is not repeated on every
    # pass through the plotting loops below.
    if "energy" in obs_list:
        correction_factor = -1.0 / 64
        cold_data["obs"]["energy"] *= correction_factor
        hot_rnd_data["obs"]["energy"] *= correction_factor
        hot_rst_data["obs"]["energy"] *= correction_factor

    for i_dr, dr in enumerate(data_representations):
        for pt in plot_types:
            for i_obs, obs in enumerate(obs_list):
                for plot_rows in subplot_rows:

                    # Sets up figure folder for observable
                    figure_folder = os.path.join(base_figure_folder, obs)
                    check_folder(figure_folder, verbose=verbose)

                    # Sets up plot type folder
                    figure_folder = os.path.join(figure_folder, pt)
                    check_folder(figure_folder, verbose=verbose)
                    # Retrieves data and makes modifications
                    _cold_data = modify_data(
                        cold_data["obs"][obs][:mc_cutoff], dr)
                    _hot_rnd_data = modify_data(
                        hot_rnd_data["obs"][obs][:mc_cutoff], dr)
                    _hot_rst_data = modify_data(
                        hot_rst_data["obs"][obs][:mc_cutoff], dr)

                    # Creates figure name
                    figure_name = "{0:s}_{1:s}_{2:s}_{3:d}plotrows.pdf".format(
                        obs, pt, dr, plot_rows)

                    plot_data_array(
                        [np.arange(_cold_data.shape[0]) for i in range(3)],
                        [_cold_data, _hot_rnd_data, _hot_rst_data],
                        ["Cold start", "Hot start", r"Hot start, $RST$"],
                        x_label, y_labels[i_dr][i_obs], figure_name,
                        figure_folder, plot_type=pt,
                        x_limits=x_limits[i_obs], y_limits=y_limits[i_obs],
                        mark_every=mark_every, subplot_rows=plot_rows)
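# `modify_data` is an external helper used above; based on the y-axis labels in
# thermalization_analysis it is assumed to map a Monte Carlo series to one of
# the representations "default", "relerr" or "abserr". The sketch below only
# illustrates that assumed behaviour; it is not the repository's implementation.
def _modify_data_sketch(series, representation):
    """Illustrative stand-in for modify_data (assumed behaviour)."""
    series = np.asarray(series)
    mean = series.mean()
    if representation == "relerr":
        # |x - <x>| / <x>
        return np.abs(series - mean) / mean
    elif representation == "abserr":
        # |x - <x>|
        return np.abs(series - mean)
    # "default": the raw Monte Carlo history
    return series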
def topc_modes_analysis():
    """Analysis for different lattice sizes and their topological charges."""
    default_params = get_default_parameters(data_batch_folder="temp")
    default_params["blocking_analysis"] = True
    default_params["observables"] = ["plaq", "topc", "topc2", "topc4",
                                     "topcr", "topsus", "topsusqtq0",
                                     "qtq0e", "qtq0eff", "topcMC"]
    default_params["observables"] = ["topc2", "topc4", "topcr"]

    # Check to only generate data for post-analysis
    default_params["only_generate_data"] = False

    run_pre_analysis = False
    run_post_analysis = False
    # run_pre_analysis = True
    # run_post_analysis = True
    default_params["verbose"] = True

    ########## Post analysis parameters ##########
    line_fit_interval_points = 20
    topsus_fit_targets = [0.5, 0.6]
    energy_fit_target = 0.3
    q0_flow_times = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # [fermi]
    euclidean_time_percents = [0, 0.25, 0.50, 0.75, 1.00]
    extrapolation_methods = ["bootstrap"]
    plot_continuum_fit = False
    post_analysis_data_type = ["bootstrap"]
    figures_folder = "figures/topc_modes_analysis"

    data_path = "../data/"
    if not os.path.isdir(data_path):
        data_path = "../" + data_path

    ########## Smaug data 8x16 analysis ##########
    smaug8x16_data_beta60_analysis = copy.deepcopy(default_params)
    smaug8x16_data_beta60_analysis["batch_folder"] = data_path
    smaug8x16_data_beta60_analysis["batch_name"] = "beta60_8x16_run"
    smaug8x16_data_beta60_analysis["ensemble_name"] = r"$E$"
    smaug8x16_data_beta60_analysis["beta"] = 6.0
    smaug8x16_data_beta60_analysis["block_size"] = 10  # None
    smaug8x16_data_beta60_analysis["topc_y_limits"] = [-2, 2]
    smaug8x16_data_beta60_analysis["num_bins_per_int"] = 32
    smaug8x16_data_beta60_analysis["bin_range"] = [-2.5, 2.5]
    smaug8x16_data_beta60_analysis["hist_flow_times"] = [0, 250, 600]
    smaug8x16_data_beta60_analysis["NCfgs"] = get_num_observables(
        smaug8x16_data_beta60_analysis["batch_folder"],
        smaug8x16_data_beta60_analysis["batch_name"])
    smaug8x16_data_beta60_analysis["obs_file"] = "8_6.00"
    smaug8x16_data_beta60_analysis["N"] = 8
    smaug8x16_data_beta60_analysis["NT"] = 16
    smaug8x16_data_beta60_analysis["color"] = "#377eb8"

    ########## Smaug data 12x24 analysis ##########
    smaug12x24_data_beta60_analysis = copy.deepcopy(default_params)
    smaug12x24_data_beta60_analysis["batch_folder"] = data_path
    smaug12x24_data_beta60_analysis["batch_name"] = "beta60_12x24_run"
    smaug12x24_data_beta60_analysis["ensemble_name"] = r"$F$"
    smaug12x24_data_beta60_analysis["beta"] = 6.0
    smaug12x24_data_beta60_analysis["block_size"] = 10  # None
    smaug12x24_data_beta60_analysis["topc_y_limits"] = [-4, 4]
    smaug12x24_data_beta60_analysis["num_bins_per_int"] = 16
    smaug12x24_data_beta60_analysis["bin_range"] = [-4.5, 4.5]
    smaug12x24_data_beta60_analysis["hist_flow_times"] = [0, 100, 600]
    smaug12x24_data_beta60_analysis["NCfgs"] = get_num_observables(
        smaug12x24_data_beta60_analysis["batch_folder"],
        smaug12x24_data_beta60_analysis["batch_name"])
    smaug12x24_data_beta60_analysis["obs_file"] = "12_6.00"
    smaug12x24_data_beta60_analysis["N"] = 12
    smaug12x24_data_beta60_analysis["NT"] = 24
    smaug12x24_data_beta60_analysis["color"] = "#377eb8"

    ########## Smaug data 16x32 analysis ##########
    smaug16x32_data_beta61_analysis = copy.deepcopy(default_params)
    smaug16x32_data_beta61_analysis["batch_folder"] = data_path
    smaug16x32_data_beta61_analysis["batch_name"] = "beta61_16x32_run"
    smaug16x32_data_beta61_analysis["ensemble_name"] = r"$G$"
    smaug16x32_data_beta61_analysis["beta"] = 6.1
    smaug16x32_data_beta61_analysis["block_size"] = 10  # None
    smaug16x32_data_beta61_analysis["topc_y_limits"] = [-8, 8]
    smaug16x32_data_beta61_analysis["num_bins_per_int"] = 16
    smaug16x32_data_beta61_analysis["bin_range"] = [-7.5, 7.5]
    smaug16x32_data_beta61_analysis["hist_flow_times"] = [0, 100, 400]
    smaug16x32_data_beta61_analysis["NCfgs"] = get_num_observables(
        smaug16x32_data_beta61_analysis["batch_folder"],
        smaug16x32_data_beta61_analysis["batch_name"])
    smaug16x32_data_beta61_analysis["obs_file"] = "16_6.10"
    smaug16x32_data_beta61_analysis["N"] = 16
    smaug16x32_data_beta61_analysis["NT"] = 32
    smaug16x32_data_beta61_analysis["color"] = "#377eb8"

    param_list = [
        smaug8x16_data_beta60_analysis,
        smaug12x24_data_beta60_analysis,
        smaug16x32_data_beta61_analysis]

    if run_pre_analysis:
        # Submitting analysis
        for analysis_parameters in param_list:
            pre_analysis(analysis_parameters)

    # Submitting post-analysis data
    if run_post_analysis:
        if len(param_list) >= 3:
            post_analysis(param_list,
                          default_params["observables"],
                          topsus_fit_targets, line_fit_interval_points,
                          energy_fit_target, q0_flow_times,
                          euclidean_time_percents,
                          extrapolation_methods=extrapolation_methods,
                          plot_continuum_fit=plot_continuum_fit,
                          post_analysis_data_type=post_analysis_data_type,
                          figures_folder=figures_folder,  # "figures/topc_modes_analysis"
                          verbose=default_params["verbose"])
        else:
            msg = "Need at least 3 different beta values to run post analysis "
            msg += "(%d given)." % len(param_list)
            print msg

    # Loads topc data
    data = []
    # N_val = [24, 24, 28]
    for i, param in enumerate(param_list):
        print "Loading data for: {}".format(param["batch_name"])
        data_, p = get_data_parameters(param)
        data.append({"data": data_("topc")["obs"].T,
                     "beta": param["beta"],
                     "N": param["N"],
                     "ensemble_name": param["ensemble_name"]})

    # Flow times to plot
    flow_times = [0, 25, 50, 100, 150, 250, 450, 600]

    # Histogram plotting
    xlim = 7.5
    NBins = np.arange(-xlim, xlim, 0.05)
    for t_f in flow_times:
        # Adds unanalyzed data
        fig, axes = plt.subplots(len(param_list), 1,
                                 sharey=False, sharex=True)
        axes = np.atleast_1d(axes)
        for i, ax in enumerate(axes):
            # lab = r"${0:d}^3\times{1:d}$, $\beta={2:.2f}$".format(
            #     data[i]["N"], data[i]["N"]*2, data[i]["beta"])
            lab = data[i]["ensemble_name"]
            weights = np.ones_like(data[i]["data"][t_f])
            weights /= len(data[i]["data"][t_f])
            ax.hist(data[i]["data"][t_f], bins=NBins, label=lab,
                    weights=weights)
            ax.legend(loc="upper right")
            ax.grid(True)
            ax.set_xlim(-xlim, xlim)
            if i == 1:
                ax.set_ylabel(r"Hits (normalized)")
            elif i == 2:
                ax.set_xlabel(r"$Q$")

        # Sets up figure
        figpath = figures_folder
        if not os.path.isdir(figpath):
            figpath = "../" + figpath
        check_folder(figpath, verbose=default_params["verbose"])
        figpath = os.path.join(figpath, "topc_modes_tf{}.pdf".format(t_f))
        fig.savefig(figpath)
        print "Figure saved at {0:s}".format(figpath)
        plt.close(fig)
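if __name__ == "__main__":
    # Minimal driver sketch: which analysis to run (and with which flags) is a
    # per-run choice, not the original configuration. Enable the ones needed.
    main_analysis(run_pre_analysis=True, run_post_analysis=True)
    # thermalization_analysis()
    # lattice_updates_analysis()
    # topc_modes_analysis()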