def create_subsection_figures(props, dim_rows, dim_cols, exp_prefix):
    """Build the 'Figures' report subsection (MSE-ratio plot and progression grid).

    NOTE(review): the dim_rows/dim_cols arguments are overwritten below before
    use, and exp_prefix is never read; all three are kept only for interface
    compatibility with callers.
    """
    if len(props) == 0:
        print("No props: plots were not generated.")
        return
    section = reporting.Section("Figures", [])

    # Ratio of runs whose best training MSE falls under each x-axis threshold.
    mse_of = lambda p: float(p["result.best.trainMSE"])
    below = lambda v, v_xaxis: v <= v_xaxis
    num_points = 50  # number of points per plot line
    lo, hi = 0.0, 1e0
    x_values = np.linspace(lo, hi, num_points)
    tick_marks = np.arange(lo, hi, hi / 10)
    fig_path = "results/figures/ratioMSE.pdf"
    plotter.plot_ratio_meeting_predicate(props, mse_of, below,
                                         xs=x_values, xticks=tick_marks,
                                         show_plot=False,
                                         title="Ratio of solutions with MSE under the certain level",
                                         xlabel="MSE",
                                         series_dim=dim_method,
                                         xlogscale=False,
                                         savepath=fig_path)
    section.add(reporting.FloatFigure(fig_path.replace("results/", "")))

    # Illustration of individual runs and their errors on training and validation sets
    fig_path = "results/figures/progressionGrid.pdf"
    grid_rows = get_benchmarks_from_props(props, ignoreNumTests=True)
    grid_cols = (dim_methodGP * dim_all + dim_methodCDGP * dim_all + dim_methodCDGPprops * dim_weight) * \
                dim_benchmarkNumTests  # * dim_optThreshold
    plotter.plot_value_progression_grid_simple(props, grid_rows, grid_cols,
                                               ["cdgp.logTrainSet", "cdgp.logValidSet"],
                                               ["train", "valid"],
                                               plot_individual_runs=True,
                                               savepath=fig_path)
    section.add(reporting.FloatFigure(fig_path.replace("results/", "")))
    return section
def create_section(title, desc, props, subsects, figures_list, exp_prefix):
    """Assemble a report section from a description, prebuilt subsections, and figures.

    NOTE(review): props is only type-checked and exp_prefix is never read;
    both are kept for interface compatibility with callers.
    """
    assert isinstance(title, str)
    assert isinstance(desc, str)
    assert isinstance(props, list)
    assert isinstance(figures_list, list)

    section = reporting.Section(title, [])
    section.add(reporting.BlockLatex(desc + "\n"))
    for subsection in subsects:
        section.add(subsection)
    # Reference the figures previously written to the results directory.
    for figure_path in figures_list:
        section.add(reporting.FloatFigure(figure_path))
    section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
    return section
def create_section_with_results(title, desc, folders, numRuns=10, use_bench_simple_names=True,
                                print_status_matrix=True):
    """Load experiment results from *folders* and build a report section of LaTeX tables.

    :param title: section title; also used as the name when loading props.
    :param desc: LaTeX description placed at the top of the section.
    :param folders: list of folders containing experiment result files.
    :param numRuns: expected number of runs per configuration; upper bound of
        the color map for the STATUS table.
    :param use_bench_simple_names: if True, benchmark captions are replaced by
        their short names from benchmarks_simple_names.
    :param print_status_matrix: if True, prints a completeness matrix of the
        expected configurations.
    :return: a reporting.Section, or None when no folders were given.
    """
    # BUGFIX: the original checked `folders is None` only AFTER asserting
    # isinstance(folders, list) and iterating it, so the None branch was
    # unreachable (AssertionError fired first). Guard before any use.
    if folders is None or len(folders) == 0:
        return None
    assert isinstance(title, str)
    assert isinstance(desc, str)
    assert isinstance(folders, list)
    print("\n*** Processing: {0}***".format(title))
    for f in folders:
        print(f)
    props = load_correct_props(folders, name=title)
    # Create figures in the appropriate directory
    plot_figures(props)

    # Uncomment this to print names of files with results of a certain configuration
    # print("\n(** {0} **) props_meeting the property:".format(title[:15]))
    # for p in props:
    #     if float(p["cdgp.solverTimeMaxSec"]) >= 2.0:
    #         print(p["thisFileName"] + ", " + p["cdgp.solverTimeMaxSec"])

    def post(s):
        # Postprocess a LaTeX table: right-align numeric columns (keeping the
        # first column left-aligned) and unescape the subscripts in selection names.
        return s.replace("{ccccccccccccc}", "{rrrrrrrrrrrrr}").replace("{rrr", "{lrr")\
                .replace(r"\_{lex}", "_{lex}").replace(r"\_{", "_{")

    dim_benchmarks = Dim.from_dict(props, "benchmark")

    if print_status_matrix:
        d = dim_benchmarks * dim_methodCDGP * dim_testsRatio * dim_sa + \
            dim_benchmarks * dim_methodGPR * dim_testsRatioGPR * dim_sa
        matrix = produce_status_matrix(d, props)
        print("\n****** Status matrix:")
        print(matrix + "\n")

    if use_bench_simple_names:
        configs = [Config(benchmarks_simple_names.get(c.get_caption(), c.get_caption()), c.filters[0][1])
                   for c in dim_benchmarks.configs]
        dim_benchmarks = Dim(configs)
        dim_benchmarks.sort()

    # --------------------- Shared stats ---------------------
    # -------------- Dimensions -----------------
    dim_cols = dim_methodCDGP * dim_ea_type * dim_sel * dim_testsRatio + \
               dim_methodGPR * dim_ea_type * dim_sel * dim_testsRatioGPR + \
               dim_methodFormal
    dim_rows = dim_benchmarks.sort() + dim_true
    # dim_cols = dim_method * dim_sa
    # -------------------------------------------
    vb = 1  # vertical border

    def _table(label, fun, low, mid, high):
        # Print the raw table for *label*, then return its color-mapped LaTeX.
        # Reads dim_rows/dim_cols from the enclosing scope at call time, so the
        # dim_cols reassignment below (CDGP stats) is honored by later calls.
        print(label)
        text = post(printer.latex_table(props, dim_rows, dim_cols, fun,
                                        layered_headline=True, vertical_border=vb))
        return printer.table_color_map(text, low, mid, high,
                                       "colorLow", "colorMedium", "colorHigh")

    latex_status = _table("STATUS", get_num_computed, 0.0, numRuns/2, numRuns)
    latex_successRates = _table("SUCCESS RATES", fun_successRates, 0.0, 0.5, 1.0)
    latex_avgRuntime = _table("AVG RUNTIME", get_avg_runtime, 0.0, 1800.0, 3600.0)
    latex_avgRuntimeOnlySuccessful = _table("AVG RUNTIME (SUCCESSFUL)",
                                            get_avg_runtimeOnlySuccessful, 0.0, 1800.0, 3600.0)
    # latex_successRatesFull = _table("SUCCESS RATES (FULL INFO)", fun_successRates_full, 0.0, 0.5, 1.0)
    # latex_sizes = _table("AVG SIZES", get_stats_size, 0.0, 100.0, 200.0)
    latex_sizesOnlySuccessful = _table("AVG SIZES (SUCCESSFUL)",
                                       get_stats_sizeOnlySuccessful, 0.0, 100.0, 200.0)

    # --------------------- CDGP stats ---------------------
    # -------------- Dimensions -----------------
    dim_cols = dim_methodCDGP * dim_ea_type * dim_sel * dim_testsRatio + \
               dim_methodGPR * dim_ea_type * dim_sel * dim_testsRatioGPR
    # -------------------------------------------
    latex_avgBestOfRunFitness = _table("AVG BEST-OF-RUN FITNESS", get_avg_fitness, 0.0, 0.5, 1.0)
    latex_avgTotalTests = _table("AVG TOTAL TESTS", get_avg_totalTests, 0.0, 1000.0, 2000.0)
    latex_avgRuntimePerProgram = _table("AVG RUNTIME PER PROGRAM",
                                        get_avg_runtimePerProgram, 0.01, 1.0, 2.0)
    latex_avgGeneration = _table("AVG GENERATION", get_avg_generation, 0.0, 50.0, 100.0)
    latex_avgGenerationSuccessful = _table("AVG GENERATION (SUCCESSFUL)",
                                           get_avg_generationSuccessful, 0.0, 50.0, 100.0)
    latex_maxSolverTimes = _table("MAX SOLVER TIME", get_stats_maxSolverTime, 0.0, 0.5, 1.0)
    latex_avgSolverTimes = _table("AVG SOLVER TIME", get_stats_avgSolverTime, 0.0, 0.015, 0.03)
    latex_avgSolverTotalCalls = _table("AVG NUM SOLVER CALLS", get_avgSolverTotalCalls, 1e1, 1e2, 1e4)
    latex_numSolverCallsOverXs = _table("NUM SOLVER CALLS > 0.5s", get_numSolverCallsOverXs, 0, 50, 100)

    # (subsection title, color-mapped LaTeX table, color scheme) triples.
    subsects_main = [
        ("Status (correctly finished processes)", latex_status, reversed(reporting.color_scheme_red)),
        ("Success rates", latex_successRates, reporting.color_scheme_green),
        ("Average runtime [s]", latex_avgRuntime, reporting.color_scheme_violet),
        ("Average runtime (only successful) [s]", latex_avgRuntimeOnlySuccessful, reporting.color_scheme_violet),
        # ("Average sizes of best of runs (number of nodes)", latex_sizes, reporting.color_scheme_yellow),
        ("Average sizes of best of runs (number of nodes) (only successful)", latex_sizesOnlySuccessful, reporting.color_scheme_yellow),
    ]
    subsects_cdgp = [
        ("Average best-of-run ratio of passed tests", latex_avgBestOfRunFitness, reporting.color_scheme_green),
        ("Average sizes of $T_C$ (total tests in run)", latex_avgTotalTests, reporting.color_scheme_blue),
        ("Average generation (all)", latex_avgGeneration, reporting.color_scheme_teal),
        ("Average generation (only successful)", latex_avgGenerationSuccessful, reporting.color_scheme_teal),
        ("Approximate average runtime per program [s]", latex_avgRuntimePerProgram, reporting.color_scheme_brown),
        ("Max solver time per query [s]", latex_maxSolverTimes, reporting.color_scheme_violet),
        ("Avg solver time per query [s]", latex_avgSolverTimes, reporting.color_scheme_brown),
        ("Avg number of solver calls (in thousands; 1=1000)", latex_avgSolverTotalCalls, reporting.color_scheme_blue),
        ("Number of solver calls $>$ 0.5s", latex_numSolverCallsOverXs, reporting.color_scheme_blue),
    ]
    figures_cdgp = [
        "figures/ratioEvaluated_correctVsAllRuns.pdf",
        "figures/ratioTime_correctVsAllCorrect.pdf",
        "figures/ratioTime_endedVsAllEnded.pdf",
    ]

    def get_content_of_subsections(subsects):
        # Wrap each (title, table, color scheme) triple in a relative subsection.
        content = []
        vspace = reporting.BlockLatex(r"\vspace{0.75cm}"+"\n")
        for title, table, cs in subsects:
            if isinstance(cs, reporting.ColorScheme3):
                cs = cs.toBlockLatex()
            sub = reporting.SectionRelative(title, contents=[cs, reporting.BlockLatex(table + "\n"), vspace])
            content.append(sub)
        return content

    section = reporting.Section(title, [])
    section.add(reporting.BlockLatex(desc + "\n"))
    section.add(reporting.Subsection("Shared Statistics", get_content_of_subsections(subsects_main)))
    section.add(reporting.Subsection("CDGP Statistics", get_content_of_subsections(subsects_cdgp)))
    for f in figures_cdgp:
        section.add(reporting.FloatFigure(f))
    section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
    return section