Example #1
def best_fronts_color_nondom(args):
    boot_size = int(args["--bootstrap"])
    results_dir = args["--dir"]
    plots_dir = Path(args["-o"])
    scoring = defaultdict(list)
    global_scoring = defaultdict(list)
    for problem_name, problem_mod, algorithms in serialization.each_result(
        BudgetResultsExtractor(), results_dir
    ):
        for algo_name, results in algorithms:
            best_result = find_acceptable_result_for_budget(list(results), boot_size)
            """:type: RunResultBudget """

            if best_result and algo_name in algos:
                best_value = best_result["results"][0]
                scoring[problem_name, problem_mod].append((algo_name, best_value))
                global_scoring[problem_name].extend(
                    tuple(v) for v in best_value.fitnesses
                )

    for problem_name in set(global_scoring):
        global_scoring[problem_name] = metrics.filter_not_dominated(
            global_scoring[problem_name]
        )

    for problem_name, problem_mod in scoring:
        ax, f = plot_problem_front(
            problem_mod.pareto_front, multimodal=problem_name == "ZDT3"
        )
        for algo_name, best_value in scoring[(problem_name, problem_mod)]:
            plot_results(ax, best_value, algo_name, global_scoring[problem_name])
        save_plot(ax, f, problem_mod, plots_dir)
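The plot above relies on metrics.filter_not_dominated to keep only the globally non-dominated fitness points per problem. A minimal sketch of such a filter, assuming all objectives are minimized (the project's own implementation may differ):

def filter_not_dominated_sketch(points):
    # keep every point that no other point dominates
    def dominates(a, b):
        # a dominates b if it is no worse in every objective and strictly better in at least one
        return all(x <= y for x, y in zip(a, b)) and any(x < y for x, y in zip(a, b))

    return [p for p in points if not any(dominates(q, p) for q in points if q != p)]

# (2.0, 2.0) is dominated by (1.0, 1.0) and is dropped:
print(filter_not_dominated_sketch([(1.0, 1.0), (2.0, 2.0), (0.5, 3.0)]))
# -> [(1.0, 1.0), (0.5, 3.0)]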
Example #2
def pictures_summary(args):
    logger = logging.getLogger(__name__)
    logger.debug("pictures_summary")

    selected = set(args["--selected"].upper().split(","))
    boot_size = int(args["--bootstrap"])
    results_dir = args["--dir"]
    plots_dir = Path(args["-o"])

    logger.debug("Plotting summary with selected algos: " + ",".join(selected))

    scoring = collections.defaultdict(lambda: collections.defaultdict(dict))
    problems = set()

    with log_time(process_time, logger, "Preparing data done in {time_res:.3f}"):
        for problem_name, problem_mod, algorithms in serialization.each_result(
            BudgetResultsExtractor(), results_dir
        ):
            problems.add(problem_name)
            problem_score = collections.defaultdict(list)
            algos = list(algorithms)
            for algo_name, results in algos:
                max_result = find_acceptable_result_for_budget(list(results), boot_size)
                if max_result:
                    print(
                        "{}, {} , budget={}".format(
                            problem_name, algo_name, max_result["budget"]
                        )
                    )
                    for metric_name, metric_name_long, data_process in max_result[
                        "analysis"
                    ]:
                        if metric_name in ranking.best_func:
                            data_process = list(x() for x in data_process)
                            data_analysis = yield_analysis(data_process, boot_size)

                            score = math.log(
                                math.fabs(data_analysis["btstrpd"]["metrics"]) + 1.0
                            )

                            scoring[metric_name][algo_name][problem_name] = score
                            problem_score[metric_name].append((algo_name, score))
                else:
                    print("{}, {}, NO BUDGET".format(problem_name, algo_name))

            for metric_name in scoring:
                if metric_name != "pdi":

                    max_score = (
                        max(x for algo, x in problem_score[metric_name]) + 0.0001
                    )
                    for algo_name, _ in algos:
                        if (
                            algo_name in scoring[metric_name]
                            and problem_name in scoring[metric_name][algo_name]
                        ):
                            scoring[metric_name][algo_name][problem_name] /= max_score

    plot_results_summary(problems, scoring, selected, plots_dir)
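The scoring above log-damps each bootstrapped metric value and then normalizes it by the per-problem maximum (plus a small epsilon to avoid dividing by zero). A standalone sketch of that normalization with made-up values:

import math

raw = {"algo_a": 12.5, "algo_b": 3.2, "algo_c": 0.0}  # hypothetical bootstrapped metric values
scores = {name: math.log(math.fabs(value) + 1.0) for name, value in raw.items()}
max_score = max(scores.values()) + 0.0001
normalized = {name: score / max_score for name, score in scores.items()}
print(normalized)  # every value lies in [0, 1); the largest raw score ends up just under 1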
Example #3
def rank(args):
    # plot_pareto_fronts()

    logger = logging.getLogger(__name__)
    logger.debug("ranking")

    boot_size = int(args["--bootstrap"])

    scoring = collections.defaultdict(list)

    with log_time(process_time, logger,
                  "Preparing data done in {time_res:.3f}"):
        for problem_name, problem_mod, algorithms in serialization.each_result(
                BudgetResultsExtractor(), RESULTS_DIR):
            for algo_name, results in algorithms:
                max_budget_result = find_acceptable_result_for_budget(
                    list(results), boot_size)
                if max_budget_result:
                    for (
                            metric_name,
                            metric_name_long,
                            data_process,
                    ) in max_budget_result["analysis"]:
                        if metric_name in best_func:

                            data_process = list(x() for x in data_process)
                            data_analysis = yield_analysis(
                                data_process, boot_size)

                            score = data_analysis["btstrpd"]["metrics"]
                            scoring[(problem_name, metric_name)].append(
                                (algo_name, score))

    global_scoring = collections.defaultdict(collections.Counter)

    print("Problem ranking\n################")
    for problem_name, metric_name in scoring:
        metric_scoring = scoring[(problem_name, metric_name)]
        algo_win, score = best_func[metric_name](metric_scoring,
                                                 key=lambda x: x[1])
        print("{}, {} : {}".format(problem_name, metric_name, algo_win))

        weak_winners = get_weak_winners(metric_scoring, (algo_win, score),
                                        winner_tolerance[metric_name])
        # if not weak_winners:
        #     global_scoring[metric_name].update([algo_win])
        if not weak_winners:
            global_scoring[metric_name].update([algo_win, algo_win])
        else:
            global_scoring[metric_name].update(
                [algo_win] + [algo for algo, score in weak_winners])

    print("\nGlobal ranking\n##############")
    for metric_name in global_scoring:
        print("{} : ".format(metric_name) + ", ".join(
            "{} ({})".format(score[0], score[1])
            for score in global_scoring[metric_name].most_common()))
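The Counter-based point scheme above gives a strong winner (no weak winners within tolerance) two points, and otherwise one point each to the winner and every weak winner. A tiny illustration with hypothetical algorithm names:

import collections

global_scoring = collections.defaultdict(collections.Counter)
global_scoring["igd"].update(["NSGAII", "NSGAII"])  # strong win on one problem -> 2 points
global_scoring["igd"].update(["NSGAII", "SPEA2"])   # weak win on another       -> 1 point each
print(global_scoring["igd"].most_common())          # [('NSGAII', 3), ('SPEA2', 1)]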
Example #4
def analyse_results(*args, **kwargs):
    for problem_name, problem_mod, algorithms in serialization.each_result(
        BudgetResultsExtractor()
    ):
        for algo_name, budgets in algorithms:
            for result in budgets:
                print(
                    "{:9} {:14} {:>4} {:>2}".format(
                        problem_name,
                        algo_name,
                        result["budget"],
                        len(result["results"]),
                    )
                )
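The fixed-width format spec prints one aligned row per stored budget; a quick check of the same spec with made-up values:

# {:9} and {:14} left-align the problem and algorithm names,
# {:>4} and {:>2} right-align the budget and run count
print("{:9} {:14} {:>4} {:>2}".format("ZDT3", "NSGAII", 4500, 30))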
Example #5
def pictures_from_stats(args):
    # plot_pareto_fronts()

    logger = logging.getLogger(__name__)
    logger.debug("pictures from stats")

    boot_size = int(args["--bootstrap"])
    results_dir = args["--dir"]
    plots_dir = Path(args["-o"])

    results = collections.defaultdict(list)
    with log_time(process_time, logger, "Preparing data done in {time_res:.3f}"):
        for problem_name, problem_mod, algorithms in serialization.each_result(
            BudgetResultsExtractor(), results_dir
        ):
            for algo_name, budgets in algorithms:
                for result in budgets:
                    # the first analysis entry is consumed here as the cost metric
                    _, _, cost_data = next(result["analysis"])
                    cost_data = list(x() for x in cost_data)
                    cost_analysis = yield_analysis(cost_data, boot_size)

                    budget = cost_analysis["btstrpd"]["metrics"]
                    budget_err = cost_analysis["stdev"]

                    for metric_name, metric_name_long, data_process in result[
                        "analysis"
                    ]:
                        if metric_name in best_func:
                            if metric_name == "dst from pareto":
                                metric_name = "dst"
                            data_process = list(x() for x in data_process)

                            data_analysis = yield_analysis(data_process, boot_size)

                            score = data_analysis["btstrpd"]["metrics"]
                            score_err = data_analysis["stdev"]

                            keys = [
                                (problem_name, algo_name, metric_name, group)
                                for group in algos_groups[algo_name]
                            ]
                            value = (budget, budget_err, score, score_err)
                            print("PLOT: " + str(value))

                            for key in keys:
                                results[key].append(value)
    plot_results(results, plots_dir, (500, 4500))
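Each (budget, budget_err, score, score_err) tuple is fanned out to one key per plotting group of the algorithm. A minimal sketch of that fan-out with a hypothetical algos_groups mapping:

import collections

algos_groups = {"NSGAII": ("baseline", "all")}  # hypothetical grouping, not the project's
results = collections.defaultdict(list)

value = (4500, 12.0, 0.91, 0.02)  # made-up (budget, budget_err, score, score_err)
for group in algos_groups["NSGAII"]:
    results[("ZDT3", "NSGAII", "igd", group)].append(value)

print(sorted(results))  # the same point is stored once per group key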
Example #6
def detailed_rank(args):
    # plot_pareto_fronts()

    logger = logging.getLogger(__name__)
    logger.debug("detailed ranking")

    boot_size = int(args["--bootstrap"])

    for result_set in result_dirs:
        print("***{}***".format(result_set))
        scoring = collections.defaultdict(list)

        with log_time(process_time, logger,
                      "Preparing data done in {time_res:.3f}"):
            for problem_name, problem_mod, algorithms in serialization.each_result(
                    BudgetResultsExtractor(), result_set):
                for algo_name, results in algorithms:
                    for result in results:
                        if validate_cost(result, boot_size):
                            for metric_name, metric_name_long, data_process in result[
                                    "analysis"]:
                                if metric_name in best_func:
                                    data_process = list(x()
                                                        for x in data_process)
                                    data_analysis = yield_analysis(
                                        data_process, boot_size)

                                    score = data_analysis["btstrpd"]["metrics"]
                                    scoring[(problem_name, result["budget"],
                                             metric_name)].append(
                                                 (algo_name, score))

        global_scoring = collections.defaultdict(collections.Counter)

        for problem_name, budget, metric_name in scoring:
            algo_win, score = best_func[metric_name](scoring[(problem_name,
                                                              budget,
                                                              metric_name)],
                                                     key=lambda x: x[1])
            global_scoring[(budget, metric_name)].update([algo_win])

        for budget, metric_name in sorted(global_scoring):
            print("{} {} : ".format(budget, metric_name) +
                  ", ".join("{} ({})".format(score[0], score[1])
                            for score in global_scoring[
                                (budget, metric_name)].most_common()))
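best_func is called here like the max/min builtins over (algorithm, score) pairs, so it is presumably a mapping from metric name to whichever of the two picks the better value. A sketch of that shape with hypothetical metric names:

best_func_sketch = {
    "hypervolume": max,  # larger hypervolume is better
    "igd": min,          # smaller inverted generational distance is better
}
scores = [("NSGAII", 0.91), ("SPEA2", 0.87)]
algo_win, score = best_func_sketch["hypervolume"](scores, key=lambda x: x[1])
print(algo_win, score)  # NSGAII 0.91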
Example #7
def best_fronts(args):
    boot_size = int(args["--bootstrap"])
    results_dir = args["--dir"]
    plots_dir = Path(args["-o"])
    for problem_name, problem_mod, algorithms in serialization.each_result(
        BudgetResultsExtractor(), results_dir
    ):
        original_front = problem_mod.pareto_front
        ax, f = plot_problem_front(original_front, multimodal=problem_name == "ZDT3")

        for algo_name, results in algorithms:
            best_result = find_acceptable_result_for_budget(list(results), boot_size)
            """:type: RunResultBudget """

            if best_result and algo_name in algos:
                plot_results(ax, best_result["results"][0], algo_name)
        save_plot(ax, f, problem_mod, plots_dir)
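These command handlers read a docopt-style option mapping; a hypothetical invocation could look like the following (the values are made up, not the project's defaults):

args = {
    "--bootstrap": "10000",  # bootstrap sample size, parsed with int() by the handlers
    "--dir": "results",      # where serialized run results are read from
    "-o": "plots",           # output directory for the generated figures
}
best_fronts(args)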
Example #8
def table_rank(args):
    logger = logging.getLogger(__name__)
    logger.debug("table ranking")

    boot_size = int(args["--bootstrap"])

    results = collections.defaultdict(
        lambda: collections.defaultdict(collections.Counter))

    for result_set in result_dirs:
        print("***{}***".format(result_set))
        with log_time(process_time, logger,
                      "Preparing data done in {time_res:.3f}"):
            for problem_name, problem_mod, algorithms in serialization.each_result(
                    BudgetResultsExtractor(), result_set):
                print(result_set, problem_name)
                scoring = collections.defaultdict(list)
                for algo_name, results_data in algorithms:
                    results_data = list(results_data)

                    for i in range(len(results_data)):
                        original_budget = results_data[i]["budget"]
                        if original_budget == 40:
                            result = results_data[i]
                            # result = find_acceptable_result_for_budget(
                            #     results_data[: i + 1], boot_size
                            # )
                            if result:
                                print("{} {} {} -> {}".format(
                                    problem_name,
                                    algo_name,
                                    original_budget,
                                    result["budget"],
                                ))
                                for metric_name, metric_name_long, data_process in result[
                                        "analysis"]:
                                    if metric_name in best_func:
                                        data_process = list(
                                            x() for x in data_process)
                                        data_analysis = yield_analysis(
                                            data_process, boot_size)

                                        score = data_analysis["btstrpd"][
                                            "metrics"]
                                        scoring[(original_budget,
                                                 metric_name)].append(
                                                     (algo_name, score))
                print("****{}****".format(problem_name))
                for budget, metric_name in sorted(scoring):
                    metric_scoring = scoring[(budget, metric_name)]
                    algo_win, score = best_func[metric_name](
                        metric_scoring, key=lambda x: x[1])
                    weak_winners = get_weak_winners(
                        metric_scoring, (algo_win, score),
                        winner_tolerance[metric_name])

                    # # Only strong
                    # if not weak_winners:
                    #     results[(budget, metric_name)][result_set].update([algo_win])

                    # Strong = 2 points, Weak or Winner = 1 point
                    if not weak_winners:
                        results[(budget, metric_name)][result_set].update(
                            [algo_win, algo_win])
                    else:
                        results[(budget, metric_name)][result_set].update(
                            [algo_win] +
                            [algo for algo, score in weak_winners])

                    # print('{} {} {}'.format(budget, metric_name, scoring[(budget, metric_name)]))
                    print("*****{} {}".format(budget, metric_name))
                    if not weak_winners:
                        print("*****Strong winner: {} :{}".format(
                            algo_win, score))
                    else:
                        print("*****Winner: {} :{}".format(algo_win, score))
                        print("*****Weak winners: {}".format(weak_winners))

    print("""\\begin{table}[ht]
  \\centering
    \\caption{Final results}
    \\label{tab:results}
    \\resizebox{\\textwidth}{!}{%
    \\begin{tabular}{  r@{ }l | c | c | c | }
          \\multicolumn{2}{c}{}
        & $K_0$
        & $K_1$
        & $K_2$
      \\\\ \\hline""")

    previous_budget = None
    for budget, metric_name in sorted(
            sorted(results.keys(), key=lambda x: metrics_order.index(x[1])),
            key=lambda x: x[0],
    ):
        budget_label = str(budget) + " "
        if previous_budget and previous_budget != budget:
            print("\\hdashline")
        elif previous_budget:
            budget_label = ""

        score_str = ""
        for result_set in result_dirs:
            results_counter = results[(budget, metric_name)][result_set]
            algo_ranking = results_counter.most_common(2)
            values = list(results_counter.values())
            if len(algo_ranking) == 2:
                winner = format_result(algo_ranking[0], values, 2)
                second = format_result(
                    algo_ranking[1],
                    values,
                    1 if algo_ranking[0][1] != algo_ranking[1][1] else 2,
                )
                score_str += "& {}, {}".format(winner, second)
            elif len(algo_ranking) == 1:
                winner = format_result(algo_ranking[0], values, 2)
                score_str += "& {}".format(winner)
            else:
                score_str += "& "
        print("{}& {} {}\\\\".format(budget_label, metric_name, score_str))
        previous_budget = budget

    print("""    \\end{tabular}}\n\\end{table}""")
Example #9
def violin(args):
    global_data = collections.defaultdict(dict)

    boot_size = int(args["--bootstrap"])
    results_dir = args["--dir"]
    plots_dir = Path(args["-o"])

    for problem_name, problem_mod, algorithms in serialization.each_result(
            BudgetResultsExtractor(), results_dir):
        for algo_name, results in algorithms:
            max_result = find_acceptable_result_for_budget(
                list(results), boot_size)
            if max_result:
                for metric_name, metric_name_long, data_process in max_result[
                        "analysis"]:
                    if metric_name in best_func:
                        data_process = list(x() for x in data_process)
                        global_data[(problem_name,
                                     metric_name)][algo_name] = data_process

    print(global_data[("UF2", "pdi")])
    print()
    for problem, metric in global_data:
        try:
            algo_data = global_data[(problem, metric)]

            accepted_algos = [
                algo_name for algo_name in algos_order
                if algo_name in algo_data
                and algo_data[algo_name] != [0.0] * len(algo_data[algo_name])
            ]

            data = prepare_data(
                [algo_data[algo_name] for algo_name in accepted_algos])
            if metric == "pdi":
                print(data)
            # if problem == 'UF2' and metric == 'pdi':
            #     print(data)
            if data:
                with plt_figure():
                    plt.figure(num=None, facecolor="w", edgecolor="k")
                    # plt.yscale('log')
                    x_index = range(1, len(accepted_algos) + 1)
                    plt.ylabel(metric, fontsize=20)
                    plt.xticks(
                        x_index,
                        [algos[algo_name][0] for algo_name in accepted_algos],
                        rotation=80,
                    )
                    for i in x_index:
                        plt.axvline(i, lw=0.9, c="#AFAFAF", alpha=0.5)
                    plt.tick_params(axis="both", labelsize=15)

                    result = plt.violinplot(
                        data,
                        showmeans=True,
                        showextrema=True,
                        showmedians=True,
                        widths=0.8,
                    )

                    for pc in result["bodies"]:
                        pc.set_facecolor("0.8")
                        # pc.set_sizes([0.8])

                    result["cbars"].set_color("black")
                    result["cmeans"].set_color("black")
                    result["cmins"].set_color("black")
                    result["cmaxes"].set_color("black")
                    result["cmedians"].set_color("black")

                    result["cmeans"].set_linewidths([2])

                    plt.tight_layout()
                    # os.makedirs(PLOTS_DIR, exist_ok=True)
                    # os.makedirs(os.path.join(PLOTS_DIR, 'plots_violin'), exist_ok=True)
                    problem_moea = problem.replace("emoa", "moea")
                    metric_short = metric.replace("distance from Pareto front",
                                                  "dst")
                    fig_path = (plots_dir / "plots_violin" /
                                "figures_violin_{}_{}.eps".format(
                                    problem_moea, metric_short))
                    fig_path_pdf = (plots_dir / "plots_violin" /
                                    "figures_violin_{}_{}.pdf".format(
                                        problem_moea, metric_short))
                    with suppress(FileExistsError):
                        fig_path.parent.mkdir(parents=True)
                    print(fig_path)
                    plt.savefig(str(fig_path))
                    plt.savefig(str(fig_path_pdf))
        except KeyError as e:
            print("Missing algo: {}, (problem: {}, metrics: {}".format(
                e, problem, metric))
        except LinAlgError as e:
            print("Zero vector? : {}, {}: {}".format(problem, metric, e))
Example #10
def statistics(args):
    logger = logging.getLogger(__name__)

    badbench = []
    cost_badbench = []
    boot_size = int(args["--bootstrap"])

    # pretty format
    screen_width = sum(y for x in fields for y in x[1]) + 4 * len(fields)
    [err_prefix] = [
        i for i, (name, lens, fmt) in enumerate(fields)
        if name == "RESULT, confidence interval"
    ]
    err_prefix = sum(y for x in fields[:err_prefix]
                     for y in x[1]) + 4 * err_prefix - 2
    err_prefix = " " * err_prefix

    def print_header():
        print()
        print(
            "..".join("[{0:^{1}}]".format(head, sum(width))
                      for head, width, var in fields) + "..",
            flush=True,
        )
        return True

    with close_and_join(multiprocessing.Pool(min(int(args["-j"]), 8))) as p:
        for problem_name, problem_mod, algorithms in serialization.each_result(
                BudgetResultsExtractor()):
            for algo_name, budgets in algorithms:
                header_just_printed = print_header()

                for result in budgets:
                    len_data = len(result["results"])

                    first_budget_line = True
                    avg_pop_len = average(
                        [len(x.population) for x in result["results"]])

                    with log_time(
                            process_time,
                            logger,
                            "Calculating metrics for {} :: {} :: {} in {{time_res:.3f}}s"
                            .format(problem_name, algo_name, result["budget"]),
                    ):

                        results_precalc = p.map(
                            force_data,
                            zip(repeat(boot_size), result["analysis"]),
                            chunksize=1,
                        )

                        for (
                                metric_name,
                                metric_name_long,
                                data_process,
                                analysis,
                        ) in results_precalc:
                            if first_budget_line and not header_just_printed:
                                if screen_width % 2 == 1:
                                    print("-" + " -" * (screen_width // 2))
                                else:
                                    print(" -" * (screen_width // 2))
                            first_budget_line = False

                            # each column template in `fields` pulls its values
                            # (e.g. metric_name, avg_pop_len, len_data) from locals()
                            columns = []
                            for i, (head, width, var) in enumerate(fields):
                                columns.append(var.format(*width, **locals()))

                            # the data
                            print("", " :: ".join(columns), ":: ", flush=True)
                            header_just_printed = False

                            if analysis["goodbench"] != "✓":
                                lower_process = analysis["lower"]
                                upper_process = analysis["upper"]
                                low_out_fence_process = analysis[
                                    "low_out_fence"]
                                upp_out_fence_process = analysis[
                                    "upp_out_fence"]
                                stdev_process = analysis["stdev"]
                                mean_process = analysis["mean"]

                                # count values outside the [lower, upper] interval
                                outliers = len([
                                    x for x in data_process
                                    if not lower_process <= x <= upper_process
                                ])
                                print(
                                    "{err_prefix}:: Suspicious result analysis:\n"
                                    "{err_prefix}::             {0:>2} / {1:2} ({4:7.3f}%) out of [ {2:>18.13} ; {3:<18.13} ]\n"
                                    "{err_prefix}::                                                            Δ {7:<18.13}\n"
                                    "{err_prefix}::                               Bounds: [ {5:>18.13} ; {6:<18.13} ]\n"
                                    "{err_prefix}::                                                            Δ {8:<18.13}"
                                    .format(
                                        outliers,
                                        len(data_process),
                                        lower_process,
                                        upper_process,
                                        100.0 * outliers / len(data_process),
                                        min(data_process),
                                        max(data_process),
                                        upper_process - lower_process,
                                        max(data_process) - min(data_process),
                                        err_prefix=err_prefix,
                                    ))
                                print("{err_prefix}:: Values".format(
                                    err_prefix=err_prefix))

                                def aux(x):
                                    try:
                                        return (abs(x - mean_process) * 100.0 /
                                                stdev_process)
                                    except ZeroDivisionError:
                                        return float("inf")

                                print(
                                    "".join(
                                        "{err_prefix}:: {0:>30.20}  = avg {1:<+30} = avg {3:+8.3f}% ⨉ σ | {2:17} {4:17} {5:17}\n"
                                        .format(
                                            x,
                                            x - mean_process,
                                            not (lower_process <= x <= upper_process)
                                            and "(out of mean±3σ)" or "",
                                            aux(x),
                                            ((low_out_fence_process <= x <
                                              analysis["low_inn_fence"]) or
                                             (analysis["upp_inn_fence"] <= x <
                                              upp_out_fence_process))
                                            and " (mild outlier)" or "",
                                            ((x < low_out_fence_process) or
                                             (upp_out_fence_process < x))
                                            and "(EXTREME outlier)" or "",
                                            err_prefix=err_prefix,
                                        ) for x in data_process),
                                    end="",
                                )
                                if abs(analysis["mean_nooutliers_diff"]
                                       ) > 10.0:
                                    badbench.append([
                                        problem_name,
                                        algo_name,
                                        result["budget"],
                                        metric_name_long,
                                    ])
                                    print(err_prefix + "::", "#" * 22,
                                          "#" * 67, "#" * 22)
                                    print(
                                        err_prefix + "::",
                                        "#" * 22,
                                        "Mean of results changed a lot (> 10%), so probably UNTRUSTED result",
                                        "#" * 22,
                                    )
                                    print(err_prefix + "::", "#" * 22,
                                          "#" * 67, "#" * 22)
                                else:
                                    print(
                                        err_prefix + "::",
                                        "Mean of results changed a little (< 10%), so probably that's all okay",
                                    )

    if badbench:
        print("#" * 237)
        for i in badbench:
            print(">>> " + " :: ".join(str(x) for x in i))