Code example #1
0
def problems_runner(list_args):
    problems = [list_args[0]]
    Configurations["Universal"]["Population_Size"] = list_args[1]
    Configurations["Universal"]["No_of_Generations"] = list_args[2]

    # Wrap the tests in the jmoo core framework
    tests = jmoo_test(problems, algorithms)
    IGD_Results = []
    for problem in tests.problems:
        print problem.name, " ",
        for algorithm in tests.algorithms:
            for repeat in xrange(Configurations["Universal"]["Repeats"]):
                print repeat, " ",
                import sys
                sys.stdout.flush()
                initialPopulation(
                    problem, Configurations["Universal"]["Population_Size"])
                statBox = jmoo_evo(problem, algorithm, Configurations)

                resulting_pf = [[float(f) for f in individual.fitness.fitness]
                                for individual in statBox.box[-1].population]
                IGD_Results.append(IGD(resulting_pf, readpf(problem)))
                print IGD(resulting_pf, readpf(problem))
            IGD_Results = sorted(IGD_Results)

            results_string = ""
            results_string += "Problem Name: " + str(problem.name) + "\n"
            results_string += "Algorithm Name: " + str(algorithm.name) + "\n"
            results_string += "- Generated New Population" + "\n"
            results_string += "- Ran the algorithm for " + str(
                Configurations["Universal"]["Repeats"]) + "\n"
            results_string += "- The SBX crossover and mutation parameters are correct" + "\n"
            results_string += "Best: " + str(IGD_Results[0]) + "\n"
            results_string += "Worst: " + str(IGD_Results[-1]) + "\n"
            results_string += "Median: " + str(IGD_Results[int(
                len(IGD_Results) / 2)]) + "\n"

            filename = "./Testing/Algorithms/DE/Results/" + str(
                problem.name) + ".txt"
            f = open(filename, "w")
            f.write(results_string)
            f.close()
Code example #2
0
File: jmetal_igd_check.py  Project: spati2/storm
import os
import sys
import inspect

# Make the repository root (three directory levels above this test file)
# importable, so `from jmoo_core import *` below resolves.
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe()))[0], "../../..")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)

from jmoo_core import *
# Benchmark under test: DTLZ1 with 7 decision variables and 3 objectives.
problem = dtlz1(7, 3)
def readpf(problem):
    """Load the reference Pareto front for *problem*.

    The file lives at ./Testing/PF/<base-name>(<n-objectives>)-PF.txt, where
    the base name is the problem name up to the first underscore. Returns a
    list of rows, each a list of floats (one value per objective).
    """
    base_name = problem.name.split("_")[0]
    n_objectives = len(problem.objectives)
    path = "./Testing/PF/" + base_name + "(" + str(n_objectives) + ")-PF.txt"
    front = []
    for row in open(path, "r").readlines():
        front.append([float(token) for token in row.split()])
    return front

def read_file(filename):
    """Parse a jMetal output file and return the objective columns.

    Each non-blank line holds whitespace-separated floats (decision variables
    followed by objectives); the last OBJECTIVES values of every line are kept.

    Returns a list of lists of floats, one inner list per non-blank line.
    """
    OBJECTIVES = 3  # trailing objective columns per row (hoisted; was rebound
                    # per line, and an unused `decision = 7` local was dropped)
    list_objectives = []
    with open(filename, "r") as fh:  # was left unclosed by the original
        for line in fh:  # iterate lazily instead of readlines()
            if line == "\n":
                continue
            values = [float(a) for a in line.split()]
            list_objectives.append(values[len(values) - OBJECTIVES:])
    return list_objectives

# Read the jMetal-produced front (only the objective columns are kept).
resulting_pf = read_file("jmetal_PF")
os.chdir("../../..")  # this file is nested three levels deep; IGD and the
                      # ./Testing paths resolve relative to the repo root
from PerformanceMetrics.IGD.IGD_Calculation import IGD

# Compare jMetal's front against the reference Pareto front for the problem.
print IGD(resulting_pf, readpf(problem))
Code example #3
0
def igd_reporter(problems,
                 algorithms,
                 Configurations,
                 aggregate_measure=mean,
                 tag="IGD"):
    """Compute per-generation IGD scores from archived runs, write CSV tables
    and frontier dumps, and plot IGD-vs-evaluations charts per problem.

    Parameters
    ----------
    problems, algorithms : lists of jmoo problem/algorithm objects.
    Configurations : nested dict; reads Universal Population_Size,
        No_of_Generations and Repeats.
    aggregate_measure : callable folding a list of scores into one value
        (defaults to mean).
    tag : str label embedded in output filenames.

    Returns a dict {problem.name: {algorithm.name: aggregated score}}.
    Side effects: creates/writes files under ./Results/{Final_Frontier,
    Charts,Tables}/<date>/ and draws with matplotlib.
    """
    def get_data_from_archive(problems, algorithms, Configurations, function):
        # NOTE(review): the `function` parameter is accepted but never used
        # inside this helper — confirm whether it was meant to replace IGD.
        from PerformanceMeasures.DataFrame import ProblemFrame
        problem_dict = {}
        for problem in problems:
            data = ProblemFrame(problem, algorithms)

            # # finding the final frontier
            final_frontiers = data.get_frontier_values()

            # unpacking the final frontiers
            unpacked_frontier = []
            for key in final_frontiers.keys():
                for repeat in final_frontiers[key]:
                    unpacked_frontier.extend(repeat)

            # Vivek: I have noticed that some of the algorithms (specifically VALE8) produces duplicate points
            # which would then show up in nondominated sort and tip the scale in its favour. So I would like to remove
            # all the duplicate points from the population and then perform a non dominated sort
            old = len(unpacked_frontier)
            unpacked_frontier = list(set(unpacked_frontier))
            # len(...) - old == 0 means dedup removed nothing, i.e. no dupes.
            if len(unpacked_frontier) - old == 0:
                print "There are no duplicates!! check"

            # Find the non dominated solutions

            # change into jmoo_individual
            from jmoo_individual import jmoo_individual
            population = [
                jmoo_individual(problem, i.decisions, i.objectives)
                for i in unpacked_frontier
            ]

            # Vivek: I first tried to choose only the non dominated solutions. But then there are only few solutions
            # (in order of 1-2) so I am just doing a non dominated sorting with crowd distance
            actual_frontier = [
                sol.fitness.fitness for sol in get_non_dominated_solutions(
                    problem, population, Configurations)
            ]
            assert (len(actual_frontier) == Configurations["Universal"]
                    ["Population_Size"])

            # Build {generation: {algorithm: {repeat: {"IGD", "Evaluations"}}}}.
            generation_dict = {}
            for generation in xrange(
                    Configurations["Universal"]["No_of_Generations"]):
                #
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)

                algorithm_dict = {}
                for algorithm in algorithms:
                    repeat_dict = {}
                    for repeat in xrange(
                            Configurations["Universal"]["Repeats"]):
                        candidates = [
                            pop.objectives
                            for pop in population[algorithm.name][repeat]
                        ]
                        repeat_dict[str(repeat)] = {}
                        from PerformanceMetrics.IGD.IGD_Calculation import IGD
                        # Empty repeats are recorded as None so downstream code
                        # can filter them out.
                        if len(candidates) > 0:
                            repeat_dict[str(repeat)]["IGD"] = IGD(
                                actual_frontier, candidates)
                            repeat_dict[str(
                                repeat)]["Evaluations"] = evaluations[
                                    algorithm.name][repeat]
                        else:
                            repeat_dict[str(repeat)]["IGD"] = None
                            repeat_dict[str(repeat)]["Evaluations"] = None

                    algorithm_dict[algorithm.name] = repeat_dict
                generation_dict[str(generation)] = algorithm_dict
            problem_dict[problem.name] = generation_dict
        # NOTE(review): actual_frontier here is the LAST problem's frontier —
        # confirm that is intended when more than one problem is passed.
        return problem_dict, actual_frontier

    from PerformanceMetrics.HyperVolume.hv import get_hyper_volume
    result, actual_frontier = get_data_from_archive(problems, algorithms,
                                                    Configurations,
                                                    get_hyper_volume)

    date_folder_prefix = strftime("%m-%d-%Y")
    if not os.path.isdir('./Results/Final_Frontier/' + date_folder_prefix):
        os.makedirs('./Results/Final_Frontier/' + date_folder_prefix)

    # (date_folder_prefix recomputed; same value as above.)
    date_folder_prefix = strftime("%m-%d-%Y")
    if not os.path.isdir('./Results/Charts/' + date_folder_prefix):
        os.makedirs('./Results/Charts/' + date_folder_prefix)

    problem_scores = {}
    for problem in problems:
        print problem.name
        from PerformanceMetrics.IGD.IGD_Calculation import IGD
        # Baseline IGD: how far a reference/baseline population is from the
        # combined frontier; used below to normalize the table columns.
        baseline_igd = IGD(actual_frontier,
                           baseline_objectives(problem, Configurations))

        # write the final frontier
        fignum = len([
            name for name in os.listdir('./Results/Final_Frontier/' +
                                        date_folder_prefix)
        ]) + 1
        filename_frontier = './Results/Final_Frontier/' + date_folder_prefix + '/table' + str("%02d" % fignum) + "_" \
                             + problem.name + "_" + tag + '.csv'
        ffrontier = open(filename_frontier, "w")
        for l in actual_frontier:
            string_l = ",".join(map(str, l))
            ffrontier.write(string_l + "\n")
        ffrontier.close()

        f, axarr = plt.subplots(1)
        scores = {}
        for algorithm in algorithms:
            median_scores = []
            median_evals = []
            Tables_Content = ""
            Tables_Content += "Generation, o25, o50, o75, n25, n50, n75 \n"

            for generation in xrange(
                    Configurations["Universal"]["No_of_Generations"]):
                temp_result = result[problem.name][str(generation)][
                    algorithm.name]
                # NOTE(review): despite the name, hypervolume_list holds IGD
                # values (see get_data_from_archive above).
                hypervolume_list = [
                    temp_result[str(repeat)]["IGD"] for repeat in xrange(
                        Configurations["Universal"]["Repeats"])
                    if temp_result[str(repeat)]["IGD"] is not None
                ]

                # Cumulative evaluation count per repeat up to this generation.
                old_evals = [
                    sum([
                        result[problem.name][str(tgen)][algorithm.name][str(
                            repeat)]["Evaluations"]
                        for tgen in xrange(generation)
                        if result[problem.name][str(tgen)][algorithm.name][str(
                            repeat)]["Evaluations"] is not None
                    ]) for repeat in xrange(Configurations["Universal"]
                                            ["Repeats"])
                ]
                evaluation_list = [
                    temp_result[str(repeat)]["Evaluations"] for repeat in
                    xrange(Configurations["Universal"]["Repeats"])
                    if temp_result[str(repeat)]["Evaluations"] is not None
                ]

                assert (len(hypervolume_list) == len(evaluation_list)
                        ), "Something is wrong"
                if len(hypervolume_list) > 0 and len(evaluation_list) > 0:
                    # o25/o50/o75: raw percentiles; n25/n50/n75: relative
                    # deviation from the baseline IGD.
                    o25 = getPercentile(hypervolume_list, 25)
                    o50 = getPercentile(hypervolume_list, 50)
                    o75 = getPercentile(hypervolume_list, 75)
                    Tables_Content += str(generation) + "," + str(
                        o25) + "," + str(o50) + "," + str(o75) + "," + str(
                            (o25 - baseline_igd) / baseline_igd) + "," + str(
                                (o50 - baseline_igd) /
                                baseline_igd) + "," + str(
                                    (o75 - baseline_igd) / baseline_igd) + "\n"
                    median_scores.append(aggregate_measure(hypervolume_list))
                    median_evals.append(aggregate_measure(old_evals))

            if not os.path.isdir('./Results/Tables/' + date_folder_prefix):
                os.makedirs('./Results/Tables/' + date_folder_prefix)
            fignum = len([
                name for name in os.listdir('./Results/Tables/' +
                                            date_folder_prefix)
            ]) + 1
            filename_table = './Results/Tables/' + date_folder_prefix + '/table' + str("%02d" % fignum) + "_" \
                             + algorithm.name + "_" + problem.name + "_" + tag + '.csv'

            # print Tables_Content
            open(filename_table, "w").write(Tables_Content)

            scores[algorithm.name] = aggregate_measure(median_scores)
            # Scatter markers plus a connecting line in the algorithm's color.
            axarr.plot(median_evals,
                       median_scores,
                       linestyle='None',
                       label=algorithm.name,
                       marker=algorithm.type,
                       color=algorithm.color,
                       markersize=8,
                       markeredgecolor='none')
            axarr.plot(median_evals, median_scores, color=algorithm.color)
            # axarr[o].set_ylim(0, 130)
            axarr.set_autoscale_on(True)
            axarr.set_xlim([-10, 10000])
            axarr.set_xscale('log', nonposx='clip')
            axarr.set_ylabel("IGD")

        f.suptitle(problem.name)
        fignum = len([
            name
            for name in os.listdir('./Results/Charts/' + date_folder_prefix)
        ]) + 1
        plt.legend(loc='lower center', bbox_to_anchor=(1, 0.5))
        plt.savefig('./Results/Charts/' + date_folder_prefix + '/figure' +
                    str("%02d" % fignum) + "_" + problem.name + "_" + tag +
                    '.png',
                    dpi=100)
        cla()
        problem_scores[problem.name] = scores

    return problem_scores
Code example #4
0
    def get_data_from_archive(problems, algorithms, Configurations, function):
        """Build per-problem IGD/evaluation statistics from archived runs.

        Returns a tuple (problem_dict, actual_frontier) where problem_dict is
        {problem.name: {generation: {algorithm.name: {repeat: {"IGD": ...,
        "Evaluations": ...}}}}} (keys are str), and actual_frontier is the
        deduplicated non-dominated frontier built from the final generations.

        NOTE(review): the `function` parameter is accepted but never used —
        confirm whether it was meant to replace the IGD metric.
        """
        from PerformanceMeasures.DataFrame import ProblemFrame
        problem_dict = {}
        for problem in problems:
            data = ProblemFrame(problem, algorithms)

            # # finding the final frontier
            final_frontiers = data.get_frontier_values()

            # unpacking the final frontiers
            unpacked_frontier = []
            for key in final_frontiers.keys():
                for repeat in final_frontiers[key]:
                    unpacked_frontier.extend(repeat)

            # Vivek: I have noticed that some of the algorithms (specifically VALE8) produces duplicate points
            # which would then show up in nondominated sort and tip the scale in its favour. So I would like to remove
            # all the duplicate points from the population and then perform a non dominated sort
            old = len(unpacked_frontier)
            unpacked_frontier = list(set(unpacked_frontier))
            # len(...) - old == 0 means dedup removed nothing, i.e. no dupes.
            if len(unpacked_frontier) - old == 0:
                print "There are no duplicates!! check"

            # Find the non dominated solutions

            # change into jmoo_individual
            from jmoo_individual import jmoo_individual
            population = [
                jmoo_individual(problem, i.decisions, i.objectives)
                for i in unpacked_frontier
            ]

            # Vivek: I first tried to choose only the non dominated solutions. But then there are only few solutions
            # (in order of 1-2) so I am just doing a non dominated sorting with crowd distance
            actual_frontier = [
                sol.fitness.fitness for sol in get_non_dominated_solutions(
                    problem, population, Configurations)
            ]
            assert (len(actual_frontier) == Configurations["Universal"]
                    ["Population_Size"])

            # Per-generation nested dict; all keys are stringified ints.
            generation_dict = {}
            for generation in xrange(
                    Configurations["Universal"]["No_of_Generations"]):
                #
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)

                algorithm_dict = {}
                for algorithm in algorithms:
                    repeat_dict = {}
                    for repeat in xrange(
                            Configurations["Universal"]["Repeats"]):
                        candidates = [
                            pop.objectives
                            for pop in population[algorithm.name][repeat]
                        ]
                        repeat_dict[str(repeat)] = {}
                        from PerformanceMetrics.IGD.IGD_Calculation import IGD
                        # Empty repeats are stored as None so callers can
                        # filter them out.
                        if len(candidates) > 0:
                            repeat_dict[str(repeat)]["IGD"] = IGD(
                                actual_frontier, candidates)
                            repeat_dict[str(
                                repeat)]["Evaluations"] = evaluations[
                                    algorithm.name][repeat]
                        else:
                            repeat_dict[str(repeat)]["IGD"] = None
                            repeat_dict[str(repeat)]["Evaluations"] = None

                    algorithm_dict[algorithm.name] = repeat_dict
                generation_dict[str(generation)] = algorithm_dict
            problem_dict[problem.name] = generation_dict
        # NOTE(review): actual_frontier is the LAST problem's frontier —
        # confirm that is intended when more than one problem is passed.
        return problem_dict, actual_frontier
Code example #5
0
def draw_igd(problem, algorithms, gtechniques, Configurations, tag):
    import os
    from time import strftime
    date_folder_prefix = strftime("%m-%d-%Y")
    if not os.path.isdir('./Results/Charts/' + date_folder_prefix):
        os.makedirs('./Results/Charts/' + date_folder_prefix)

    actual_frontier = get_actual_frontier(problem, algorithms, gtechniques,
                                          Configurations, tag)
    # actual_frontier = apply_normalization(problem[-1], actual_frontier)
    results = {}
    number_of_repeats = Configurations["Universal"]["Repeats"]
    number_of_objectives = len(problem[-1].objectives)
    generations = Configurations["Universal"]["No_of_Generations"]
    pop_size = Configurations["Universal"]["Population_Size"]
    evaluations = [pop_size * i for i in xrange(generations + 1)]

    f, axarr = plt.subplots(1)

    for algorithm in algorithms:
        results[algorithm.name] = {}
        for gtechnique in gtechniques:
            results[algorithm.name][gtechnique.__name__] = []
    for algorithm in algorithms:
        for gtechnique in gtechniques:

            points = get_initial_datapoints(problem[-1], algorithm, gtechnique,
                                            Configurations)
            from PerformanceMetrics.IGD.IGD_Calculation import IGD
            results[algorithm.name][gtechnique.__name__].append(
                IGD(actual_frontier, points))

            for generation in xrange(generations):
                print ".",
                import sys
                sys.stdout.flush()
                temp_igd_list = []
                files = find_files_for_generations(problem[-1].name,
                                                   algorithm.name,
                                                   gtechnique.__name__,
                                                   number_of_repeats,
                                                   generation + 1)
                for file in files:
                    temp_value = get_content_all(problem[-1],
                                                 file,
                                                 pop_size,
                                                 initial_line=False)
                    # change into jmoo_individual
                    from jmoo_individual import jmoo_individual
                    population = [
                        jmoo_individual(problem[-1], i[number_of_objectives:],
                                        i[:number_of_objectives])
                        for i in temp_value
                    ]

                    from jmoo_algorithms import get_non_dominated_solutions
                    temp_value = [
                        sol.fitness.fitness
                        for sol in get_non_dominated_solutions(
                            problem[-1], population, Configurations)
                    ]
                    temp_igd_list.append(IGD(actual_frontier, temp_value))
                from numpy import mean
                results[algorithm.name][gtechnique.__name__].append(
                    mean(temp_igd_list))

            if gtechnique.__name__ == "sway":
                lstyle = "--"
                mk = "v"
                ms = 4
            elif gtechnique.__name__ == "wierd":
                lstyle = "-"
                mk = "o"
                ms = 4
            else:
                lstyle = '-'
                mk = algorithm.type
                ms = 8

            axarr.plot(evaluations,
                       results[algorithm.name][gtechnique.__name__],
                       linestyle=lstyle,
                       label=algorithm.name + "_" + gtechnique.__name__,
                       marker=mk,
                       color=algorithm.color,
                       markersize=ms,
                       markeredgecolor='none')
            axarr.set_autoscale_on(True)
            axarr.set_xlim([0, 10000])
            # axarr.set_xscale('log', nonposx='clip')
            axarr.set_yscale('log', nonposx='clip')
            axarr.set_ylabel("IGD")

            print
            print problem[
                -1].name, algorithm.name, gtechnique.__name__,  #results[algorithm.name][gtechnique.__name__]

    f.suptitle(problem[-1].name)
    fignum = len([
        name for name in os.listdir('./Results/Charts/' + date_folder_prefix)
    ]) + 1
    plt.legend(frameon=False,
               loc='lower center',
               bbox_to_anchor=(0.5, -0.025),
               fancybox=True,
               ncol=2)
    plt.savefig('./Results/Charts/' + date_folder_prefix + '/figure' +
                str("%02d" % fignum) + "_" + problem[-1].name + "_" + tag +
                '.png',
                dpi=100,
                bbox_inches='tight')
    plt.cla()
    print "Processed: ", problem[-1].name
    import pdb
    pdb.set_trace()