Example #1
    def get_data_from_archive(gtechniques, problems, algorithms, Configurations, function):
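        """Read the archived frontiers and, for every problem/gtechnique pair, generation,
        algorithm and repeat, record the value of `function` (used here as a HyperVolume
        metric against a common reference point) together with the evaluation count."""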
        from PerformanceMeasures.DataFrame import ProblemFrame
        problem_dict = {}
        for problem in problems:
            actual_name = problem.name
            for gtechnique in gtechniques:
                problem.name = actual_name + "_" + gtechnique.__name__
                data = ProblemFrame(problem, algorithms)

                reference_point = data.get_reference_point(Configurations["Universal"]["No_of_Generations"])

                generation_dict = {}
                for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
                    population = data.get_frontier_values(generation)
                    evaluations = data.get_evaluation_values(generation)
                    algorithm_dict = {}
                    for algorithm in algorithms:
                        repeat_dict = {}
                        for repeat in xrange(Configurations["Universal"]["Repeats"]):
                            candidates = [pop.objectives for pop in population[algorithm.name][repeat]]
                            repeat_dict[str(repeat)] = {}
                            if len(candidates) > 0:
                                repeat_dict[str(repeat)]["HyperVolume"] = function(reference_point, candidates)
                                # note: a HyperVolume of 0 is recorded as-is (no special handling)
                                repeat_dict[str(repeat)]["Evaluations"] = evaluations[algorithm.name][repeat]
                            else:
                                repeat_dict[str(repeat)]["HyperVolume"] = None
                                repeat_dict[str(repeat)]["Evaluations"] = None

                        algorithm_dict[algorithm.name] = repeat_dict
                    generation_dict[str(generation)] = algorithm_dict
                problem_dict[problem.name] = generation_dict
            problem.name = actual_name
        return problem_dict
Example #2
    def get_data_from_archive(problems, algorithms, Configurations):
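        """Rebuild the archived frontiers as jmoo_individual objects, keyed by problem name,
        generation, repeat and algorithm, together with the per-repeat evaluation counts."""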
        from PerformanceMeasures.DataFrame import ProblemFrame
        from jmoo_individual import jmoo_individual
        problem_dict = {}
        for problem in problems:
            data = ProblemFrame(problem, algorithms)
            generation_dict = {}
            for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)

                repeat_dict = {}
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    algorithm_dict = {}
                    for algorithm in algorithms:
                        algorithm_dict[algorithm.name] = {}
                        try:
                            candidates = [jmoo_individual(problem, pop.decisions, pop.objectives) for pop in
                                          population[algorithm.name][repeat]]
                        except:  # drop into the debugger if the archived frontier cannot be read
                            import pdb
                            pdb.set_trace()
                        repeat_dict[str(repeat)] = {}
                        if len(candidates) > 0:
                            algorithm_dict[algorithm.name]["Solutions"] = candidates
                            algorithm_dict[algorithm.name]["Evaluations"] = evaluations[algorithm.name][repeat]
                        else:
                            algorithm_dict[algorithm.name]["Solutions"] = None
                            algorithm_dict[algorithm.name]["Evaluations"] = None

                    repeat_dict[str(repeat)] = algorithm_dict
                generation_dict[str(generation)] = repeat_dict
            problem_dict[problem.name] = generation_dict
        return problem_dict
Example #3
def get_data_from_archive(problems, algorithms, Configurations, function):
    """For every problem, generation, algorithm and repeat in the archive, store the value of
    `function` (a Spread-style metric computed against two extreme points) and the evaluation count."""
    from PerformanceMeasures.DataFrame import ProblemFrame
    problem_dict = {}
    for problem in problems:
        print problem.name
        data = ProblemFrame(problem, algorithms)
        extreme_point1, extreme_point2 = data.get_extreme_points(Configurations["Universal"]["Repeats"])
        generation_dict = {}
        for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
            population = data.get_frontier_values(generation)
            evaluations = data.get_evaluation_values(generation)
            algorithm_dict = {}
            for algorithm in algorithms:
                repeat_dict = {}
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    candidates = [pop.objectives for pop in population[algorithm.name][repeat]]
                    repeat_dict[str(repeat)] = {}
                    if len(candidates) > 0:
                        try:
                            repeat_dict[str(repeat)]["Spread"] = function(candidates, extreme_point1,
                                                                          extreme_point2)
                            repeat_dict[str(repeat)]["Evaluations"] = evaluations[algorithm.name][repeat]
                        except:
                            repeat_dict[str(repeat)]["Spread"] = None
                            repeat_dict[str(repeat)]["Evaluations"] = None
                    else:
                        repeat_dict[str(repeat)]["Spread"] = None
                        repeat_dict[str(repeat)]["Evaluations"] = None
                algorithm_dict[algorithm.name] = repeat_dict
            generation_dict[str(generation)] = algorithm_dict
        problem_dict[problem.name] = generation_dict
    return problem_dict
Example #4
def get_data_from_archive(problems, algorithms, Configurations, function):
    from PerformanceMeasures.DataFrame import ProblemFrame
    problem_dict = {}
    for problem in problems:
        data = ProblemFrame(problem, algorithms)
        reference_point = data.get_reference_point(
            Configurations["Universal"]["No_of_Generations"])
        generation_dict = {}
        for generation in xrange(
                Configurations["Universal"]["No_of_Generations"]):
            population = data.get_frontier_values(generation)
            evaluations = data.get_evaluation_values(generation)
            algorithm_dict = {}
            for algorithm in algorithms:
                repeat_dict = {}
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    candidates = [
                        pop.objectives
                        for pop in population[algorithm.name][repeat]
                    ]
                    repeat_dict[str(repeat)] = {}
                    if len(candidates) > 0:
                        repeat_dict[str(repeat)]["HyperVolume"] = function(
                            reference_point, candidates)
                        repeat_dict[str(repeat)]["Evaluations"] = evaluations[
                            algorithm.name][repeat]
                    else:
                        repeat_dict[str(repeat)]["HyperVolume"] = None
                        repeat_dict[str(repeat)]["Evaluations"] = None

                algorithm_dict[algorithm.name] = repeat_dict
            generation_dict[str(generation)] = algorithm_dict
        problem_dict[problem.name] = generation_dict
    return problem_dict
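
The dictionary returned by this variant is keyed problem name → generation (as a string) → algorithm name → repeat (as a string), each leaf holding "HyperVolume" and "Evaluations" entries. As a hedged illustration only (the helper below is not part of jmoo; `results` stands for the return value of get_data_from_archive), the structure can be consumed like this, e.g. to average the final-generation HyperVolume per algorithm:

    def mean_final_hypervolume(results, Configurations):
        # key of the last generation written by get_data_from_archive
        last_gen = str(Configurations["Universal"]["No_of_Generations"] - 1)
        summary = {}
        for problem_name, generations in results.items():
            per_algorithm = {}
            for algorithm_name, repeats in generations[last_gen].items():
                values = [r["HyperVolume"] for r in repeats.values() if r["HyperVolume"] is not None]
                per_algorithm[algorithm_name] = sum(values) / len(values) if values else None
            summary[problem_name] = per_algorithm
        return summary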
Example #5
def generate_summary(problems,
                     algorithms,
                     baseline,
                     Configurations,
                     tag="Comparisions"):
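    """For every problem and objective, print whether each algorithm's archived frontier is
    within a small-effect distance (0.4 standard deviations) of the `baseline` algorithm's."""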
    for problem in problems:
        print problem.name, " - " * 50
        from PerformanceMeasures.DataFrame import ProblemFrame
        data = ProblemFrame(problem, algorithms)
        population = data.get_frontier_values()

        fast_algorithm_population = []
        for repeat in xrange(Configurations["Universal"]["Repeats"]):
            fast_algorithm_population.append(
                [pop.objectives for pop in population[baseline][repeat]])
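        # note: despite its name, fast_algorithm_population holds the frontier of the `baseline`
        # algorithm; baseline_population below holds the frontier of the algorithm being compared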

        for algorithm in algorithms:
            if algorithm.name != baseline:
                baseline_population = []
                print algorithm.name + " | ",
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    baseline_population.append([
                        pop.objectives
                        for pop in population[algorithm.name][repeat]
                    ])
                for objective_number in xrange(len(problem.objectives)):
                    fast_algorithm_objective_list = [
                        flat[objective_number]
                        for fap in fast_algorithm_population for flat in fap
                    ]
                    baseline_objective_list = [
                        flat[objective_number] for bp in baseline_population
                        for flat in bp
                    ]

                    from numpy import std, mean
                    s = std(baseline_objective_list)
                    small_effect = s * 0.4
                    n1 = mean(baseline_objective_list)
                    n2 = mean(fast_algorithm_objective_list)
                    if abs(n1 - n2) <= small_effect:
                        print "Yes", round(
                            abs(n1 - n2) / (s + 0.000000001), 3),
                    else:
                        print "No", round(abs(n1 - n2) / (s + 0.000000001), 3),
                    print "|",
                print
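
The "Yes"/"No" printed for each objective is a small-effect check: the two mean objective values are treated as practically equivalent when their absolute difference is at most 0.4 of the standard deviation of the compared list, and the number printed next to it is that difference in units of the standard deviation (a Cohen's-d-style value). A minimal, self-contained sketch of this rule on two plain lists of objective values (the helper name is illustrative, not part of jmoo):

    from numpy import std, mean

    def small_effect_check(reference_objectives, other_objectives, threshold=0.4):
        # effect size: absolute difference of the means, scaled by the reference list's std
        s = std(reference_objectives)
        effect = abs(mean(reference_objectives) - mean(other_objectives)) / (s + 1e-9)
        return effect <= threshold, effect

    same, effect = small_effect_check([0.10, 0.12, 0.11, 0.13], [0.11, 0.12, 0.12, 0.14])
    # `same` is True when the difference in means lies within `threshold` standard deviations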
Example #6
    def get_data_from_archive(problems, algorithms, Configurations):
        from PerformanceMeasures.DataFrame import ProblemFrame
        from jmoo_individual import jmoo_individual
        problem_dict = {}
        for problem in problems:
            data = ProblemFrame(problem, algorithms)
            generation_dict = {}
            for generation in xrange(
                    Configurations["Universal"]["No_of_Generations"]):
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)

                repeat_dict = {}
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    algorithm_dict = {}
                    for algorithm in algorithms:
                        algorithm_dict[algorithm.name] = {}
                        try:
                            candidates = [
                                jmoo_individual(problem, pop.decisions,
                                                pop.objectives)
                                for pop in population[algorithm.name][repeat]
                            ]
                        except:  # drop into the debugger if the archived frontier cannot be read
                            import pdb
                            pdb.set_trace()
                        repeat_dict[str(repeat)] = {}
                        if len(candidates) > 0:
                            algorithm_dict[
                                algorithm.name]["Solutions"] = candidates
                            algorithm_dict[
                                algorithm.name]["Evaluations"] = evaluations[
                                    algorithm.name][repeat]
                        else:
                            algorithm_dict[algorithm.name]["Solutions"] = None
                            algorithm_dict[
                                algorithm.name]["Evaluations"] = None

                    repeat_dict[str(repeat)] = algorithm_dict
                generation_dict[str(generation)] = repeat_dict
            problem_dict[problem.name] = generation_dict
        return problem_dict
Example #7
def generate_summary(problems, algorithms, baseline, Configurations, tag="Comparisions"):
    for problem in problems:
        print problem.name, " - " * 50
        from PerformanceMeasures.DataFrame import ProblemFrame

        data = ProblemFrame(problem, algorithms)
        population = data.get_frontier_values()

        fast_algorithm_population = []
        for repeat in xrange(Configurations["Universal"]["Repeats"]):
            fast_algorithm_population.append([pop.objectives for pop in population[baseline][repeat]])

        for algorithm in algorithms:
            if algorithm.name != baseline:
                baseline_population = []
                print algorithm.name + " | ",
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    baseline_population.append([pop.objectives for pop in population[algorithm.name][repeat]])
                for objective_number in xrange(len(problem.objectives)):
                    fast_algorithm_objective_list = [
                        flat[objective_number] for fap in fast_algorithm_population for flat in fap
                    ]
                    baseline_objective_list = [flat[objective_number] for bp in baseline_population for flat in bp]

                    from numpy import std, mean

                    s = std(baseline_objective_list)
                    small_effect = s * 0.4
                    n1 = mean(baseline_objective_list)
                    n2 = mean(fast_algorithm_objective_list)
                    if abs(n1 - n2) <= small_effect:
                        print "Yes", round(abs(n1 - n2) / (s + 0.000000001), 3),
                    else:
                        print "No", round(abs(n1 - n2) / (s + 0.000000001), 3),
                    print "|",
                print
Example #8
    def get_data_from_archive(gtechniques, problems, algorithms, Configurations, function):
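        """Build a de-duplicated, non-dominated reference frontier per problem/gtechnique pair
        and record every algorithm's per-repeat IGD against it, plus the evaluation counts."""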
        from PerformanceMeasures.DataFrame import ProblemFrame
        problem_dict = {}
        for problem in problems:
            actual_name = problem.name
            for gtechnique in gtechniques:
                problem.name = actual_name + "_" + gtechnique.__name__
                data = ProblemFrame(problem, algorithms)

                # finding the final frontier
                final_frontiers = data.get_frontier_values()

                # unpacking the final frontiers
                unpacked_frontier = []
                for key in final_frontiers.keys():
                    for repeat in final_frontiers[key]:
                        unpacked_frontier.extend(repeat)

                # Vivek: I have noticed that some of the algorithms (specifically VALE8) produce duplicate points,
                # which would then show up in the non-dominated sort and tip the scale in their favour. So I remove
                # all the duplicate points from the population before performing the non-dominated sort.
                old = len(unpacked_frontier)
                unpacked_frontier = list(set(unpacked_frontier))
                if len(unpacked_frontier) - old == 0:
                    print "There are no duplicates!! check"

                # Find the non-dominated solutions

                # convert to jmoo_individual
                from jmoo_individual import jmoo_individual
                population = [jmoo_individual(problem, i.decisions, i.objectives) for i in unpacked_frontier]

                # Vivek: I first tried to keep only the non-dominated solutions, but that leaves very few
                # (on the order of 1-2), so I perform a non-dominated sort with crowding distance instead.
                actual_frontier = [sol.fitness.fitness for sol in
                                   get_non_dominated_solutions(problem, population, Configurations)]
                assert (len(actual_frontier) == Configurations["Universal"]["Population_Size"])

                generation_dict = {}
                for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
                    #
                    population = data.get_frontier_values(generation)
                    evaluations = data.get_evaluation_values(generation)

                    algorithm_dict = {}
                    for algorithm in algorithms:
                        repeat_dict = {}
                        for repeat in xrange(Configurations["Universal"]["Repeats"]):
                            candidates = [pop.objectives for pop in population[algorithm.name][repeat]]
                            repeat_dict[str(repeat)] = {}
                            from PerformanceMetrics.IGD.IGD_Calculation import IGD
                            if len(candidates) > 0:
                                repeat_dict[str(repeat)]["IGD"] = IGD(actual_frontier, candidates)
                                repeat_dict[str(repeat)]["Evaluations"] = evaluations[algorithm.name][repeat]
                            else:
                                repeat_dict[str(repeat)]["IGD"] = None
                                repeat_dict[str(repeat)]["Evaluations"] = None

                        algorithm_dict[algorithm.name] = repeat_dict
                    generation_dict[str(generation)] = algorithm_dict
                problem_dict[problem.name] = generation_dict
            problem.name = actual_name
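        # note: actual_frontier at this point is the reference frontier built for the
        # last problem/gtechnique pair processed above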
        return problem_dict, actual_frontier
Example #9
    def get_data_from_archive(problems, algorithms, Configurations, function):
        from PerformanceMeasures.DataFrame import ProblemFrame
        problem_dict = {}
        for problem in problems:
            data = ProblemFrame(problem, algorithms)

            # finding the final frontier
            final_frontiers = data.get_frontier_values()

            # unpacking the final frontiers
            unpacked_frontier = []
            for key in final_frontiers.keys():
                for repeat in final_frontiers[key]:
                    unpacked_frontier.extend(repeat)

            # Vivek: I have noticed that some of the algorithms (specifically VALE8) produce duplicate points,
            # which would then show up in the non-dominated sort and tip the scale in their favour. So I remove
            # all the duplicate points from the population before performing the non-dominated sort.
            old = len(unpacked_frontier)
            unpacked_frontier = list(set(unpacked_frontier))
            if len(unpacked_frontier) - old == 0:
                print "There are no duplicates!! check"

            # Find the non-dominated solutions

            # convert to jmoo_individual
            from jmoo_individual import jmoo_individual
            population = [
                jmoo_individual(problem, i.decisions, i.objectives)
                for i in unpacked_frontier
            ]

            # Vivek: I first tried to keep only the non-dominated solutions, but that leaves very few
            # (on the order of 1-2), so I perform a non-dominated sort with crowding distance instead.
            actual_frontier = [
                sol.fitness.fitness for sol in get_non_dominated_solutions(
                    problem, population, Configurations)
            ]
            assert (len(actual_frontier) == Configurations["Universal"]
                    ["Population_Size"])

            generation_dict = {}
            for generation in xrange(
                    Configurations["Universal"]["No_of_Generations"]):
                #
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)

                algorithm_dict = {}
                for algorithm in algorithms:
                    repeat_dict = {}
                    for repeat in xrange(
                            Configurations["Universal"]["Repeats"]):
                        candidates = [
                            pop.objectives
                            for pop in population[algorithm.name][repeat]
                        ]
                        repeat_dict[str(repeat)] = {}
                        from PerformanceMetrics.IGD.IGD_Calculation import IGD
                        if len(candidates) > 0:
                            repeat_dict[str(repeat)]["IGD"] = IGD(
                                actual_frontier, candidates)
                            repeat_dict[str(
                                repeat)]["Evaluations"] = evaluations[
                                    algorithm.name][repeat]
                        else:
                            repeat_dict[str(repeat)]["IGD"] = None
                            repeat_dict[str(repeat)]["Evaluations"] = None

                    algorithm_dict[algorithm.name] = repeat_dict
                generation_dict[str(generation)] = algorithm_dict
            problem_dict[problem.name] = generation_dict
        return problem_dict, actual_frontier