def sbx_crossover(problem, parent1, parent2, cr=1, eta=30):
    assert len(parent1.decisionValues) == len(parent2.decisionValues), \
        "Parents must have the same number of decisions"
    child1 = [0 for _ in xrange(len(parent1.decisionValues))]
    child2 = [0 for _ in xrange(len(parent1.decisionValues))]

    if random.random() > cr:
        return parent1, parent2

    for index in xrange(len(parent1.decisionValues)):
        # Should this variable be considered for crossover?
        if random.random() > 0.5:
            child1[index] = parent1.decisionValues[index]
            child2[index] = parent2.decisionValues[index]
            continue

        # Are the two parent values (nearly) the same?
        if abs(parent1.decisionValues[index] - parent2.decisionValues[index]) <= EPS:
            child1[index] = parent1.decisionValues[index]
            child2[index] = parent2.decisionValues[index]
            continue

        lower_bound = problem.decisions[index].low
        upper_bound = problem.decisions[index].up
        y1 = min(parent1.decisionValues[index], parent2.decisionValues[index])
        y2 = max(parent1.decisionValues[index], parent2.decisionValues[index])
        random_no = random.random()

        # child 1
        beta = 1.0 + (2.0 * (y1 - lower_bound) / (y2 - y1))
        alpha = 2.0 - beta ** (-(eta + 1.0))
        betaq = get_betaq(random_no, alpha, eta)
        child1[index] = 0.5 * ((y1 + y2) - betaq * (y2 - y1))

        # child 2
        beta = 1.0 + (2.0 * (upper_bound - y2) / (y2 - y1))
        alpha = 2.0 - beta ** (-(eta + 1.0))
        betaq = get_betaq(random_no, alpha, eta)
        child2[index] = 0.5 * ((y1 + y2) + betaq * (y2 - y1))

        # clamp both children back into the legal decision range
        child1[index] = max(lower_bound, min(child1[index], upper_bound))
        child2[index] = max(lower_bound, min(child2[index], upper_bound))

    return jmoo_individual(problem, child1), jmoo_individual(problem, child2)

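# NOTE: get_betaq() is used by sbx_crossover() above but is not defined in this
# listing. The following is a minimal sketch of the standard SBX spread-factor
# calculation, assuming random_no is drawn uniformly from [0, 1); it is an
# assumption, not necessarily the project's own implementation.
def get_betaq(random_no, alpha, eta=30):
    if random_no <= (1.0 / alpha):
        betaq = (random_no * alpha) ** (1.0 / (eta + 1.0))
    else:
        betaq = (1.0 / (2.0 - random_no * alpha)) ** (1.0 / (eta + 1.0))
    return betaq
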
def find_poles(problem, population):
    poles = []
    # avoid duplicate poles
    temp_poles = []
    for _ in xrange(jmoo_properties.ANYWHERE_POLES):
        while True:
            one = random.choice(population)
            east = find_extreme(one, population)
            west = find_extreme(east, population)
            if (east != west and east != one and west != one
                    and east not in temp_poles and west not in temp_poles):
                break
        poles.append(east)
        poles.append(west)
        if look_for_duplicates(east, temp_poles) is False:
            temp_poles.append(east)
        else:
            assert False, "Something's wrong: east is already a pole"
        if look_for_duplicates(west, temp_poles, lambda x: x.decisionValues) is False:
            temp_poles.append(west)
        else:
            assert False, "Something's wrong: west is already a pole"
    min_point, max_point = find_extreme_point([pop.decisionValues for pop in poles])
    mid_point = find_midpoint(min_point, max_point)
    mid_point = jmoo_individual(problem, mid_point, None)
    stars = rearrange(problem, mid_point, poles)
    return stars

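# NOTE: find_extreme() and look_for_duplicates() are referenced above but are not
# part of this listing. The sketches below are assumptions, not the project's own
# helpers: find_extreme() is assumed to return the member of the population farthest
# (Euclidean distance over decision values) from the given individual, and
# look_for_duplicates() is assumed to report whether an equivalent individual is
# already in the pool, using an optional key function.
def find_extreme(one, population):
    def dist(a, b):
        return sum((x - y) ** 2 for x, y in zip(a.decisionValues, b.decisionValues)) ** 0.5
    return max(population, key=lambda other: dist(one, other))


def look_for_duplicates(individual, pool, key=lambda x: x.decisionValues):
    return any(key(individual) == key(other) for other in pool)
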
def generate_final_frontier_for_gale4(problems, algorithms, Configurations, tag=""):
    if "GALE4" not in [algorithm.name for algorithm in algorithms]:
        return
    for problem in problems:
        from Graphics.PerformanceMeasures.DataFrame import ProblemFrame
        data = ProblemFrame(problem, [a for a in algorithms if a.name == "GALE4"])
        # data for all repeats
        total_data = [data.get_frontier_values(gen_no)
                      for gen_no in xrange(Configurations["Universal"]["No_of_Generations"])]
        for repeat in xrange(Configurations["Universal"]["Repeats"]):
            temp_data = []
            for gen_no in xrange(Configurations["Universal"]["No_of_Generations"]):
                temp_data.extend(total_data[gen_no]["GALE4"][repeat])
            from jmoo_individual import jmoo_individual
            solutions = [jmoo_individual(problem, td.decisions, problem.evaluate(td.decisions))
                         for td in temp_data]
            # non-dominated sorting
            from jmoo_algorithms import selNSGA2
            final_solutions, _ = selNSGA2(problem, [], solutions, Configurations)
            for i in xrange(Configurations["Universal"]["No_of_Generations"]):
                filename = "./RawData/PopulationArchives/" + "GALE4" + "_" + problem.name + "/" + \
                           str(repeat) + "/" + str(i + 1) + ".txt"
                f = open(filename, "w")
                for fs in final_solutions:
                    f.write(','.join([str(fss) for fss in fs.decisionValues]) + "," +
                            ",".join([str(fss) for fss in fs.fitness.fitness]) + "\n")
                f.close()

def get_actual_frontier(problem, algorithms, Configurations, tag):
    number_of_objectives = len(problem[-1].objectives)
    pop_size = Configurations["Universal"]["Population_Size"]
    max_repeats = Configurations["Universal"]["Repeats"]
    max_gens = Configurations["Universal"]["No_of_Generations"]
    files = get_actual_frontier_files(problem, algorithms, max_repeats, max_gens, pop_size)

    content = []
    for file in files:
        content.extend(remove_duplicates(get_content_all(problem[-1], file, pop_size)))

    # change into jmoo_individual
    from jmoo_individual import jmoo_individual
    population = [jmoo_individual(problem[-1], i[number_of_objectives:], i[:number_of_objectives])
                  for i in content]

    from jmoo_algorithms import get_non_dominated_solutions
    actual_frontier = [sol.fitness.fitness
                       for sol in get_non_dominated_solutions(problem[-1], population, Configurations)]
    return actual_frontier

def get_data_from_archive(problems, algorithms, Configurations):
    from PerformanceMeasures.DataFrame import ProblemFrame
    problem_dict = {}
    for problem in problems:
        data = ProblemFrame(problem, algorithms)
        generation_dict = {}
        for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
            population = data.get_frontier_values(generation)
            evaluations = data.get_evaluation_values(generation)
            repeat_dict = {}
            for repeat in xrange(Configurations["Universal"]["Repeats"]):
                algorithm_dict = {}
                for algorithm in algorithms:
                    algorithm_dict[algorithm.name] = {}
                    try:
                        candidates = [jmoo_individual(problem, pop.decisions, pop.objectives)
                                      for pop in population[algorithm.name][repeat]]
                    except:
                        import pdb
                        pdb.set_trace()
                    if len(candidates) > 0:
                        algorithm_dict[algorithm.name]["Solutions"] = candidates
                        algorithm_dict[algorithm.name]["Evaluations"] = evaluations[algorithm.name][repeat]
                    else:
                        algorithm_dict[algorithm.name]["Solutions"] = None
                        algorithm_dict[algorithm.name]["Evaluations"] = None
                repeat_dict[str(repeat)] = algorithm_dict
            generation_dict[str(generation)] = repeat_dict
        problem_dict[problem.name] = generation_dict
    return problem_dict

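# The nested dictionary returned by get_data_from_archive() above is keyed as
# problem name -> generation (as a string) -> repeat (as a string) -> algorithm name.
# The helper below is hypothetical (not part of the original code) and only
# illustrates how that structure is read:
def _peek_archive_entry(archive, problem_name, generation, repeat, algorithm_name):
    entry = archive[problem_name][str(generation)][str(repeat)][algorithm_name]
    # "Solutions" is a list of jmoo_individual (or None); "Evaluations" is the
    # evaluation count recorded for that repeat (or None).
    return entry["Solutions"], entry["Evaluations"]
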
def find_poles3(problem, population):
    min_point, max_point = find_extreme_point([pop.decisionValues for pop in population])
    mid_point = find_midpoint(min_point, max_point)
    directions = [population[i]
                  for i in sorted(random.sample(xrange(len(population)),
                                                jmoo_properties.ANYWHERE_POLES * 2))]
    mid_point = jmoo_individual(problem, mid_point, None)
    stars = rearrange(problem, mid_point, directions)
    return stars

def nudge(problem, individual, east, increment):
    if individual.anyscore == 1e32:
        return individual
    temp = []
    for i, decision in enumerate(problem.decisions):
        up = decision.up
        low = decision.low
        mutation = increment * (east.decisionValues[i] - individual.decisionValues[i])
        temp.append(trim(individual.decisionValues[i] + mutation, low, up))
    return jmoo_individual(problem, temp, None)

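# NOTE: trim() is used by nudge() above (and by extrapolate() below) but is not
# defined in this listing. A minimal sketch, assuming it simply clamps a value to
# the legal [low, up] range of a decision, mirroring the explicit clamp used in
# sbx_crossover():
def trim(value, low, up):
    return max(low, min(value, up))
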
def midpoint(population):
    def median(lst):
        import numpy
        return numpy.median(numpy.array(lst))

    mdpnt = []
    for dec in xrange(len(population[0].decisionValues)):
        mdpnt.append(median([pop.decisionValues[dec] for pop in population]))
    assert len(mdpnt) == len(population[0].decisionValues), "Something's wrong"
    # NOTE: `problem` is not a parameter here; it must be available in the enclosing scope.
    return jmoo_individual(problem, mdpnt, None)

def extrapolate(problem, individuals, one, f, cf):
    # Differential-evolution style extrapolation: with probability cf, move towards
    # x + f * (y - z) built from three other individuals; otherwise keep `one`'s value.
    two, three, four = three_others(individuals, one)
    solution = []
    for d, decision in enumerate(problem.decisions):
        x, y, z = two.decisionValues[d], three.decisionValues[d], four.decisionValues[d]
        if random.random() < cf:
            mutated = x + f * (y - z)
            solution.append(trim(mutated, decision.low, decision.up))
        else:
            solution.append(one.decisionValues[d])
    return jmoo_individual(problem, [float(d) for d in solution], None)

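# NOTE: three_others() is used by extrapolate() above but is not defined in this
# listing. A minimal sketch under the assumption that it returns three distinct
# individuals from the pool, none of which is `one`; it relies on the module-level
# `random` import used throughout this file.
def three_others(individuals, one):
    others = [ind for ind in individuals if ind is not one]
    return random.sample(others, 3)
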
def nudge(problem, individual, east, increment, configuration):
    if individual.anyscore == 1e32:
        return individual
    temp = []
    for i, decision in enumerate(problem.decisions):
        up = decision.up
        low = decision.low
        if east.decisionValues[i] > individual.decisionValues[i] >= 0:
            weight = +1
        else:
            weight = -1
        mutation = (increment % configuration["STORM"]["GAMMA"]) * (
            east.decisionValues[i] - individual.decisionValues[i])
        temp.append(trim(individual.decisionValues[i] + weight * mutation, low, up))
    return jmoo_individual(problem, temp, None)

def find_poles3(problem, population, configurations):
    """This version of find_poles randomly selects individuals from the population
    and assumes them to be the directions.
    Intuition: random directions are better than any."""
    # min_point = minimum in all dimensions, max_point = maximum in all dimensions
    min_point, max_point = find_extreme_point([pop.decisionValues for pop in population])
    # find the mid point between min_point and max_point (mean)
    temp_mid_point = find_midpoint(min_point, max_point)
    # Randomly sample the population to generate directions.
    # This is a simpler approach than find_poles2().
    directions = [population[i]
                  for i in sorted(random.sample(xrange(len(population)),
                                                configurations["STORM"]["STORM_POLES"] * 2))]
    # Encapsulate the mid point in a jmoo_individual structure
    mid_point = jmoo_individual(problem, temp_mid_point, None)
    assert problem.validate(temp_mid_point) is True, \
        "Mid point is not a valid solution and this shouldn't happen"
    # stars is a list of poles in the Poles format
    stars = rearrange(problem, mid_point, directions)
    return stars

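# NOTE: find_extreme_point() and find_midpoint() are used above but are not part of
# this listing. Minimal sketches based on the comments in find_poles3(): the extreme
# points are the per-dimension minimum and maximum of the decision vectors, and the
# mid point is their element-wise mean. These are assumptions, not the project's own
# implementations.
def find_extreme_point(decision_vectors):
    min_point = [min(col) for col in zip(*decision_vectors)]
    max_point = [max(col) for col in zip(*decision_vectors)]
    return min_point, max_point


def find_midpoint(min_point, max_point):
    return [(lo + hi) / 2.0 for lo, hi in zip(min_point, max_point)]
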
def get_data_from_archive(gtechniques, problems, algorithms, Configurations, function):
    from PerformanceMeasures.DataFrame import ProblemFrame
    problem_dict = {}
    for problem in problems:
        actual_name = problem.name
        for gtechnique in gtechniques:
            problem.name = actual_name + "_" + gtechnique.__name__
            data = ProblemFrame(problem, algorithms)

            # finding the final frontier
            final_frontiers = data.get_frontier_values()

            # unpacking the final frontiers
            unpacked_frontier = []
            for key in final_frontiers.keys():
                for repeat in final_frontiers[key]:
                    unpacked_frontier.extend(repeat)

            # Vivek: I have noticed that some of the algorithms (specifically VALE8) produce duplicate
            # points, which then show up in the non-dominated sort and tip the scale in their favour.
            # So remove all duplicate points from the population before the non-dominated sort.
            old = len(unpacked_frontier)
            unpacked_frontier = list(set(unpacked_frontier))
            if len(unpacked_frontier) - old == 0:
                print "There are no duplicates!! check"

            # Find the non-dominated solutions
            # change into jmoo_individual
            from jmoo_individual import jmoo_individual
            population = [jmoo_individual(problem, i.decisions, i.objectives) for i in unpacked_frontier]

            # Vivek: I first tried to choose only the non-dominated solutions, but then there are only a
            # few solutions (on the order of 1-2), so I am doing a non-dominated sort with crowding distance.
            actual_frontier = [sol.fitness.fitness
                               for sol in get_non_dominated_solutions(problem, population, Configurations)]
            assert len(actual_frontier) == Configurations["Universal"]["Population_Size"]

            generation_dict = {}
            for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
                # per-generation frontier, grouped by algorithm and repeat
                population = data.get_frontier_values(generation)
                evaluations = data.get_evaluation_values(generation)
                algorithm_dict = {}
                for algorithm in algorithms:
                    repeat_dict = {}
                    for repeat in xrange(Configurations["Universal"]["Repeats"]):
                        candidates = [pop.objectives for pop in population[algorithm.name][repeat]]
                        repeat_dict[str(repeat)] = {}
                        from PerformanceMetrics.IGD.IGD_Calculation import IGD
                        if len(candidates) > 0:
                            repeat_dict[str(repeat)]["IGD"] = IGD(actual_frontier, candidates)
                            repeat_dict[str(repeat)]["Evaluations"] = evaluations[algorithm.name][repeat]
                        else:
                            repeat_dict[str(repeat)]["IGD"] = None
                            repeat_dict[str(repeat)]["Evaluations"] = None
                    algorithm_dict[algorithm.name] = repeat_dict
                generation_dict[str(generation)] = algorithm_dict
            problem_dict[problem.name] = generation_dict
        problem.name = actual_name
    return problem_dict, actual_frontier

def get_hyper_volume(problem, reference_point, results, Configurations):
    """Receives a list of lists"""
    # The results should only be the non-dominated points
    # non-dominated sorting
    from jmoo_algorithms import deap_format
    dIndividuals = deap_format(problem, results)

    # get only the first front
    from Algorithms.DEAP.tools.emo import sortNondominated
    first_front = sortNondominated(dIndividuals, len(dIndividuals), first_front_only=True)
    from itertools import chain
    chosen = list(chain(*first_front))

    # Copy from the DEAP structure to the JMOO structure
    from jmoo_individual import jmoo_individual
    population = []
    for i, dIndividual in enumerate(chosen):
        cells = []
        for j in range(len(dIndividual)):
            cells.append(dIndividual[j])
        population.append(jmoo_individual(problem, cells, [f for f in dIndividual.fitness.values]))

    # Find the min and max of the objectives
    MU = Configurations["Universal"]["Population_Size"]

    # Normalization
    filename = "Data/" + problem.name + "-p" + str(MU) + "-d" + str(len(problem.decisions)) + "-o" + \
               str(len(problem.objectives)) + "-dataset.txt"
    import csv
    input = open(filename, 'rb')
    reader = csv.reader(input, delimiter=',')

    # Use the csv file to recover the objective bounds (low, med, up per objective)
    for k, p in enumerate(reader):
        if k > MU:
            problem.objectives[k - MU - 1].med = float(p[1])
            low_not_found = False
            up_not_found = False
            if problem.objectives[k - MU - 1].low is None:
                problem.objectives[k - MU - 1].low = float(p[0])
                low_not_found = True
            if problem.objectives[k - MU - 1].up is None:
                problem.objectives[k - MU - 1].up = float(p[2])
                up_not_found = True
            rangeX5 = (problem.objectives[k - MU - 1].up - problem.objectives[k - MU - 1].low) * 5
            if low_not_found:
                problem.objectives[k - MU - 1].low -= rangeX5
            if up_not_found:
                problem.objectives[k - MU - 1].up += rangeX5

    # convert the objectives into a list of lists, normalized to [0, 1]
    results = [[(f - problem.objectives[i].low) / (problem.objectives[i].up - problem.objectives[i].low)
                for i, f in enumerate(pop.fitness.fitness)]
               for pop in population]
    normalized_reference_point = [1 for _ in reference_point]
    HV = HyperVolume(normalized_reference_point)
    return HV.compute(results)

def draw_gd(problem, algorithms, gtechniques, Configurations, tag):
    import os
    from time import strftime
    date_folder_prefix = strftime("%m-%d-%Y")
    if not os.path.isdir('./Results/Charts/' + date_folder_prefix):
        os.makedirs('./Results/Charts/' + date_folder_prefix)

    actual_frontier = get_actual_frontier(problem, algorithms, gtechniques, Configurations, tag)
    # actual_frontier = apply_normalization(problem[-1], actual_frontier)

    results = {}
    number_of_repeats = Configurations["Universal"]["Repeats"]
    number_of_objectives = len(problem[-1].objectives)
    generations = Configurations["Universal"]["No_of_Generations"]
    pop_size = Configurations["Universal"]["Population_Size"]
    evaluations = [pop_size * i for i in xrange(generations + 1)]

    f, axarr = plt.subplots(1)
    for algorithm in algorithms:
        results[algorithm.name] = {}
        for gtechnique in gtechniques:
            results[algorithm.name][gtechnique.__name__] = []

    for algorithm in algorithms:
        for gtechnique in gtechniques:
            points = get_initial_datapoints(problem[-1], algorithm, gtechnique, Configurations)
            from PerformanceMetrics.GD.GD_Calculation import GD
            results[algorithm.name][gtechnique.__name__].append(GD(actual_frontier, points))

            for generation in xrange(generations):
                print ".",
                import sys
                sys.stdout.flush()
                temp_gd_list = []
                files = find_files_for_generations(problem[-1].name, algorithm.name,
                                                   gtechnique.__name__, number_of_repeats,
                                                   generation + 1)
                for file in files:
                    temp_value = get_content_all(problem[-1], file, pop_size, initial_line=False)
                    # change into jmoo_individual
                    from jmoo_individual import jmoo_individual
                    population = [jmoo_individual(problem[-1], i[number_of_objectives:],
                                                  i[:number_of_objectives]) for i in temp_value]
                    from jmoo_algorithms import get_non_dominated_solutions
                    temp_value = [sol.fitness.fitness
                                  for sol in get_non_dominated_solutions(problem[-1], population,
                                                                         Configurations)]
                    temp_gd_list.append(GD(actual_frontier, temp_value))
                from numpy import mean
                results[algorithm.name][gtechnique.__name__].append(mean(temp_gd_list))

            if gtechnique.__name__ == "sway":
                lstyle = "--"
                mk = "v"
                ms = 4
            elif gtechnique.__name__ == "wierd":
                lstyle = "-"
                mk = "o"
                ms = 4
            else:
                lstyle = '-'
                mk = algorithm.type
                ms = 8

            axarr.plot(evaluations, results[algorithm.name][gtechnique.__name__],
                       linestyle=lstyle, label=algorithm.name + "_" + gtechnique.__name__,
                       marker=mk, color=algorithm.color, markersize=ms, markeredgecolor='none')
            axarr.set_autoscale_on(True)
            axarr.set_xlim([0, 10000])
            # axarr.set_xscale('log', nonposx='clip')
            axarr.set_yscale('log', nonposy='clip')
            axarr.set_ylabel("GD")
            print
            print problem[-1].name, algorithm.name, gtechnique.__name__,
            # results[algorithm.name][gtechnique.__name__]

    f.suptitle(problem[-1].name)
    fignum = len([name for name in os.listdir('./Results/Charts/' + date_folder_prefix)]) + 1
    plt.legend(frameon=False, loc='lower center', bbox_to_anchor=(0.5, -0.025),
               fancybox=True, ncol=2)
    plt.savefig('./Results/Charts/' + date_folder_prefix + '/figure' + str("%02d" % fignum) +
                "_" + problem[-1].name + "_" + tag + '.png', dpi=100, bbox_inches='tight')
    plt.cla()
    print "Processed: ", problem[-1].name

def get_data_from_archive(problems, algorithms, Configurations, function):
    from PerformanceMeasures.DataFrame import ProblemFrame
    problem_dict = {}
    for problem in problems:
        data = ProblemFrame(problem, algorithms)

        # finding the final frontier
        final_frontiers = data.get_frontier_values()

        # unpacking the final frontiers
        unpacked_frontier = []
        for key in final_frontiers.keys():
            for repeat in final_frontiers[key]:
                unpacked_frontier.extend(repeat)

        # Vivek: I have noticed that some of the algorithms (specifically VALE8) produce duplicate
        # points, which then show up in the non-dominated sort and tip the scale in their favour.
        # So remove all duplicate points from the population before the non-dominated sort.
        old = len(unpacked_frontier)
        unpacked_frontier = list(set(unpacked_frontier))
        if len(unpacked_frontier) - old == 0:
            print "There are no duplicates!! check"

        # Find the non-dominated solutions
        # change into jmoo_individual
        from jmoo_individual import jmoo_individual
        population = [jmoo_individual(problem, i.decisions, i.objectives) for i in unpacked_frontier]

        # Vivek: I first tried to choose only the non-dominated solutions, but then there are only a
        # few solutions (on the order of 1-2), so I am doing a non-dominated sort with crowding distance.
        actual_frontier = [sol.fitness.fitness
                           for sol in get_non_dominated_solutions(problem, population, Configurations)]
        assert len(actual_frontier) == Configurations["Universal"]["Population_Size"]

        generation_dict = {}
        for generation in xrange(Configurations["Universal"]["No_of_Generations"]):
            # per-generation frontier, grouped by algorithm and repeat
            population = data.get_frontier_values(generation)
            evaluations = data.get_evaluation_values(generation)
            algorithm_dict = {}
            for algorithm in algorithms:
                repeat_dict = {}
                for repeat in xrange(Configurations["Universal"]["Repeats"]):
                    candidates = [pop.objectives for pop in population[algorithm.name][repeat]]
                    repeat_dict[str(repeat)] = {}
                    from PerformanceMetrics.IGD.IGD_Calculation import IGD
                    if len(candidates) > 0:
                        repeat_dict[str(repeat)]["IGD"] = IGD(actual_frontier, candidates)
                        repeat_dict[str(repeat)]["Evaluations"] = evaluations[algorithm.name][repeat]
                    else:
                        repeat_dict[str(repeat)]["IGD"] = None
                        repeat_dict[str(repeat)]["Evaluations"] = None
                algorithm_dict[algorithm.name] = repeat_dict
            generation_dict[str(generation)] = algorithm_dict
        problem_dict[problem.name] = generation_dict
    return problem_dict, actual_frontier

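# The dictionary returned above is keyed as problem name -> generation (as a string)
# -> algorithm name -> repeat (as a string), with "IGD" and "Evaluations" entries per
# repeat. The helper below is hypothetical (not part of the original code) and only
# illustrates how that structure is read:
def _get_igd_for_repeat(problem_dict, problem_name, generation, algorithm_name, repeat):
    entry = problem_dict[problem_name][str(generation)][algorithm_name][str(repeat)]
    return entry["IGD"], entry["Evaluations"]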