Example 1
def convert_to_jmoo(problem, pareto_fronts):
    tpopulation = []
    for front_no, front in enumerate(pareto_fronts[:-1]):
        for i, dIndividual in enumerate(front):
            cells = []
            for j in xrange(len(dIndividual)):
                cells.append(dIndividual[j])
            tpopulation.append(
                jmoo_individual(problem, cells,
                                list(dIndividual.fitness.values)))
    for pop in tpopulation:
        pop.front_no = 0  # all fronts except the last one

    lpopulation = []
    for front_no, front in enumerate(pareto_fronts[-1:]):
        for i, dIndividual in enumerate(front):
            cells = []
            for j in xrange(len(dIndividual)):
                cells.append(dIndividual[j])
            lpopulation.append(
                jmoo_individual(problem, cells,
                                list(dIndividual.fitness.values)))
    for pop in lpopulation:
        pop.front_no = -1  # last front

    from itertools import chain
    assert (len(list(chain(*pareto_fronts))) <= len(lpopulation) +
            len(tpopulation)), "Non Dominated Sorting is wrong!"
    return lpopulation + tpopulation
Example 2
def gale_nm_Mutate(problem, NDLeafs, configuration):
    #################
    # Mutation Phase
    #################

    number_of_evaluation = 0
    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]],
                                                  [x for x in row.cells[len(problem.decisions):]]))
                number_of_evaluation += 1
            else:
                population.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], None))

    if number_of_evaluation < 2:
        while True:
            from random import randint
            index = randint(0, len(population) - 1)
            if population[index].valid is not True:
                population[index].evaluate()
                number_of_evaluation +=1
                break
    return population, number_of_evaluation
Example 3
def gale_4_Mutate(problem, NDLeafs, configuration):
    #################
    # Mutation Phase
    #################

    number_of_evaluation = 0
    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]],
                        [x for x in row.cells[len(problem.decisions):]]))
                number_of_evaluation += 1
            else:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]], None))

    if number_of_evaluation < 2:
        while True:
            from random import randint
            index = randint(0, len(population) - 1)
            if population[index].valid is not True:
                population[index].evaluate()
                number_of_evaluation += 1
                break
    return population, number_of_evaluation
Example 4
def gale0WHERE(problem, population, configuration, values_to_be_passed):
    "The Core method behind GALE"

    # for pop in population:
    #     assert(pop.generation_number == 0), "Generation has to be 0"

    # Compile population into table form used by WHERE
    t = slurp([[x for x in row.decisionValues] + ["?" for y in problem.objectives] for row in population],
              problem.buildHeader().split(","))

    # Initialize some parameters for WHERE
    The.allowDomination = True
    The.alpha = 1
    for i, row in enumerate(t.rows):
        row.evaluated = False

    # Run WHERE
    m = Moo(problem, t, len(t.rows), N=1).divide(minnie=rstop(t))

    print "Where done"
    # Organizing
    NDLeafs = m.nonPrunedLeaves()  # The surviving non-dominated leafs
    allLeafs = m.nonPrunedLeaves() + m.prunedLeaves()  # All of the leafs

    # After mutation: Check how many rows were actually evaluated
    numEval = 0
    for leaf in allLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                numEval += 1

    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], 0,
                                                  [x for x in row.cells[len(problem.decisions):]]))
            else:
                indi = jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], 0, None)
                indi.fitness.fitness = problem.evaluate(indi.decisionValues)
                # print "> ", indi.fitness.fitness
                population.append(indi)
                numEval += 1

    # median_values = []
    # for i in xrange(len(problem.objectives)):
    #     temp_values = []
    #     for pop in population:
    #         temp_values.append(pop.fitness.fitness[i])
    #     median_values.append(median(temp_values))
    #
    # print median_values

    print "number of evals: ", numEval
    return population, numEval
Example 5
def galeEWWHERE(problem, population, configuration, values_to_be_passed):
    "The Core method behind GALE"

    # for pop in population:
    #     assert(pop.generation_number == 0), "Generation has to be 0"

    # Compile population into table form used by WHERE
    t = slurp([[x for x in row.decisionValues] + ["?" for y in problem.objectives] for row in population],
              problem.buildHeader().split(","))

    # Initialize some parameters for WHERE
    The.allowDomination = True
    The.alpha = 1
    for i, row in enumerate(t.rows):
        row.evaluated = False

    # Run WHERE
    m = Moo(problem, t, len(t.rows), N=1).divide(minnie=rstop(t))

    print "Where done"
    # Organizing
    NDLeafs = m.nonPrunedLeaves()  # The surviving non-dominated leafs
    allLeafs = m.nonPrunedLeaves() + m.prunedLeaves()  # All of the leafs

    # After mutation: Check how many rows were actually evaluated
    numEval = 0
    for leaf in allLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                numEval += 1

    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]],
                                                  [x for x in row.cells[len(problem.decisions):]]))
            else:
                indi = jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], None)
                indi.fitness.fitness = problem.evaluate(indi.decisionValues)
                population.append(indi)
                numEval += 1

    # median_values = []
    # for i in xrange(len(problem.objectives)):
    #     temp_values = []
    #     for pop in population:
    #         temp_values.append(pop.fitness.fitness[i])
    #     median_values.append(median(temp_values))
    #
    # print median_values

    return population, numEval
Example 6
def over_sampling(problem, population, more_count, f=0.75):
    def trim(mutated, low, up):
        """Constraint checking of decision"""
        return max(low, min(mutated, up))

    def interpolate(a, b, random_number):
        return a + random_number * (b - a)

    new_population = []
    for i in xrange(more_count):
        from random import choice, randint, random
        one_count = i % len(population)
        one = population[one_count]
        while True:
            two_count = randint(0, len(population) - 1)
            if one_count != two_count: break
        two = population[two_count]
        solution = []
        for d, decision in enumerate(problem.decisions):
            assert isinstance(one, jmoo_individual)
            assert isinstance(two, jmoo_individual)
            solution.append(
                trim(
                    interpolate(one.decisionValues[d], two.decisionValues[d],
                                random()), decision.low, decision.up))

        new_population.append(jmoo_individual(problem, solution, None))
    return new_population
Example 7
def polynomial_mutation(problem, individual, configuration):
    from numpy.random import random
    eta_m_ = configuration["NSGAIII"]["ETA_M_DEFAULT_"]
    distributionIndex_ = eta_m_
    # copy the decision list so mutation does not alter the parent in place
    output = jmoo_individual(problem, list(individual.decisionValues))

    probability = 1.0 / len(problem.decisions)  # float division; integer division would always be 0 here
    for var in xrange(len(problem.decisions)):
        if random() <= probability:
            y = individual.decisionValues[var]
            yU = problem.decisions[var].up
            yL = problem.decisions[var].low
            delta1 = (y - yL)/(yU - yL)
            delta2 = (yU - y)/(yU - yL)
            rnd = random()

            mut_pow = 1.0/(eta_m_ + 1.0)
            if rnd < 0.5:
                xy = 1.0 - delta1
                val = 2.0 * rnd + (1 - 2 * rnd) * (xy ** (distributionIndex_ + 1.0))
                deltaq = val ** mut_pow - 1
            else:
                xy = 1.0 - delta2
                val = 2.0 * (1.0-rnd) + 2.0 * (rnd-0.5) * (xy ** (distributionIndex_+1.0))
                deltaq = 1.0 - (val ** mut_pow)


            y +=  deltaq * (yU - yL)
            if y < yL: y = yL
            if y > yU: y = yU

            output.decisionValues[var] = y

    return output
Example 8
def extrapolate(problem, individuals, one, f, cf):
    import random
    from random import randint
    two, three, four = three_others(individuals, one)
    solution = []

    # from Binary Differential Evolution Algorithm with New Mutation Operator

    if problem.is_binary is True:
        for d, decision in enumerate(problem.decisions):
            assert isinstance(two, jmoo_individual)
            x, y, z = two.decisionValues[d], three.decisionValues[d], four.decisionValues[d]
            if random.random() < cf:
                # flip the (assumed 0/1) decision bit
                solution.append(1 - x)
            else:
                solution.append(one.decisionValues[d])

    else:
        for d, decision in enumerate(problem.decisions):
            assert isinstance(two, jmoo_individual)
            x, y, z = two.decisionValues[d], three.decisionValues[d], four.decisionValues[d]
            if random.random() < cf or randint(0, len(problem.decisions)) == d:
                solution.append(trim(x + f * (y - z), decision.low, decision.up))
            else:
                solution.append(one.decisionValues[d])

    return jmoo_individual(problem, [float(d) for d in solution], None)
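The continuous branch above builds a differential-evolution style trial vector: each decision either takes x + f * (y - z) from three other individuals (with crossover rate cf) or copies the base individual. Below is a minimal, self-contained sketch of that rule on plain lists of floats; the function name de_trial_vector and its arguments are illustrative assumptions, not part of the JMOO codebase.

import random

def de_trial_vector(base, a, b, c, bounds, f=0.75, cf=0.3):
    """DE-style extrapolation on plain float lists (illustrative sketch only)."""
    trial = []
    forced = random.randint(0, len(base) - 1)  # force at least one mutated dimension
    for d, (low, up) in enumerate(bounds):
        if random.random() < cf or d == forced:
            mutated = a[d] + f * (b[d] - c[d])
            trial.append(max(low, min(mutated, up)))  # clamp to the decision bounds
        else:
            trial.append(base[d])
    return trial

# e.g. de_trial_vector([1.0, 2.0], [1.5, 2.5], [0.5, 1.0], [0.2, 0.4], [(0, 5), (0, 5)])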
Example 9
def selNSGA2(problem, population, selectees, configurations):
    k = configurations["Universal"]["Population_Size"]
    # Evaluate any new guys
    for individual in population + selectees:
        if not individual.valid:
            individual.evaluate()

    # Format a population Data structure usable by DEAP's package
    dIndividuals = deap_format(problem, population + selectees)

    # Combine
    from Algorithms.DEAP.tools.emo import deap_selNSGA2
    dIndividuals = deap_selNSGA2(dIndividuals, k)

    # Copy from DEAP structure to JMOO structure
    population = []
    for i, dIndividual in enumerate(dIndividuals):
        cells = []
        for j in range(len(dIndividual)):
            cells.append(dIndividual[j])
        population.append(
            jmoo_individual(problem, cells,
                            [f for f in dIndividual.fitness.values]))

    return population, k
Example 10
def loadInitialPopulation(problem, MU):
    "a way to load *the* initial problem as used in jmoo_jmoea.py"
    "this will load a csv as generated by the dataGen method of"
    "jmoo_problems.py"

    filename = "data/" + problem.name + "-p" + str(MU) + "-d" + str(len(problem.decisions)) + "-o" + str(len(problem.objectives)) + "-dataset.txt"
    input = open(filename, 'rb')
    reader = csv.reader(input, delimiter=',')
    population = []

    #Use the csv file to build the initial population
    for k,p in enumerate(reader):
        if k > MU:
            problem.objectives[k-MU-1].med = float(p[1])
            lownotfound = False
            upnotfound = False

            if problem.objectives[k-MU-1].low == None:
                problem.objectives[k-MU-1].low = float(p[0])
                lownotfound = True
            if problem.objectives[k-MU-1].up == None:
                problem.objectives[k-MU-1].up = float(p[2])
                upnotfound = True
            rangeX5 = (problem.objectives[k-MU-1].up - problem.objectives[k-MU-1].low)*5
            if lownotfound:
                problem.objectives[k-MU-1].low -= rangeX5
            if upnotfound:
                problem.objectives[k-MU-1].up += rangeX5

        elif k > 0:
            population.append(jmoo_individual(problem,[float(p[n]) for n,dec in enumerate(problem.decisions)],None))
            #population[-1].fitness = jmoo_fitness(problem, [float(p[n+len(problem.decisions)]) for n,obj in enumerate(problem.objectives)])

    return population
Example 11
def update_neighbor(problem, individual, mutant, population, dist_function, configuration, values_to_be_passed):
    from copy import deepcopy
    new_population = population[:] #deepcopy(population)

    for i in xrange(configuration["MOEAD"]["niche"]):
        k = individual.neighbor[i]
        neighbor = [pop for pop in new_population if pop.id == k][-1]
        f1 = dist_function(problem, neighbor.fitness.fitness, neighbor.weight, values_to_be_passed, configuration)
        f2 = dist_function(problem, mutant.fitness.fitness, neighbor.weight, values_to_be_passed, configuration)
        if f2 < f1:
            backup_copy = None
            for pop in new_population:
                if pop.id == k:
                    backup_copy = deepcopy(pop)
                    new_population.remove(pop)
                    break
            assert(backup_copy.id == k), "Assumption that an individual with id k exists is wrong!"
            new_solution = jmoo_individual(problem, mutant.decisionValues, mutant.fitness.fitness)
            new_solution.id = k
            new_solution.neighbor = deepcopy(backup_copy.neighbor)
            new_solution.weight = deepcopy(backup_copy.weight)
            new_population.append(new_solution)
            assert(new_solution.id == backup_copy.id), "Something is wrong"

    assert(len(new_population) == configuration["Universal"]["Population_Size"]), "Something is wrong with the update"
    return new_population
Example 12
def polynomial_mutation(problem, individual, configuration):
    from numpy.random import random
    eta_m_ = configuration["NSGAIII"]["ETA_M_DEFAULT_"]
    distributionIndex_ = eta_m_
    # copy the decision list so mutation does not alter the parent in place
    output = jmoo_individual(problem, list(individual.decisionValues))

    probability = 1.0 / len(problem.decisions)  # float division; integer division would always be 0 here
    for var in xrange(len(problem.decisions)):
        if random() <= probability:
            y = individual.decisionValues[var]
            yU = problem.decisions[var].up
            yL = problem.decisions[var].low
            delta1 = (y - yL) / (yU - yL)
            delta2 = (yU - y) / (yU - yL)
            rnd = random()

            mut_pow = 1.0 / (eta_m_ + 1.0)
            if rnd < 0.5:
                xy = 1.0 - delta1
                val = 2.0 * rnd + (1 - 2 * rnd) * (xy**
                                                   (distributionIndex_ + 1.0))
                deltaq = val**mut_pow - 1
            else:
                xy = 1.0 - delta2
                val = 2.0 * (1.0 - rnd) + 2.0 * (rnd - 0.5) * (xy**(
                    distributionIndex_ + 1.0))
                deltaq = 1.0 - (val**mut_pow)

            y += deltaq * (yU - yL)
            if y < yL: y = yL
            if y > yU: y = yU

            output.decisionValues[var] = y

    return output
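For reference, the mutation step above can be reproduced without any JMOO classes. The sketch below applies the same polynomial-mutation formula to a plain list of floats; the names polynomial_mutation_sketch, decisions, bounds and eta_m are illustrative assumptions, not library API.

from random import random

def polynomial_mutation_sketch(decisions, bounds, eta_m=20.0):
    """Deb's polynomial mutation on a plain list of floats (illustrative sketch)."""
    mutated = list(decisions)
    probability = 1.0 / len(decisions)  # on average, mutate one variable
    for var, (low, up) in enumerate(bounds):
        if random() > probability:
            continue
        y = mutated[var]
        delta1 = (y - low) / (up - low)
        delta2 = (up - y) / (up - low)
        rnd = random()
        mut_pow = 1.0 / (eta_m + 1.0)
        if rnd < 0.5:
            val = 2.0 * rnd + (1.0 - 2.0 * rnd) * (1.0 - delta1) ** (eta_m + 1.0)
            deltaq = val ** mut_pow - 1.0
        else:
            val = 2.0 * (1.0 - rnd) + 2.0 * (rnd - 0.5) * (1.0 - delta2) ** (eta_m + 1.0)
            deltaq = 1.0 - val ** mut_pow
        mutated[var] = min(up, max(low, y + deltaq * (up - low)))
    return mutated

# e.g. polynomial_mutation_sketch([0.2, 0.5, 0.9], [(0.0, 1.0)] * 3)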
Example 13
def update_neighbor(problem, individual, mutant, population, dist_function,
                    configuration, values_to_be_passed):
    from copy import deepcopy
    new_population = population[:]  #deepcopy(population)

    for i in xrange(configuration["MOEAD"]["niche"]):
        k = individual.neighbor[i]
        neighbor = [pop for pop in new_population if pop.id == k][-1]
        f1 = dist_function(problem, neighbor.fitness.fitness, neighbor.weight,
                           values_to_be_passed, configuration)
        f2 = dist_function(problem, mutant.fitness.fitness, neighbor.weight,
                           values_to_be_passed, configuration)
        if f2 < f1:
            backup_copy = None
            for pop in new_population:
                if pop.id == k:
                    backup_copy = deepcopy(pop)
                    new_population.remove(pop)
                    break
            assert (backup_copy.id == k
                    ), "Assumption that an individual with id k exists is wrong!"
            new_solution = jmoo_individual(problem, mutant.decisionValues,
                                           mutant.fitness.fitness)
            new_solution.id = k
            new_solution.neighbor = deepcopy(backup_copy.neighbor)
            new_solution.weight = deepcopy(backup_copy.weight)
            new_population.append(new_solution)
            assert (new_solution.id == backup_copy.id), "Something is wrong"

    assert (len(new_population) == configuration["Universal"]
            ["Population_Size"]), "Something is wrong with the update"
    return new_population
Example 14
def over_sampling(problem, population, more_count, f=0.75):
    def trim(mutated, low, up):
        """Constraint checking of decision"""
        return max(low, min(mutated, up))

    def interpolate(a, b, random_number):
        return a + random_number * (b - a)

    new_population = []
    for i in xrange(more_count):
        from random import choice, randint, random
        one_count = i%len(population)
        one = population[one_count]
        while True:
            two_count = randint(0, len(population)-1)
            if one_count != two_count: break
        two = population[two_count]
        solution = []
        for d, decision in enumerate(problem.decisions):
            assert isinstance(one, jmoo_individual)
            assert isinstance(two, jmoo_individual)
            solution.append(trim(interpolate(one.decisionValues[d], two.decisionValues[d], random()), decision.low, decision.up))

        new_population.append(jmoo_individual(problem, solution, None))
    return new_population
Example 15
def gale_8_Mutate(problem, leaves, configuration):
    def mutate(candidate, SouthPole, NorthPole):
        mutant = [None for _ in xrange(len(candidate.decisionValues))]
        g = abs(SouthPole.x - NorthPole.x)
        for attr in range(0, len(problem.decisions)):
            # just some naming shortcuts
            me = candidate.decisionValues[attr]
            good = SouthPole.decisionValues[attr]
            bad = NorthPole.decisionValues[attr]
            dec = problem.decisions[attr]

            # Find direction to mutate (Want to mutate towards good pole)
            if me > good: d = -1
            if me < good: d = +1
            if me == good: d = 0
            mutant[attr] = min(dec.up, max(dec.low, (me + me * g * d) * 1.1))

        return jmoo_individual(problem, mutant, None)

    #################
    # Mutation Phase
    #################

    new_population = []
    # Keep track of evals
    number_of_evaluations = 0

    for leaf in leaves:
        initial_length = len(leaf)
        sorted_population = fastmap(problem, leaf)
        # print "sorted_population: ", len(sorted_population), len([g for g in sorted_population if g.fitness.valid])
        number_of_evaluations += 2

        good_ones = sorted_population[:initial_length / 2]
        # print "good_ones: ", len(good_ones), len([g for g in good_ones if g.fitness.valid])
        mutants = [
            mutate(good_one, sorted_population[0], sorted_population[-1])
            for good_one in good_ones
        ]
        # print "mutants: ", len(mutants)

        excess = initial_length - (len(good_ones) + len(mutants))
        random_points = [
            jmoo_individual(problem, problem.generateInput(), None)
            for _ in xrange(excess)
        ]
        # print "excess: ", excess

        new_population += good_ones
        new_population += mutants
        new_population += random_points
        # print "new_population: ", len(new_population)
        assert (initial_length == len(good_ones) + len(mutants) +
                len(random_points)), "Something is wrong"

    # print len(new_population), configuration["Universal"]["Population_Size"]
    assert (len(new_population) == configuration["Universal"]
            ["Population_Size"]), "Something is wrong"
    return new_population, number_of_evaluations
Example 16
def gale2Regen(problem, unusedslot, mutants, configuration):
    howMany = configuration["Universal"]["Population_Size"] - len(mutants)
    # Generate random individuals
    population = []
    for i in range(howMany):
        population.append(jmoo_individual(problem, problem.generateInput(), None))
    
    return mutants+population, 0
Example 17
def gale_nm_Regen(problem, unusedslot, mutants, configuration, generation_number):
    howMany = configuration["Universal"]["Population_Size"]
    # Generate random individuals
    population = []
    for i in range(howMany):
        population.append(jmoo_individual(problem, problem.generateInput(), [generation_number], None))
    
    return population, 0
Example 18
def gale_nm_Mutate(problem, NDLeafs, configuration, gen, actual_population):
    #################
    # Mutation Phase
    #################

    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], 0,
                                                  [x for x in row.cells[len(problem.decisions):]]))
            else:
                population.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], 0, None))

    # Return selectees and number of evaluations
    return population, 0
Example 19
def gale_64_Regen(problem, unusedslot, mutants, configuration):
    howMany = configuration["Universal"]["Population_Size"] - len(mutants)
    # Generate random individuals
    population = []
    for i in range(howMany):
        population.append(jmoo_individual(problem, problem.generateInput(), None))
    
    return mutants+population, 0
Example 20
def generate_population(problem, N):
    population = []
    for i in xrange(N):
        temp_decision = problem.generateInput()
        population.append(
            jmoo_individual(problem,
                            temp_decision,
                            problem.evaluate(temp_decision),
                            index=i))
    return population
Example 21
def galeRegen(problem, unusedSlot, mutants, MU):
    
    howMany = MU - len(mutants)
    
    # Generate random individuals
    population = []
    for i in range(howMany):
        population.append(jmoo_individual(problem, problem.generateInput(), None))
    
    return mutants+population, 0
Example 22
def gale_8_Mutate(problem, leaves, configuration):

    def mutate(candidate, SouthPole, NorthPole):
        mutant = [None for _ in xrange(len(candidate.decisionValues))]
        g = abs(SouthPole.x - NorthPole.x)
        for attr in range(0, len(problem.decisions)):
            # just some naming shortcuts
            me = candidate.decisionValues[attr]
            good = SouthPole.decisionValues[attr]
            bad = NorthPole.decisionValues[attr]
            dec = problem.decisions[attr]

            # Find direction to mutate (Want to mutate towards good pole)
            if me > good:  d = -1
            if me < good:  d = +1
            if me == good: d = 0
            mutant[attr] = min(dec.up, max(dec.low, (me + me * g * d) * 1.1))

        return jmoo_individual(problem, mutant, None)

    #################
    # Mutation Phase
    #################

    new_population = []
    # Keep track of evals
    number_of_evaluations = 0

    for leaf in leaves:
        initial_length = len(leaf)
        sorted_population = fastmap(problem, leaf)
        # print "sorted_population: ", len(sorted_population), len([g for g in sorted_population if g.fitness.valid])
        number_of_evaluations += 2

        good_ones = sorted_population[:initial_length/2]
        # print "good_ones: ", len(good_ones), len([g for g in good_ones if g.fitness.valid])
        mutants = [mutate(good_one, sorted_population[0], sorted_population[-1]) for good_one in good_ones]
        # print "mutants: ", len(mutants)

        excess = initial_length - (len(good_ones) + len(mutants))
        random_points = [jmoo_individual(problem, problem.generateInput(), None) for _ in xrange(excess)]
        # print "excess: ", excess

        new_population += good_ones
        new_population += mutants
        new_population += random_points
        # print "new_population: ", len(new_population)
        assert(initial_length == len(good_ones) + len(mutants) + len(random_points)), "Something is wrong"



    # print len(new_population), configuration["Universal"]["Population_Size"]
    assert(len(new_population) == configuration["Universal"]["Population_Size"]), "Something is wrong"
    return new_population, number_of_evaluations
Example 23
def gale_nm_Regen(problem, unusedslot, mutants, configuration,
                  generation_number):
    howMany = configuration["Universal"]["Population_Size"]
    # Generate random individuals
    population = []
    for i in range(howMany):
        population.append(
            jmoo_individual(problem, problem.generateInput(),
                            [generation_number], None))

    return population, 0
Example 24
def extrapolate(problem, individuals, one, f, cf):
    import random
    from random import randint
    two, three, four = three_others(individuals, one)
    solution = []
    for d, decision in enumerate(problem.decisions):
        assert isinstance(two, jmoo_individual)
        x, y, z = two.decisionValues[d], three.decisionValues[d], four.decisionValues[d]
        if random.random() < cf or randint(0, len(problem.decisions)) == d:
            solution.append(trim(x + f * (y - z), decision.low, decision.up))
        else: solution.append(one.decisionValues[d])

    return jmoo_individual(problem, [float(d) for d in solution], None)
Example 25
def convert_to_jmoo(problem, pareto_fronts):
    tpopulation = []
    for front_no, front in enumerate(pareto_fronts[:-1]):
        for i, dIndividual in enumerate(front):
            cells = []
            for j in xrange(len(dIndividual)):
                cells.append(dIndividual[j])
            tpopulation.append(jmoo_individual(problem, cells, list(dIndividual.fitness.values)))
    for pop in tpopulation: pop.front_no = 0  # all fronts except the last one

    lpopulation = []
    for front_no, front in enumerate(pareto_fronts[-1:]):
        for i, dIndividual in enumerate(front):
            cells = []
            for j in xrange(len(dIndividual)):
                cells.append(dIndividual[j])
            lpopulation.append(jmoo_individual(problem, cells, list(dIndividual.fitness.values)))
    for pop in lpopulation: pop.front_no = -1  # last front

    from itertools import chain
    assert(len(list(chain(*pareto_fronts))) <= len(lpopulation) + len(tpopulation)), "Non Dominated Sorting is wrong!"
    return lpopulation + tpopulation
Example 26
def gale_8_WHERE(problem, population, configuration, values_to_be_passed):
    "The Core method behind GALE"

    from Utilities.where import where
    import numpy as np
    decisions = np.array([pop.decisionValues for pop in population])
    leaves = where(decisions)
    filled_leaves = []
    for leaf in leaves:
        temp_list = [jmoo_individual(problem, list(member), None) for member in leaf]
        filled_leaves.append(temp_list)

    return filled_leaves, 0
Example 27
def extrapolate(problem, individuals, one, f, cf):
    import random
    from random import randint
    two, three, four = three_others(individuals, one)
    solution = []
    for d, decision in enumerate(problem.decisions):
        assert isinstance(two, jmoo_individual)
        x, y, z = two.decisionValues[d], three.decisionValues[
            d], four.decisionValues[d]
        if random.random() < cf or randint(0, len(problem.decisions)) == d:
            solution.append(trim(x + f * (y - z), decision.low, decision.up))
        else:
            solution.append(one.decisionValues[d])

    return jmoo_individual(problem, [float(d) for d in solution], None)
Example 28
def gale_nm_Mutate(problem, NDLeafs, configuration):
    #################
    # Mutation Phase
    #################

    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]],
                        [x for x in row.cells[len(problem.decisions):]]))
            else:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]], None))

    # Return selectees and number of evaluations
    return population, 0
Example 29
def gale_8_WHERE(problem, population, configuration, values_to_be_passed):
    "The Core method behind GALE"

    from Utilities.where import where
    import numpy as np
    decisions = np.array([pop.decisionValues for pop in population])
    leaves = where(decisions)
    filled_leaves = []
    for leaf in leaves:
        temp_list = [
            jmoo_individual(problem, list(member), None) for member in leaf
        ]
        filled_leaves.append(temp_list)

    return filled_leaves, 0
Example 30
def extrapolate(problem, individuals, one, f, cf):
    # #print "Extrapolate"
    two, three, four = three_others(individuals, one)
    # #print two,three,four
    solution = []
    for d, decision in enumerate(problem.decisions):
        assert isinstance(two, jmoo_individual)
        x, y, z = two.decisionValues[d], three.decisionValues[
            d], four.decisionValues[d]
        if random.random() < cf:
            mutated = x + f * (y - z)
            solution.append(trim(mutated, decision.low, decision.up))
        else:
            solution.append(one.decisionValues[d])
    return jmoo_individual(problem, [float(d) for d in solution], None)
Example 31
def extrapolate(problem, individuals, one, f, cf):
    # #print "Extrapolate"
    two, three, four = three_others(individuals, one)
    # #print two,three,four
    solution = []
    for d, decision in enumerate(problem.decisions):
        assert isinstance(two, jmoo_individual)
        x, y, z = two.decisionValues[d], three.decisionValues[d], four.decisionValues[d]
        if random.random() < cf:
            mutated = x + f * (y - z)
            solution.append(trim(mutated, decision.low, decision.up))
        else:
            solution.append(one.decisionValues[d])

    return jmoo_individual(problem, [float(d) for d in solution], None)
Example 32
    def mutate(candidate, SouthPole, NorthPole):
        mutant = [None for _ in xrange(len(candidate.decisionValues))]
        g = abs(SouthPole.x - NorthPole.x)
        for attr in range(0, len(problem.decisions)):
            # just some naming shortcuts
            me = candidate.decisionValues[attr]
            good = SouthPole.decisionValues[attr]
            bad = NorthPole.decisionValues[attr]
            dec = problem.decisions[attr]

            # Find direction to mutate (Want to mutate towards good pole)
            if me > good: d = -1
            if me < good: d = +1
            if me == good: d = 0
            mutant[attr] = min(dec.up, max(dec.low, (me + me * g * d) * 1.1))

        return jmoo_individual(problem, mutant, None)
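The per-attribute rule above nudges each decision toward the "good" pole by a step proportional to the pole distance g and clamps it to the decision bounds. A minimal, self-contained mirror of that rule on plain lists follows; mutate_toward_pole, candidate, good_pole, bounds and g are illustrative names, not part of the library.

def mutate_toward_pole(candidate, good_pole, bounds, g):
    """Nudge each decision toward the good pole, then clamp (illustrative sketch)."""
    mutant = []
    for value, good, (low, up) in zip(candidate, good_pole, bounds):
        if value > good:
            d = -1
        elif value < good:
            d = +1
        else:
            d = 0
        mutant.append(min(up, max(low, (value + value * g * d) * 1.1)))
    return mutant

# e.g. mutate_toward_pole([0.4, 0.9], [0.2, 0.95], [(0.0, 1.0)] * 2, g=0.5)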
Example 33
    def mutate(candidate, SouthPole, NorthPole):
        mutant = [None for _ in xrange(len(candidate.decisionValues))]
        g = abs(SouthPole.x - NorthPole.x)
        for attr in range(0, len(problem.decisions)):
            # just some naming shortcuts
            me = candidate.decisionValues[attr]
            good = SouthPole.decisionValues[attr]
            bad = NorthPole.decisionValues[attr]
            dec = problem.decisions[attr]

            # Find direction to mutate (Want to mutate towards good pole)
            if me > good:  d = -1
            if me < good:  d = +1
            if me == good: d = 0
            mutant[attr] = min(dec.up, max(dec.low, (me + me * g * d) * 1.1))

        return jmoo_individual(problem, mutant, None)
Example 34
    def loadInitialPopulation(problem, MU, path=""):
        "a way to load *the* initial problem as used in jmoo_jmoea.py"
        "this will load a csv as generated by the dataGen method of"
        "jmoo_problems.py"

        if path == "":
            filename = "Data/" + problem.name + "-p" + str(MU) + "-d" + str(
                len(problem.decisions)) + "-o" + str(len(
                    problem.objectives)) + "-dataset.txt"
        elif path == "unittesting":
            filename = "../../Data/Testing-dataset.txt"
        else:
            print "No accounted for"
            exit()

        input = open(filename, 'rb')
        reader = csv.reader(input, delimiter=',')
        population = []

        #Use the csv file to build the initial population
        for k, p in enumerate(reader):
            if k > MU:
                problem.objectives[k - MU - 1].med = float(p[1])
                lownotfound = False
                upnotfound = False

                if problem.objectives[k - MU - 1].low == None:
                    problem.objectives[k - MU - 1].low = float(p[0])
                    lownotfound = True
                if problem.objectives[k - MU - 1].up == None:
                    problem.objectives[k - MU - 1].up = float(p[2])
                    upnotfound = True
                # rangeX5 = (problem.objectives[k-MU-1].up - problem.objectives[k-MU-1].low)*5
                # if lownotfound:
                #     problem.objectives[k-MU-1].low -= rangeX5
                # if upnotfound:
                #     problem.objectives[k-MU-1].up += rangeX5

            elif k > 0:
                population.append(
                    jmoo_individual(problem, [
                        float(p[n]) for n, dec in enumerate(problem.decisions)
                    ], None))
                #population[-1].fitness = jmoo_fitness(problem, [float(p[n+len(problem.decisions)]) for n,obj in enumerate(problem.objectives)])

        return population
Example 35
def over_sampling(problem, population, more_count, f=0.75):
    def trim(mutated, low, up):
        """Constraint checking of decision"""
        return max(low, min(mutated, up))
    new_population = []
    for _ in xrange(more_count):
        from random import choice, randint
        one = choice(population)
        two = choice(population)
        three = choice(population)
        solution = []
        for d, decision in enumerate(problem.decisions):
            assert isinstance(one, jmoo_individual)
            assert isinstance(two, jmoo_individual)
            assert isinstance(three, jmoo_individual)
            x, y, z = one.decisionValues[d], two.decisionValues[d], three.decisionValues[d]
            solution.append(trim(x + f * (y - z), decision.low, decision.up))

        new_population.append(jmoo_individual(problem, solution, None))
    return new_population
Example 36
def over_sampling(problem, population, more_count, f=0.75):
    def trim(mutated, low, up):
        """Constraint checking of decision"""
        return max(low, min(mutated, up))

    new_population = []
    for _ in xrange(more_count):
        from random import choice, randint
        one = choice(population)
        two = choice(population)
        three = choice(population)
        solution = []
        for d, decision in enumerate(problem.decisions):
            assert isinstance(one, jmoo_individual)
            assert isinstance(two, jmoo_individual)
            assert isinstance(three, jmoo_individual)
            x, y, z = one.decisionValues[d], two.decisionValues[
                d], three.decisionValues[d]
            solution.append(trim(x + f * (y - z), decision.low, decision.up))

        new_population.append(jmoo_individual(problem, solution, None))
    return new_population
Example 37
def selNSGA2(problem, population, selectees, k):
    
    # Evaluate any new guys
    for individual in population+selectees:
        if not individual.valid:
            individual.evaluate()
            
    # Format a population data structure usable by DEAP's package
    dIndividuals = deap_format(problem, population+selectees)
    
    # Combine
    dIndividuals = tools.selNSGA2(dIndividuals, k)
    
    # Copy from DEAP structure to JMOO structure
    population = []
    for i,dIndividual in enumerate(dIndividuals):
        cells = []
        for j in range(len(dIndividual)):
            cells.append(dIndividual[j])
        population.append(jmoo_individual(problem, cells, dIndividual.fitness.values))
        
    return population,k
Example 38
def selSPEA2(problem, population, selectees, k):
    # Evaluate any new guys
    for individual in population + selectees:
        if not individual.valid:
            individual.evaluate()

    # Format a population data structure usable by DEAP's package
    dIndividuals = deap_format(problem, population + selectees)

    # Combine
    dIndividuals = tools.selSPEA2(dIndividuals, k)

    # Copy from DEAP structure to JMOO structure
    population = []
    for i, dIndividual in enumerate(dIndividuals):
        cells = []
        for j in range(len(dIndividual)):
            cells.append(dIndividual[j])
        population.append(
            jmoo_individual(problem, cells, dIndividual.fitness.values))

    return population, k
Example 39
    def loadInitialPopulation(problem, MU):  #Done
        "a way to load *the* initial problem as used in jmoo_jmoea.py"
        "this will load a csv as generated by the dataGen method of"
        "jmoo_problems.py"
        "Vivek: This will return a list of object <jmoo_individual>"

        input = open('./data/' + problem.name + str(MU) + 'dataset.txt', 'rb')
        reader = csv.reader(input, delimiter=',')
        population = []

        #Use the csv file to build the initial population
        for k, p in enumerate(reader):
            if k > MU:
                problem.objectives[k - MU - 1].med = float(p[1])
                lownotfound = False
                upnotfound = False

                if problem.objectives[k - MU - 1].low == None:
                    problem.objectives[k - MU - 1].low = float(p[0])
                    lownotfound = True
                if problem.objectives[k - MU - 1].up == None:
                    problem.objectives[k - MU - 1].up = float(p[2])
                    upnotfound = True
                rangeX5 = (problem.objectives[k - MU - 1].up -
                           problem.objectives[k - MU - 1].low) * 5
                if lownotfound:
                    problem.objectives[k - MU - 1].low -= rangeX5
                if upnotfound:
                    problem.objectives[k - MU - 1].up += rangeX5

            elif k > 0:
                population.append(
                    jmoo_individual(problem, [
                        float(p[n]) for n, dec in enumerate(problem.decisions)
                    ], None))
                #population[-1].fitness = jmoo_fitness(problem, [float(p[n+len(problem.decisions)]) for n,obj in enumerate(problem.objectives)])

        return population
Example 40
def selNSGA2(problem, population, selectees, configurations):
    k = configurations["Universal"]["Population_Size"]
    # Evaluate any new guys
    for individual in population + selectees:
        if not individual.valid:
            individual.evaluate()

    # Format a population Data structure usable by DEAP's package
    dIndividuals = deap_format(problem, population + selectees)

    # Combine
    from Algorithms.DEAP.tools.emo import deap_selNSGA2
    dIndividuals = deap_selNSGA2(dIndividuals, k)

    # Copy from DEAP structure to JMOO structure
    population = []
    for i, dIndividual in enumerate(dIndividuals):
        cells = []
        for j in range(len(dIndividual)):
            cells.append(dIndividual[j])
        population.append(jmoo_individual(problem, cells, [f for f in dIndividual.fitness.values]))

    return population, k
Example 41
def get_non_dominated_solutions(problem, population, configurations):
    # NOTE: This might look weird, but it returns all the non-dominated solutions
    k = configurations["Universal"]["Population_Size"]
    # Evaluate any new guys
    for individual in population:
        if not individual.valid:
            individual.evaluate()

    # Format a population Data structure usable by DEAP's package
    dIndividuals = deap_format(problem, population)

    # Combine
    from Algorithms.DEAP.tools.emo import deap_selNSGA2
    dIndividuals = deap_selNSGA2(dIndividuals, k)

    # Copy from DEAP structure to JMOO structure
    population = []
    for i, dIndividual in enumerate(dIndividuals):
        cells = []
        for j in range(len(dIndividual)):
            cells.append(dIndividual[j])
        population.append(jmoo_individual(problem, cells, [f for f in dIndividual.fitness.values]))

    return population
Example 42
    def update(statBox, population, gen, num_new_evals, initial = False, population_size=None, printOption=True):
        "add a stat box - compute the statistics first"

        # Find a file name to write the stats to
        if (statBox.alg.name == "GALE0" or statBox.alg.name == "GALE_no_mutation") and population_size is not None:
            filename = "Data/results_"+statBox.problem.name + "-p" + str(population_size) + "-d" + \
                   str(len(statBox.problem.decisions)) + "-o" + str(len(statBox.problem.objectives))+\
                   "_"+statBox.alg.name+".datatable"
        else:
            filename = "Data/results_"+statBox.problem.name + "-p" + str(len(population)) + "-d" + \
                       str(len(statBox.problem.decisions)) + "-o" + str(len(statBox.problem.objectives))+\
                       "_"+statBox.alg.name+".datatable"

        fa = open(filename, 'a')

        # Update Number of Evaluations
        statBox.numEval += num_new_evals

        # population here holds only the individuals that have been evaluated
        shorten_population = [pop for pop in population if pop.fitness.valid]
        objectives = [individual.fitness.fitness for individual in shorten_population]
        # Split Columns into Lists
        objective_columns = [[objective[i] for objective in objectives] for i, obj in enumerate(statBox.problem.objectives)]
        # Calculate Medians of objective scores
        objective_medians = [median(fitCol) for fitCol in objective_columns]
        # Calculate IQR of objective scores
        objective_iqr = [spread(fitCol) for fitCol in objective_columns]
        
        # Initialize Reference Point on Initial Run
        if initial is True:
            statBox.referencePoint = [o.med for o in statBox.problem.objectives]
            statBox.reference_point_for_hypervolume = [o.up for o in statBox.problem.objectives]


        # Calculate IBD & IBS
        # Finding min and max for each objectives
        norms = [[min(objective_columns[i]+[statBox.referencePoint[i]]), max(objective_columns[i]+[statBox.referencePoint[i]])] for i,obj in enumerate(statBox.problem.objectives)]

        lossInQualities = [{"qual": loss_in_quality(statBox.problem, [statBox.referencePoint], fit, norms), "index": i} for i,fit in enumerate(objectives)]
        lossInQualities.sort(key=lambda r: r["qual"])
        if len(objectives) > 0: 
            best_fitness = objectives[lossInQualities[0]["index"]]
        else:
            best_fitness = objective_medians
        lossInQualities = [item["qual"] for item in lossInQualities]

        IBD = median(lossInQualities)
        IBS = spread(lossInQualities)

        if initial is True:
            IBD = 1.0
            statBox.referenceIBD = 1.0
        
        changes = []
        # Print Option
        if printOption is True:
            outString = ""
            
            if initial:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(statBox.referencePoint, [0 for x in statBox.problem.objectives],
                                                 statBox.referencePoint, statBox.problem.objectives,
                                                 range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low, obj.up)
                    changes.append(float(change.strip("%")))
                    statBox.bests[o] = changes[-1]
                    statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str("%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]: statBox.foam[o][statBox.numEval].append(change)
                    else: statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(statBox.referenceIBD, statBox.referenceIBD, True, 0, 1) + "," + str("%8.4f" % IBS)
            else:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(best_fitness, objective_iqr, statBox.referencePoint,
                                                 statBox.problem.objectives, range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low, obj.up)
                    changes.append(float(change.strip("%")))
                    if changes[-1] < statBox.bests[o]: 
                        statBox.bests[o] = changes[-1]
                        statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str("%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]: statBox.foam[o][statBox.numEval].append(change)
                    else: statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(IBD, statBox.referenceIBD, True, 0, 1) + "," + str("%8.4f" % IBS)
            fa.write(outString + "\n")
        
            
        # Add Stat to the Stat Box
        trunk = []
        for i,pop in enumerate(shorten_population):
            trunk.append(jmoo_individual(statBox.problem, pop.decisionValues, pop.fitness.fitness))
        statBox.box[-1] = jmoo_stats(trunk, objectives, best_fitness, objective_iqr, statBox.numEval, gen, IBD, IBS, changes)
        fa.close()
Example 43
    def update(statBox, population, gen, numNewEvals, initial = False, printOption=True):
        "add a stat box - compute the statistics first"
        filename = "data/results_"+statBox.problem.name + "-p" + str(len(population)) + "-d" + \
                   str(len(statBox.problem.decisions)) + "-o" + str(len(statBox.problem.objectives))+\
                   "_"+statBox.alg.name+".datatable"
        fa = open(filename, 'a')
        
        # Calculate percentage of violations
        violationsPercent = sum([ 1 for pop in population if statBox.problem.evalConstraints(pop.decisionValues)])/float(len(population))
        
        # Update Number of Evaluations
        statBox.numEval += numNewEvals
        #front = population
        #for pop in population:
        #    if not pop.valid: pop.evaluate()
        population = [pop for pop in population if pop.fitness.valid]


        fitnesses = [individual.fitness.fitness for individual in population if individual.valid]

        # Split Columns into Lists
        fitnessColumns = [[fit[i] for fit in fitnesses] for i,obj in enumerate(statBox.problem.objectives)]


    
        # Calculate Medians and Spreads
        fitnessMedians = [median(fitCol) for fitCol in fitnessColumns]
        fitnessSpreads = [spread(fitCol) for fitCol in fitnessColumns]
        
        # Initialize Reference Point on Initial Run
        if initial == True:
            statBox.referencePoint = [o.med for o in statBox.problem.objectives]

            

        # Calculate IBD & IBS
        norms = [[min(fitnessColumns[i]+[statBox.referencePoint[i]]), max(fitnessColumns[i]+[statBox.referencePoint[i]])] for i,obj in enumerate(statBox.problem.objectives)]

        lossInQualities = [{"qual": loss_in_quality(statBox.problem, [statBox.referencePoint], fit, norms), "index": i} for i,fit in enumerate(fitnesses)]
        
        lossInQualities.sort(key=lambda r: r["qual"])
        if len(fitnesses) > 0: 
            best_fitness = fitnesses[lossInQualities[0]["index"]]
        else:
            best_fitness = fitnessMedians
        lossInQualities = [item["qual"] for item in lossInQualities]
        #best_fitness = [min(fitCol) for fitCol in fitnessColumns if len(fitCol) > 0]

        # + IGD Calculation: This would only work if the true PF is known.
        if IGDMEASURE is True:
            approximate = []
            true_PF = readpf(statBox.problem)
            for individual in population:
                temp = []
                for x in individual.fitness.fitness: temp.append(round(x, 5))
                approximate.append(temp)
        # - IGD Calculation

        IBD = median(lossInQualities)
        IBS = spread(lossInQualities)
        if IGDMEASURE is True:
            IGD = IGD_Calculation.IGD(approximate, true_PF)
        
        if initial == True:
            IBD = 1.0
            statBox.referenceIBD = 1.0
            # TODO: come up with a smarter way to assign the reference IGD;
            # this placeholder value has caused problems
            if IGDMEASURE is True:
                statBox.referenceIGD = 1e30
        
        
        changes = []
        # Print Option
        if printOption == True:
            outString = ""
            
            if initial:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(statBox.referencePoint, [0 for x in statBox.problem.objectives],
                                                 statBox.referencePoint, statBox.problem.objectives,
                                                 range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low, obj.up)
                    changes.append(float(change.strip("%")))
                    statBox.bests[o] = changes[-1]
                    statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str("%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]: statBox.foam[o][statBox.numEval].append(change)
                    else: statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(statBox.referenceIBD, statBox.referenceIBD, True, 0, 1) + "," + str("%8.4f" % IBS)
                if IGDMEASURE is True:
                    outString += "," + str("%8.4f" % IGD) + "," + percentChange(statBox.referenceIGD, statBox.referenceIGD, True, 0, 1e3)
            else:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(best_fitness, fitnessSpreads, statBox.referencePoint,
                                                 statBox.problem.objectives, range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low, obj.up)
                    changes.append(float(change.strip("%")))
                    if changes[-1] < statBox.bests[o]: 
                        statBox.bests[o] = changes[-1]
                        statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str("%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]: statBox.foam[o][statBox.numEval].append(change)
                    else: statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(IBD, statBox.referenceIBD, True, 0, 1) + "," + str("%8.4f" % IBS)
                print outString  + ", violations: " + str("%4.1f" % violationsPercent)
                if IGDMEASURE is True:
                    outString += "," + str("%8.4f" % IGD) + "," + percentChange(IGD, statBox.referenceIGD, True, 0, 1e3)




            # if initial:
            #     if IGDMEASURE is True:
            #         print outString  + ", violations: " + str("%4.1f" % violationsPercent) + "||IGD: " + str(IGD_Calculation.IGD(approximate, true_PF))
            #     else: print outString  + ", violations: " + str("%4.1f" % violationsPercent)
            # else:
            #     # print outString  + ", violations: " + str("%4.1f" % violationsPercent) + "||IGD: " + str(IGD(approximate, true_PF))
            #     # print str(statBox.numEval) + "|IGD: |" + str(IGD(approximate, true_PF)) + "|Fitness:| ", normalized_median
            #     print "|Fitness: |", fitnessMedians
            fa.write(outString + "\n")
        
            
        # Add Stat to the Stat Box
        trunk = []
        for i,pop in enumerate(population):
            trunk.append(jmoo_individual(statBox.problem, pop.decisionValues, pop.fitness.fitness))
            #if i < 5: print trunk[-1].decisionValues, statBox.problem.evalConstraints(trunk[-1].decisionValues)
        statBox.box.append(jmoo_stats(trunk, fitnesses, best_fitness, fitnessSpreads, statBox.numEval, gen, IBD, IBS, changes))
        fa.close()
Example 44
def sbxcrossover(problem, parent1, parent2, configuration):

    EPS = 1.0e-14
    distribution_index = configuration["NSGAIII"]["ETA_C_DEFAULT_"]
    probability = configuration["NSGAIII"]["SBX_Probability"]
    from numpy.random import random
    # copy the parents' decision lists so crossover does not modify them in place
    offspring1 = jmoo_individual(problem, list(parent1.decisionValues))
    offspring2 = jmoo_individual(problem, list(parent2.decisionValues))

    number_of_variables = len(problem.decisions)
    if random() <= probability:
        for i in xrange(number_of_variables):
            valuex1 = offspring1.decisionValues[i]
            valuex2 = offspring2.decisionValues[i]
            if random() <= 0.5:
                if abs(valuex1 - valuex2) > EPS:
                    if valuex1 < valuex2:
                        y1 = valuex1
                        y2 = valuex2
                    else:
                        y1 = valuex2
                        y2 = valuex1

                    yL = problem.decisions[i].low
                    yU = problem.decisions[i].up
                    rand = random()
                    beta = 1.0 + (2.0 * (y1 - yL) / (y2 - y1))
                    alpha = 2.0 - beta ** (-1 * (distribution_index + 1.0))

                    if rand <= 1.0 / alpha:
                        betaq = (rand * alpha) ** (1.0 / (distribution_index + 1.0))
                    else:
                        betaq = (1.0 / (2.0 - rand * alpha)) ** (1.0 / (distribution_index + 1.0))

                    c1 = 0.5 * ((y1 + y2) - betaq * (y2 - y1))
                    beta = 1.0 + (2.0 * (yU - y2) / (y2 - y1))
                    alpha = 2.0 - beta ** -(distribution_index + 1.0)

                    if rand <= (1.0 / alpha):
                        betaq = (rand * alpha) ** (1.0 / (distribution_index + 1.0))
                    else:
                        betaq = ((1.0 / (2.0 - rand * alpha)) ** (1.0 / (distribution_index + 1.0)))

                    c2 = 0.5 * ((y1 + y2) + betaq * (y2 - y1))

                    if c1 < yL: c1 = yL
                    if c2 < yL: c2 = yL
                    if c1 > yU: c1 = yU
                    if c2 > yU: c2 = yU

                    if random() <= 0.5:
                        offspring1.decisionValues[i] = c2
                        offspring2.decisionValues[i] = c1
                    else:
                        offspring1.decisionValues[i] = c1
                        offspring2.decisionValues[i] = c2
                else:
                    offspring1.decisionValues[i] = valuex1
                    offspring2.decisionValues[i] = valuex2
            else:
                offspring1.decisionValues[i] = valuex2
                offspring2.decisionValues[i] = valuex1

    return offspring1, offspring2
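
The two betaq branches above follow the standard simulated binary crossover (SBX) spread-factor arithmetic. As a reference, here is a small, self-contained sketch of that arithmetic for a single decision variable, written independently of the jmoo data structures (the helper and its names are illustrative, not part of this codebase):

from random import random

def sbx_children_sketch(y1, y2, yL, yU, eta=30.0):
    # y1 < y2 are the two parent values, [yL, yU] the variable's bounds and
    # eta the distribution index (larger eta => children closer to parents).
    rand = random()
    children = []
    for sign in (-1.0, +1.0):
        if sign < 0:  # child biased towards y1, bounded below by yL
            beta = 1.0 + (2.0 * (y1 - yL) / (y2 - y1))
        else:         # child biased towards y2, bounded above by yU
            beta = 1.0 + (2.0 * (yU - y2) / (y2 - y1))
        alpha = 2.0 - beta ** (-(eta + 1.0))
        if rand <= 1.0 / alpha:   # contracting spread factor
            betaq = (rand * alpha) ** (1.0 / (eta + 1.0))
        else:                     # expanding spread factor
            betaq = (1.0 / (2.0 - rand * alpha)) ** (1.0 / (eta + 1.0))
        child = 0.5 * ((y1 + y2) + sign * betaq * (y2 - y1))
        children.append(min(yU, max(yL, child)))
    return children  # [c1, c2], both clipped to the variable's bounds
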
Esempio n. 45
0
def gale_64_Mutate(problem, NDLeafs, configuration):
    #################
    # Mutation Phase
    #################
    # Keep track of evals
    numEval = 0

    population = []
    for leaf in NDLeafs:

        initial_size = len(leaf.table.rows)

        # print "Number of mutants: ", len(leaf.table.rows)
        # Pull out the Poles
        east = leaf.table.rows[0]
        west = leaf.table.rows[-1]

        # Evaluate those poles if needed
        if not east.evaluated:
            for o, objScore in enumerate(problem.evaluate(east.cells)):
                east.cells[-(len(problem.objectives) - o)] = objScore
            east.evaluated = True
            numEval += 1
        if not west.evaluated:
            for o, objScore in enumerate(problem.evaluate(west.cells)):
                west.cells[-(len(problem.objectives) - o)] = objScore
            west.evaluated = True
            numEval += 1

        # Score the poles
        n = len(problem.decisions)
        weights = []
        for obj in problem.objectives:
            # w is negative when we are maximizing that objective
            if obj.lismore:
                weights.append(+1)
            else:
                weights.append(-1)
        weightedWest = [c * w for c, w in zip(west.cells[n:], weights)]
        weightedEast = [c * w for c, w in zip(east.cells[n:], weights)]
        westLoss = loss(weightedWest,
                        weightedEast,
                        mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])
        eastLoss = loss(weightedEast,
                        weightedWest,
                        mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])

        # Determine better Pole and keep the half of the leaf nearest to it
        if eastLoss < westLoss:
            to_be_mutated = leaf.table.rows[:int(len(leaf.table.rows) / 2)]
        else:
            to_be_mutated = leaf.table.rows[int(len(leaf.table.rows) / 2):]

        to_be_mutated_jmoo = []
        for row in to_be_mutated:
            if row.evaluated:
                to_be_mutated_jmoo.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]],
                        [x for x in row.cells[len(problem.decisions):]]))
            else:
                to_be_mutated_jmoo.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]], None))

        for i in xrange(initial_size - len(to_be_mutated)):
            index = i % len(to_be_mutated_jmoo)
            mutant = variation(problem, index, to_be_mutated_jmoo,
                               configuration)
            to_be_mutated_jmoo.append(mutant)

        members_evaluated = sum([1 for i in to_be_mutated_jmoo if i.valid])
        while members_evaluated <= 2 and members_evaluated < len(to_be_mutated_jmoo):
            from random import randint
            index = randint(0, len(to_be_mutated_jmoo) - 1)
            # Don't spend an evaluation on a member that already has a score
            if to_be_mutated_jmoo[index].valid:
                continue
            to_be_mutated_jmoo[index].evaluate()
            numEval += 1
            members_evaluated += 1
            print "> ", members_evaluated

        population += to_be_mutated_jmoo

    return population, numEval
Esempio n. 46
0
def evolve_neighbor(problem, individual, population):
    mutant = genetic_operation(problem, individual, population)
    mutant = jmoo_individual(problem, [float(d) for d in mutant], None)
    mutant.evaluate()
    return update_neighbor(problem, individual, mutant, population, weighted_tche)
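
evolve_neighbor hands the freshly evaluated mutant to update_neighbor together with weighted_tche, which by its name is a weighted Tchebycheff scalarization (the MOEA/D-style aggregation). A minimal sketch of that aggregation, assuming a weight vector and an ideal point z* (the helper below is illustrative and not part of this codebase):

def weighted_tchebycheff_sketch(fitness, weights, z_star):
    # g(x | w, z*) = max_i w_i * |f_i(x) - z*_i|; lower is better, so
    # update_neighbor would presumably keep the mutant when its score drops.
    return max(w * abs(f - z) for f, w, z in zip(fitness, weights, z_star))
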
Esempio n. 47
0
def gale2Mutate(problem, NDLeafs, configuration, gen, actual_population):
    #################
    # Mutation Phase
    #################

    # Keep track of evals
    numEval = 0

    for leaf in NDLeafs:
        # Pull out the Poles
        east = leaf.table.rows[0]
        west = leaf.table.rows[-1]

        # Evaluate those poles if needed
        if not east.evaluated:
            for o, objScore in enumerate(problem.evaluate(east.cells)):
                east.cells[-(len(problem.objectives) - o)] = objScore
            east.evaluated = True
            numEval += 1
        if not west.evaluated:
            for o, objScore in enumerate(problem.evaluate(west.cells)):
                west.cells[-(len(problem.objectives) - o)] = objScore
            west.evaluated = True
            numEval += 1

        # Score the poles
        n = len(problem.decisions)
        weights = []
        for obj in problem.objectives:
            # w is negative when we are maximizing that objective
            if obj.lismore:
                weights.append(+1)
            else:
                weights.append(-1)
        weightedWest = [c * w for c, w in zip(west.cells[n:], weights)]
        weightedEast = [c * w for c, w in zip(east.cells[n:], weights)]
        westLoss = loss(weightedWest,
                        weightedEast,
                        mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])
        eastLoss = loss(weightedEast,
                        weightedWest,
                        mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])

        # Determine better Pole
        if eastLoss < westLoss:
            SouthPole, NorthPole = east, west
        else:
            SouthPole, NorthPole = west, east

        # Magnitude of the mutations
        g = abs(SouthPole.x - NorthPole.x)

        # Iterate over the individuals of the leaf
        for row in leaf.table.rows:

            # Make a copy of the row in case we reject it
            copy = [item for item in row.cells]
            temp_generation_number = get_previous_generation_number(
                actual_population, copy)
            print "Generation Number: ", temp_generation_number
            cx = row.x

            for attr in range(0, len(problem.decisions)):

                # just some naming shortcuts
                me = row.cells[attr]
                good = SouthPole.cells[attr]
                bad = NorthPole.cells[attr]
                dec = problem.decisions[attr]

                # Find direction to mutate (Want to mutate towards good pole)
                if me > good: d = -1
                if me < good: d = +1
                if me == good: d = 0

                row.cells[attr] = min(
                    dec.up,
                    max(dec.low,
                        (me + me * g * d) * configuration["GALE"]["DELTA"]))

            # Project the Mutant
            a = row.distance(NorthPole)
            b = row.distance(SouthPole)
            c = NorthPole.distance(SouthPole)
            x = (a**2 + row.c**2 - b**2) / (2 * row.c + 0.00001)

            # Test Mutant for Acceptance
            # confGAMMA = 0.15 #note: make this a property

            # print abs(cx-x), (cx + (g * configuration["GALE"]["GAMMA"]))
            if abs(x - cx) > (g * configuration["GALE"]["GAMMA"]
                              ) or problem.evalConstraints(
                                  row.cells[:n]):  # reject it
                row.cells = copy
                row.x = x
            row.generation = temp_generation_number + [gen]

    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]],
                        row.generation,
                        [x for x in row.cells[len(problem.decisions):]]))
            else:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[:len(problem.decisions)]],
                        row.generation, None))

    # Return selectees and number of evaluations
    return population, numEval
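
The per-attribute mutation above nudges each decision towards the better (south) pole, scaled by the pole gap g and the configured DELTA, then clips to the decision's bounds. Pulled out on its own, the step looks roughly like this (illustrative helper that mirrors the expression used in the loop):

def gale2_mutate_attr_sketch(me, good, low, up, g, delta):
    # me: current value, good: the SouthPole's value for this decision,
    # g: |SouthPole.x - NorthPole.x|, delta: configuration["GALE"]["DELTA"]
    if me > good:
        d = -1
    elif me < good:
        d = +1
    else:
        d = 0
    return min(up, max(low, (me + me * g * d) * delta))
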
Esempio n. 48
0
    def update(statBox, population, gen, numNewEvals, initial = False, printOption=True):
        "add a stat box - compute the statistics first"
        fa = open("data/results_"+statBox.problem.name+"_"+statBox.alg.name+".datatable", 'a')
        
        # Calculate percentage of violations
        violationsPercent = sum([ 1 for pop in population if statBox.problem.evalConstraints(pop.decisionValues)])/float(len(population))
        
        # Update Number of Evaluations
        statBox.numEval += numNewEvals
        #front = population
        #for pop in population:
        #    if not pop.valid: pop.evaluate()
        population = [pop for pop in population if pop.fitness.valid]
        
        #population = jmoo_algorithms.deap_format(statBox.problem, population)
        
        #front = ParetoFront()
        #front.update(population)
        
        """
        fitnesses = []
        population = []
        for i,dIndividual in enumerate(front):
            cells = []
            for j in range(len(dIndividual)):
                cells.append(dIndividual[j])
            fit = []
            for k in range(len(statBox.problem.objectives)):
                fit.append(dIndividual.fitness.values[k])
            population.append( jmoo_individual(statBox.problem, cells, fit) )
        """         
        # Evaluate Fitnesses
        #for individual in population:
        #    if not individual.valid: individual.evaluate()
        fitnesses = [individual.fitness.fitness for individual in population if individual.valid]
    
                
        
                
        # Split Columns into Lists
        fitnessColumns = [[fit[i] for fit in fitnesses] for i,obj in enumerate(statBox.problem.objectives)]
    
        # Calculate Medians and Spreads
        fitnessMedians = [median(fitCol) for fitCol in fitnessColumns]
        fitnessSpreads = [spread(fitCol) for fitCol in fitnessColumns]
        
        # Initialize Reference Point on Initial Run
        if initial == True:
            #statBox.referencePoint = fitnessMedians 
            #statBox.referencePoint = statBox.problem.referencePoint
            #statBox.referencePoint = statBox.problem.evaluate(population[0].decisionValues)
            statBox.referencePoint = [o.med for o in statBox.problem.objectives]
            print [(o.low, o.up) for o in statBox.problem.objectives]
            

        # Calculate IBD & IBS
        norms = [[min(fitnessColumns[i]+[statBox.referencePoint[i]]), max(fitnessColumns[i]+[statBox.referencePoint[i]])] for i,obj in enumerate(statBox.problem.objectives)]
        lossInQualities = [loss_in_quality(statBox.problem, [statBox.referencePoint], fit, norms) for fit in fitnesses]
        IBD = median(lossInQualities)
        IBS = spread(lossInQualities)
        
        if initial == True:
            IBD = 1.0
            statBox.referenceIBD = 1.0
        
        
        changes = []
        # Print Option
        if printOption == True:
            outString = ""
            
            if initial:
                outString += str(statBox.numEval) + ","
                for med,spr,initmed,obj,o in zip(statBox.referencePoint, [0 for x in statBox.problem.objectives], statBox.referencePoint,statBox.problem.objectives,range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low, obj.up)
                    changes.append(float(change.strip("%")))
                    statBox.bests[o] = changes[-1]
                    statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str("%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]: statBox.foam[o][statBox.numEval].append(change)
                    else: statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(statBox.referenceIBD, statBox.referenceIBD, True, 0, 1) + "," + str("%8.4f" % IBS)
            else:
                outString += str(statBox.numEval) + ","
                for med,spr,initmed,obj,o in zip(fitnessMedians, fitnessSpreads, statBox.referencePoint,statBox.problem.objectives,range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low, obj.up)
                    changes.append(float(change.strip("%")))
                    if changes[-1] < statBox.bests[o]: 
                        statBox.bests[o] = changes[-1]
                        statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str("%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]: statBox.foam[o][statBox.numEval].append(change)
                    else: statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(IBD, statBox.referenceIBD, True, 0, 1) + "," + str("%8.4f" % IBS)
                
            print outString  + ", violations: " + str("%4.1f" % violationsPercent)
            fa.write(outString + "\n")
        
            
        # Add Stat to the Stat Box
        trunk = []
        for i,pop in enumerate(population):
            trunk.append(jmoo_individual(statBox.problem, pop.decisionValues, pop.fitness.fitness))
            #if i < 5: print trunk[-1].decisionValues, statBox.problem.evalConstraints(trunk[-1].decisionValues)
        statBox.box.append(jmoo_stats(trunk, fitnesses, fitnessMedians, fitnessSpreads, statBox.numEval, gen, IBD, IBS, changes))
        fa.close()
Esempio n. 49
0
def galeMutate(problem, NDLeafs, configuration):
    #################
    # Mutation Phase
    #################

    # Keep track of evals
    numEval = 0

    for leaf in NDLeafs:

        # Pull out the Poles
        east = leaf.table.rows[0]
        west = leaf.table.rows[-1]

        # Evaluate those poles if needed
        if not east.evaluated:
            for o, objScore in enumerate(problem.evaluate(east.cells)):
                east.cells[-(len(problem.objectives) - o)] = objScore
            east.evaluated = True
            numEval += 1
        if not west.evaluated:
            for o, objScore in enumerate(problem.evaluate(west.cells)):
                west.cells[-(len(problem.objectives) - o)] = objScore
            west.evaluated = True
            numEval += 1

        # Score the poles
        n = len(problem.decisions)
        weights = []
        for obj in problem.objectives:
            # w is negative when we are maximizing that objective
            if obj.lismore:
                weights.append(+1)
            else:
                weights.append(-1)
        weightedWest = [c * w for c, w in zip(west.cells[n:], weights)]
        weightedEast = [c * w for c, w in zip(east.cells[n:], weights)]
        westLoss = loss(
            weightedWest,
            weightedEast,
            mins=[obj.low for obj in problem.objectives],
            maxs=[obj.up for obj in problem.objectives],
        )
        eastLoss = loss(
            weightedEast,
            weightedWest,
            mins=[obj.low for obj in problem.objectives],
            maxs=[obj.up for obj in problem.objectives],
        )

        # Determine better Pole
        if eastLoss < westLoss:
            SouthPole, NorthPole = east, west
        else:
            SouthPole, NorthPole = west, east

        # Magnitude of the mutations
        g = abs(SouthPole.x - NorthPole.x)

        # Iterate over the individuals of the leaf
        for row in leaf.table.rows:

            # Make a copy of the row in case we reject it
            copy = [item for item in row.cells]
            cx = row.x

            for attr in range(0, len(problem.decisions)):

                # just some naming shortcuts
                me = row.cells[attr]
                good = SouthPole.cells[attr]
                bad = NorthPole.cells[attr]
                dec = problem.decisions[attr]

                # Find direction to mutate (Want to mutate towards good pole)
                if me > good:
                    d = -1
                if me < good:
                    d = +1
                if me == good:
                    d = 0

                row.cells[attr] = min(dec.up, max(dec.low, me + me * g * d))

            # Project the Mutant
            a = row.distance(NorthPole)
            b = row.distance(SouthPole)
            c = NorthPole.distance(SouthPole)
            x = (a ** 2 + row.c ** 2 - b ** 2) / (2 * row.c + 0.00001)

            # Test Mutant for Acceptance
            # confGAMMA = 0.15 #note: make this a property

            # print abs(cx-x), (cx + (g * configuration["GALE"]["GAMMA"]))
            if abs(x - cx) > (g * configuration["GALE"]["GAMMA"]) or problem.evalConstraints(
                row.cells[:n]
            ):  # reject it
                row.cells = copy
                row.x = x

    # After mutation; Convert back to JMOO Data Structures
    population = []
    for leaf in NDLeafs:
        for row in leaf.table.rows:
            if row.evaluated:
                population.append(
                    jmoo_individual(
                        problem,
                        [x for x in row.cells[: len(problem.decisions)]],
                        [x for x in row.cells[len(problem.decisions) :]],
                    )
                )
            else:
                population.append(jmoo_individual(problem, [x for x in row.cells[: len(problem.decisions)]], None))

    # Return selectees and number of evaluations
    return population, numEval
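
galeMutate picks the better pole by comparing loss(weightedEast, weightedWest) against loss(weightedWest, weightedEast): the pole with the smaller loss wins. The exact loss implementation lives elsewhere in the codebase; the sketch below only illustrates the general shape of Zitzler-style continuous domination that this comparison assumes (min-max normalize the objectives, then penalize one point exponentially for the amount by which it is worse than the other; lower is better). Treat the constants and sign conventions as assumptions, not as the repo's definition:

from math import exp

def continuous_domination_loss_sketch(x1, x2, mins, maxs):
    # Normalize each (already weight-adjusted) objective to [0, 1]
    def norm(v, lo, hi):
        return (v - lo) / (hi - lo + 1e-32)
    x1 = [norm(v, lo, hi) for v, lo, hi in zip(x1, mins, maxs)]
    x2 = [norm(v, lo, hi) for v, lo, hi in zip(x2, mins, maxs)]
    n = len(x1)
    # exp() amplifies any dimension where x1 is worse than x2
    return sum(exp((a - b) / n) for a, b in zip(x1, x2)) / n
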
Esempio n. 50
0
def fastmap(problem, true_population):
    """
    Fastmap function that projects all the points on the principal component
    :param problem: Instance of the problem
    :param population: Set of points in the cluster population
    :return:
    """

    def list_equality(lista, listb):
        for a, b in zip(lista, listb):
            if a != b: return False
        return True

    from random import choice
    from Techniques.euclidean_distance import euclidean_distance

    decision_population = [pop.decisionValues for pop in true_population]
    one = choice(decision_population)
    west = furthest(one, decision_population)
    east = furthest(west, decision_population)

    west_indi = jmoo_individual(problem,west, None)
    east_indi = jmoo_individual(problem,east, None)
    west_indi.evaluate()
    east_indi.evaluate()

    for true_pop in true_population:
        if list_equality(true_pop.decisionValues, west_indi.decisionValues): true_pop.fitness.fitness = west_indi.fitness.fitness
        if list_equality(true_pop.decisionValues, east_indi.decisionValues): true_pop.fitness.fitness = east_indi.fitness.fitness


    # Score the poles
    n = len(problem.decisions)
    weights = []
    for obj in problem.objectives:
        # w is negative when we are maximizing that objective
        if obj.lismore:
            weights.append(+1)
        else:
            weights.append(-1)
    weightedWest = [c * w for c, w in zip(west_indi.fitness.fitness, weights)]
    weightedEast = [c * w for c, w in zip(east_indi.fitness.fitness, weights)]
    westLoss = loss(weightedWest, weightedEast, mins=[obj.low for obj in problem.objectives],
                    maxs=[obj.up for obj in problem.objectives])
    eastLoss = loss(weightedEast, weightedWest, mins=[obj.low for obj in problem.objectives],
                    maxs=[obj.up for obj in problem.objectives])

    # Determine better Pole
    if eastLoss < westLoss:
        SouthPole, NorthPole = east_indi, west_indi
    else:
        SouthPole, NorthPole = west_indi, east_indi


    east = SouthPole.decisionValues
    west = NorthPole.decisionValues

    c = euclidean_distance(east, west)
    tpopulation = []
    for one in decision_population:
        a = euclidean_distance(one, west)
        b = euclidean_distance(one, east)
        tpopulation.append([one, projection(a, b, c)])

    for tpop in tpopulation:
        for true_pop in true_population:
            if list_equality(tpop[0], true_pop.decisionValues):
                true_pop.x = tpop[-1]
    temp_list =  sorted(true_population, key=lambda pop: pop.x)
    return temp_list
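
fastmap places every point on the axis between the two poles via projection(a, b, c). Given the distances a (point to the west pole), b (point to the east pole) and c (pole to pole), the textbook FastMap projection is the cosine-rule expression below, which is the same formula the GALE mutators spell out as (a**2 + row.c**2 - b**2) / (2 * row.c + 0.00001). Sketch only; the real projection helper is defined elsewhere in the codebase:

def projection_sketch(a, b, c):
    # Scalar coordinate of the point along the west-to-east axis
    return (a ** 2 + c ** 2 - b ** 2) / (2.0 * c + 0.00001)
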
Esempio n. 51
0
def gale_64_Mutate(problem, NDLeafs, configuration):
    #################
    # Mutation Phase
    #################
    # Keep track of evals
    numEval = 0

    population = []
    for leaf in NDLeafs:

        initial_size = len(leaf.table.rows)

        # print "Number of mutants: ", len(leaf.table.rows)
        # Pull out the Poles
        east = leaf.table.rows[0]
        west = leaf.table.rows[-1]

        # Evaluate those poles if needed
        if not east.evaluated:
            for o, objScore in enumerate(problem.evaluate(east.cells)):
                east.cells[-(len(problem.objectives) - o)] = objScore
            east.evaluated = True
            numEval += 1
        if not west.evaluated:
            for o, objScore in enumerate(problem.evaluate(west.cells)):
                west.cells[-(len(problem.objectives) - o)] = objScore
            west.evaluated = True
            numEval += 1

        # Score the poles
        n = len(problem.decisions)
        weights = []
        for obj in problem.objectives:
            # w is negative when we are maximizing that objective
            if obj.lismore:
                weights.append(+1)
            else:
                weights.append(-1)
        weightedWest = [c * w for c, w in zip(west.cells[n:], weights)]
        weightedEast = [c * w for c, w in zip(east.cells[n:], weights)]
        westLoss = loss(weightedWest, weightedEast, mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])
        eastLoss = loss(weightedEast, weightedWest, mins=[obj.low for obj in problem.objectives],
                        maxs=[obj.up for obj in problem.objectives])

        # Determine better Pole and keep the half of the leaf nearest to it
        if eastLoss < westLoss:
            to_be_mutated = leaf.table.rows[:int(len(leaf.table.rows)/2)]
        else:
            to_be_mutated = leaf.table.rows[int(len(leaf.table.rows)/2):]

        to_be_mutated_jmoo = []
        for row in to_be_mutated:
            if row.evaluated:
                to_be_mutated_jmoo.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]],
                                                  [x for x in row.cells[len(problem.decisions):]]))
            else:
                to_be_mutated_jmoo.append(jmoo_individual(problem, [x for x in row.cells[:len(problem.decisions)]], None))

        for i in xrange(initial_size - len(to_be_mutated)):
            index = i%len(to_be_mutated_jmoo)
            mutant = variation(problem, index, to_be_mutated_jmoo, configuration)
            to_be_mutated_jmoo.append(mutant)

        members_evaluated = sum([1 for i in to_be_mutated_jmoo if i.valid])
        while members_evaluated <= 2 and members_evaluated < len(to_be_mutated_jmoo):
            from random import randint
            index = randint(0, len(to_be_mutated_jmoo)-1)
            # Don't spend an evaluation on a member that already has a score
            if to_be_mutated_jmoo[index].valid:
                continue
            to_be_mutated_jmoo[index].evaluate()
            numEval += 1
            members_evaluated += 1
            print "> ", members_evaluated

        population += to_be_mutated_jmoo

    return population, numEval
Esempio n. 52
0
def sbxcrossover(problem, parent1, parent2, configuration):

    EPS = 1.0e-14
    distribution_index = configuration["NSGAIII"]["ETA_C_DEFAULT_"]
    probability = configuration["NSGAIII"]["SBX_Probability"]
    from numpy.random import random
    offspring1 = jmoo_individual(problem, parent1.decisionValues)
    offspring2 = jmoo_individual(problem, parent2.decisionValues)

    number_of_variables = len(problem.decisions)
    if random() <= probability:
        for i in xrange(number_of_variables):
            valuex1 = offspring1.decisionValues[i]
            valuex2 = offspring2.decisionValues[i]
            if random() <= 0.5:
                if abs(valuex1 - valuex2) > EPS:
                    if valuex1 < valuex2:
                        y1 = valuex1
                        y2 = valuex2
                    else:
                        y1 = valuex2
                        y2 = valuex1

                    yL = problem.decisions[i].low
                    yU = problem.decisions[i].up
                    rand = random()
                    beta = 1.0 + (2.0 * (y1 - yL) / (y2 - y1))
                    alpha = 2.0 - beta**(-1 * (distribution_index + 1.0))

                    if rand <= 1.0 / alpha:
                        betaq = (rand * alpha)**(1.0 /
                                                 (distribution_index + 1.0))
                    else:
                        betaq = (1.0 / (2.0 - rand * alpha))**(
                            1.0 / (distribution_index + 1.0))

                    c1 = 0.5 * ((y1 + y2) - betaq * (y2 - y1))
                    beta = 1.0 + (2.0 * (yU - y2) / (y2 - y1))
                    alpha = 2.0 - beta**-(distribution_index + 1.0)

                    if rand <= (1.0 / alpha):
                        betaq = (rand * alpha)**(1.0 /
                                                 (distribution_index + 1.0))
                    else:
                        betaq = ((1.0 / (2.0 - rand * alpha))**(
                            1.0 / (distribution_index + 1.0)))

                    c2 = 0.5 * ((y1 + y2) + betaq * (y2 - y1))

                    if c1 < yL: c1 = yL
                    if c2 < yL: c2 = yL
                    if c1 > yU: c1 = yU
                    if c2 > yU: c2 = yU

                    if random() <= 0.5:
                        offspring1.decisionValues[i] = c2
                        offspring2.decisionValues[i] = c1
                    else:
                        offspring1.decisionValues[i] = c1
                        offspring2.decisionValues[i] = c2
                else:
                    offspring1.decisionValues[i] = valuex1
                    offspring2.decisionValues[i] = valuex2
            else:
                offspring1.decisionValues[i] = valuex2
                offspring2.decisionValues[i] = valuex1

    return offspring1, offspring2
Esempio n. 53
0
def fastmap(problem, true_population):
    """
    Fastmap function that projects all the points on the principal component
    :param problem: Instance of the problem
    :param population: Set of points in the cluster population
    :return:
    """
    def list_equality(lista, listb):
        for a, b in zip(lista, listb):
            if a != b: return False
        return True

    from random import choice
    from Techniques.euclidean_distance import euclidean_distance

    decision_population = [pop.decisionValues for pop in true_population]
    one = choice(decision_population)
    west = furthest(one, decision_population)
    east = furthest(west, decision_population)

    west_indi = jmoo_individual(problem, west, None)
    east_indi = jmoo_individual(problem, east, None)
    west_indi.evaluate()
    east_indi.evaluate()

    for true_pop in true_population:
        if list_equality(true_pop.decisionValues, west_indi.decisionValues):
            true_pop.fitness.fitness = west_indi.fitness.fitness
        if list_equality(true_pop.decisionValues, east_indi.decisionValues):
            true_pop.fitness.fitness = east_indi.fitness.fitness

    # Score the poles
    n = len(problem.decisions)
    weights = []
    for obj in problem.objectives:
        # w is negative when we are maximizing that objective
        if obj.lismore:
            weights.append(+1)
        else:
            weights.append(-1)
    weightedWest = [c * w for c, w in zip(west_indi.fitness.fitness, weights)]
    weightedEast = [c * w for c, w in zip(east_indi.fitness.fitness, weights)]
    westLoss = loss(weightedWest,
                    weightedEast,
                    mins=[obj.low for obj in problem.objectives],
                    maxs=[obj.up for obj in problem.objectives])
    eastLoss = loss(weightedEast,
                    weightedWest,
                    mins=[obj.low for obj in problem.objectives],
                    maxs=[obj.up for obj in problem.objectives])

    # Determine better Pole
    if eastLoss < westLoss:
        SouthPole, NorthPole = east_indi, west_indi
    else:
        SouthPole, NorthPole = west_indi, east_indi

    east = SouthPole.decisionValues
    west = NorthPole.decisionValues

    c = euclidean_distance(east, west)
    tpopulation = []
    for one in decision_population:
        a = euclidean_distance(one, west)
        b = euclidean_distance(one, east)
        tpopulation.append([one, projection(a, b, c)])

    for tpop in tpopulation:
        for true_pop in true_population:
            if list_equality(tpop[0], true_pop.decisionValues):
                true_pop.x = tpop[-1]
    temp_list = sorted(true_population, key=lambda pop: pop.x)
    return temp_list
Esempio n. 54
0
    def update(statBox,
               population,
               gen,
               num_new_evals,
               initial=False,
               population_size=None,
               printOption=True):
        "add a stat box - compute the statistics first"

        # Find a file name to write the stats to
        if (statBox.alg.name == "GALE0" or statBox.alg.name
                == "GALE_no_mutation") and population_size is not None:
            filename = "Data/results_"+statBox.problem.name + "-p" + str(population_size) + "-d" + \
                   str(len(statBox.problem.decisions)) + "-o" + str(len(statBox.problem.objectives))+\
                   "_"+statBox.alg.name+".datatable"
        else:
            filename = "Data/results_"+statBox.problem.name + "-p" + str(len(population)) + "-d" + \
                       str(len(statBox.problem.decisions)) + "-o" + str(len(statBox.problem.objectives))+\
                       "_"+statBox.alg.name+".datatable"

        fa = open(filename, 'a')

        # Update Number of Evaluations
        statBox.numEval += num_new_evals

        # population represents on the individuals which have been evaluated
        shorten_population = [pop for pop in population if pop.fitness.valid]
        objectives = [
            individual.fitness.fitness for individual in shorten_population
        ]
        # Split Columns into Lists
        objective_columns = [[
            objective[i] for objective in objectives
        ] for i, obj in enumerate(statBox.problem.objectives)]
        # Calculate Medians of objective scores
        objective_medians = [median(fitCol) for fitCol in objective_columns]
        # Calculate IQR of objective scores
        objective_iqr = [spread(fitCol) for fitCol in objective_columns]

        # Initialize Reference Point on Initial Run
        if initial is True:
            statBox.referencePoint = [
                o.med for o in statBox.problem.objectives
            ]
            statBox.reference_point_for_hypervolume = [
                o.up for o in statBox.problem.objectives
            ]

        # Calculate IBD & IBS
        # Finding min and max for each objectives
        norms = [[
            min(objective_columns[i] + [statBox.referencePoint[i]]),
            max(objective_columns[i] + [statBox.referencePoint[i]])
        ] for i, obj in enumerate(statBox.problem.objectives)]

        lossInQualities = [{
            "qual":
            loss_in_quality(statBox.problem, [statBox.referencePoint], fit,
                            norms),
            "index":
            i
        } for i, fit in enumerate(objectives)]
        lossInQualities.sort(key=lambda r: r["qual"])
        if len(objectives) > 0:
            best_fitness = objectives[lossInQualities[0]["index"]]
        else:
            best_fitness = objective_medians
        lossInQualities = [item["qual"] for item in lossInQualities]

        IBD = median(lossInQualities)
        IBS = spread(lossInQualities)

        if initial is True:
            IBD = 1.0
            statBox.referenceIBD = 1.0

        changes = []
        # Print Option
        if printOption is True:
            outString = ""

            if initial:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(
                        statBox.referencePoint,
                    [0 for x in statBox.problem.objectives],
                        statBox.referencePoint, statBox.problem.objectives,
                        range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low,
                                           obj.up)
                    changes.append(float(change.strip("%")))
                    statBox.bests[o] = changes[-1]
                    statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str(
                        "%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]:
                        statBox.foam[o][statBox.numEval].append(change)
                    else:
                        statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(
                    statBox.referenceIBD, statBox.referenceIBD, True, 0,
                    1) + "," + str("%8.4f" % IBS)
            else:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(
                        best_fitness, objective_iqr, statBox.referencePoint,
                        statBox.problem.objectives,
                        range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low,
                                           obj.up)
                    changes.append(float(change.strip("%")))
                    if changes[-1] < statBox.bests[o]:
                        statBox.bests[o] = changes[-1]
                        statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str(
                        "%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]:
                        statBox.foam[o][statBox.numEval].append(change)
                    else:
                        statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(
                    IBD, statBox.referenceIBD, True, 0, 1) + "," + str(
                        "%8.4f" % IBS)
            fa.write(outString + "\n")

        # Add Stat to the Stat Box
        trunk = []
        for i, pop in enumerate(shorten_population):
            trunk.append(
                jmoo_individual(statBox.problem, pop.decisionValues,
                                pop.fitness.fitness))
        statBox.box[-1] = jmoo_stats(trunk, objectives, best_fitness,
                                     objective_iqr, statBox.numEval, gen, IBD,
                                     IBS, changes)
        fa.close()
Esempio n. 55
0
def selNSGA3(problem, individuals, k):
    """Apply NSGA-III selection operator on the *individuals*. Usually, the
    size of *individuals* will be larger than *k* because any individual
    present in *individuals* will appear in the returned list at most once.
    Having the size of *individuals* equal to *k* will have no effect other
    than sorting the population according to their front rank. The
    list returned contains references to the input *individuals*. For more
    details on the NSGA-III operator see the Deb and Jain reference below.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :returns: A list of selected individuals.

    Deb, Kalyanmoy, and Himanshu Jain. "An evolutionary many-objective optimization algorithm using
    reference-point-based nondominated sorting approach, Part I: Solving problems with box constraints."
    IEEE Transactions on Evolutionary Computation 18.4 (2014): 577-601.
    """

    #print "Length of individuals: ", len(individuals)


    pareto_fronts = sortNondominated(individuals, k)
    f_l_no = len(pareto_fronts) - 1
    P_t_1_no = len(list(chain(*pareto_fronts[:-1])))
    total_points_returned = len(list(chain(*pareto_fronts)))
    population = []

    for front_no, front in enumerate(pareto_fronts):
        for i, dIndividual in enumerate(front):
            cells = []
            for j in xrange(len(dIndividual)):
                cells.append(dIndividual[j])
            population.append(jmoo_individual(problem, cells, dIndividual.fitness.values))
            population[-1].front_no = front_no

    print ">" * 10
    Z_s = cover(len(problem.objectives))
    Z_a = None
    Z_r = None

    if total_points_returned == k:
        return normalize(problem, population, Z_r, Z_s, Z_a)

    # S_t = P_t_1 + pareto_fronts[-1]


    K = k - P_t_1_no

    # Get the reference points


    population = normalize(problem, population, Z_r, Z_s, Z_a)
    population = associate(population, Z_s)

    f_l = []
    P_t_1 = []
    for pop in population:
        if pop.front_no == f_l_no:
            f_l.append(pop)
        else:
            P_t_1.append(pop)
    assert(len(P_t_1) == P_t_1_no), "Something's wrong"
    P_t_1 = niching(K, len(Z_s), P_t_1, f_l)
    assert(len(P_t_1) == jmoo_properties.MU), "Length is mismatched"
    return P_t_1
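
selNSGA3 obtains its structured reference points from cover(len(problem.objectives)). In Deb and Jain's NSGA-III these are the Das-Dennis points: every vector (k_1/H, ..., k_M/H) whose non-negative integer numerators sum to H, spread uniformly over the unit simplex. A self-contained sketch of that construction is below; the function name and the choice of H are illustrative, and the codebase's cover may pick its number of divisions differently:

from itertools import combinations

def das_dennis_sketch(n_obj, divisions):
    # Stars-and-bars enumeration of all ways to split `divisions` units
    # across `n_obj` objectives, i.e. all simplex points with step 1/divisions.
    points = []
    for bars in combinations(range(divisions + n_obj - 1), n_obj - 1):
        prev, counts = -1, []
        for b in bars:
            counts.append(b - prev - 1)
            prev = b
        counts.append(divisions + n_obj - 2 - prev)
        points.append([c / float(divisions) for c in counts])
    return points

# e.g. das_dennis_sketch(3, 4) yields C(6, 2) = 15 reference points
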
Esempio n. 56
0
    def update(statBox,
               population,
               gen,
               numNewEvals,
               initial=False,
               printOption=True):
        "add a stat box - compute the statistics first"
        fa = open(
            "data/results_" + statBox.problem.name + "_" + statBox.alg.name +
            ".datatable", 'a')

        # Calculate percentage of violations
        violationsPercent = sum([
            1 for pop in population
            if statBox.problem.evalConstraints(pop.decisionValues)
        ]) / float(len(population))  # not sure what this is used for

        # Update Number of Evaluations
        statBox.numEval += numNewEvals

        population = [pop for pop in population if pop.fitness.valid
                      ]  # population for which the score is calculated

        # Evaluate Fitnesses
        #for individual in population:
        #    if not individual.valid: individual.evaluate()
        fitnesses = [
            individual.fitness.fitness for individual in population
            if individual.valid
        ]

        # Split Columns into Lists
        fitnessColumns = [[fit[i] for fit in fitnesses]
                          for i, obj in enumerate(statBox.problem.objectives)]

        # Calculate Medians and Spreads
        fitnessMedians = [median(fitCol) for fitCol in fitnessColumns]
        fitnessSpreads = [spread(fitCol) for fitCol in fitnessColumns]

        # Initialize Reference Point on Initial Run
        if initial == True:
            statBox.referencePoint = [
                o.med for o in statBox.problem.objectives
            ]

        # Calculate IBD & IBS
        norms = [[
            min(fitnessColumns[i] + [statBox.referencePoint[i]]),
            max(fitnessColumns[i] + [statBox.referencePoint[i]])
        ] for i, obj in enumerate(statBox.problem.objectives)]
        lossInQualities = [
            loss_in_quality(statBox.problem, [statBox.referencePoint], fit,
                            norms) for fit in fitnesses
        ]
        IBD = median(lossInQualities)  #median
        IBS = spread(lossInQualities)  #IQR

        if initial == True:
            IBD = 1.0
            statBox.referenceIBD = 1.0

        changes = []
        # Print Option
        if printOption == True:
            outString = ""

            if initial:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(
                        statBox.referencePoint,
                    [0 for x in statBox.problem.objectives],
                        statBox.referencePoint, statBox.problem.objectives,
                        range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low,
                                           obj.up)
                    changes.append(float(change.strip("%")))
                    statBox.bests[o] = changes[-1]
                    statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str(
                        "%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]:
                        statBox.foam[o][statBox.numEval].append(change)
                    else:
                        statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(
                    statBox.referenceIBD, statBox.referenceIBD, True, 0,
                    1) + "," + str("%8.4f" % IBS)
            else:
                outString += str(statBox.numEval) + ","
                for med, spr, initmed, obj, o in zip(
                        fitnessMedians, fitnessSpreads, statBox.referencePoint,
                        statBox.problem.objectives,
                        range(len(statBox.problem.objectives))):
                    change = percentChange(med, initmed, obj.lismore, obj.low,
                                           obj.up)
                    changes.append(float(change.strip("%")))
                    if changes[-1] < statBox.bests[o]:
                        statBox.bests[o] = changes[-1]
                        statBox.bests_actuals[o] = med
                    outString += str("%8.4f" % med) + "," + change + "," + str(
                        "%8.4f" % spr) + ","
                    if statBox.numEval in statBox.foam[o]:
                        statBox.foam[o][statBox.numEval].append(change)
                    else:
                        statBox.foam[o][statBox.numEval] = [change]
                outString += str("%8.4f" % IBD) + "," + percentChange(
                    IBD, statBox.referenceIBD, True, 0, 1) + "," + str(
                        "%8.4f" % IBS)

            print outString + ", violations: " + str(
                "%4.1f" % violationsPercent)
            fa.write(outString + "\n")

        # Add Stat to the Stat Box
        trunk = []
        for i, pop in enumerate(population):
            trunk.append(
                jmoo_individual(statBox.problem, pop.decisionValues,
                                pop.fitness.fitness))
            #if i < 5: print trunk[-1].decisionValues, statBox.problem.evalConstraints(trunk[-1].decisionValues)
        statBox.box.append(
            jmoo_stats(trunk, fitnesses, fitnessMedians, fitnessSpreads,
                       statBox.numEval, gen, IBD, IBS, changes))
        fa.close()