Example No. 1
import numpy as np
from deap import tools


def structured_reference_points(n_obj, div1, div2=None):
    # Boundary layer of uniformly spaced (Das-Dennis) reference points,
    # optionally combined with an inner layer scaled by 0.5 and shifted
    # towards the centre of the simplex.
    ref_points = tools.uniform_reference_points(n_obj, div1)
    if div2 is not None:
        in_ref_points = tools.uniform_reference_points(n_obj,
                                                       div2) / 2. + 0.5 / n_obj
        ref_points = np.vstack((ref_points, in_ref_points))

    return ref_points
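A minimal usage sketch (assuming numpy and deap are installed, with illustrative divisions div1=12 and div2=6 for three objectives); the second layer is the usual inner layer of the two-layer Das-Dennis construction:

# Hypothetical call: boundary layer with 12 divisions plus an inner layer
# with 6 divisions for a 3-objective problem.
two_layer = structured_reference_points(n_obj=3, div1=12, div2=6)
print(two_layer.shape)  # (119, 3): 91 boundary points + 28 inner points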
Example No. 2
def init_opti():
    ### setup NSGA3 with deap (minimize the first two goals returned by the evaluate function and maximize the third one)
    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0, 1.0))
    creator.create("Individual", list, fitness=creator.FitnessMulti)

    ref_points = tools.uniform_reference_points(nobj=3, p=12)

    #initial individual and pop
    toolbox.register("initial_indi", initial_indi)
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.initial_indi,
                     n=1)

    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    #evaluation and constraints
    toolbox.register("evaluate", evaluate)

    ## Mark infeasible solutions: penalize with a large value on the two
    ## minimization objectives and a small value on the maximization objective.
    toolbox.decorate("evaluate", tools.DeltaPenalty(feasible,
                                                    (10e20, 10e20, 0)))

    #mate, mutate and select to perform crossover
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)
Example No. 3
    def setup(self):
        # Create uniform reference point
        self.ref_points = tools.uniform_reference_points(self.NOBJ, self.P)

        # Create classes
        creator.create("FitnessMin", base.Fitness, weights=self.weights)
        creator.create("Individual", list, fitness=creator.FitnessMin)

        self.toolbox = base.Toolbox()
        self.toolbox.register("attr_float", self.uniform, self.BOUND_LOW,
                              self.BOUND_UP, self.NDIM)
        self.toolbox.register("individual", tools.initIterate,
                              creator.Individual, self.toolbox.attr_float)
        self.toolbox.register("population", tools.initRepeat, list,
                              self.toolbox.individual)

        self.toolbox.register("evaluate", self.evaluate)
        self.toolbox.register("mate",
                              tools.cxSimulatedBinaryBounded,
                              low=self.BOUND_LOW,
                              up=self.BOUND_UP,
                              eta=self.cx_eta)
        self.toolbox.register("mutate",
                              tools.mutPolynomialBounded,
                              low=self.BOUND_LOW,
                              up=self.BOUND_UP,
                              eta=self.mut_eta,
                              indpb=1 / self.NDIM)
        self.toolbox.register("select",
                              tools.selNSGA3,
                              ref_points=self.ref_points)
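NSGA-III is conventionally run with a population sized to the smallest multiple of 4 not smaller than the number of reference points; a sketch of that sizing, assuming the same NOBJ=3, P=12 setup:

from deap import tools

ref_points = tools.uniform_reference_points(3, 12)
H = len(ref_points)            # 91 reference points for nobj=3, p=12
MU = H + (4 - H % 4) % 4       # 92: smallest multiple of 4 >= H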
Example No. 4
import numpy as np
import scipy as sp
import scipy.spatial  # provides sp.spatial.distance
import openmdao.api as om
import pymop


def test_unconstrained_dtlz1(recording_path):
    # PymopGroup, Nsga3Driver, case_dataset, pareto_subset and
    # uniform_reference_points are project-local helpers assumed in scope.
    recorder = om.SqliteRecorder(recording_path)

    pymop_problem = pymop.DTLZ1(n_var=3, n_obj=3)

    prob = om.Problem()
    prob.model = PymopGroup(problem=pymop_problem)
    prob.model.add_recorder(recorder)
    prob.driver = Nsga3Driver(generation_count=500, random_seed=0)

    try:
        prob.setup()
        prob.run_driver()
    finally:
        prob.cleanup()

    cases = case_dataset(om.CaseReader(recording_path))
    pareto_cases = pareto_subset(cases)

    distance_function = "euclidean"
    ref_dirs = uniform_reference_points(pymop_problem.n_obj, p=4)
    ideal_pareto_front = pymop_problem.pareto_front(ref_dirs)
    min_pareto_point_distance = sp.spatial.distance.pdist(
        ideal_pareto_front, distance_function
    ).min()

    distances = sp.spatial.distance.cdist(
        pareto_cases["problem.obj"].values, ideal_pareto_front, distance_function
    )
    distances_to_ideal = np.min(distances, axis=0)

    assert distances_to_ideal.max() <= min_pareto_point_distance * 0.75
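The assertion compares two distance computations: pdist gives the spacing of the ideal front itself, cdist gives the gaps between found and ideal points, and the min over axis=0 picks, for each ideal point, its nearest found solution. A tiny synthetic sketch of the same logic, with made-up 2-D points:

import numpy as np
import scipy.spatial

ideal = np.array([[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]])
found = np.array([[0.05, 0.95], [0.55, 0.45], [0.9, 0.1]])
min_spacing = scipy.spatial.distance.pdist(ideal, "euclidean").min()
gaps = scipy.spatial.distance.cdist(found, ideal, "euclidean").min(axis=0)
assert gaps.max() <= min_spacing * 0.75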
Example No. 5
    def _setup_driver(self, problem):
        super()._setup_driver(problem)

        ref_points = tools.uniform_reference_points(
            self.num_objectives, p=self.options["reference_partitions"])
        nsga3_select = tools.selNSGA3WithMemory(
            ref_points=ref_points,
            # The "standard" non-dominated sort algorithm uses Fitness.dominates,
            # which we have modified for constraint-domination.
            nd="standard",
        )

        self.toolbox.register(
            "vary",
            nsga3_vary,
            toolbox=self.toolbox,
            cxpb=self.options["crossover_prob"] or 1.0,
            mutpb=self.options["mutation_prob"] or 1.0,
        )
        self.toolbox.register("select", nsga3_select)

        min_population_size = self.options["min_population_size"]

        if not min_population_size:
            min_population_size = len(ref_points)

        # The population size should be the smallest multiple of 4 strictly
        # greater than min_population_size (and min_population_size should
        # ideally be the number of reference points)
        self.population_size = 4 * (min_population_size // 4 + 1)
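Note that the formula above always rounds up past min_population_size, even when it is already a multiple of 4; a quick illustration:

# Behaviour of 4 * (m // 4 + 1) for a few values of m:
for m in (90, 91, 92):
    print(m, "->", 4 * (m // 4 + 1))   # 90 -> 92, 91 -> 92, 92 -> 96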
Example No. 6
def test_nsga3():
    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU = 16
    NGEN = 100

    ref_points = tools.uniform_reference_points(2, p=12)

    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.uniform, BOUND_LOW, BOUND_UP)
    toolbox.register("individual", tools.initRepeat,
                     creator.__dict__[INDCLSNAME], toolbox.attr_float, NDIM)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0)
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0,
                     indpb=1.0 / NDIM)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    pop = toolbox.population(n=MU)
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    pop = toolbox.select(pop, len(pop))
    # Begin the generational process
    for gen in range(1, NGEN):
        offspring = algorithms.varAnd(pop, toolbox, 1.0, 1.0)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)

    hv = hypervolume(pop, [11.0, 11.0])
    # hv = 120.777 # Optimal value

    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (
        hv, HV_THRESHOLD)

    for ind in pop:
        assert not (any(numpy.asarray(ind) < BOUND_LOW)
                    or any(numpy.asarray(ind) > BOUND_UP))
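The test assumes module-level globals from its test suite (INDCLSNAME, HV_THRESHOLD, a registered creator class, and the usual imports); a hedged sketch of that scaffolding, with an assumed threshold:

import random
import numpy
from deap import algorithms, base, benchmarks, creator, tools
from deap.benchmarks.tools import hypervolume

INDCLSNAME = "IndList"
HV_THRESHOLD = 116.0  # assumed; the comment above quotes ~120.777 as optimal

creator.create("FitnessMin2", base.Fitness, weights=(-1.0, -1.0))
creator.create(INDCLSNAME, list, fitness=creator.FitnessMin2)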
Example No. 7
    def setup(self):
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
        creator.create("Individual",
                       array.array,
                       typecode='d',
                       fitness=creator.FitnessMin)

        self.toolbox.register("attr_float", self.uniform, self.bound_low,
                              self.bound_up, self.n_design_variables_dimension)
        self.toolbox.register("individual", tools.initIterate,
                              creator.Individual, self.toolbox.attr_float)
        self.toolbox.register("population", tools.initRepeat, list,
                              self.toolbox.individual)

        if self.evaluation_function:
            self.toolbox.register("evaluate", self.evaluation_function)

        self.toolbox.register("mate",
                              tools.cxSimulatedBinaryBounded,
                              low=self.bound_low,
                              up=self.bound_up,
                              eta=20.0)
        self.toolbox.register("mutate",
                              tools.mutPolynomialBounded,
                              low=self.bound_low,
                              up=self.bound_up,
                              eta=20.0,
                              indpb=1.0 / self.n_design_variables_dimension)

        ref_points = tools.uniform_reference_points(2, self.n_population)
        self.toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

        self.stats = tools.Statistics(lambda ind: ind.fitness.values)
        self.stats.register("avg", np.mean, axis=0)
        self.stats.register("std", np.std, axis=0)
        self.stats.register("min", np.min, axis=0)
        self.stats.register("max", np.max, axis=0)
        self.logbook = tools.Logbook()
        self.logbook.header = "gen", "evals", "std", "min", "avg", "max"
        self.pop = self.toolbox.population(n=self.n_population)
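Here the partition count passed to uniform_reference_points is the population size; for two objectives the Das-Dennis construction returns p + 1 points on the segment f1 + f2 = 1, so this yields n_population + 1 reference points. A quick check:

from deap import tools

# For 2 objectives, p partitions give p + 1 evenly spaced reference points.
print(len(tools.uniform_reference_points(2, 10)))  # 11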
Example No. 8
    def _setup(self):
        """
        Setup genetic algorithm
        """

        self.num_objectives = len(self.features_list)

        # Create individual types
        creator.create("FitnessMin", base.Fitness,
                       weights=(-1.0,) * self.num_objectives)
        creator.create("Individual", list, fitness=creator.FitnessMin)

        # Setup toolbox
        self.toolbox = base.Toolbox()

        # Attribute generator
        self.toolbox.register("attr_float", random.random)

        # Structure initializers
        self.toolbox.register(
            "individual",
            tools.initRepeat,
            creator.Individual,
            self.toolbox.attr_float,
            self.num_params
        )
        self.toolbox.register("population", tools.initRepeat,
                              list, self.toolbox.individual)

        ref_points = tools.uniform_reference_points(self.num_objectives, 12)

        self.toolbox.register("evaluate", self.fitness)
        self.toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=0.0,
                              up=1.0, eta=30.0)
        self.toolbox.register("mutate", tools.mutPolynomialBounded, low=0.0,
                              up=1.0, eta=20.0, indpb=1.0/self.num_params)
        self.toolbox.register("select", tools.selNSGA3, ref_points=ref_points)
Example No. 9
# Assumed setup for this plotting snippet: NOBJ = 3 (the plot is 3-D) and a
# single partition count per scale, e.g. P = [12]; imports added for
# completeness.
import numpy as np
import matplotlib.pyplot as plt
from deap import tools

NOBJ = 3
P = [12]
SCALES = [1]

globalLinear = 0.25
globalBalance = 0.25
globalN1 = 0.25
globalN2 = 0.25

fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, projection="3d")

# the coordinate origin
ax.scatter(0, 0, 0, c="k", marker="+", s=100)

# reference points
ref_points = [
    tools.uniform_reference_points(NOBJ, p, s) for p, s in zip(P, SCALES)
]
ref_points = np.concatenate(ref_points)
_, uniques = np.unique(ref_points, axis=0, return_index=True)
ref_points = ref_points[uniques]

ax.scatter(ref_points[:, 0],
           ref_points[:, 1],
           ref_points[:, 2],
           marker="o",
           s=48)

# final figure details
ax.set_xlabel(r"$f_1(\mathbf{x})$", fontsize=15)
ax.set_ylabel(r"$f_2(\mathbf{x})$", fontsize=15)
ax.set_zlabel(r"$f_3(\mathbf{x})$", fontsize=15)
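With SCALES = [1] this is a single boundary layer; the same concatenate-and-deduplicate pattern handles multi-layer sets too. A sketch with assumed values for an inner layer:

# Hypothetical two-layer setup: a coarse inner layer at scale 0.5 on top of
# the boundary layer, deduplicated the same way as above.
layers = [tools.uniform_reference_points(NOBJ, p, s)
          for p, s in zip([12, 4], [1, 0.5])]
ref2 = np.unique(np.concatenate(layers), axis=0)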
Example No. 10
def create_Environment_For_NSGAIII_Discrete_Case(X,
                                                 bss,
                                                 weights,
                                                 evaluate_function,
                                                 random_labels=False,
                                                 indpb=None,
                                                 seed=None,
                                                 NOBJ=2,
                                                 P=12):
    '''
    Intro:
        This function returns an environment to use
        in the NSGAIII_Discrete_Case.
        
    ---
    Input:
        X: List
            List of real values
        bss: Integer
            Number of bits for each individual.
        weights: Tuple
            Weights for each output of the evaluate_function.
        evaluate_function: Function
            Function to evaluate individuals.
        random_labels: Boolean
            Whether to pick the points of X randomly or linearly.
        indpb: Float
            Probability of flipping each bit (defaults to 1 / (bss * 10)).
        seed: Integer
            Random seed.
        NOBJ: Integer
            Number of objectives.
        P: Integer
            Number of partitions for the reference points.

    ---
    Output:
        A tuple with (toolbox, stats, logbook, ref_points).

    '''

    random.seed(seed)

    if indpb is None:
        indpb = 1.0 / (bss * 10)

    # Create uniform reference point
    ref_points = tools.uniform_reference_points(NOBJ, P)

    # Creating individuals
    creator.create("FitnessCustom", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.FitnessCustom)

    # Creating toolbox
    toolbox = base.Toolbox()
    toolbox.register("individual", intToIndividual, creator.Individual, bss)
    toolbox.register("population",
                     getPop,
                     toolbox.individual,
                     len(X),
                     random_labels=random_labels,
                     seed=seed)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=indpb)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    # Defining ceil
    toolbox.register("ceiling", ceilling, toolbox.individual, len(X) - 1)

    # Defining evaluate function
    toolbox.register("evaluate", evaluate, evaluate_function, X)

    # Creating stats
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    # Creating log book
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    return (toolbox, stats, logbook, ref_points)
Example No. 11
def create_Environment_For_NSGAIII_Continuous_Case(BOUND_LOW,
                                                   BOUND_UP,
                                                   NDIM,
                                                   attr_individual_function,
                                                   evaluate_function,
                                                   weights,
                                                   etaMate=30.0,
                                                   etaMutate=20.0,
                                                   indpb=None,
                                                   NOBJ=2,
                                                   P=12):
    '''
    Intro:
        This function returns an environment to use
        in the NSGAIII_Continuous_Case.
    ---
    Input:
        BOUND_LOW: Float
            Lower limit of each value of individual.
        BOUND_UP: Float
            Upper limit of each value of individual.
        NDIM: Integer
            Number of dimensions of each individual.
        attr_individual_function: Function
            Function to create an individual.
        evaluate_function: Function
            Function to evaluate our points.
        weights: Tuple
            Weights for each output of the evaluate_function.
        etaMate: Float
            Crowding degree of the crossover. A high eta will
            produce children resembling their parents, while a
            small eta will produce much more different solutions.
        etaMutate: Float
            Crowding degree of the mutation. A high eta will
            produce mutants resembling their parents, while a
            small eta will produce much more different solutions.
        indpb: Float
            Probability of mutating each attribute (defaults to 1 / NDIM).
        NOBJ: Integer
            Number of objectives.
        P: Integer
            Number of partitions.
    ---
    Output:
        A tuple with (toolbox, stats, logbook, refpoints)
        
    '''

    if indpb is None:
        indpb = 1.0 / NDIM

    # Create uniform reference point
    ref_points = tools.uniform_reference_points(NOBJ, P)

    # Create classes
    creator.create("FitnessCustom", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.FitnessCustom)

    toolbox = base.Toolbox()
    toolbox.register("attr_float", attr_individual_function, BOUND_LOW,
                     BOUND_UP, NDIM)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", evaluate_function)
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=etaMate)
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=etaMutate,
                     indpb=indpb)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    # Creating stats
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    # Creating log book
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    return (toolbox, stats, logbook, ref_points)
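A minimal usage sketch (hypothetical evaluator and generator; assumes deap and numpy are installed). Note that attr_individual_function must return the whole NDIM-long vector, since it is wired through tools.initIterate:

import random

def my_evaluate(ind):
    # stand-in 2-objective evaluation
    return sum(ind), max(ind)

toolbox, stats, logbook, ref_points = create_Environment_For_NSGAIII_Continuous_Case(
    BOUND_LOW=0.0,
    BOUND_UP=1.0,
    NDIM=5,
    attr_individual_function=lambda lo, up, n: [random.uniform(lo, up) for _ in range(n)],
    evaluate_function=my_evaluate,
    weights=(-1.0, -1.0))
pop = toolbox.population(n=92)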
Example No. 12
def optim(MU, NGEN, path):

    # load the shp of the scenario
    #in_pts = "D:/04_PROJECTS/2001_WIND_OPTIM/WIND_OPTIM_git/intermediate_steps/3_wp/NSGA3_RES/in/B3.shp"
    #load in memory
    #all_pts = r"in_memory/inMemoryFeatureClass"
    all_pts = path
    #arcpy.CopyFeatures_management(in_pts, all_pts)

    #transform it to numpy array
    na = arcpy.da.TableToNumPyArray(all_pts, ['WT_ID', 'ENER_DENS', 'prod_MW'])

    # CXPB  is the probability with which two individuals
    #       are crossed
    # MUTPB is the probability for mutating an individual
    CXPB, MUTPB = 0.7, 0.4
    #MU,NGEN =20, 10
    enertarg = 4300000
    #some parameters to define the random individual
    nBITS = len(na)

    #production of energy
    sum_MW = np.sum(na['prod_MW'])

    low_targ = enertarg
    up_targ = enertarg * 1.07

    #the function to determine the initial random population which might reach the energy target
    def initial_indi():
        # relative to the total energy production to build the initial vector
        bound_up = (1.0 * up_targ / sum_MW)
        bound_low = (1.0 * low_targ / sum_MW)
        x3 = random.uniform(bound_low, bound_up)
        return np.random.choice([1, 0], size=(nBITS, ), p=[x3, 1 - x3])

    #some lists for the evaluation function
    enerd = list(na['ENER_DENS'])
    prod = list(na['prod_MW'])
    id = np.array(na['WT_ID'])

    #the evaluation function, taking the individual vector as input

    def evaluate(individual):
        individual = individual[0]
        # first check that the production of the selected WTs is within the range 4.29 to 4.31 TWh
        # goal 1
        mean_enerdsel = sum(
            x * y for x, y in zip(enerd, individual)) / sum(individual)
        # goal 2
        count_WTsel = sum(individual)
        # goal 3 (subset the input points by the WT_IDs which are in the ini pop (=1)
        WT_pop = np.column_stack((id, individual))
        WT_sel = WT_pop[WT_pop[:, [1]] == 1]
        WT_sel = WT_sel.astype(int)
        qry = '"WT_ID" IN ' + str(tuple(WT_sel))
        subset = arcpy.MakeFeatureLayer_management(all_pts, "tmp", qry)
        nn_output = arcpy.AverageNearestNeighbor_stats(subset,
                                                       "EUCLIDEAN_DISTANCE",
                                                       "NO_REPORT",
                                                       "41290790000")
        clus = float(nn_output.getOutput(0))
        res = (clus, count_WTsel, mean_enerdsel)
        ## delete the feature tmp since otherwise it will not work in a loop
        arcpy.Delete_management("tmp")
        arcpy.Delete_management("subset")
        return (res)

    def feasible(individual):
        individual = individual[0]
        prod_MWsel = sum(x * y for x, y in zip(prod, individual))
        if (prod_MWsel <= up_targ and prod_MWsel >= low_targ):
            return True
        return False

    ### setup NSGA3 with deap (minimize the first two goals returned by the evaluate function and maximize the third one)
    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0, 1.0))
    creator.create("Individual", list, fitness=creator.FitnessMulti)

    ref_points = tools.uniform_reference_points(nobj=3, p=12)
    ## set up the optimization toolbox
    toolbox = base.Toolbox()

    #initial individual and pop
    toolbox.register("initial_indi", initial_indi)
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.initial_indi,
                     n=1)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    #evaluation and constraints
    toolbox.register("evaluate", evaluate)

    ## Mark infeasible solutions: penalize with a large value on the two
    ## minimization objectives and a small value on the maximization objective.
    toolbox.decorate("evaluate", tools.DeltaPenalty(feasible,
                                                    (10e20, 10e20, 0)))

    #mate, mutate and select to perform crossover
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    # initialize pareto front
    pareto = tools.ParetoFront(similar=np.array_equal)

    first_stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    second_stats = tools.Statistics(key=lambda ind: ind.fitness.values[1])
    third_stats = tools.Statistics(key=lambda ind: ind.fitness.values[2])

    first_stats.register("min_clus", np.min, axis=0)
    second_stats.register("min_WT", np.min, axis=0)
    third_stats.register("max_enerd", np.max, axis=0)

    logbook1 = tools.Logbook()
    logbook2 = tools.Logbook()
    logbook3 = tools.Logbook()
    logbook1.header = "gen", "evals", "TIME", "min_clus"
    logbook2.header = "gen", "evals", "min_WT"
    logbook3.header = "gen", "evals", "max_enerd"

    pop = toolbox.population(n=MU)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    # invalid_ind = pop
    start_time = time.time()
    fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    end_time = time.time()
    delt_time = end_time - start_time

    ## Hyper volume
    # hv.hypervolume()

    # Compile statistics about the population
    record1 = first_stats.compile(pop)
    logbook1.record(gen=0, evals=len(invalid_ind), TIME=delt_time, **record1)

    record2 = second_stats.compile(pop)
    logbook2.record(gen=0, evals=len(invalid_ind), **record2)

    record3 = third_stats.compile(pop)
    logbook3.record(gen=0, evals=len(invalid_ind), **record3)

    # Begin the evolution with NGEN repetitions
    for gen in range(1, NGEN):
        print("-- Generation %i --" % gen)
        start_time = time.time()
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1[0], child2[0])
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant[0])
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

        fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        end_time = time.time()
        delt_time = end_time - start_time
        #select the next generation with NSGA3 from pop and offspring of size MU
        pop = toolbox.select(pop + offspring, MU)

        pareto.update(pop)

        # Compile statistics about the new population
        record1 = first_stats.compile(invalid_ind)
        logbook1.record(gen=gen,
                        evals=len(invalid_ind),
                        TIME=delt_time,
                        **record1)

        record2 = second_stats.compile(invalid_ind)
        logbook2.record(gen=gen, evals=len(invalid_ind), **record2)

        record3 = third_stats.compile(invalid_ind)
        logbook3.record(gen=gen, evals=len(invalid_ind), **record3)
        print("--- %s seconds ---" % delt_time)

    # pareto fitnes values
    fitness_pareto = toolbox.map(toolbox.evaluate, pareto)
    fitness_pareto = np.array(fitness_pareto)
    fitness_pareto = {
        'CLUS': fitness_pareto[:, 0],
        'N_WT': fitness_pareto[:, 1],
        'ENERDENS': fitness_pareto[:, 2]
    }

    #pareto items and robustness
    par_items = np.array(pareto.items)
    par_rob = np.array(1.0 * sum(par_items[1:len(par_items)]) / len(par_items))
    par_rob = par_rob.ravel()
    par_rob_mat = np.column_stack((id, par_rob))
    par_rob_mat = {'WT_ID2': par_rob_mat[:, 0], 'par_rob': par_rob_mat[:, 1]}

    #last items and robustness
    last_rob = np.array(invalid_ind)
    last_rob = np.array(1.0 * sum(last_rob[1:len(last_rob)]) / len(last_rob))
    last_rob = last_rob.ravel()
    last_rob_mat = np.column_stack((id, last_rob))
    last_rob_mat = {
        'WT_ID2': last_rob_mat[:, 0],
        'last_rob': last_rob_mat[:, 1]
    }

    #logbook
    gen = np.array(logbook1.select('gen'))
    TIME = np.array(logbook1.select('TIME'))
    WT = np.array(logbook2.select('min_WT'))
    clus = np.array(logbook1.select('min_clus'))
    enerd = np.array(logbook3.select('max_enerd'))
    logbook = np.column_stack((gen, TIME, WT, clus, enerd))
    logbook = {
        'GENERATION': logbook[:, 0],
        'TIME': logbook[:, 1],
        'N_WT': logbook[:, 2],
        'CLUS': logbook[:, 3],
        'ENERDENS': logbook[:, 4]
    }
    arcpy.Delete_management("all_pts")
    arcpy.Delete_management("in_pts")
    arcpy.Delete_management("na")
    arcpy.Delete_management("in_memory/inMemoryFeatureClass")
    return par_rob_mat, last_rob_mat, fitness_pareto, logbook
Example No. 13
def optim(MU, NGEN, path, CXPB, MUTPB, A, B):
    # path = "D:/04_PROJECTS/2001_WIND_OPTIM/WIND_OPTIM_git/intermediate_steps/3_wp/NSGA3_RES/in/B3_FFF+.shp"
    fc = path
    na = arcpy.da.FeatureClassToNumPyArray(
        fc, ["WT_ID", "ENER_DENS", "prod_MW", "SHAPE@XY"],
        explode_to_points=True)

    ##here we calculate the expected nearest neighbor distance (in meters) of the scenario
    nBITS = len(na)

    # CXPB  is the probability with which two individuals are crossed
    # MUTPB is the probability for mutating an individual
    #CXPB, MUTPB = 0.8, 0.6
    # MU,NGEN =20, 10
    enertarg = 4300000
    # some parameters to define the random individual

    # total production of energy
    sum_MW = np.sum(na['prod_MW'])

    # the 4.3TWh/y represent the minimal target to reach and app. 4.6Twh is the upper bandwidth
    low_targ = enertarg
    up_targ = enertarg * 1.07

    # the function to determine the initial random population which might reach the energy target bandwidth
    def initial_indi():
        # relative to the total energy production to build the initial vector
        bound_up = (1.0 * up_targ / sum_MW)
        bound_low = (1.0 * low_targ / sum_MW)
        x3 = random.uniform(bound_low, bound_up)
        return np.random.choice([1, 0], size=(nBITS, ), p=[x3, 1 - x3])

    def initial_ind2():
        N = np.array(A)
        return N

    def initial_ind3():
        M = np.array(B)
        return M

    # some lists for the evaluation function
    enerd = list(na['ENER_DENS'])
    prod = list(na['prod_MW'])
    id = np.array(na['WT_ID'])
    _xy = list(na['SHAPE@XY'])

    # the evaluation function, taking the individual vector as input

    def evaluate(individual):
        individual = individual[0]
        prod_MWsel = sum(x * y for x, y in zip(prod, individual))
        # check if the total production is within the boundaries; if not, return a penalty vector
        if up_targ >= prod_MWsel >= low_targ:
            # goal 1
            mean_enerdsel = sum(
                x * y for x, y in zip(enerd, individual)) / sum(individual)
            # goal 2
            count_WTsel = sum(individual)
            # goal 3 zip the individual vector to the _xy coordinates
            subset = np.column_stack((_xy, individual))
            #subset the data that only the 1 remains
            subset = subset[subset[:, 2] == 1]
            subset = np.delete(subset, 2, 1)
            tree = cKDTree(subset)
            dists = tree.query(subset, 2)
            nn_dist = dists[0][:, 1]
            rE = 1 / (2 * math.sqrt(1.0 * len(subset) / 41290790000))
            rA = np.mean(nn_dist)
            clus = rA / rE
            res = (clus, count_WTsel, mean_enerdsel)
            ## delete the feature tmp since otherwise it will not work in a loop
            arcpy.Delete_management("tmp")
            arcpy.Delete_management("subset")
        else:
            res = (10e20, 10e20, 0)
        return res

    # def feasible(individual):
    #     individual = individual[0]
    #     prod_MWsel = sum(x * y for x, y in zip(prod, individual))
    #     return low_targ <= prod_MWsel <= up_targ

    ### setup NSGA3 with deap (minimize the first two goals returned by the evaluate function and maximize the third one)
    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0, 1.0))
    creator.create("Individual", list, fitness=creator.FitnessMulti)
    ref_points = tools.uniform_reference_points(nobj=3, p=12)
    ## set up the optimization toolbox
    toolbox = base.Toolbox()
    # initial individual and pop
    toolbox.register("initial_indi", initial_indi)
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.initial_indi,
                     n=1)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    ###and the specific individual A
    toolbox.register("initial_indi2", initial_ind2)
    toolbox.register("individual2",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.initial_indi2,
                     n=1)
    toolbox.register("population2", tools.initRepeat, list,
                     toolbox.individual2)
    pop2 = toolbox.population2(n=1)

    ###and the specific individual B
    toolbox.register("initial_indi3", initial_ind3)
    toolbox.register("individual3",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.initial_indi3,
                     n=1)
    toolbox.register("population3", tools.initRepeat, list,
                     toolbox.individual2)
    pop3 = toolbox.population3(n=1)

    # evaluation and constraints
    toolbox.register("evaluate", evaluate)
    ## Mark infeasible solutions: penalize with a large value on the two
    ## minimization objectives and a small value on the maximization objective.
    #toolbox.decorate("evaluate", tools.DeltaPenalty(feasible, (10e20, 10e20, 0)))
    # mate, mutate and select to perform crossover
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    ### initialize pareto front
    pareto = tools.ParetoFront(similar=np.array_equal)
    ### initialize population
    pop = toolbox.population(n=MU)
    #pick a random number where to insert the vector containing the best producing WT into the population
    pop[random.randrange(0, MU, 1)] = pop2[0]
    #and insert the best energydens accordingly
    pop[random.randrange(0, MU, 1)] = pop3[0]

    first_stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    second_stats = tools.Statistics(key=lambda ind: ind.fitness.values[1])
    third_stats = tools.Statistics(key=lambda ind: ind.fitness.values[2])

    first_stats.register("min_clus", np.min, axis=0)
    second_stats.register("min_WT", np.min, axis=0)
    third_stats.register("max_enerd", np.max, axis=0)

    logbook1 = tools.Logbook()
    logbook2 = tools.Logbook()
    logbook3 = tools.Logbook()
    logbook1.header = "gen", "evals", "TIME", "min_clus"
    logbook2.header = "gen", "evals", "min_WT"
    logbook3.header = "gen", "evals", "max_enerd"

    HV = []
    # Evaluate the initial individuals with an invalid fitness
    print("-- fitness of initial population --")
    start_time = time.time()
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))

    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    ## Hypervolume of the initial fitnesses (scale n_WT and flip the sign of the maximization goal)
    fitness_trans = np.array(fitnesses)
    fitness_trans[:, 1] *= 1.0 / nBITS
    fitness_trans[:, 2] *= -1
    hyp = hv.hypervolume(fitness_trans, ref=np.array([1, 1, 1]))
    HV.append(hyp)

    end_time = time.time()
    delt_time = end_time - start_time

    record1 = first_stats.compile(pop)
    logbook1.record(gen=0, evals=len(invalid_ind), TIME=delt_time, **record1)

    record2 = second_stats.compile(pop)
    logbook2.record(gen=0, evals=len(invalid_ind), **record2)

    record3 = third_stats.compile(pop)
    logbook3.record(gen=0, evals=len(invalid_ind), **record3)

    # Begin the evolution with NGEN repetitions
    for gen in range(1, NGEN):
        print("-- Generation %i --" % gen)
        start_time = time.time()
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1[0], child2[0])
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant[0])
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

        fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        fitness_trans = np.array(fitnesses)
        fitness_trans[:, 1] *= 1.0 / nBITS
        fitness_trans[:, 2] *= -1
        ## Hyper volume
        hyp = hv.hypervolume(fitness_trans, ref=np.array([1, 1, 1]))
        HV.append(hyp)
        # select the next generation with NSGA3 from pop and offspring of size MU
        pop = toolbox.select(pop + offspring, MU)
        pareto.update(pop)

        record1 = first_stats.compile(invalid_ind)
        logbook1.record(gen=gen,
                        evals=len(invalid_ind),
                        TIME=delt_time,
                        **record1)

        record2 = second_stats.compile(invalid_ind)
        logbook2.record(gen=gen, evals=len(invalid_ind), **record2)

        record3 = third_stats.compile(invalid_ind)
        logbook3.record(gen=gen, evals=len(invalid_ind), **record3)

        end_time = time.time()
        delt_time = end_time - start_time
        print("--- %s seconds ---" % delt_time)

    # fitness pareto
    fitness_pareto = toolbox.map(toolbox.evaluate, pareto)
    fitness_pareto = np.array(fitness_pareto)
    fitness_pareto = {
        'CLUS': fitness_pareto[:, 0],
        'N_WT': fitness_pareto[:, 1],
        'ENERDENS': fitness_pareto[:, 2]
    }
    # pareto items and robustness
    par_items = np.array(pareto.items)
    par_rob = np.array(1.0 * sum(par_items[1:len(par_items)]) / len(par_items))
    par_rob = par_rob.ravel()
    par_rob_mat = np.column_stack((id, par_rob))
    par_rob_mat = {'WT_ID2': par_rob_mat[:, 0], 'par_rob': par_rob_mat[:, 1]}

    # logbook
    gen = np.array(logbook1.select('gen'))
    TIME = np.array(logbook1.select('TIME'))
    WT = np.array(logbook2.select('min_WT'))
    clus = np.array(logbook1.select('min_clus'))
    enerd = np.array(logbook3.select('max_enerd'))
    logbook = np.column_stack((gen, TIME, WT, clus, enerd))
    logbook = {
        'GENERATION': logbook[:, 0],
        'TIME': logbook[:, 1],
        'N_WT': logbook[:, 2],
        'CLUS': logbook[:, 3],
        'ENERDENS': logbook[:, 4]
    }

    return HV, par_rob_mat, fitness_pareto, logbook
Example No. 14
def main(seed=None):
    global obj1_max, obj2_max, obj3_max, CXPB, MUTPB
    pool = Pool(4)  # number of parallel workers (leave empty to use all cores)
    toolbox.register("map", pool.map)

    random.seed(seed)

    # Initialize statistics object
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Generation 0: evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # Generation 0: compile statistics about the population
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Prepare interactive plotting
    plt.ion()

    # Begin the generational process
    for gen in range(1, NGEN):
        offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population from parents and offspring
        pop = toolbox.select(pop + offspring, MU)

        print("------objs_before-------")
        print(obj1_max, obj2_max, obj3_max)
        # Get the current maximum of each objective
        pop_fit = numpy.array([ind.fitness.values for ind in pop])
        fits1 = [obj[0] for obj in pop_fit]
        _obj1_max = max(fits1)
        fits2 = [obj[1] for obj in pop_fit]
        _obj2_max = max(fits2)
        fits3 = [obj[2] for obj in pop_fit]
        _obj3_max = max(fits3)

        # Update the normalization bounds from the maxima (capped at 1.0)
        if _obj1_max <= 1.0:
            obj1_max = _obj1_max
        if _obj2_max <= 1.0:
            obj2_max = _obj2_max
        if _obj3_max <= 1.0:
            obj3_max = _obj3_max

        print("------objs_after-------")
        print(obj1_max, obj2_max, obj3_max)

        #======================================================
        #--------- intermediate progress plots ----------

        ## Write out the airfoils every generation
        k = 0
        for ind in pop[:10]:
            global code_division
            ratios = decoder(ind, code_division)
            try:
                k += 1
                datlist_list = [fc.read_datfile(file) for file in datfiles]
                datlist_shaped_list = [
                    fc.shape_dat(datlist) for datlist in datlist_list
                ]
                newdat = fc.interpolate_dat(datlist_shaped_list, ratios)
                fc.write_datfile(datlist=newdat,
                                 newfile_name="newfoil" + str(k) + str(".dat"))
            except Exception as e:
                print("message:{0}".format(e))
        #

        ## Print the fitness values of each airfoil
        k = 0
        for ind, fit in zip(pop, pop_fit):
            try:
                k += 1
                print(k)
                print("individual:" + str(ind) + "\nfit:" + str(fit))
            except Exception as e:
                print("message:{0}".format(e))
        #

        plt.cla()  # clear the axes
        ## Plot the new airfoil
        datlist_list = [fc.read_datfile(file) for file in datfiles]
        datlist_shaped_list = [
            fc.shape_dat(datlist) for datlist in datlist_list
        ]
        newdat = fc.interpolate_dat(datlist_shaped_list,
                                    decoder(pop[0], code_division))
        fc.write_datfile(datlist=newdat,
                         newfile_name="./foil_dat_gen/newfoil_gen" + str(gen) +
                         str(".dat"))
        plt.title('generation:' + str(gen))
        newdat_x = [dat[0] for dat in newdat]
        newdat_y = [dat[1] for dat in newdat]
        plt.xlim([0, 1])
        plt.ylim([-0.5, 0.5])
        plt.plot(newdat_x, newdat_y)
        plt.savefig("./newfoil_gen/newfoil_gen" + str(gen) + ".png")
        plt.draw()  # render
        plt.pause(0.1)  # wait for the plot to refresh

        ## Plot the fitness values
        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111, projection="3d")
        p = [ind.fitness.values for ind in pop]
        p1 = [i[0] for i in p]
        p2 = [j[1] for j in p]
        p3 = [k[2] for k in p]
        ax.set_xlim(0, obj1_max)
        ax.set_ylim(0, obj2_max)
        ax.set_zlim(0, obj3_max)
        ax.scatter(p1, p2, p3, marker="o", s=24, label="Final Population")
        ref = tools.uniform_reference_points(NOBJ, P)
        ax.scatter(ref[:, 0],
                   ref[:, 1],
                   ref[:, 2],
                   marker="o",
                   s=24,
                   label="Reference Points")
        ax.view_init(elev=11, azim=-25)
        #ax.autoscale(tight=True)
        plt.legend()
        plt.title("nsga3_gen:" + str(gen))
        plt.tight_layout()
        plt.savefig("./nsga3_gen/nsga3_gen" + str(gen) + ".png")
        plt.close()
        #======================================================

        # Compile statistics about the new population
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    return pop, logbook
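main() depends on module-level setup defined elsewhere in the script; a hedged sketch of those globals with hypothetical values (decoder, the fc helpers and datfiles are project-local):

from multiprocessing import Pool

NOBJ, P = 3, 12                       # three objectives, 12 partitions
MU, NGEN = 100, 50                    # hypothetical population size and generations
CXPB, MUTPB = 0.9, 0.1                # hypothetical variation probabilities
obj1_max = obj2_max = obj3_max = 1.0  # axis limits updated during the run
code_division = 4                     # assumed granularity used by decoder()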
Example No. 15
    def main(self,seed=None):
        self.setup()
        # number of parallel workers (leave empty to use all cores)
        self.pool = Pool(self.thread)
        self.toolbox.register("map", self.pool.map)

        random.seed(seed)
        # Initialize statistics object
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)

        logbook = tools.Logbook()
        logbook.header = "gen", "evals", "std", "min", "avg", "max"

        # Initialization: create the individuals
        pop = self.toolbox.population(n=self.MU)

        # Prepare interactive plotting
        plt.ion()

        # Begin the generational process
        for gen in range(self.NGEN):

            if gen == 0:
                # Generation 0: evaluate the individuals with an invalid fitness
                invalid_ind = [ind for ind in pop if not ind.fitness.valid]
                fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
                for ind, fit in zip(invalid_ind, fitnesses):
                    ind.fitness.values = fit

                # Generation 0: compile statistics about the population
                record = stats.compile(pop)
                logbook.record(gen=0, evals=len(invalid_ind), **record)

            else:
                offspring = algorithms.varAnd(pop, self.toolbox, self.CXPB, self.MUTPB)
                # Evaluate the individuals with an invalid fitness
                invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
                fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
                for ind, fit in zip(invalid_ind, fitnesses):
                    ind.fitness.values = fit

                # Select the next generation population from parents and offspring
                pop = self.toolbox.select(pop + offspring, self.MU)

            # Collect the fitness values
            pop_fit = np.array([ind.fitness.values for ind in pop])

            #----------------------------------
            # intermediate progress plots
            #----------------------------------

            # Write the airfoils out to files every generation
            k = 0
            for ind in pop[:10]:
                ratios = self.decoder(ind,self.code_division)
                try:
                    k += 1
                    datlist_list = [fc.read_datfile(file) for file in self.datfiles]
                    datlist_shaped_list = [fc.shape_dat(datlist) for datlist in datlist_list]
                    newdat = fc.interpolate_dat(datlist_shaped_list,ratios)
                    fc.write_datfile(datlist=newdat,newfile_name = "newfoil"+str(k)+str(".dat"))
                except Exception as e:
                    print("message:{0}".format(e))
            #

            ## Print the fitness values of each airfoil
            k = 0
            for ind, fit in zip(pop, pop_fit):
                try:
                    k += 1
                    print(k)
                    print("individual:" + str(ind) + "\nfit:" + str(fit))
                except Exception as e:
                    print("message:{0}".format(e))
            #

            plt.cla()  # clear the axes
            ## Plot the new airfoil
            datlist_list = [fc.read_datfile(file) for file in self.datfiles]
            datlist_shaped_list = [fc.shape_dat(datlist) for datlist in datlist_list]
            newdat = fc.interpolate_dat(datlist_shaped_list,self.decoder(pop[0],self.code_division))
            fc.write_datfile(datlist=newdat,newfile_name = "./foil_dat_gen/newfoil_gen"+str(gen)+str(".dat"))
            plt.title('generation:'+str(gen))
            newdat_x = [dat[0] for dat in newdat]
            newdat_y = [dat[1] for dat in newdat]
            plt.xlim([0,1])
            plt.ylim([-0.5,0.5])
            plt.plot(newdat_x,newdat_y)
            plt.savefig("./newfoil_gen/newfoil_gen"+str(gen)+".png")
            plt.draw()  # render
            plt.pause(0.1)  # wait for the plot to refresh

            ## Plot the fitness values
            fig = plt.figure(figsize=(7, 7))
            ax = fig.add_subplot(111, projection="3d")
            p = [ind.fitness.values for ind in pop]
            p1 = [i[0] for i in p]
            p2 = [j[1] for j in p]
            p3 = [k[2] for k in p]
            ax.set_xlim(0,self.obj1_max)
            ax.set_ylim(0,self.obj2_max)
            ax.set_zlim(0,self.obj3_max)
            ax.scatter(p1, p2, p3, marker="o", s=24, label="Final Population")
            ref = tools.uniform_reference_points(self.NOBJ, self.P)
            ax.scatter(ref[:, 0], ref[:, 1], ref[:, 2], marker="o", s=24, label="Reference Points")
            ax.view_init(elev=11, azim=-25)
            #ax.autoscale(tight=True)
            plt.legend()
            plt.title("nsga3_gen:"+str(gen))
            plt.tight_layout()
            plt.savefig("./nsga3_gen/nsga3_gen"+str(gen)+".png")
            plt.close()
            #======================================================


            # Compile statistics about the new population
            record = stats.compile(pop)
            logbook.record(gen=gen, evals=len(invalid_ind), **record)
            with SetIO('stats2.log'):
                # write the statistics stream to stats2.log
                print(logbook.stream)

        return pop, logbook

    def __getstate__(self):
        # Drop the multiprocessing pool so the object stays picklable.
        self_dict = self.__dict__.copy()
        del self_dict['pool']
        return self_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
Example No. 16
    def __init__(self,
                 problem: Problem,
                 mutation: Union[Mutation, DeapMutation] = None,
                 crossover: DeapCrossover = None,
                 selection: DeapSelection = None,
                 encoding: Optional[Union[str, Dict[str, Any]]] = None,
                 objectives: Optional[Union[str, List[str]]] = None,
                 objective_weights: Optional[List[float]] = None,
                 pop_size: int = None,
                 max_evals: int = None,
                 mut_rate: float = None,
                 crossover_rate: float = None,
                 deap_verbose: bool = None):

        self._default_crossovers = {
            TypeAttribute.LIST_BOOLEAN: DeapCrossover.CX_UNIFORM,
            TypeAttribute.LIST_INTEGER: DeapCrossover.CX_ONE_POINT,
            TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY:
            DeapCrossover.CX_ONE_POINT,
            TypeAttribute.PERMUTATION:
            DeapCrossover.CX_UNIFORM_PARTIALY_MATCHED
        }
        self._default_mutations = {
            TypeAttribute.LIST_BOOLEAN: DeapMutation.MUT_FLIP_BIT,
            TypeAttribute.LIST_INTEGER: DeapMutation.MUT_UNIFORM_INT,
            TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY:
            DeapMutation.MUT_UNIFORM_INT,
            TypeAttribute.PERMUTATION: DeapMutation.MUT_SHUFFLE_INDEXES
        }
        self._default_selection = DeapSelection.SEL_TOURNAMENT
        self.params_objective_function = ParamsObjectiveFunction(
            objective_handling=ObjectiveHandling.MULTI_OBJ,
            objectives=objectives,
            weights=objective_weights,
            sense_function=ModeOptim.MAXIMIZATION)
        self.evaluate_sol, _ = build_evaluate_function_aggregated(
            problem=problem,
            params_objective_function=self.params_objective_function)

        self.problem = problem
        if pop_size is not None:
            self._pop_size = pop_size
        else:
            self._pop_size = 100

        if max_evals is not None:
            self._max_evals = max_evals
        else:
            self._max_evals = 100 * self._pop_size
            print(
                'No value specified for max_evals. Using the default 100*pop_size - this should really be set carefully'
            )

        if mut_rate is not None:
            self._mut_rate = mut_rate
        else:
            self._mut_rate = 0.1

        if crossover_rate is not None:
            self._crossover_rate = crossover_rate
        else:
            self._crossover_rate = 0.9

        if deap_verbose is not None:
            self._deap_verbose = deap_verbose
        else:
            self._deap_verbose = True

        # set encoding
        register_solution: EncodingRegister = problem.get_attribute_register()
        self._encoding_name = None
        self._encoding_variable_name = None
        if encoding is not None and isinstance(encoding, str):
            # check name specified is in problem register
            print(encoding)
            if encoding in register_solution.dict_attribute_to_type.keys():
                self._encoding_name = encoding
                self._encoding_variable_name = register_solution.dict_attribute_to_type[
                    self._encoding_name]['name']
                self._encoding_type = register_solution.dict_attribute_to_type[
                    self._encoding_name]['type'][0]
                self.n = register_solution.dict_attribute_to_type[
                    self._encoding_name]['n']

                if self._encoding_type == TypeAttribute.LIST_INTEGER:
                    self.arrity = register_solution.dict_attribute_to_type[
                        self._encoding_name]['arrity']
                    self.arrities = [self.arrity for i in range(self.n)]
                else:
                    self.arrity = None
                if self._encoding_type == TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY:
                    self.arrities = register_solution.dict_attribute_to_type[
                        self._encoding_name]['arrities']
                # else:
                #     self.arrities = None

        if encoding is not None and isinstance(encoding, Dict):
            # check there is a type key and a n key
            if 'name' in encoding.keys() and 'type' in encoding.keys(
            ) and 'n' in encoding.keys():
                self._encoding_name = "custom"
                self._encoding_variable_name = encoding['name']
                self._encoding_type = encoding['type'][0]
                self.n = encoding['n']
                if 'arrity' in encoding.keys():
                    self.arrity = encoding['arrity']
                    self.arrities = [self.arrity for i in range(self.n)]
                if 'arrities' in encoding.keys():
                    self.arrities = register_solution.dict_attribute_to_type[
                        self._encoding_name]['arrities']
            else:
                print(
                    'Erroneous encoding provided as input (encoding name not matching an encoding of the problem, or '
                    'custom definition not respecting the encoding dict entry format); trying to use the default one instead'
                )

        if self._encoding_name is None:
            if len(register_solution.dict_attribute_to_type.keys()) == 0:
                raise Exception(
                    "An encoding of type TypeAttribute should be specified or at least 1 TypeAttribute "
                    "should be defined in the RegisterSolution of your Problem"
                )
            print(register_solution.dict_attribute_to_type)
            print(register_solution.dict_attribute_to_type.keys())
            self._encoding_name = list(
                register_solution.dict_attribute_to_type.keys())[0]
            self._encoding_variable_name = register_solution.dict_attribute_to_type[
                self._encoding_name]['name']
            self._encoding_type = register_solution.dict_attribute_to_type[
                self._encoding_name]['type'][
                    0]  # TODO : while it's usually a list we could also have a unique value(not a list)
            self.n = register_solution.dict_attribute_to_type[
                self._encoding_name]['n']

            if self._encoding_type == TypeAttribute.LIST_INTEGER:
                self.arrity = register_solution.dict_attribute_to_type[
                    self._encoding_name]['arrity']
                self.arrities = [self.arrity for i in range(self.n)]
            else:
                self.arrity = None
            if self._encoding_type == TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY:
                self.arrities = register_solution.dict_attribute_to_type[
                    self._encoding_name]['arrities']
            # else:
            #     self.arrities = None

        if self._encoding_type == TypeAttribute.LIST_BOOLEAN:
            self.arrity = 2
            self.arrities = [2 for i in range(self.n)]

        print("Encoding used by the GA: " + self._encoding_name + ": " +
              str(self._encoding_type) + " of length " + str(self.n))

        self._objectives = objectives
        print('_objectives: ', self._objectives)
        self._objective_weights = objective_weights
        if (self._objective_weights is None) \
                or self._objective_weights is not None \
                and(len(self._objective_weights) != len(self._objectives)):
            print(
                'Objective weight issue: no weight given or size of weights and objectives lists mismatch. '
                'Setting all weights to default 1 value.')
            self._objective_weights = [1 for i in range(len(self._objectives))]

        if selection is None:
            self._selection_type = self._default_selection
        else:
            self._selection_type = selection

        nobj = len(self._objectives)
        ref_points = tools.uniform_reference_points(nobj=nobj)

        # DEAP toolbox setup
        self._toolbox = base.Toolbox()

        # Define representation
        creator.create("fitness",
                       base.Fitness,
                       weights=tuple(self._objective_weights))
        creator.create(
            "individual", list, fitness=creator.fitness
        )  # associate the fitness function to the individual type

        # Create the individuals required by the encoding
        if self._encoding_type == TypeAttribute.LIST_BOOLEAN:
            self._toolbox.register(
                "bit", random.randint, 0, 1
            )  # Each element of a solution is a bit (i.e. an int between 0 and 1 incl.)

            self._toolbox.register(
                "individual",
                tools.initRepeat,
                creator.individual,
                self._toolbox.bit,
                n=self.n)  # An individual (aka solution) contains n bits
        elif self._encoding_type == TypeAttribute.PERMUTATION:
            self._toolbox.register("permutation_indices", random.sample,
                                   range(self.n), self.n)
            self._toolbox.register("individual", tools.initIterate,
                                   creator.individual,
                                   self._toolbox.permutation_indices)
        elif self._encoding_type == TypeAttribute.LIST_INTEGER:
            self._toolbox.register("int_val", random.randint, 0,
                                   self.arrity - 1)
            self._toolbox.register("individual",
                                   tools.initRepeat,
                                   creator.individual,
                                   self._toolbox.int_val,
                                   n=self.n)
        elif self._encoding_type == TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY:
            gen_idx = lambda: [
                random.randint(0, arrity - 1) for arrity in self.arrities
            ]
            self._toolbox.register("individual", tools.initIterate,
                                   creator.individual, gen_idx)
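            # e.g. with self.arrities == [2, 3, 5], gen_idx() could return [1, 0, 4]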

        self._toolbox.register(
            "population",
            tools.initRepeat,
            list,
            self._toolbox.individual,
            n=self._pop_size)  # A population is made of pop_size individuals

        # Define objective function
        self._toolbox.register(
            "evaluate",
            self.evaluate_problem,
        )

        # Define crossover
        if crossover is None:
            self._crossover = self._default_crossovers[self._encoding_type]
        else:
            self._crossover = crossover

        # Crossovers typically used with boolean/integer list encodings
        if self._crossover == DeapCrossover.CX_UNIFORM:
            self._toolbox.register("mate",
                                   tools.cxUniform,
                                   indpb=self._crossover_rate)
        elif self._crossover == DeapCrossover.CX_ONE_POINT:
            self._toolbox.register("mate", tools.cxOnePoint)
        elif self._crossover == DeapCrossover.CX_TWO_POINT:
            self._toolbox.register("mate", tools.cxTwoPoint)

        # Crossovers typically used with permutation encodings
        elif self._crossover == DeapCrossover.CX_UNIFORM_PARTIALY_MATCHED:
            self._toolbox.register("mate",
                                   tools.cxUniformPartialyMatched,
                                   indpb=0.5)
        elif self._crossover == DeapCrossover.CX_ORDERED:
            self._toolbox.register("mate", tools.cxOrdered)
        elif self._crossover == DeapCrossover.CX_PARTIALY_MATCHED:
            self._toolbox.register("mate", tools.cxPartialyMatched)
        else:
            print("Crossover of specified type not handled!")

        # Define mutation
        if mutation is None:
            self._mutation = self._default_mutations[self._encoding_type]
        else:
            self._mutation = mutation

        if isinstance(self._mutation, Mutation):
            self._toolbox.register(
                "mutate",
                generic_mutate_wrapper,
                problem=self.problem,
                encoding_name=self._encoding_variable_name,
                indpb=self._mut_rate,
                solution_fn=self.problem.get_solution_type(),
                custom_mutation=self._mutation)
        elif isinstance(self._mutation, DeapMutation):
            if self._mutation == DeapMutation.MUT_FLIP_BIT:
                self._toolbox.register(
                    "mutate", tools.mutFlipBit,
                    indpb=self._mut_rate)  # Choice of mutation operator
            elif self._mutation == DeapMutation.MUT_SHUFFLE_INDEXES:
                self._toolbox.register(
                    "mutate", tools.mutShuffleIndexes,
                    indpb=self._mut_rate)  # Choice of mutation operator
            elif self._mutation == DeapMutation.MUT_UNIFORM_INT:
                # mutUniformInt accepts sequences of bounds; an arity of k
                # allows integer values 0..k-1
                self._toolbox.register("mutate",
                                       tools.mutUniformInt,
                                       low=0,
                                       up=[arrity - 1 for arrity in self.arrities],
                                       indpb=self._mut_rate)

        # No choice of selection in NSGA-III: non-dominated sorting with reference-point niching
        self._toolbox.register("select", tools.selNSGA3, ref_points=ref_points)
Exemplo n.º 17
0

def feasible(individual):
    individual = individual[0]
    prod_MWsel = sum(x * y for x, y in zip(prod, individual))
    return low_targ <= prod_MWsel <= up_targ


### setup NSGA3 with deap (minimize the first two goals returned by the evaluate function and maximize the third one)
creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0, 1.0))
creator.create("Individual", list, fitness=creator.FitnessMulti)

# Das-Dennis reference points used by the NSGA-III niching selection
ref_points = tools.uniform_reference_points(nobj=3, p=12)
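# Added sanity check (not in the original source): with nobj objectives and p
# divisions, uniform_reference_points yields C(nobj + p - 1, p) Das-Dennis
# points, i.e. C(14, 12) = 91 points here (math.comb needs Python >= 3.8).
from math import comb
assert len(ref_points) == comb(3 + 12 - 1, 12) == 91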
# set up the DEAP toolbox
toolbox = base.Toolbox()

#initial individual and pop
toolbox.register("initial_indi", initial_indi)
toolbox.register("individual",
                 tools.initRepeat,
                 creator.Individual,
                 toolbox.initial_indi,
                 n=1)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

#evaluation and constraints
toolbox.register("evaluate", evaluate)
Exemplo n.º 18
0
    def run(self,
            optimization_problem,
            n_gen=None,
            population_size=None,
            use_multicore=True,
            use_checkpoint=True):

        self.optimization_problem = optimization_problem
        # Abbreviations
        lb = optimization_problem.lower_bounds
        ub = optimization_problem.upper_bounds
        n_vars = optimization_problem.n_variables

        # Settings
        if population_size is None:
            population_size = min(
                200, max(25 * len(optimization_problem.variables), 50))
        if n_gen is None:
            n_gen = min(100, max(10 * len(optimization_problem.variables), 40))

        # NSGA3 Settings
        n_obj = 1
        p = 4
        ref_points = tools.uniform_reference_points(n_obj, p)

        # !!! emo functions break if n_obj == 1; this no-op stub is a temporary fix
        if n_obj == 1:

            def sortNDHelperB(best, worst, obj, front):
                if obj < 0:
                    return
                sortNDHelperB(best, worst, obj - 1, front)

            tools.emo.sortNDHelperB = sortNDHelperB

        # Definition of classes
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, ) * n_obj)
        creator.create("Individual", list, fitness=creator.FitnessMin)

        # Tools
        toolbox = base.Toolbox()

        # Map for parallel evaluation
        manager = multiprocessing.Manager()
        cache = manager.dict()
        if use_multicore:
            pool = multiprocessing.Pool()
            toolbox.register("map", pool.map)

        # Functions for creating individuals and population
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         optimization_problem.create_initial_values)

        def initIndividual(icls, content):
            return icls(content)

        toolbox.register("individual_guess", initIndividual,
                         creator.Individual)

        def initPopulation(pcls, ind_init, population_size):
            population = optimization_problem.create_initial_values(
                population_size)
            return pcls(ind_init(c) for c in population)

        toolbox.register(
            "population",
            initPopulation,
            list,
            toolbox.individual_guess,
        )

        # Functions for evolution
        toolbox.register("evaluate", self.evaluate, cache=cache)
        toolbox.register("mate",
                         tools.cxSimulatedBinaryBounded,
                         low=lb,
                         up=ub,
                         eta=30.0)
        toolbox.register("mutate",
                         tools.mutPolynomialBounded,
                         low=lb,
                         up=ub,
                         eta=20.0,
                         indpb=1.0 / n_vars)
        toolbox.register("select",
                         tools.selNSGA3,
                         nd="standard",
                         ref_points=ref_points)

        # Round individuals to prevent reevaluation of similar individuals
        def round_individuals():
            def decorator(func):
                def wrapper(*args, **kargs):
                    offspring = func(*args, **kargs)
                    for child in offspring:
                        for index, el in enumerate(child):
                            child[index] = round(el, self.sig_figures)
                    return offspring

                return wrapper

            return decorator

        toolbox.decorate("mate", round_individuals())
        toolbox.decorate("mutate", round_individuals())

        statistics = tools.Statistics(key=lambda ind: ind.fitness.values)
        statistics.register("min", np.min)
        statistics.register("max", np.max)
        statistics.register("avg", np.mean)
        statistics.register("std", np.std)

        # Load checkpoint if present
        checkpoint_path = os.path.join(
            settings.project_directory,
            optimization_problem.name + '/checkpoint.pkl')

        if use_checkpoint and os.path.isfile(checkpoint_path):
            # A file name has been given, then load the data from the file
            with open(checkpoint_path, "rb") as cp_file:
                cp = pickle.load(cp_file)

            self.population = cp["population"]
            start_gen = cp["generation"]
            self.halloffame = cp["halloffame"]
            self.logbook = cp["logbook"]
            random.setstate(cp["rndstate"])
        else:
            # Start a new evolution
            start_gen = 0
            self.halloffame = tools.HallOfFame(maxsize=1)
            self.logbook = tools.Logbook()
            self.logbook.header = "gen", "evals", "std", "min", "avg", "max"

            # Initialize random population
            random.seed(self.seed)
            self.population = toolbox.population(population_size)

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [
                ind for ind in self.population if not ind.fitness.valid
            ]
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Compile statistics about the population
            record = statistics.compile(self.population)
            self.logbook.record(gen=0, evals=len(invalid_ind), **record)

        # Begin the generational process
        start = time.time()
        for gen in range(start_gen, n_gen):
            self.offspring = algorithms.varAnd(self.population, toolbox,
                                               self.cxpb, self.mutpb)

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [
                ind for ind in self.offspring if not ind.fitness.valid
            ]
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Select the next generation population from parents and offspring
            self.population = toolbox.select(self.population + self.offspring,
                                             population_size)

            # Compile statistics about the new population
            record = statistics.compile(self.population)
            self.logbook.record(gen=gen, evals=len(invalid_ind), **record)
            self.halloffame.update(self.population)

            # Create Checkpoint file
            cp = dict(population=self.population,
                      generation=gen,
                      halloffame=self.halloffame,
                      logbook=self.logbook,
                      rndstate=random.getstate())

            with open(checkpoint_path, "wb") as cp_file:
                pickle.dump(cp, cp_file)

            best = self.halloffame.items[0]
            self.logger.info('Generation {}: x: {}, f: {}'.format(
                str(gen), str(best), str(best.fitness.values[0])))

        elapsed = time.time() - start

        x = self.halloffame.items[0]

        eval_object = optimization_problem.set_variables(x, make_copy=True)
        if self.optimization_problem.evaluator is not None:
            frac = optimization_problem.evaluator.evaluate(eval_object,
                                                           return_frac=True)
            performance = frac.performance
        else:
            frac = None
            performance = optimization_problem.evaluate(x, force=True)
        f = optimization_problem.objective_fun(performance)

        results = OptimizationResults(
            optimization_problem=optimization_problem,
            evaluation_object=eval_object,
            solver_name=str(self),
            solver_parameters=self.options,
            exit_flag=1,
            exit_message='DEAP terminated successfully',
            time_elapsed=elapsed,
            x=list(x),
            f=f,
            c=None,
            frac=frac,
            performance=performance.to_dict())

        return results
Exemplo n.º 19
0
def non_dominated_sorting_genetic_algorithm(
        locator, building_names_all, district_heating_network,
        district_cooling_network, building_names_heating,
        building_names_cooling, building_names_electricity, network_features,
        config, prices, lca):
    t0 = time.clock()

    # LOCAL VARIABLES
    NGEN = config.optimization.number_of_generations  # number of generations
    NIND = config.optimization.population_size  # int(H + (4 - H % 4)) # number of individuals to select
    RANDOM_SEED = config.optimization.random_seed

    # SET-UP EVOLUTIONARY ALGORITHM
    # Hyperparameters
    # during the warm-up period we make sure we explore a wide range of solutions so the scaler works
    if NGEN < 20:
        NIND_GEN0 = 20
    else:
        NIND_GEN0 = NGEN
    NOBJ = 3  # number of objectives
    P = [2, 1]
    SCALES = [1, 0.5]
    euclidean_distance = 0
    spread = 0
    random.seed(RANDOM_SEED)
    np.random.seed(RANDOM_SEED)
    ref_points = [
        tools.uniform_reference_points(NOBJ, p, s) for p, s in zip(P, SCALES)
    ]
    ref_points = np.concatenate(ref_points)
    _, uniques = np.unique(ref_points, axis=0, return_index=True)
    ref_points = ref_points[uniques]
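    # Note (added): with NOBJ = 3, p = 2 gives C(4, 2) = 6 boundary points and
    # p = 1 scaled by 0.5 gives C(3, 1) = 3 inner points; np.unique above drops
    # any duplicates created by the concatenation.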

    # SET-UP INDIVIDUAL STRUCTURE INCLUDING HOW EVERY POINT IS CALLED (COLUMN_NAMES)
    column_names, \
    heating_unit_names_share, \
    cooling_unit_names_share, \
    column_names_buildings_heating, \
    column_names_buildings_cooling = get_column_names_individual(district_heating_network,
                                                                 district_cooling_network,
                                                                 building_names_heating,
                                                                 building_names_cooling,
                                                                 )
    individual_with_names_dict = create_empty_individual(
        column_names, column_names_buildings_heating,
        column_names_buildings_cooling, district_heating_network,
        district_cooling_network)

    # DEAP LIBRARY REFERENCE_POINT CLASSES AND TOOLS
    # reference points
    toolbox = base.Toolbox()
    toolbox.register(
        "generate",
        generate_main,
        individual_with_names_dict=individual_with_names_dict,
        column_names=column_names,
        column_names_buildings_heating=column_names_buildings_heating,
        column_names_buildings_cooling=column_names_buildings_cooling,
        district_heating_network=district_heating_network,
        district_cooling_network=district_cooling_network)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.generate)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxUniform, indpb=CXPB)
    toolbox.register(
        "mutate",
        mutation_main,
        indpb=MUTPB,
        column_names=column_names,
        heating_unit_names_share=heating_unit_names_share,
        cooling_unit_names_share=cooling_unit_names_share,
        column_names_buildings_heating=column_names_buildings_heating,
        column_names_buildings_cooling=column_names_buildings_cooling,
        district_heating_network=district_heating_network,
        district_cooling_network=district_cooling_network)
    toolbox.register("evaluate", objective_function_wrapper)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    # configure multiprocessing
    if config.multiprocessing:
        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        toolbox.register("map", pool.map)

    # Initialize statistics object
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=NIND_GEN0)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
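    # Note (added): izip/repeat presumably come from Python 2's itertools; each
    # invalid individual is zipped with its index, the generation number (0 for
    # the initial population) and the shared read-only arguments before being
    # dispatched through toolbox.map.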
    fitnesses = toolbox.map(
        toolbox.evaluate,
        izip(invalid_ind, range(len(invalid_ind)), repeat(0, len(invalid_ind)),
             repeat(building_names_all, len(invalid_ind)),
             repeat(column_names_buildings_heating, len(invalid_ind)),
             repeat(column_names_buildings_cooling, len(invalid_ind)),
             repeat(building_names_heating, len(invalid_ind)),
             repeat(building_names_cooling, len(invalid_ind)),
             repeat(building_names_electricity, len(invalid_ind)),
             repeat(locator, len(invalid_ind)),
             repeat(network_features, len(invalid_ind)),
             repeat(config, len(invalid_ind)), repeat(prices,
                                                      len(invalid_ind)),
             repeat(lca, len(invalid_ind)),
             repeat(district_heating_network, len(invalid_ind)),
             repeat(district_cooling_network, len(invalid_ind)),
             repeat(column_names, len(invalid_ind))))

    # normalization of the first generation
    scaler_dict = scaler_for_normalization(NOBJ, fitnesses)
    fitnesses = normalize_fitnesses(scaler_dict, fitnesses)

    # add fitnesses to population individuals
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # Compile statistics about the population
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)

    print(logbook.stream)

    # Begin the generational process
    # Initialization of variables
    DHN_network_list = []
    DCN_network_list = []
    halloffame = []
    halloffame_fitness = []
    epsInd = []
    for gen in range(1, NGEN + 1):
        print("Evaluating Generation %s{} of %s{} generations", gen)
        # Select and clone the next generation individuals
        offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(
            toolbox.evaluate,
            izip(invalid_ind, range(len(invalid_ind)),
                 repeat(gen, len(invalid_ind)),
                 repeat(building_names_all, len(invalid_ind)),
                 repeat(column_names_buildings_heating, len(invalid_ind)),
                 repeat(column_names_buildings_cooling, len(invalid_ind)),
                 repeat(building_names_heating, len(invalid_ind)),
                 repeat(building_names_cooling, len(invalid_ind)),
                 repeat(building_names_electricity, len(invalid_ind)),
                 repeat(locator, len(invalid_ind)),
                 repeat(network_features, len(invalid_ind)),
                 repeat(config, len(invalid_ind)),
                 repeat(prices, len(invalid_ind)),
                 repeat(lca, len(invalid_ind)),
                 repeat(district_heating_network, len(invalid_ind)),
                 repeat(district_cooling_network, len(invalid_ind)),
                 repeat(column_names, len(invalid_ind))))
        # normalization of the second generation on
        fitnesses = normalize_fitnesses(scaler_dict, fitnesses)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population from parents and offspring
        pop = toolbox.select(pop + offspring, NIND)

        # Compile statistics about the new population
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

        DHN_network_list_tested = []
        DCN_network_list_tested = []
        for individual in invalid_ind:
            DHN_barcode, DCN_barcode, individual_with_name_dict, _ = individual_to_barcode(
                individual, building_names_all, building_names_heating,
                building_names_cooling, column_names,
                column_names_buildings_heating, column_names_buildings_cooling)
            DCN_network_list_tested.append(DCN_barcode)
            DHN_network_list_tested.append(DHN_barcode)

        print "Save population \n"
        save_generation_dataframes(gen, invalid_ind, locator,
                                   DCN_network_list_tested,
                                   DHN_network_list_tested)
        save_generation_individuals(column_names, gen, invalid_ind, locator)

        # Create Checkpoint if necessary
        print "Create CheckPoint", gen, "\n"
        with open(locator.get_optimization_checkpoint(gen), "wb") as fp:
            cp = dict(
                selected_population=pop,
                generation=gen,
                all_population_DHN_network_barcode=DHN_network_list,
                all_population_DCN_network_barcode=DCN_network_list,
                tested_population_DHN_network_barcode=DHN_network_list_tested,
                tested_population_DCN_network_barcode=DCN_network_list_tested,
                tested_population=invalid_ind,
                tested_population_fitness=fitnesses,
                epsIndicator=epsInd,
                halloffame=halloffame,
                halloffame_fitness=halloffame_fitness,
                euclidean_distance=euclidean_distance,
                spread=spread,
                detailed_electricity_pricing=config.optimization.
                detailed_electricity_pricing,
                district_heating_network=config.optimization.
                district_heating_network,
                district_cooling_network=config.optimization.
                district_cooling_network)
            json.dump(cp, fp)

    print("save totals for generation")
    print "Master Work Complete \n"
    # print ("Number of function evaluations = " + str(function_evals))
    t1 = time.clock()
    print(t1 - t0)
    if config.multiprocessing:
        pool.close()

    return pop, logbook
Exemplo n.º 20
0
    toolbox.register("map", pool.map)
    logging.info(f"Running on {n_cpus} CPUs")
else:
    logging.info(f"Running on GPU.")

# register operators
fit = Fitness(**config.global_config["dataset"])
mut = MutationConv() if use_conv_layers else Mutation()
cross = CrossoverConv() if use_conv_layers else Crossover()

toolbox.register("eval_batch", fit.evaluate_batch)
toolbox.register("evaluate", fit.evaluate)
toolbox.register("mate", cross.cxOnePoint)
toolbox.register("mutate", mut.mutate)
if nsga_number == 3:
    ref_points = tools.uniform_reference_points(2, 12)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)
elif nsga_number == 2:
    # nsgaII - deap implementation
    toolbox.register("select", tools.selNSGA2)
elif nsga_number == 1:
    # stepan's version of nsga
    toolbox.register("select", selectNSGA)
elif nsga_number == 0:
    # use vanilla GA
    toolbox.register("select", tools.selTournament, tournsize=3)
else:
    raise NotImplementedError()


def main(exp_id, checkpoint_name=None):
Exemplo n.º 21
0
NDIM = NOBJ + K - 1
P = 12
H = factorial(NOBJ + P - 1) / (factorial(P) * factorial(NOBJ - 1))
BOUND_LOW, BOUND_UP = 0.0, 1.0
problem = pymop.factory.get_problem(PROBLEM, n_var=NDIM, n_obj=NOBJ)
##

# Algorithm parameters
MU = int(H + (4 - H % 4))
NGEN = 400
CXPB = 1.0
MUTPB = 1.0
##
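# Worked numbers (added for clarity), assuming the usual DEAP example value
# NOBJ = 3: H = C(NOBJ + P - 1, P) = C(14, 12) = 91 reference points, and the
# population size is padded to the next multiple of 4: MU = 91 + (4 - 91 % 4) = 92.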

# Create uniform reference point
ref_points = tools.uniform_reference_points(NOBJ, P)

# Create classes
creator.create("FitnessMin", base.Fitness, weights=(-1.0, ) * NOBJ)
creator.create("Individual", list, fitness=creator.FitnessMin)

##


# Toolbox initialization
def uniform(low, up, size=None):
    try:
        return [random.uniform(a, b) for a, b in zip(low, up)]
    except TypeError:
        return [
            random.uniform(a, b) for a, b in zip([low] * size, [up] * size)
Exemplo n.º 22
0
    def __init__(self, individual_cls, species, **params):
        """Initialize the wrapper method.

        :param individual_cls: Individual representation.
        :type individual_cls: Any subclass of
            :py:class:`~base.Individual`
        :param species: The species the individual will belong to
        :type species: Any subclass of :py:class:`~base.Species`
        :param pop_size: Population size, defaults to
            :py:attr:`~wrapper.DEFAULT_POP_SIZE`
        :type pop_size: :py:class:`int`, optional
        :param n_gens: Number of generations, defaults to
            :py:attr:`~wrapper.DEFAULT_N_GENS`
        :type n_gens: :py:class:`int`, optional
        :param xover_func: Crossover function, defaults to the
            :py:meth:`~base.Individual.crossover` method of
            *individual_cls*
        :type xover_func: Any callable object, optional
        :param xover_pb: Crossover rate, defaults to
            :py:attr:`~wrapper.DEFAULT_XOVER_PB`
        :type xover_pb: :py:class:`float`, optional
        :param mut_func: Mutation function, defaults to the
            :py:meth:`~base.Individual.mutate` method of
            *individual_cls*
        :type mut_func: Any callable object, optional
        :param mut_pb: Mutation rate, defaults to
            :py:attr:`~wrapper.DEFAULT_MUT_PB`
        :type mut_pb: :py:class:`float`, optional
        :param mut_ind_pb: Independent gene mutation probability, defaults to
            :py:attr:`~wrapper.DEFAULT_MUT_IND_PB`
        :type mut_ind_pb: :py:class:`float`, optional
        :param sel_func: Selection function (:py:func:`~deap.tools.selNSGA2` or
            :py:func:`~deap.tools.selNSGA3`), defaults to
            :py:attr:`~wrapper.DEFAULT_NSGA_SEL_FUNC`
        :type sel_func: Any callable object, optional
        :param sel_func_params: Selection function parameters. If NSGA-III is
            used, this attribute must include a key named *'ref_points'*
            containing a :py:class:`dict` with the parameters needed to
            generate the reference points (the arguments of
            :py:func:`~deap.tools.uniform_reference_points`). Since *sel_func*
            defaults to NSGA-II, the default value for *sel_func_params*
            is :py:attr:`~wrapper.DEFAULT_NSGA_SEL_FUNC_PARAMS`
        :type sel_func_params: :py:class:`dict`, optional
        :param checkpoint_freq: Frequency for checkpointing, defaults to
            :py:attr:`~base.DEFAULT_WRAPPER_CHECKPOINT_FREQ`
        :type checkpoint_freq: :py:class:`int`, optional
        :param checkpoint_file: File path for checkpointing, defaults to
            :py:attr:`~base.DEFAULT_WRAPPER_CHECKPOINT_FILE`
        :type checkpoint_file: :py:class:`str`, optional
        :param random_seed: Random seed for the random generator, defaults to
            `None`
        :type random_seed: :py:class:`int`, optional
        :param verbose: Whether or not to log the statistics, defaults to
            :py:data:`__debug__`
        :type verbose: :py:class:`bool`
        :raises TypeError: If any parameter has a wrong type
        """
        # Initialize the wrapper process
        super().__init__(individual_cls, species, **params)

        # Register the selection operator
        self.sel_func = params.pop('sel_func', DEFAULT_NSGA_SEL_FUNC)
        self.sel_func_params = params.pop('sel_func_params',
                                          DEFAULT_NSGA_SEL_FUNC_PARAMS)
        ref_points = self.sel_func_params.pop('ref_points', None)

        # If NSGA3 is selected, the reference points are mandatory
        if self.sel_func is selNSGA3:
            # If sel_func_params doesn't define the ref points for NSGA-III
            if ref_points is None:
                raise ValueError("The reference points parameters are missing")

            ref_points = uniform_reference_points(**ref_points)
            self.sel_func_params['ref_points'] = ref_points

        self._toolbox.register("select", self.sel_func, **self.sel_func_params)
Exemplo n.º 23
0
    # Print the evaluation values of the 10 best airfoil candidates
    k = 0
    for ind, fit in zip(pop, pop_fit):
        try:
            k += 1
            print(k)
            print("individual:" + str(ind) + "\nfit:" + str(fit))
        except Exception as e:
            print("message:{0}".format(e))

    #pf = problem.pareto_front(ref_points)
    #print(igd(pop_fit, pf))

    fig = plt.figure(figsize=(7, 7))
    ax = fig.add_subplot(111)

    p = np.array([ind.fitness.values for ind in pop])
    ax.scatter(p[:, 0], p[:, 1], marker="o", label="Final Population")

    #ax.scatter(pf[:, 0], pf[:, 1], pf[:, 2], marker="x", c="k", s=32, label="Ideal Pareto Front")

    ref_points = tools.uniform_reference_points(ng.NOBJ, ng.P)

    ax.scatter(ref_points[:, 0], ref_points[:, 1], marker="o", label="Reference Points")

    ax.autoscale(tight=True)
    plt.legend()
    plt.tight_layout()
    plt.savefig("nsga3.png")
Exemplo n.º 24
0
def cougar_individual() -> list:
    default = DEFAULT_CLUSTERING_PARAMS[CLUSTERING_ALGORITHM]
    ind = list()
    for param in default:
        low = param - param * random.random()
        high = param + param * random.random()
        ind.append(random.uniform(low, high))
    return ind


# Must define these values, and functions above, at the root level of the script to
# ensure that SCOOP workers can access them.
creator.create('FitnessMaxMinMax', base.Fitness, weights=(1.0, -1.0, 1.0))
creator.create('Individual', list, fitness=creator.FitnessMaxMinMax)
ref_points = tools.uniform_reference_points(NUMBER_OF_OBJECTIVES, P)

toolbox = base.Toolbox()
toolbox.register('attr_cougar', cougar_individual)
toolbox.register('individual', tools.initIterate, creator.Individual,
                 toolbox.attr_cougar)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)

toolbox.register('evaluate', eval_cougar)
BOUND_LOW = DEFAULT_CLUSTERING_BOUNDS[CLUSTERING_ALGORITHM][0]
BOUND_UP = DEFAULT_CLUSTERING_BOUNDS[CLUSTERING_ALGORITHM][1]
toolbox.register('mate',
                 tools.cxSimulatedBinaryBounded,
                 low=BOUND_LOW,
                 up=BOUND_UP,
                 eta=30.0)
Exemplo n.º 25
0
def non_dominated_sorting_genetic_algorithm(locator,
                                            building_names_all,
                                            district_heating_network,
                                            district_cooling_network,
                                            building_names_heating,
                                            building_names_cooling,
                                            building_names_electricity,
                                            network_features,
                                            weather_features,
                                            config,
                                            prices,
                                            lca):
    # LOCAL VARIABLES
    NGEN = config.optimization.number_of_generations  # number of generations
    MU = config.optimization.population_size  # int(H + (4 - H % 4)) # number of individuals to select
    RANDOM_SEED = config.optimization.random_seed
    CXPB = config.optimization.crossover_prob
    MUTPB = config.optimization.mutation_prob
    technologies_heating_allowed = config.optimization.technologies_DH
    technologies_cooling_allowed = config.optimization.technologies_DC
    mutation_method_integer = config.optimization.mutation_method_integer
    mutation_method_continuous = config.optimization.mutation_method_continuous
    crossover_method_integer = config.optimization.crossover_method_integer
    crossover_method_continuous = config.optimization.crossover_method_continuous

    # SET-UP EVOLUTIONARY ALGORITHM
    # Hyperparameters
    P = 12
    ref_points = tools.uniform_reference_points(NOBJ, P)
    if MU is None:
        H = factorial(NOBJ + P - 1) / (factorial(P) * factorial(NOBJ - 1))
        MU = int(H + (4 - H % 4))
    random.seed(RANDOM_SEED)
    np.random.seed(RANDOM_SEED)

    # SET-UP INDIVIDUAL STRUCTURE INCLUDING HOW EVERY POINT IS CALLED (COLUMN_NAMES)
    column_names, \
    heating_unit_names_share, \
    cooling_unit_names_share, \
    column_names_buildings_heating, \
    column_names_buildings_cooling = get_column_names_individual(district_heating_network,
                                                                 district_cooling_network,
                                                                 building_names_heating,
                                                                 building_names_cooling,
                                                                 technologies_heating_allowed,
                                                                 technologies_cooling_allowed,

                                                                 )
    individual_with_names_dict = create_empty_individual(column_names,
                                                         column_names_buildings_heating,
                                                         column_names_buildings_cooling,
                                                         district_heating_network,
                                                         district_cooling_network,
                                                         technologies_heating_allowed,
                                                         technologies_cooling_allowed,
                                                         )

    # DEAP LIBRARY REFERENCE_POINT CLASSES AND TOOLS
    # reference points
    toolbox = base.Toolbox()
    toolbox.register("generate",
                     generate_main,
                     individual_with_names_dict=individual_with_names_dict,
                     column_names=column_names,
                     column_names_buildings_heating=column_names_buildings_heating,
                     column_names_buildings_cooling=column_names_buildings_cooling,
                     district_heating_network=district_heating_network,
                     district_cooling_network=district_cooling_network,
                     technologies_heating_allowed=technologies_heating_allowed,
                     technologies_cooling_allowed=technologies_cooling_allowed,
                     )
    toolbox.register("individual",
                     tools.initIterate,
                     creator.Individual,
                     toolbox.generate)
    toolbox.register("population",
                     tools.initRepeat,
                     list,
                     toolbox.individual)
    toolbox.register("mate",
                     crossover_main,
                     indpb=CXPB,
                     column_names=column_names,
                     heating_unit_names_share=heating_unit_names_share,
                     cooling_unit_names_share=cooling_unit_names_share,
                     column_names_buildings_heating=column_names_buildings_heating,
                     column_names_buildings_cooling=column_names_buildings_cooling,
                     district_heating_network=district_heating_network,
                     district_cooling_network=district_cooling_network,
                     technologies_heating_allowed=technologies_heating_allowed,
                     technologies_cooling_allowed=technologies_cooling_allowed,
                     crossover_method_integer=crossover_method_integer,
                     crossover_method_continuous=crossover_method_continuous)
    toolbox.register("mutate",
                     mutation_main,
                     indpb=MUTPB,
                     column_names=column_names,
                     heating_unit_names_share=heating_unit_names_share,
                     cooling_unit_names_share=cooling_unit_names_share,
                     column_names_buildings_heating=column_names_buildings_heating,
                     column_names_buildings_cooling=column_names_buildings_cooling,
                     district_heating_network=district_heating_network,
                     district_cooling_network=district_cooling_network,
                     technologies_heating_allowed=technologies_heating_allowed,
                     technologies_cooling_allowed=technologies_cooling_allowed,
                     mutation_method_integer=mutation_method_integer,
                     mutation_method_continuous=mutation_method_continuous
                     )
    toolbox.register("evaluate",
                     objective_function_wrapper)
    toolbox.register("select",
                     tools.selNSGA3WithMemory(ref_points))
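    # Note (added): tools.selNSGA3WithMemory returns a selection object that
    # remembers the best, worst and extreme points across generations for
    # hyperplane normalization, so it is registered already instantiated rather
    # than as a bare function plus arguments.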

    # configure multiprocessing
    if config.multiprocessing:
        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        toolbox.register("map", pool.map)

    # Initialize statistics object
    paretofrontier = tools.ParetoFront()
    generational_distances = []
    difference_generational_distances = []
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, izip(invalid_ind, range(len(invalid_ind)), repeat(0, len(invalid_ind)),
                                                   repeat(building_names_all, len(invalid_ind)),
                                                   repeat(column_names_buildings_heating, len(invalid_ind)),
                                                   repeat(column_names_buildings_cooling, len(invalid_ind)),
                                                   repeat(building_names_heating, len(invalid_ind)),
                                                   repeat(building_names_cooling, len(invalid_ind)),
                                                   repeat(building_names_electricity, len(invalid_ind)),
                                                   repeat(locator, len(invalid_ind)),
                                                   repeat(network_features, len(invalid_ind)),
                                                   repeat(weather_features, len(invalid_ind)),
                                                   repeat(config, len(invalid_ind)),
                                                   repeat(prices, len(invalid_ind)),
                                                   repeat(lca, len(invalid_ind)),
                                                   repeat(district_heating_network, len(invalid_ind)),
                                                   repeat(district_cooling_network, len(invalid_ind)),
                                                   repeat(technologies_heating_allowed, len(invalid_ind)),
                                                   repeat(technologies_cooling_allowed, len(invalid_ind)),
                                                   repeat(column_names, len(invalid_ind))))

    # normalization of the first generation
    scaler_dict = scaler_for_normalization(NOBJ, fitnesses)
    fitnesses = normalize_fitnesses(scaler_dict, fitnesses)

    # add fitnesses to population individuals
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # Compile statistics about the population
    record = stats.compile(pop)
    paretofrontier.update(pop)
    performance_metrics = calc_performance_metrics(0.0, paretofrontier)
    generational_distances.append(performance_metrics[0])
    difference_generational_distances.append(performance_metrics[1])
    logbook.record(gen=0, evals=len(invalid_ind), **record)

    # create a dictionary to store which individuals that are being calculated
    record_individuals_tested = {'generation': [], "individual_id": [], "individual_code": []}
    record_individuals_tested = calc_dictionary_of_all_individuals_tested(record_individuals_tested, gen=0,
                                                                          invalid_ind=invalid_ind)
    print(logbook.stream)

    # Begin the generational process
    # Initialization of variables
    for gen in range(1, NGEN + 1):
        print ("Evaluating Generation %s of %s generations" % (gen, NGEN + 1))
        # Select and clone the next generation individuals
        offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        invalid_ind = [ind for ind in invalid_ind if ind not in pop]
        fitnesses = toolbox.map(toolbox.evaluate,
                                izip(invalid_ind, range(len(invalid_ind)), repeat(gen, len(invalid_ind)),
                                     repeat(building_names_all, len(invalid_ind)),
                                     repeat(column_names_buildings_heating, len(invalid_ind)),
                                     repeat(column_names_buildings_cooling, len(invalid_ind)),
                                     repeat(building_names_heating, len(invalid_ind)),
                                     repeat(building_names_cooling, len(invalid_ind)),
                                     repeat(building_names_electricity, len(invalid_ind)),
                                     repeat(locator, len(invalid_ind)),
                                     repeat(network_features, len(invalid_ind)),
                                     repeat(weather_features, len(invalid_ind)),
                                     repeat(config, len(invalid_ind)),
                                     repeat(prices, len(invalid_ind)),
                                     repeat(lca, len(invalid_ind)),
                                     repeat(district_heating_network, len(invalid_ind)),
                                     repeat(district_cooling_network, len(invalid_ind)),
                                     repeat(technologies_heating_allowed, len(invalid_ind)),
                                     repeat(technologies_cooling_allowed, len(invalid_ind)),
                                     repeat(column_names, len(invalid_ind))))
        # normalization of the second generation on
        fitnesses = normalize_fitnesses(scaler_dict, fitnesses)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population from parents and offspring
        pop = toolbox.select(pop + invalid_ind, MU)

        # get paretofront and update dictionary of individuals evaluated
        paretofrontier.update(pop)
        record_individuals_tested = calc_dictionary_of_all_individuals_tested(record_individuals_tested, gen=gen,
                                                                              invalid_ind=invalid_ind)

        # Compile statistics about the new population
        record = stats.compile(pop)
        performance_metrics = calc_performance_metrics(generational_distances[-1], paretofrontier)
        generational_distances.append(performance_metrics[0])
        difference_generational_distances.append(performance_metrics[1])
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

        DHN_network_list_tested = []
        DCN_network_list_tested = []
        for individual in invalid_ind:
            DHN_barcode, DCN_barcode, individual_with_name_dict, _ = individual_to_barcode(individual,
                                                                                           building_names_all,
                                                                                           building_names_heating,
                                                                                           building_names_cooling,
                                                                                           column_names,
                                                                                           column_names_buildings_heating,
                                                                                           column_names_buildings_cooling)
            DCN_network_list_tested.append(DCN_barcode)
            DHN_network_list_tested.append(DHN_barcode)

        if config.debug:
            print "Saving results for generation", gen, "\n"
            valid_generation = [gen]
            save_generation_dataframes(gen, invalid_ind, locator, DCN_network_list_tested, DHN_network_list_tested)
            save_generation_individuals(column_names, gen, invalid_ind, locator)
            systems_name_list = save_generation_pareto_individuals(locator, gen, record_individuals_tested, paretofrontier)
        else:
            systems_name_list = []
            valid_generation = []

        if gen == NGEN and not config.debug:  # final generation: re-evaluate the pareto front
            print("Saving results for generation %s\n" % gen)
            valid_generation = [gen]
            systems_name_list = save_final_generation_pareto_individuals(toolbox,
                                                     locator,
                                                     gen,
                                                     record_individuals_tested,
                                                     paretofrontier,
                                                     building_names_all,
                                                     column_names_buildings_heating,
                                                     column_names_buildings_cooling,
                                                     building_names_heating,
                                                     building_names_cooling,
                                                     building_names_electricity,
                                                     network_features,
                                                     weather_features,
                                                     config,
                                                     prices,
                                                     lca,
                                                     district_heating_network,
                                                     district_cooling_network,
                                                     technologies_heating_allowed,
                                                     technologies_cooling_allowed,
                                                     column_names)

        # Create Checkpoint if necessary
        print "Creating CheckPoint", gen, "\n"
        with open(locator.get_optimization_checkpoint(gen), "wb") as fp:
            cp = dict(generation=gen,
                      selected_population=pop,
                      tested_population=invalid_ind,
                      generational_distances=generational_distances,
                      difference_generational_distances=difference_generational_distances,
                      systems_to_show=systems_name_list,
                      generation_to_show=valid_generation,
                      )
            json.dump(cp, fp)
    if config.multiprocessing:
        pool.close()

    return pop, logbook
Exemplo n.º 26
0
import random
from copy import copy

import numpy as np
import scipy.stats as st
from deap.benchmarks.tools import hypervolume
from deap import creator, base, tools
from deap.tools import sortNondominated

from DSE.evaluation import monte_carlo
from DSE.evaluation.Pareto_UCB1 import pareto_ucb1
from DSE.evaluation.SAR import sSAR

from DSE.exploration import Chromosome, SearchSpace

REF_POINTS = tools.uniform_reference_points(3)

# Has to be defined globally
# https://stackoverflow.com/a/61082335
weights = (1.0, -1.0, -1.0)
creator.create("FitnessDSE", base.Fitness, weights=weights)
creator.create("Individual", Chromosome, fitness=creator.FitnessDSE)


def scalarized_lambda(w):
    """ Return function that will linearly scalarize a given vector based on weights w.
    :param w: iterable
    :return: lambda function vec: scalarized reward
    """
    return lambda vec: linear_scalarize(vec, w)
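

# `linear_scalarize` is referenced above but not included in this excerpt.
# A minimal sketch of what it presumably does (a weighted sum of the reward
# vector); the actual implementation may differ:
def linear_scalarize(vec, w):
    """Collapse a multi-objective reward vector into a scalar via weights w."""
    return sum(v_i * w_i for v_i, w_i in zip(vec, w))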