Example #1
File: ibea.py  Project: gitter-badger/deap
def main(seed=None):
    """Main"""
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    algorithms.eaAlphaMuPlusLambda(pop, toolbox,
                                   MU, None, CXPB, 1.0 - CXPB, NGEN, stats)

    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
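The main() above (and several of the examples below) relies on module-level objects that the snippet does not show: a creator-defined Individual class, a configured toolbox, and the hypervolume helper. A minimal sketch of the kind of setup these examples assume, based on DEAP's standard multi-objective examples; the operators shown (ZDT1, selNSGA2) and the bounds are illustrative, and the IBEA example in the source project registers its own selection operator.

import random
import numpy
from deap import algorithms, base, benchmarks, creator, tools
from deap.benchmarks.tools import hypervolume

# Two-objective minimization problem; weights and bounds are assumptions.
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

NDIM = 30
BOUND_LOW, BOUND_UP = 0.0, 1.0

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, BOUND_LOW, BOUND_UP)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, NDIM)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.register("mate", tools.cxSimulatedBinaryBounded,
                 low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded,
                 low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0 / NDIM)
toolbox.register("select", tools.selNSGA2)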
Example #2
def test_nsga2():
    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU = 16
    NGEN = 100

    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.uniform, BOUND_LOW, BOUND_UP)
    toolbox.register("individual", tools.initRepeat,
                     creator.__dict__[INDCLSNAME], toolbox.attr_float, NDIM)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0)
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0,
                     indpb=1.0 / NDIM)
    toolbox.register("select", tools.selNSGA2)

    pop = toolbox.population(n=MU)
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    pop = toolbox.select(pop, len(pop))
    for gen in range(1, NGEN):
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= 0.9:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        pop = toolbox.select(pop + offspring, MU)

    hv = hypervolume(pop, [11.0, 11.0])
    # hv = 120.777 # Optimal value

    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (
        hv, HV_THRESHOLD)

    for ind in pop:
        assert not (any(numpy.asarray(ind) < BOUND_LOW)
                    or any(numpy.asarray(ind) > BOUND_UP))
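test_nsga2 (like the other tests in this listing) references module-level fixtures, INDCLSNAME and HV_THRESHOLD, defined elsewhere in the test module. A sketch of plausible definitions; the class names and threshold value are assumptions for illustration only.

from deap import base, creator

FITCLSNAME = "FIT_TYPE"
INDCLSNAME = "IND_TYPE"
HV_THRESHOLD = 116.0  # sanity bound, below the optimal hypervolume of ~120.777

creator.create(FITCLSNAME, base.Fitness, weights=(-1.0, -1.0))
creator.create(INDCLSNAME, list, fitness=creator.__dict__[FITCLSNAME])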
Example #3
def main(sim, NGEN, PSIZE, CXPB, seed):
    # print(NGEN,PSIZE,CXPB,seed)
    random.seed(seed)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=PSIZE)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = evaluate_population(sim, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = evaluate_population(sim, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, PSIZE)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example #4
    def run(self):
        self.setup()

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in self.pop if not ind.fitness.valid]
        fitnesses = list(self.toolbox.map(self.toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # This is just to assign the crowding distance to the individuals
        # no actual selection is done
        self.pop = self.toolbox.select(self.pop, len(self.pop))

        record = self.stats.compile(self.pop)
        self.logbook.record(gen=0, evals=len(invalid_ind), **record)
        print(self.logbook.stream)

        # Begin the generational process
        for i_generation in range(1, self.n_generation):
            # Vary the population
            #offspring = tools.selTournamentDCD(self.pop, len(self.pop))
            #offspring = [self.toolbox.clone(ind) for ind in offspring]
            offspring = algorithms.varAnd(self.pop, self.toolbox, self.CXPB,
                                          self.MUTPB)
            '''
            for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
                if random.random() <= self.crossover_probability:
                    self.toolbox.mate(ind1, ind2)

                self.toolbox.mutate(ind1)
                self.toolbox.mutate(ind2)
                del ind1.fitness.values, ind2.fitness.values
            '''
            # Evaluate the individuals with an invalid fitness
            '''
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit
            '''
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Select the next generation population
            self.pop = self.toolbox.select(self.pop + offspring,
                                           self.n_population)
            record = self.stats.compile(self.pop)
            self.logbook.record(gen=i_generation,
                                evals=len(invalid_ind),
                                **record)
            print(self.logbook.stream)

        print("Final population hypervolume is %f" %
              hypervolume(self.pop, [11.0, 11.0]))
        return self.pop, self.logbook
Example #5
    def __PrintOutput(self, front, pop, SaveFile=False):

        NFront = front.shape[0]

        if self.Metrics:
            GenDist = GD(front, self.TPF)
            SS = Spread2D(front, self.TPF)
            HausDist = HD(front, self.TPF)[0]
        else:
            GenDist = np.nan
            SS = np.nan
            HausDist = np.nan

        Cover = Coverage(front)
        HV = hypervolume(pop, [11.0] * self.NObj)

        if self.MetricsPS and self.Metrics:
            FPS = []
            for x in pop:
                FF = -self.target(x)
                FPS += [[FF[i] for i in range(self.NObj)]]
            FPS = np.array(FPS)

            GDPS = GD(FPS, self.TPF)
            SSPS = Spread2D(FPS, self.TPF)
            HDPS = HD(FPS, self.TPF)[0]
        else:
            GDPS = np.nan
            SSPS = np.nan
            HDPS = np.nan

        self.vprint(f"NFront = {NFront}, GD = {GenDist:7.3e} |"
                    f" SS = {SS:7.3e} | HV = {HV:7.3e} ")

        if SaveFile:
            FrontFilename = f"FF_D{self.NParam:02d}_I{self.counter:04d}_" + \
                f"NI{self.N_init_points:02d}_P{self.NewProb:4.2f}_" + \
                f"Q{self.q:4.2f}" + \
                self.Filename

            PF = np.asarray([np.asarray(y) for y in self.y_Pareto])
            PS = np.asarray([np.asarray(x) for x in self.x_Pareto])

            Population = np.asarray(pop)
            np.savez(FrontFilename, Front=front, Pop=Population, PF=PF, PS=PS)

            FrontFilename += ".npz"
        else:
            FrontFilename = np.nan

        self.FF.write("{} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n".format(
            self.NParam, self.counter + len(self.init_points),
            self.N_init_points, NFront, GenDist, SS, HV, HausDist, Cover, GDPS,
            SSPS, HDPS, self.NewProb, self.q, FrontFilename))

        return
Example #6
def test_nsga3():
    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU = 16
    NGEN = 100

    ref_points = tools.uniform_reference_points(2, p=12)

    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.uniform, BOUND_LOW, BOUND_UP)
    toolbox.register("individual", tools.initRepeat,
                     creator.__dict__[INDCLSNAME], toolbox.attr_float, NDIM)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0)
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0,
                     indpb=1.0 / NDIM)
    toolbox.register("select", tools.selNSGA3, ref_points=ref_points)

    pop = toolbox.population(n=MU)
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    pop = toolbox.select(pop, len(pop))
    # Begin the generational process
    for gen in range(1, NGEN):
        offspring = algorithms.varAnd(pop, toolbox, 1.0, 1.0)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)

    hv = hypervolume(pop, [11.0, 11.0])
    # hv = 120.777 # Optimal value

    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (
        hv, HV_THRESHOLD)

    for ind in pop:
        assert not (any(numpy.asarray(ind) < BOUND_LOW)
                    or any(numpy.asarray(ind) > BOUND_UP))
Example #7
def nsgaSpea(ngen, mu, cxpb, toolbox, halloffame, stats, seed=None):

    random.seed(seed)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=mu)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, ngen):
        # Vary the population
        offspring = toolbox.tournament(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= cxpb:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, mu)

        halloffame.update(pop)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example #8
def test_mo_cma_es():

    def distance(feasible_ind, original_ind):
        """A distance function to the feasability region."""
        return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))

    def closest_feasible(individual):
        """A function returning a valid individual from an invalid one."""
        feasible_ind = numpy.array(individual)
        feasible_ind = numpy.maximum(BOUND_LOW, feasible_ind)
        feasible_ind = numpy.minimum(BOUND_UP, feasible_ind)
        return feasible_ind

    def valid(individual):
        """Determines if the individual is valid or not."""
        if any(individual < BOUND_LOW) or any(individual > BOUND_UP):
            return False
        return True

    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU, LAMBDA = 10, 10
    NGEN = 500

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [creator.__dict__[INDCLSNAME](x) for x in numpy.random.uniform(BOUND_LOW, BOUND_UP, (MU, NDIM))]

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.decorate("evaluate", tools.ClosestValidPenality(valid, closest_feasible, 1.0e-6, distance))

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
    
    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        
        # Update the strategy with the evaluated individuals
        toolbox.update(population)
    
    hv = hypervolume(strategy.parents, [11.0, 11.0])
    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (hv, HV_THRESHOLD)
Example #9
def test_mo_cma_es():

    def distance(feasible_ind, original_ind):
        """A distance function to the feasibility region."""
        return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))

    def closest_feasible(individual):
        """A function returning a valid individual from an invalid one."""
        feasible_ind = numpy.array(individual)
        feasible_ind = numpy.maximum(BOUND_LOW, feasible_ind)
        feasible_ind = numpy.minimum(BOUND_UP, feasible_ind)
        return feasible_ind

    def valid(individual):
        """Determines if the individual is valid or not."""
        if any(individual < BOUND_LOW) or any(individual > BOUND_UP):
            return False
        return True

    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU, LAMBDA = 10, 10
    NGEN = 500

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [creator.__dict__[INDCLSNAME](x) for x in numpy.random.uniform(BOUND_LOW, BOUND_UP, (MU, NDIM))]

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.decorate("evaluate", tools.ClosestValidPenality(valid, closest_feasible, 1.0e-6, distance))

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
    
    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        
        # Update the strategy with the evaluated individuals
        toolbox.update(population)
    
    hv = hypervolume(strategy.parents, [11.0, 11.0])
    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (hv, HV_THRESHOLD)
Example #10
def test_nsga2():
    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU = 16
    NGEN = 100

    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.uniform, BOUND_LOW, BOUND_UP)
    toolbox.register("individual", tools.initRepeat, creator.__dict__[INDCLSNAME], toolbox.attr_float, NDIM)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
    toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
    toolbox.register("select", tools.selNSGA2)

    pop = toolbox.population(n=MU)
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    pop = toolbox.select(pop, len(pop))
    for gen in range(1, NGEN):
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]
        
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= 0.9:
                toolbox.mate(ind1, ind2)
            
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values
        
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        pop = toolbox.select(pop + offspring, MU)

    hv = hypervolume(pop, [11.0, 11.0])
    # hv = 120.777 # Optimal value

    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (hv, HV_THRESHOLD)
Example #11
def nsga3(ngen, mu, cxpb, mutpb, toolbox, halloffame, stats, seed=None):

    random.seed(seed)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=mu)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # Compile statistics about the population
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, ngen):
        offspring = algorithms.varAnd(pop, toolbox, cxpb, mutpb)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population from parents and offspring
        pop = toolbox.select(pop + offspring, mu)

        # Compile statistics about the new population
        halloffame.update(pop)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example #12
def main(seed=None):
    random.seed(seed)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)

    # Begin the generational process
    for gen in range(1, NGEN):
        offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population from parents and offspring
        pop = toolbox.select(pop + offspring, MU)

        # Compile statistics about the new population
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))
    return pop, logbook
Example #13
    def calculate_hypervolume(self, pop):

        self.logger.info('Calculating hypervolume...')

        if self._hypervolume_ref_point is None:
            self.logger.info('Calculating hypervolume reference point...')
            worst_values = []
            fitness_array = np.array([i.fitness.values for i in pop])
            maxs = np.max(fitness_array, 0)
            mins = np.min(fitness_array, 0)
            for i, weight in enumerate(self.weights):
                if weight <= 0:
                    worst_values.append(maxs[i])
                else:
                    worst_values.append(mins[i])
            self._hypervolume_ref_point = np.array(worst_values)

        hv = hypervolume(pop, self._hypervolume_ref_point)
        self._progress_log.append(hv)
        self.logger.info('Hypervolume of the generation: {}'.format(self._progress_log[-1]))
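For reference, DEAP's hypervolume helper also works without an explicit reference point, in which case it uses the worst value of each (weighted, minimized) objective plus one. A short hedged usage sketch; log_hypervolume and its pop argument are illustrative names, not part of the class above.

from deap.benchmarks.tools import hypervolume

def log_hypervolume(pop):
    """Hedged sketch: hypervolume with and without an explicit reference point."""
    hv_fixed = hypervolume(pop, [11.0, 11.0])  # explicit reference point, as in most examples here
    hv_auto = hypervolume(pop)                 # reference derived from the worst objective values + 1
    return hv_fixed, hv_auto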
Example #14
def main(seed=None):
    random.seed(seed)

    NGEN = 999
    # MU = 120
    MU = 80
    # MU = 4
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map_distributed(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Re-evaluate all individuals due to the stochastic nature of the simulation
        for ind in offspring:
            del ind.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map_distributed(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

        front = numpy.array(
            [ind.fitness.values + tuple(ind) for ind in pop])

        print("front: {}".format(front))

        try:
            conn = mysql.connector.connect(**config)
            print("Connection established")
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with the user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exist")
            else:
                print(err)
        else:
            cursor = conn.cursor()

        # first_part = 'INSERT INTO carbon_results (reward,carbon_1,carbon_2,carbon_3,carbon_4,carbon_5,carbon_6,carbon_7,carbon_8,carbon_9,carbon_10,carbon_11,carbon_12,carbon_13,carbon_14,carbon_15,carbon_16,carbon_17,carbon_18) VALUES '
        #
        # insert_vars = "".join(["({},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}),\n".format(ind.flat[0], ind.flat[1], ind.flat[2], ind.flat[3], ind.flat[4], ind.flat[5], ind.flat[6], ind.flat[7], ind.flat[8], ind.flat[9], ind.flat[10], ind.flat[11], ind.flat[12], ind.flat[13], ind.flat[14], ind.flat[15], ind.flat[16], ind.flat[17], ind.flat[18]) for ind in front])

        # first_part = 'INSERT INTO carbon_results_function (average_electricity_price,carbon_emitted,attr_function,attr_m,attr_c,attr_a,attr_d) VALUES '
        #
        # insert_vars = "".join(["({},{},{},{},{},{},{}),\n".format(ind.flat[0], ind.flat[1], ind.flat[2], ind.flat[3], ind.flat[4], ind.flat[5], ind.flat[6]) for ind in front])
        first_part = 'INSERT INTO carbon_results_function (average_electricity_price,carbon_emitted,attr_m,attr_c) VALUES '

        insert_vars = "".join(["({},{},{},{}),\n".format(ind.flat[0], ind.flat[1], ind.flat[2], ind.flat[3]) for ind in front])



        insert_cmd = first_part+insert_vars
        insert_cmd = insert_cmd[:-2]
        # print("command: {}".format(insert_cmd))

        cursor.execute(insert_cmd)
        conn.commit()
        cursor.close()
        conn.close()


    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
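The INSERT statement in the example above is assembled by string formatting. A hedged sketch of the same insert written with parameterized queries via mysql.connector's executemany; insert_front is a hypothetical helper, and it assumes the same carbon_results_function table, connection object, and front array as above.

def insert_front(conn, front):
    """Hedged sketch: same INSERT as above, using parameterized queries."""
    insert_stmt = ("INSERT INTO carbon_results_function "
                   "(average_electricity_price, carbon_emitted, attr_m, attr_c) "
                   "VALUES (%s, %s, %s, %s)")
    rows = [(float(row[0]), float(row[1]), float(row[2]), float(row[3]))
            for row in front]
    cursor = conn.cursor()
    cursor.executemany(insert_stmt, rows)
    conn.commit()
    cursor.close()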
Example #15
File: cma_mo.py  Project: DEAP/deap
def main():
    # The cma module uses the numpy random number generator
    # numpy.random.seed(128)

    MU, LAMBDA = 10, 10
    NGEN = 500
    verbose = True
    create_plot = False

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))]

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
   
    logbook = tools.Logbook()
    logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])

    fitness_history = []

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
            fitness_history.append(fit)
        
        # Update the strategy with the evaluated individuals
        toolbox.update(population)
        
        record = stats.compile(population) if stats is not None else {}
        logbook.record(gen=gen, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

    if verbose:
        print("Final population hypervolume is %f" % hypervolume(strategy.parents, [11.0, 11.0]))

        # Note that we use a penalty to guide the search to feasible solutions,
        # but there is no guarantee that individuals are valid.
        # We expect the best individuals will be within bounds or very close.
        num_valid = 0
        for ind in strategy.parents:
            dist = distance(closest_feasible(ind), ind)
            if numpy.isclose(dist, 0.0, rtol=1.e-5, atol=1.e-5):
                num_valid += 1
        print("Number of valid individuals is %d/%d" % (num_valid, len(strategy.parents)))

        print("Final population:")
        print(numpy.asarray(strategy.parents))

    if create_plot:
        interactive = 0
        if not interactive:
            import matplotlib as mpl_tmp
            mpl_tmp.use('Agg')   # Force matplotlib to not use any Xwindows backend.
        import matplotlib.pyplot as plt

        fig = plt.figure()
        plt.title("Multi-objective minimization via MO-CMA-ES")
        plt.xlabel("First objective (function) to minimize")
        plt.ylabel("Second objective (function) to minimize")

        # Limit the scale because our history values include the penalty.
        plt.xlim((-0.1, 1.20))
        plt.ylim((-0.1, 1.20))

        # Plot all history. Note the values include the penalty.
        fitness_history = numpy.asarray(fitness_history)
        plt.scatter(fitness_history[:,0], fitness_history[:,1],
            facecolors='none', edgecolors="lightblue")

        valid_front = numpy.array([ind.fitness.values for ind in strategy.parents if close_valid(ind)])
        invalid_front = numpy.array([ind.fitness.values for ind in strategy.parents if not close_valid(ind)])

        if len(valid_front) > 0:
            plt.scatter(valid_front[:,0], valid_front[:,1], c="g")
        if len(invalid_front) > 0:
            plt.scatter(invalid_front[:,0], invalid_front[:,1], c="r")

        if interactive:
            plt.show()
        else:
            print("Writing cma_mo.png")
            plt.savefig("cma_mo.png")

    return strategy.parents
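This cma_mo.py main() calls distance, closest_feasible, and close_valid, which are defined elsewhere in the file and not shown here. A sketch of what such helpers can look like, mirroring the versions shown in Example #8; close_valid is an assumption (a bounds check with a small tolerance), and the bounds are illustrative.

import numpy

BOUND_LOW, BOUND_UP = 0.0, 1.0  # assumed bounds, matching Example #8

def distance(feasible_ind, original_ind):
    """Squared distance between an individual and its feasible projection."""
    return sum((f - o) ** 2 for f, o in zip(feasible_ind, original_ind))

def closest_feasible(individual):
    """Clip an individual back into the feasible box."""
    return numpy.clip(numpy.asarray(individual), BOUND_LOW, BOUND_UP)

def close_valid(individual):
    """Assumed helper: True if the individual is within bounds up to a small tolerance."""
    ind = numpy.asarray(individual)
    return bool(numpy.all(ind >= BOUND_LOW - 1e-5) and numpy.all(ind <= BOUND_UP + 1e-5))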
Example #16
def main(seed=None):
    random.seed(seed)

    NGEN = 2
    MU = 4
    CXPB = 0.6
    pop = toolbox.population(n=MU)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    stats.register("pop", copy.deepcopy)

    history = tools.History()
    # Decorate the variation operators
    #toolbox.register("variate", variate, mate=toolbox.mate, mutate=toolbox.mutate)
    #toolbox.decorate("variate", history.decorator)
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    plt.figure(figsize=(10, 4))
    plt.subplot(1, 2, 1)
    for ind in pop:
        plt.plot(ind[0], ind[1], 'k.', ms=3)
    plt.xlabel('$x_1$')
    plt.ylabel('$x_2$')
    plt.title('Decision space')
    plt.subplot(1, 2, 2)
    for ind in pop:
        plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'k.', ms=3)
    plt.xlabel(r'$f_1(\mathbf{x})$')
    plt.ylabel(r'$f_2(\mathbf{x})$')
    plt.xlim((0.5, 3.6))
    plt.ylim((0.5, 3.6))
    plt.title('Objective space')
    plt.savefig("objective.png", dpi=200)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "fitness", "size", "pop", "ind"
    pickle.dump(logbook, open('nsga_ii-results.pickle', 'wb'),
                pickle.HIGHEST_PROTOCOL)

    hof = tools.ParetoFront()

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    #hof.update(pop)

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print "Evaluated %i individuals" % len(invalid_ind)

        pop = toolbox.select(pop + offspring, len(offspring))
        hof.update(pop)

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

        plt.close("all")
        front = numpy.array([ind.fitness.values for ind in pop])
        plt.figure(figsize=(10, 10))
        #fig,ax = plt.subplots(1,gen)
        plt.scatter(front[:, 0], front[:, 1], c="b")
        #locals()["ax"+str(gen)]=plt.scatter(front[:,0], front[:,1], c="b")
        #plt.tight_layout()
        plt.xlabel("RT(Time)")
        plt.ylabel("Memory usage, Mb")
        plt.savefig("front_gen" + str(gen) + ".png", dpi=200)

    print("Pareto individuals are:")
    for ind in hof:
        print(ind, ind.fitness.values)
    print("XXXXXXXXXX Making plots XXXXXXXXXXXXX")

    #fig = plt.figure(figsize=(10,10))
    #ax = fig.gca()
    #ax.set_xlabel('RT')
    #ax.set_ylabel('Memory')
    #anim = animation.FuncAnimation(fig, lambda i: animate(i, logbook),
    #                           frames=len(logbook), interval=1,
    #                           blit=True)
    #anim.save('nsgaii-geantv.mp4', fps=15, bitrate=-1, dpi=500)
    #anim.save('populations.gif', writer='imagemagick')

    #print("XXXXXXXXXXXXXXXXXXXXXXX")

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    print("XXXXXXXXXXX Making more plots XXXXXXXXXXXX")
    fronts_s = tools.emo.sortLogNondominated(pop, len(pop))
    plot_colors = ('b', 'r', 'g', 'm', 'y', 'k', 'c')
    fig, ax = plt.subplots(1, figsize=(10, 10))
    for i, inds in enumerate(fronts_s):
        par = [toolbox.evaluate(ind) for ind in inds]
        df = pd.DataFrame(par)
        df.plot(ax=ax,
                kind='scatter',
                label='Front ' + str(i + 1),
                x=df.columns[0],
                y=df.columns[1],
                color=plot_colors[i % len(plot_colors)])
    plt.xlabel(r'$f_1(\mathbf{x})$')
    plt.ylabel(r'$f_2(\mathbf{x})$')
    plt.savefig("front.png", dpi=200)
Example #17
def train(seed=None, target_program=Config.current_program):
    random.seed(seed)
    global MASK_BITS_BOUNDS_LIST
    MASK_BITS_BOUNDS_LIST = get_mask_bounds(x_train)

    # Cap each entry of MASK_BITS_BOUNDS_LIST at 16
    for i in range(len(MASK_BITS_BOUNDS_LIST)):
        if MASK_BITS_BOUNDS_LIST[i] > 16:
            MASK_BITS_BOUNDS_LIST[i] = 16

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=Config.MU)

    if Config.restore_model:
        try:
            pop = loadobj(Config.model_save_path)
            print("model loaded")
        except FileNotFoundError:
            print("Model file not found; training a new model...")
        except Exception as err:
            print("Model file could not be loaded (%s); training a new model..." % err)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    # fitnesses = toolbox.map(toolbox.evaluate,invalid_ind, len(invalid_ind) * [x_train], len(invalid_ind) * [y_train])

    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, Config.NGEN):

        if Config.selection_algorithm == "SPEA2":
            offspring = toolbox.select(pop, len(pop))
        else:
            # Vary the population
            offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= Config.CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop[:] + offspring, Config.MU)
        # front = numpy.array([ind.fitness.values for ind in pop])
        # print(front)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

        # Save the model and plot the Pareto front every Config.epoch_size generations
        if gen % Config.epoch_size == 0:
            epoch = gen / Config.epoch_size
            saveobj(pop, "save/model/model_" + str(epoch + 1) + ".ckpt")
            saveobj(pop, "save/model/model.ckpt")
            plot_front(pop,
                       epoch,
                       do_plot_train_parato_front=True,
                       do_plot_test_parato_front=False,
                       target_program=target_program)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example #18
def main(cfg):
    """Main workflow of NSGA-II based Scenario analysis."""
    random.seed()
    pop_size = cfg.nsga2_npop
    gen_num = cfg.nsga2_ngens
    rule_cfg = cfg.bmps_rule
    rule_mth = cfg.rule_method
    cx_rate = cfg.nsga2_rcross
    mut_perc = cfg.nsga2_pmut
    mut_rate = cfg.nsga2_rmut
    sel_rate = cfg.nsga2_rsel
    ws = cfg.nsga2_dir
    worst_econ = cfg.worst_econ
    worst_env = cfg.worst_env
    # available gene value list
    possible_gene_values = list(cfg.bmps_params.keys())
    possible_gene_values.append(0)
    units_info = cfg.units_infos
    slppos_tagnames = cfg.slppos_tagnames
    suit_bmps = cfg.slppos_suit_bmps
    gene_to_unit = cfg.gene_to_slppos
    unit_to_gene = cfg.slppos_to_gene

    print_message('Population: %d, Generation: %d' % (pop_size, gen_num))
    print_message('BMPs configure method: %s' %
                  ('rule-based' if rule_cfg else 'random-based'))

    # create reference point for hypervolume
    ref_pt = numpy.array([worst_econ, worst_env]) * multi_weight * -1

    stats = tools.Statistics(lambda sind: sind.fitness.values)
    stats.register('min', numpy.min, axis=0)
    stats.register('max', numpy.max, axis=0)
    stats.register('avg', numpy.mean, axis=0)
    stats.register('std', numpy.std, axis=0)

    logbook = tools.Logbook()
    logbook.header = 'gen', 'evals', 'min', 'max', 'avg', 'std'

    pop = toolbox.population(cfg, n=pop_size)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]

    try:
        # Parallel evaluation on multiprocessors or clusters using SCOOP
        from scoop import futures
        fitnesses = futures.map(toolbox.evaluate, [cfg] * len(invalid_ind),
                                invalid_ind)
        # print('parallel-fitnesses: ', fitnesses)
    except (ImportError, ImportWarning):
        # serial
        fitnesses = toolbox.map(toolbox.evaluate, [cfg] * len(invalid_ind),
                                invalid_ind)
        # print('serial-fitnesses: ', fitnesses)

    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit[:2]
        ind.id = fit[2]

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, pop_size)
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print_message(logbook.stream)

    # Begin the generational process
    output_str = '### Generation number: %d, Population size: %d ###\n' % (
        gen_num, pop_size)
    print_message(output_str)
    UtilClass.writelog(cfg.logfile, output_str, mode='replace')

    for gen in range(1, gen_num + 1):
        output_str = '###### Generation: %d ######\n' % gen
        print_message(output_str)
        # Vary the population
        offspring = tools.selTournamentDCD(pop, int(pop_size * sel_rate))
        offspring = [toolbox.clone(ind) for ind in offspring]
        # print_message('Offspring size: %d' % len(offspring))
        if len(offspring) >= 2:  # mating requires at least two offspring
            for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
                if random.random() <= cx_rate:
                    if rule_cfg:
                        toolbox.mate_rule(slppos_tagnames, ind1, ind2)
                    else:
                        toolbox.mate_rdn(ind1, ind2)
                if rule_cfg:
                    toolbox.mutate_rule(units_info,
                                        gene_to_unit,
                                        unit_to_gene,
                                        slppos_tagnames,
                                        suit_bmps,
                                        ind1,
                                        perc=mut_perc,
                                        indpb=mut_rate,
                                        method=rule_mth)
                    toolbox.mutate_rule(units_info,
                                        gene_to_unit,
                                        unit_to_gene,
                                        slppos_tagnames,
                                        suit_bmps,
                                        ind2,
                                        perc=mut_perc,
                                        indpb=mut_rate,
                                        method=rule_mth)
                else:
                    toolbox.mutate_rdm(possible_gene_values,
                                       ind1,
                                       perc=mut_perc,
                                       indpb=mut_rate)
                    toolbox.mutate_rdm(possible_gene_values,
                                       ind2,
                                       perc=mut_perc,
                                       indpb=mut_rate)
                del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        invalid_ind_size = len(invalid_ind)
        # print_message('Evaluate pop size: %d' % invalid_ind_size)
        try:
            from scoop import futures
            fitnesses = futures.map(toolbox.evaluate, [cfg] * invalid_ind_size,
                                    invalid_ind)
        except (ImportError, ImportWarning):
            fitnesses = toolbox.map(toolbox.evaluate, [cfg] * invalid_ind_size,
                                    invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit[:2]
            ind.id = fit[2]

        # Select the next generation population
        pop = toolbox.select(pop + offspring, pop_size)

        hyper_str = 'Gen: %d, hypervolume: %f\n' % (gen,
                                                    hypervolume(pop, ref_pt))
        print_message(hyper_str)
        UtilClass.writelog(cfg.hypervlog, hyper_str, mode='append')

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print_message(logbook.stream)

        # Create plot
        front = numpy.array([ind.fitness.values for ind in pop])
        plot_pareto_front(
            front, ['Economic effectiveness', 'Environmental effectiveness'],
            ws, gen, 'Pareto frontier of Scenarios Optimization')
        # save in file
        output_str += 'scenario\teconomy\tenvironment\tgene_values\n'
        for indi in pop:
            output_str += '%d\t%f\t%f\t%s\n' % (
                indi.id, indi.fitness.values[0], indi.fitness.values[1],
                str(indi))
        UtilClass.writelog(cfg.logfile, output_str, mode='append')

        # Delete SEIMS output files, and BMP Scenario database of current generation
        delete_model_outputs(cfg.model_dir, cfg.hostname, cfg.port,
                             cfg.bmp_scenario_db)

    return pop, logbook
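A note on the SCOOP branch above: SCOOP-based parallel evaluation is normally launched with python -m scoop, and the serial fallback only needs to catch ImportError. A minimal hedged sketch of that fallback, wrapped in a hypothetical helper; cfg, toolbox, and invalid_ind stand for the same objects as in the example.

def evaluate_invalid(cfg, toolbox, invalid_ind):
    """Hedged sketch: SCOOP-parallel evaluation with a serial fallback."""
    try:
        from scoop import futures  # run the script with: python -m scoop main.py
        map_fn = futures.map
    except ImportError:
        map_fn = toolbox.map
    return list(map_fn(toolbox.evaluate, [cfg] * len(invalid_ind), invalid_ind))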
Example #19
File: main.py  Project: crazyzlj/SEIMS
def main(cfg):
    """Main workflow of NSGA-II based Scenario analysis."""
    random.seed()
    pop_size = cfg.nsga2_npop
    gen_num = cfg.nsga2_ngens
    rule_cfg = cfg.bmps_rule
    rule_mth = cfg.rule_method
    cx_rate = cfg.nsga2_rcross
    mut_perc = cfg.nsga2_pmut
    mut_rate = cfg.nsga2_rmut
    sel_rate = cfg.nsga2_rsel
    ws = cfg.nsga2_dir
    worst_econ = cfg.worst_econ
    worst_env = cfg.worst_env
    # available gene value list
    possible_gene_values = list(cfg.bmps_params.keys())
    possible_gene_values.append(0)
    units_info = cfg.units_infos
    slppos_tagnames = cfg.slppos_tagnames
    suit_bmps = cfg.slppos_suit_bmps
    gene_to_unit = cfg.gene_to_slppos
    unit_to_gene = cfg.slppos_to_gene

    print_message('Population: %d, Generation: %d' % (pop_size, gen_num))
    print_message('BMPs configure method: %s' % ('rule-based' if rule_cfg else 'random-based'))

    # create reference point for hypervolume
    ref_pt = numpy.array([worst_econ, worst_env]) * multi_weight * -1

    stats = tools.Statistics(lambda sind: sind.fitness.values)
    stats.register('min', numpy.min, axis=0)
    stats.register('max', numpy.max, axis=0)
    stats.register('avg', numpy.mean, axis=0)
    stats.register('std', numpy.std, axis=0)

    logbook = tools.Logbook()
    logbook.header = 'gen', 'evals', 'min', 'max', 'avg', 'std'

    pop = toolbox.population(cfg, n=pop_size)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]

    try:
        # Parallel evaluation on multiprocessors or clusters using SCOOP
        from scoop import futures
        fitnesses = futures.map(toolbox.evaluate, [cfg] * len(invalid_ind), invalid_ind)
        # print('parallel-fitnesses: ', fitnesses)
    except (ImportError, ImportWarning):
        # serial
        fitnesses = toolbox.map(toolbox.evaluate, [cfg] * len(invalid_ind), invalid_ind)
        # print('serial-fitnesses: ', fitnesses)

    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit[:2]
        ind.id = fit[2]

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, pop_size)
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print_message(logbook.stream)

    # Begin the generational process
    output_str = '### Generation number: %d, Population size: %d ###\n' % (gen_num, pop_size)
    print_message(output_str)
    UtilClass.writelog(cfg.logfile, output_str, mode='replace')

    for gen in range(1, gen_num + 1):
        output_str = '###### Generation: %d ######\n' % gen
        print_message(output_str)
        # Vary the population
        offspring = tools.selTournamentDCD(pop, int(pop_size * sel_rate))
        offspring = [toolbox.clone(ind) for ind in offspring]
        # print_message('Offspring size: %d' % len(offspring))
        if len(offspring) >= 2:  # mating requires at least two offspring
            for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
                if random.random() <= cx_rate:
                    if rule_cfg:
                        toolbox.mate_rule(slppos_tagnames, ind1, ind2)
                    else:
                        toolbox.mate_rdn(ind1, ind2)
                if rule_cfg:
                    toolbox.mutate_rule(units_info, gene_to_unit, unit_to_gene, slppos_tagnames,
                                        suit_bmps, ind1,
                                        perc=mut_perc, indpb=mut_rate, method=rule_mth)
                    toolbox.mutate_rule(units_info, gene_to_unit, unit_to_gene, slppos_tagnames,
                                        suit_bmps, ind2,
                                        perc=mut_perc, indpb=mut_rate, method=rule_mth)
                else:
                    toolbox.mutate_rdm(possible_gene_values, ind1, perc=mut_perc, indpb=mut_rate)
                    toolbox.mutate_rdm(possible_gene_values, ind2, perc=mut_perc, indpb=mut_rate)
                del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        invalid_ind_size = len(invalid_ind)
        # print_message('Evaluate pop size: %d' % invalid_ind_size)
        try:
            from scoop import futures
            fitnesses = futures.map(toolbox.evaluate, [cfg] * invalid_ind_size, invalid_ind)
        except (ImportError, ImportWarning):
            fitnesses = toolbox.map(toolbox.evaluate, [cfg] * invalid_ind_size, invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit[:2]
            ind.id = fit[2]

        # Select the next generation population
        pop = toolbox.select(pop + offspring, pop_size)

        hyper_str = 'Gen: %d, hypervolume: %f\n' % (gen, hypervolume(pop, ref_pt))
        print_message(hyper_str)
        UtilClass.writelog(cfg.hypervlog, hyper_str, mode='append')

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print_message(logbook.stream)

        # Create plot
        front = numpy.array([ind.fitness.values for ind in pop])
        plot_pareto_front(front, ['Economic effectiveness', 'Environmental effectiveness'],
                          ws, gen, 'Pareto frontier of Scenarios Optimization')
        # save in file
        output_str += 'scenario\teconomy\tenvironment\tgene_values\n'
        for indi in pop:
            output_str += '%d\t%f\t%f\t%s\n' % (indi.id, indi.fitness.values[0],
                                                indi.fitness.values[1],
                                                str(indi))
        UtilClass.writelog(cfg.logfile, output_str, mode='append')

        # Delete SEIMS output files, and BMP Scenario database of current generation
        delete_model_outputs(cfg.model_dir, cfg.hostname, cfg.port, cfg.bmp_scenario_db)

    return pop, logbook
Example #20
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        # selTournamentDCD means Tournament selection based on dominance (D)
        # followed by crowding distance (CD). This selection requires the
        # individuals to have a crowding_dist attribute
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            # Pair consecutive offspring (even and odd indices)
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example #21
def main():
    # The cma module uses the numpy random number generator
    # numpy.random.seed(128)

    MU, LAMBDA = 10, 10
    NGEN = 500
    verbose = True

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [creator.Individual(x) for x in (
        numpy.random.uniform(0, 1, (MU, N)))]

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(
        population, sigma=1.0, mu=MU, lambda_=LAMBDA)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # Update the strategy with the evaluated individuals
        toolbox.update(population)

        record = stats.compile(population) if stats is not None else {}
        logbook.record(gen=gen, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

    if verbose:
        print("Final population hypervolume is %f" %
              hypervolume(strategy.parents, [11.0, 11.0]))

    # import matplotlib.pyplot as plt

    # valid_front = numpy.array([ind.fitness.values for ind in strategy.parents if valid(ind)])
    # invalid_front = numpy.array([ind.fitness.values for ind in strategy.parents if not valid(ind)])

    # fig = plt.figure()

    # if len(valid_front) > 0:
    #     plt.scatter(valid_front[:,0], valid_front[:,1], c="g")

    # if len(invalid_front) > 0:
    #     plt.scatter(invalid_front[:,0], invalid_front[:,1], c="r")

    # plt.show()

    return strategy.parents
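For reference, a rough sketch of the module-level setup this MO-CMA-ES main() appears to rely on, in the spirit of DEAP's cma_mo example; the problem dimension N and the ZDT1 evaluator are assumptions, and the constraint-handling decorator used in the full example is omitted here.

import numpy
from deap import base, benchmarks, cma, creator, tools
from deap.benchmarks.tools import hypervolume

N = 5  # assumed number of decision variables

creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.zdt1)  # assumed bi-objective benchmark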
예제 #22
0
파일: nsga2.py 프로젝트: AiTeamUSTC/GPE
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    
    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))
    
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]
        
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
            
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values
        
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
예제 #23
0
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100

    log_interval = 25

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    stats.register("median", np.median, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max", "median"

    pop = toolbox.population(n=MU)
    hof = tools.ParetoFront()

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)
    hof.update(pop)

    basepath = os.path.dirname(os.path.abspath(__file__))
    log_dir = '{}/logs/{}/'.format(basepath, time.strftime('%y%m%d-%H%M%S'))
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # log initial population
    os.makedirs('{}0'.format(log_dir))
    for i, agent in enumerate(pop):
        agent[0].save_weights('{}0/{}_weights.csv'.format(log_dir, i), overwrite=True)
    log_population(pop, '{}0'.format(log_dir))

    # Begin the generational process
    for gen in range(1, NGEN+1):
        # Get Offspring
        # first get pareto front
        pareto_fronts = tools.sortNondominated(pop, len(pop))
        selection = pareto_fronts[0]
        len_pareto = len(pareto_fronts[0])

        rest = list(chain(*pareto_fronts[1:]))
        if len(rest) % 4:
            rest.extend(random.sample(selection, 4 - (len(rest) % 4)))

        selection.extend(tools.selTournamentDCD(rest, len(rest)))
        offspring = [toolbox.mutate(toolbox.clone(ind)) for ind in selection[:len(pop)]]

        # Re-evaluate the individuals in the last population
        fitnesses = toolbox.map(toolbox.evaluate, pop)
        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        # Evaluate the new offspring
        fitnesses = toolbox.map(toolbox.evaluate, offspring)
        for ind, fit in zip(offspring, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        hof.update(offspring)

        plot_population(pop, offspring, lim = [[10,120],[0,0],[0,4]])

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        pareto_fronts = tools.sortNondominated(pop, len(pop))
        plot_selection(pop, pareto_front_size=len(pareto_fronts[0]), lim = [[10,120],[0,0],[0,4]])
        
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(offspring)+len(pop), **record)
        print(logbook.stream)

        if gen % log_interval == 0 or gen == NGEN:
            os.makedirs('{}{}'.format(log_dir, gen))
            for i, agent in enumerate(pop):
                agent[0].save_weights('{}{}/{}_weights.csv'.format(log_dir, gen, i), overwrite=True)
            log_population(pop, '{}{}'.format(log_dir, gen))

    with open('{}/gen_stats.txt'.format(log_dir), 'w') as fp:
        np.savetxt(fp, logbook, fmt="%s")

    plot_population(pop)
    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0, 11.0]))

    os.makedirs('{}hof'.format(log_dir))
    for i, agent in enumerate(hof):
        agent[0].save_weights('{}hof/{}_weights.csv'.format(log_dir, i), overwrite=True)
    log_population(hof, '{}hof'.format(log_dir))

    return pop, logbook
예제 #24
0
def SPEA2_Discrete_Case(envrironment,
                        NGEN=100,
                        MU=200,
                        CXPB=0.93,
                        seed=None,
                        showprogress=True):
    '''
    Intro:
        This function returns the best population and some
        statistics of the Pareto front for a discrete case.

    Attention:
        The toolbox in `envrironment` must provide a `ceiling` function
        that maps values falling outside the limits of the dataset back
        within those limits. (A hypothetical usage sketch is given after
        this function.)
    ---
    Input:
        envrironment: Tuple
            A tuple with (toolbox, stats, logbook).
        NGEN: Integer
            Number of generations.
        MU: Integer
            Number of individuals (population size).
        CXPB: Float
            Crossover probability.
        seed: Integer
            Seed for the random number generator (for reproducible results).
        showprogress: Boolean
            Whether to print the progress of each generation.
    ---
    Output:
        A tuple (archive, logbook): the final archive of individuals and the
        deap.tools.Logbook with the recorded statistics.

    '''

    Nbar = 40

    archive = []

    random.seed(seed)

    (toolbox, stats, logbook) = envrironment

    pop = toolbox.population(MU)

    for gen in range(0, NGEN):

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in pop if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        for ind in archive:
            ind.fitness.values = toolbox.evaluate(ind)

        # Environmental selection: build the new archive from the current
        # population and the previous archive
        archive = toolbox.select(pop + archive, k=Nbar)

        # Begin the generational process

        # Vary the population
        mating_pool = toolbox.selectTournament(archive, k=MU)
        offspring = [toolbox.clone(ind) for ind in mating_pool]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
            del ind1.fitness.values, ind2.fitness.values

        for mutant in offspring:
            if random.random() < 0.06:
                toolbox.mutate(mutant)
            del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [
            toolbox.ceiling(ind) for ind in offspring if not ind.fitness.valid
        ]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        allpop = offspring
        allpop = [toolbox.ceiling(p) for p in allpop]

        for p in allpop:
            p.fitness.values = toolbox.evaluate(p)

        pop = allpop
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        if showprogress:
            print(logbook.stream)

    if showprogress:
        print("Final population hypervolume is %f" %
              hypervolume(pop, [11.0, 11.0]))

    return archive, logbook
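A hypothetical usage sketch for SPEA2_Discrete_Case(). The encoding, the toy objectives and the ceiling helper below are illustrative assumptions only; they are not taken from the original project and merely show the expected shape of the envrironment tuple.

import random

import numpy
from deap import base, creator, tools

creator.create("FitnessDiscrete", base.Fitness, weights=(-1.0, -1.0))
creator.create("IndividualDiscrete", list, fitness=creator.FitnessDiscrete)

LOW, UP = 0, 9  # assumed limits of the discrete dataset


def ceiling(ind):
    # Clip gene values that fall outside [LOW, UP] back into the limits.
    for i, v in enumerate(ind):
        ind[i] = min(max(int(round(v)), LOW), UP)
    return ind


def evaluate(ind):
    # Two toy objectives, kept below the (11, 11) hypervolume reference point:
    # the mean gene value and the spread of the genes.
    return float(sum(ind)) / len(ind), float(max(ind) - min(ind))


toolbox = base.Toolbox()
toolbox.register("attr_int", random.randint, LOW, UP)
toolbox.register("individual", tools.initRepeat, creator.IndividualDiscrete,
                 toolbox.attr_int, 10)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=LOW, up=UP, indpb=0.1)
toolbox.register("select", tools.selSPEA2)
toolbox.register("selectTournament", tools.selTournament, tournsize=2)
toolbox.register("ceiling", ceiling)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()

archive, logbook = SPEA2_Discrete_Case((toolbox, stats, logbook), NGEN=20, MU=40)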
예제 #25
0
파일: mo_rhv.py 프로젝트: DEAP/deap
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selRandom(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = pop + offspring
        fronts = toolbox.sort(pop, len(pop))
        chosen = []
        for i, front in enumerate(fronts):
            # Move this front into the chosen population until it is almost full
            if len(chosen) + len(front) <= MU:
                chosen.extend(front)
            else:
                # Assign the hypervolume contribution to individuals of a front that
                # cannot be completely moved over to the chosen individuals
                fitness_hv = hypervolume_contrib(front)
                for ind, fit_hv in zip(front, fitness_hv):
                    ind.fitness_hv.values = (fit_hv,)
                # Fill chosen with the best individuals of the current front
                # (based on hypervolume contribution)
                chosen.extend(toolbox.select(front, MU - len(chosen)))
                break

        pop = chosen

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
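This variant assumes individuals that carry a secondary fitness_hv attribute plus toolbox entries named sort and select. A rough sketch of registrations that would satisfy those expectations follows; every name here is an assumption (the original file may differ), the usual population/evaluate/mate/mutate registrations from the NSGA-II sketch above are still needed, and hypervolume_contrib is assumed to be a helper returning the exclusive hypervolume of each point in a front.

from deap import base, creator, tools

creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("FitnessHV", base.Fitness, weights=(1.0,))  # larger contribution is better
creator.create("Individual", list,
               fitness=creator.FitnessMin,     # objective values
               fitness_hv=creator.FitnessHV)   # per-individual hypervolume contribution

toolbox = base.Toolbox()
toolbox.register("sort", tools.sortNondominated)
# Keep the individuals with the largest hypervolume contribution within a front.
toolbox.register("select", tools.selBest, fit_attr="fitness_hv")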
예제 #26
0
def main(config, verbose):
    # Don't bother with determinism since tournament is stochastic!

    # Set last time to start time
    last_time = start_time

    # MP
    processes = multiprocessing.cpu_count() // 2
    pool = multiprocessing.Pool(processes=processes)

    # Build network
    network = build_network_partial(config)

    # Build environments and randomize
    envs = [build_environment(config) for _ in config["env"]["h0"]]
    for env in envs:
        randomize_env(env, config)

    # Objectives
    # Time to land, final height, final velocity, spikes per second
    valid_objectives = [
        "time to land",
        "time to land scaled",
        "final height",
        "final velocity",
        "final velocity squared",
        "spikes",
    ]
    assert (
        len(config["evo"]["objectives"]) >= 3
    ), "At least 3 objectives are required"
    assert len(config["evo"]["objectives"]) == len(
        config["evo"]["obj weights"]
    ), "There should be as many weights as objectives"
    assert all(
        [obj in valid_objectives for obj in config["evo"]["objectives"]]
    ), "Invalid objective"

    # Optimal front and reference point for hypervolume
    optimal_front = config["evo"]["obj optimal"]
    hyperref = config["evo"]["obj worst"]
    optim_performance = []

    # Set up DEAP
    creator.create("Fitness", base.Fitness, weights=config["evo"]["obj weights"])
    creator.create("Individual", list, fitness=creator.Fitness)

    toolbox = base.Toolbox()
    toolbox.register(
        "individual", tools.initRepeat, container=creator.Individual, func=network, n=1
    )
    toolbox.register(
        "population", tools.initRepeat, container=list, func=toolbox.individual
    )
    toolbox.register(
        "evaluate",
        partial(evaluate, valid_objectives, config, envs, config["env"]["h0"]),
    )
    toolbox.register("mate", crossover_none)
    toolbox.register(
        "mutate",
        partial(
            mutate_call_network,
            config["evo"]["genes"],
            config["evo"]["types"],
            mutation_rate=config["evo"]["mutation rate"],
        ),
    )
    toolbox.register("select", tools.selNSGA2)
    toolbox.register("map", pool.map)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("median", np.median, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = ("gen", "evals", "avg", "median", "std", "min", "max")

    # Initialize population
    # Pareto front: set of individuals that are not strictly dominated
    # (i.e., better scores for all objectives) by others
    population = toolbox.population(n=config["evo"]["pop size"])
    hof = tools.ParetoFront()  # hall of fame!

    # Evaluate initial population
    fitnesses = toolbox.map(toolbox.evaluate, population)
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance (needed for selTournamentDCD())
    # to the individuals, no actual selection is done
    population = toolbox.select(population, len(population))

    # Update hall of fame
    hof.update(population)

    # Log first record
    record = stats.compile(population)
    logbook.record(
        gen=0, evals=len(population), **{k: v.round(2) for k, v in record.items()}
    )

    # Log convergence (of first front) and hypervolume
    pareto_fronts = tools.sortNondominated(population, len(population))
    current_time = time.time()
    minutes = (current_time - last_time) / 60
    last_time = time.time()
    time_past = (current_time - start_time) / 60
    conv = convergence(pareto_fronts[0], optimal_front)
    hyper = hypervolume(pareto_fronts[0], hyperref)
    optim_performance.append([0, time_past, minutes, conv, hyper])
    print(
        f"gen: 0, time past: {time_past:.2f} min, minutes: {minutes:.2f} min, convergence: {conv:.3f}, hypervolume: {hyper:.3f}"
    )

    if verbose:
        # Plot relevant part of population fitness
        last_fig = []
        for dims in config["evo"]["plot"]:
            last_fig.append(
                vis_relevant(
                    population, hof, config["evo"]["objectives"], dims, verbose=verbose
                )
            )

        # Create folders for parameters of individuals
        # Only save hall of fame
        os.makedirs(f"{config['log location']}hof_000/")

        # And log the initial performance
        # Figures
        for i, last in enumerate(last_fig):
            if last[2]:
                last[0].savefig(f"{config['fig location']}relevant{i}_000.png")
        # Parameters
        for i, ind in enumerate(hof):
            torch.save(
                ind[0].state_dict(),
                f"{config['log location']}hof_000/individual_{i:03}.net",
            )
        # Fitnesses
        pd.DataFrame(
            [ind.fitness.values for ind in hof], columns=config["evo"]["objectives"]
        ).to_csv(f"{config['log location']}hof_000/fitnesses.csv", index=False, sep=",")

    # Begin the evolution!
    for gen in range(1, config["evo"]["gens"]):
        # Randomize environments (in-place) for this generation
        # Each individual in a generation experiences the same environments,
        # but re-seeding per individual is not done to prevent identically-performing
        # agents (and thus thousands of HOFs, due to stepping nature of SNNs)
        for env in envs:
            randomize_env(env, config)

        # Selection: Pareto front + best of the rest
        pareto_fronts = tools.sortNondominated(population, len(population))
        selection = pareto_fronts[0]
        others = list(chain(*pareto_fronts[1:]))
        # We need a multiple of 4 for selTournamentDCD()
        if len(others) % 4:
            others.extend(random.sample(selection, 4 - (len(others) % 4)))
        selection.extend(tools.selTournamentDCD(others, len(others)))

        # Get offspring: mutate selection
        # TODO: maybe add crossover? It is usually applied to binary
        #  representations, so it may not be that useful here.
        offspring = [
            toolbox.mutate(toolbox.clone(ind)) for ind in selection[: len(population)]
        ]

        # Re-evaluate last generation/population, because their conditions are random
        # and we want to test each individual against as many as possible
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # And evaluate the entire new offspring, for the same reason
        fitnesses = toolbox.map(toolbox.evaluate, offspring)
        for ind, fit in zip(offspring, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the offspring,
        # so we get the best of population + offspring in there
        # Also include population, because we re-evaluated it
        hof.update(population + offspring)

        # Select the population for the next generation
        # from the last generation and its offspring
        population = toolbox.select(population + offspring, config["evo"]["pop size"])

        # Log stuff, but don't print!
        record = stats.compile(population)
        logbook.record(
            gen=gen,
            evals=len(offspring) + len(population),
            **{k: v.round(2) for k, v in record.items()},
        )

        # Log convergence (of first front) and hypervolume
        pareto_fronts = tools.sortNondominated(population, len(population))
        current_time = time.time()
        minutes = (current_time - last_time) / 60
        last_time = time.time()
        time_past = (current_time - start_time) / 60
        conv = convergence(pareto_fronts[0], optimal_front)
        hyper = hypervolume(pareto_fronts[0], hyperref)
        optim_performance.append([gen, time_past, minutes, conv, hyper])
        print(
            f"gen: {gen}, time past: {time_past:.2f} min, minutes: {minutes:.2f} min, convergence: {conv:.3f}, hypervolume: {hyper:.3f}"
        )

        if verbose:
            # Plot relevant part of population fitness
            for i, last, dims in zip(
                range(len(last_fig)), last_fig, config["evo"]["plot"]
            ):
                last_fig[i] = vis_relevant(
                    population,
                    hof,
                    config["evo"]["objectives"],
                    dims,
                    last=last,
                    verbose=verbose,
                )

            # Log every so many generations
            if not gen % config["log interval"] or gen == config["evo"]["gens"] - 1:
                # Create directory
                if not os.path.exists(f"{config['log location']}hof_{gen:03}/"):
                    os.makedirs(f"{config['log location']}hof_{gen:03}/")

                # Save population figure
                for i, last in enumerate(last_fig):
                    if last[2]:
                        last[0].savefig(
                            f"{config['fig location']}relevant{i}_{gen:03}.png"
                        )

                # Save parameters of hall of fame individuals
                for i, ind in enumerate(hof):
                    torch.save(
                        ind[0].state_dict(),
                        f"{config['log location']}hof_{gen:03}/individual_{i:03}.net",
                    )

                # Save fitnesses
                pd.DataFrame(
                    [ind.fitness.values for ind in hof],
                    columns=config["evo"]["objectives"],
                ).to_csv(
                    f"{config['log location']}hof_{gen:03}/fitnesses.csv",
                    index=False,
                    sep=",",
                )

                # Save logbook
                pd.DataFrame(logbook).to_csv(
                    f"{config['log location']}logbook.csv", index=False, sep=","
                )

                # Save optimization performance
                pd.DataFrame(
                    optim_performance,
                    columns=[
                        "gen",
                        "time past",
                        "minutes",
                        "convergence",
                        "hypervolume",
                    ],
                ).to_csv(
                    f"{config['log location']}optim_performance.csv",
                    index=False,
                    sep=",",
                )

    # Close multiprocessing pool
    pool.close()
예제 #27
0
def main(cfg):
    """Main workflow of NSGA-II based Scenario analysis."""
    random.seed()
    print_message('Population: %d, Generation: %d' % (cfg.opt.npop, cfg.opt.ngens))

    # Initial timespan variables
    stime = time.time()
    plot_time = 0.
    allmodels_exect = list()  # execute time of all model runs

    # create reference point for hypervolume
    ref_pt = numpy.array(worse_objects) * multi_weight * -1

    stats = tools.Statistics(lambda sind: sind.fitness.values)
    stats.register('min', numpy.min, axis=0)
    stats.register('max', numpy.max, axis=0)
    stats.register('avg', numpy.mean, axis=0)
    stats.register('std', numpy.std, axis=0)
    logbook = tools.Logbook()
    logbook.header = 'gen', 'evals', 'min', 'max', 'avg', 'std'

    # read observation data from MongoDB
    cali_obj = Calibration(cfg)

    # Read observation data just once
    model_cfg_dict = cali_obj.model.ConfigDict
    model_obj = MainSEIMS(args_dict=model_cfg_dict)
    obs_vars, obs_data_dict = model_obj.ReadOutletObservations(object_vars)

    # Initialize population
    param_values = cali_obj.initialize(cfg.opt.npop)
    pop = list()
    for i in range(cfg.opt.npop):
        ind = creator.Individual(param_values[i])
        ind.gen = 0
        ind.id = i
        ind.obs.vars = obs_vars[:]
        ind.obs.data = deepcopy(obs_data_dict)
        pop.append(ind)
    param_values = numpy.array(param_values)

    # Write calibrated values to MongoDB
    # TODO: extract this function, which is the same as `Sensitivity::write_param_values_to_mongodb`.
    write_param_values_to_mongodb(cfg.model.host, cfg.model.port, cfg.model.db_name,
                                  cali_obj.ParamDefs, param_values)
    # get the low and up bound of calibrated parameters
    bounds = numpy.array(cali_obj.ParamDefs['bounds'])
    low = bounds[:, 0]
    up = bounds[:, 1]
    low = low.tolist()
    up = up.tolist()
    pop_select_num = int(cfg.opt.npop * cfg.opt.rsel)
    init_time = time.time() - stime

    def evaluate_parallel(invalid_pops):
        """Evaluate model by SCOOP or map, and set fitness of individuals
         according to calibration step."""
        popnum = len(invalid_pops)
        labels = list()
        try:  # parallel on multi-processors or clusters using SCOOP
            from scoop import futures
            invalid_pops = list(futures.map(toolbox.evaluate, [cali_obj] * popnum, invalid_pops))
        except (ImportError, ImportWarning):  # fall back to the Python built-in map (serial)
            invalid_pops = list(toolbox.map(toolbox.evaluate, [cali_obj] * popnum, invalid_pops))
        for tmpind in invalid_pops:
            if step == 'Q':  # Step 1 Calibrating discharge
                tmpind.fitness.values, labels = tmpind.cali.efficiency_values('Q', object_names)
            elif step == 'SED':  # Step 2 Calibrating sediment
                sedobjvs, labels = tmpind.cali.efficiency_values('SED', object_names)
                qobjvs, qobjlabels = tmpind.cali.efficiency_values('Q', object_names)
                labels += [qobjlabels[0]]
                sedobjvs += [qobjvs[0]]
                tmpind.fitness.values = sedobjvs[:]
            elif step == 'NUTRIENT':  # Step 3 Calibrating NUTRIENT,TN,TP
                tnobjvs, tnobjlabels = tmpind.cali.efficiency_values('CH_TN', object_names)
                tpobjvs, tpobjlabels = tmpind.cali.efficiency_values('CH_TP', object_names)
                qobjvs, qobjlabels = tmpind.cali.efficiency_values('Q', object_names)
                sedobjvs, sedobjlabels = tmpind.cali.efficiency_values('SED', object_names)
                objvs = [tnobjvs[0], tpobjvs[0], qobjvs[0], sedobjvs[0]]
                labels = [tnobjlabels[0], tpobjlabels[0], qobjlabels[0], sedobjlabels[0]]
                tmpind.fitness.values = objvs[:]
        # NSE > 0 is the preliminary condition to be a valid solution!
        if filter_NSE:
            invalid_pops = [tmpind for tmpind in invalid_pops if tmpind.fitness.values[0] > 0]
            if len(invalid_pops) < 2:
                print('The initial population should be greater than or equal to 2. '
                      'Please check the parameter ranges or change the sampling strategy!')
                exit(0)
        return invalid_pops, labels  # Currently, `invalid_pops` contains evaluated individuals

    # Record the count and execute timespan of model runs during the optimization
    modelruns_count = {0: len(pop)}
    modelruns_time = {0: 0.}  # Total time counted according to evaluate_parallel()
    modelruns_time_sum = {0: 0.}  # Summarize time of every model runs according to pop

    # Generation 0 before optimization
    stime = time.time()
    pop, plotlables = evaluate_parallel(pop)
    modelruns_time[0] = time.time() - stime
    for ind in pop:
        allmodels_exect.append([ind.io_time, ind.comp_time, ind.simu_time, ind.runtime])
        modelruns_time_sum[0] += ind.runtime

    # Currently, len(pop) may be less than pop_select_num
    pop = toolbox.select(pop, pop_select_num)
    # Output simulated data to json or pickle files for future use.
    output_population_details(pop, cfg.opt.simdata_dir, 0)

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(pop), **record)
    print_message(logbook.stream)

    # Begin the generational process
    output_str = '### Generation number: %d, Population size: %d ###\n' % (cfg.opt.ngens,
                                                                           cfg.opt.npop)
    print_message(output_str)
    UtilClass.writelog(cfg.opt.logfile, output_str, mode='replace')

    for gen in range(1, cfg.opt.ngens + 1):
        output_str = '###### Generation: %d ######\n' % gen
        print_message(output_str)

        offspring = [toolbox.clone(ind) for ind in pop]
        # method1: use crowding distance (normalized as 0~1) as eta
        # tools.emo.assignCrowdingDist(offspring)
        # method2: use the index of individual at the sorted offspring list as eta
        if len(offspring) >= 2:  # mating is possible when the offspring size is at least 2
            for i, ind1, ind2 in zip(range(len(offspring) // 2), offspring[::2], offspring[1::2]):
                if random.random() > cfg.opt.rcross:
                    continue
                eta = i
                toolbox.mate(ind1, ind2, eta, low, up)
                toolbox.mutate(ind1, eta, low, up, cfg.opt.rmut)
                toolbox.mutate(ind2, eta, low, up, cfg.opt.rmut)
                del ind1.fitness.values, ind2.fitness.values
        else:
            toolbox.mutate(offspring[0], 1., low, up, cfg.opt.rmut)
            del offspring[0].fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        valid_ind = [ind for ind in offspring if ind.fitness.valid]
        if len(invalid_ind) == 0:  # No need to continue
            print_message('Note: No invalid individuals available, the NSGA2 will be terminated!')
            break

        # Write new calibrated parameters to MongoDB
        param_values = list()
        for idx, ind in enumerate(invalid_ind):
            ind.gen = gen
            ind.id = idx
            param_values.append(ind[:])
        param_values = numpy.array(param_values)
        write_param_values_to_mongodb(cfg.model.host, cfg.model.port, cfg.model.db_name,
                                      cali_obj.ParamDefs, param_values)
        # Count the model runs, and execute models
        invalid_ind_size = len(invalid_ind)
        modelruns_count.setdefault(gen, invalid_ind_size)
        stime = time.time()
        invalid_ind, plotlables = evaluate_parallel(invalid_ind)
        curtimespan = time.time() - stime
        modelruns_time.setdefault(gen, curtimespan)
        modelruns_time_sum.setdefault(gen, 0.)
        for ind in invalid_ind:
            allmodels_exect.append([ind.io_time, ind.comp_time, ind.simu_time, ind.runtime])
            modelruns_time_sum[gen] += ind.runtime

        # Select the next generation population
        tmp_pop = list()
        gen_idx = list()
        for ind in pop + valid_ind + invalid_ind:  # these individuals are all evaluated!
            # Remove individuals that have an NSE < 0
            if [ind.gen, ind.id] not in gen_idx:
                if filter_NSE and ind.fitness.values[0] < 0:
                    continue
                tmp_pop.append(ind)
                gen_idx.append([ind.gen, ind.id])
        pop = toolbox.select(tmp_pop, pop_select_num)
        output_population_details(pop, cfg.opt.simdata_dir, gen)
        hyper_str = 'Gen: %d, New model runs: %d, ' \
                    'Execute timespan: %.4f, Sum of model run timespan: %.4f, ' \
                    'Hypervolume: %.4f\n' % (gen, invalid_ind_size,
                                             curtimespan, modelruns_time_sum[gen],
                                             hypervolume(pop, ref_pt))
        print_message(hyper_str)
        UtilClass.writelog(cfg.opt.hypervlog, hyper_str, mode='append')

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print_message(logbook.stream)

        # Plot 2D near optimal pareto front graphs,
        #   i.e., (NSE, RSR), (NSE, PBIAS), and (RSR,PBIAS)
        # And 3D near optimal pareto front graphs, i.e., (NSE, RSR, PBIAS)
        stime = time.time()
        front = numpy.array([ind.fitness.values for ind in pop])
        plot_pareto_front(front, plotlables, cfg.opt.out_dir,
                          gen, 'Near Pareto optimal solutions')
        plot_time += time.time() - stime

        # save in file
        if step == 'Q':  # Step 1 Calibrate discharge
            output_str += 'generation-calibrationID\t%s' % pop[0].cali.output_header('Q',
                                                                                     object_names,
                                                                                     'Cali')
            if cali_obj.cfg.calc_validation:
                output_str += pop[0].vali.output_header('Q', object_names, 'Vali')
        elif step == 'SED':  # Step 2 Calibrate sediment
            output_str += 'generation-calibrationID\t%s%s' % \
                          (pop[0].cali.output_header('SED', object_names, 'Cali'),
                           pop[0].cali.output_header('Q', object_names, 'Cali'))
            if cali_obj.cfg.calc_validation:
                output_str += '%s%s' % (pop[0].vali.output_header('SED', object_names, 'Vali'),
                                        pop[0].vali.output_header('Q', object_names, 'Vali'))
        elif step == 'NUTRIENT':  # Step 3 Calibrate NUTRIENT,TN,TP
            output_str += 'generation-calibrationID\t%s%s%s%s' % \
                          (pop[0].cali.output_header('CH_TN', object_names, 'Cali'),
                           pop[0].cali.output_header('CH_TP', object_names, 'Cali'),
                           pop[0].cali.output_header('Q', object_names, 'Cali'),
                           pop[0].cali.output_header('SED', object_names, 'Cali'))
            if cali_obj.cfg.calc_validation:
                output_str += '%s%s%s%s' % (
                    pop[0].vali.output_header('CH_TN', object_names, 'Vali'),
                    pop[0].vali.output_header('CH_TP', object_names, 'Vali'),
                    pop[0].vali.output_header('Q', object_names, 'Vali'),
                    pop[0].vali.output_header('SED', object_names, 'Vali'))
        output_str += 'gene_values\n'
        for ind in pop:
            if step == 'Q':  # Step 1 Calibrate discharge
                output_str += '%d-%d\t%s' % (ind.gen, ind.id,
                                             ind.cali.output_efficiency('Q', object_names))
                if cali_obj.cfg.calc_validation:
                    output_str += ind.vali.output_efficiency('Q', object_names)
            elif step == 'SED':  # Step 2 Calibrate sediment
                output_str += '%d-%d\t%s%s' % (ind.gen, ind.id,
                                               ind.cali.output_efficiency('SED', object_names),
                                               ind.cali.output_efficiency('Q', object_names))
                if cali_obj.cfg.calc_validation:
                    output_str += '%s%s' % (ind.vali.output_efficiency('SED', object_names),
                                            ind.vali.output_efficiency('Q', object_names))
            elif step == 'NUTRIENT':  # Step 3 Calibrate NUTRIENT, i.e., TN and TP
                output_str += '%d-%d\t%s%s%s%s' % (ind.gen, ind.id,
                                                   ind.cali.output_efficiency('CH_TN',
                                                                              object_names),
                                                   ind.cali.output_efficiency('CH_TP',
                                                                              object_names),
                                                   ind.cali.output_efficiency('Q', object_names),
                                                   ind.cali.output_efficiency('SED', object_names))
                if cali_obj.cfg.calc_validation:
                    output_str += '%s%s%s%s' % (ind.vali.output_efficiency('CH_TN', object_names),
                                                ind.vali.output_efficiency('CH_TP', object_names),
                                                ind.vali.output_efficiency('Q', object_names),
                                                ind.vali.output_efficiency('SED', object_names))
            output_str += str(ind)
            output_str += '\n'
        UtilClass.writelog(cfg.opt.logfile, output_str, mode='append')

        # TODO: Figure out if we should terminate the evolution

    # Plot hypervolume and newly executed model count
    plot_hypervolume_single(cfg.opt.hypervlog, cfg.opt.out_dir)

    # Save and print timespan information
    allmodels_exect = numpy.array(allmodels_exect)
    numpy.savetxt('%s/exec_time_allmodelruns.txt' % cfg.opt.out_dir,
                  allmodels_exect, delimiter=' ', fmt='%.4f')
    print_message('Running time of all SEIMS models:\n'
                  '\tIO\tCOMP\tSIMU\tRUNTIME\n'
                  'MAX\t%s\n'
                  'MIN\t%s\n'
                  'AVG\t%s\n'
                  'SUM\t%s\n' % ('\t'.join('%.3f' % v for v in allmodels_exect.max(0)),
                                 '\t'.join('%.3f' % v for v in allmodels_exect.min(0)),
                                 '\t'.join('%.3f' % v for v in allmodels_exect.mean(0)),
                                 '\t'.join('%.3f' % v for v in allmodels_exect.sum(0))))

    exec_time = 0.
    for genid, tmptime in list(modelruns_time.items()):
        exec_time += tmptime
    exec_time_sum = 0.
    for genid, tmptime in list(modelruns_time_sum.items()):
        exec_time_sum += tmptime
    allcount = 0
    for genid, tmpcount in list(modelruns_count.items()):
        allcount += tmpcount

    print_message('Initialization timespan: %.4f\n'
                  'Model execution timespan: %.4f\n'
                  'Sum of model runs timespan: %.4f\n'
                  'Plot Pareto graphs timespan: %.4f' % (init_time, exec_time,
                                                         exec_time_sum, plot_time))

    return pop, logbook
예제 #28
0
    # Evolutionary algorithm using DEAP: takes in the parameters and outputs
    # the final population and logbook (verbose set to False)
    algorithms.eaMuPlusLambda(pop,
                              toolbox,
                              MU,
                              LAMBDA,
                              CXPB,
                              MUTPB,
                              NGEN,
                              stats,
                              halloffame=hof,
                              verbose=False)

    # Calculate the hypervolume of the Pareto front against the arbitrary reference point (3000, 500), which is far worse than any attainable objective value
    print("Hypervolume at iteration {}, {}".format(
        i, hypervolume(hof, [3000, 500])))

    # Accumulate the hypervolume of each run, used to compute the average over the 10 runs
    hyp += hypervolume(hof, [3000, 500])

    # Plot the Pareto front for each run and save it to a PNG file
    optimal_front = numpy.array([ind.fitness.values for ind in hof])
    plt.scatter(optimal_front[:, 0],
                numpy.negative(optimal_front[:, 1]),
                c="r")
    filename = str(input_file) + "_" + str(population_percentage)
    plt.savefig("pareto_graph_%s_%s.png" % (str(i), filename))
    plt.close()

# Get the average hypervolume over the 10 runs
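The averaging step itself is cut off in this fragment; under the assumption of 10 independent runs accumulating into hyp (as the comments above state), it would presumably end with something like the following sketch.

average_hypervolume = hyp / 10  # assumption: hyp summed hypervolume(hof, [3000, 500]) over 10 runs
print("Average hypervolume over 10 runs: {}".format(average_hypervolume))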
예제 #29
0
    offspring = toolbox.select(pop, N_POP)  # selection
    offspring = toolbox.clone(offspring)
    offspring = algorithms.varAnd(offspring, toolbox, CXPB, MUTPB)  # crossover and mutation

bestInd = tools.selBest(pop, 1)[0]  # select the best individual in the population
bestFit = bestInd.fitness.values
print('best solution:', bestInd)
print('best fitness:', bestFit)

front = tools.emo.sortNondominated(
    pop, len(pop))[0]  # the first front of the returned Pareto fronts is the current set of non-dominated (best) solutions
print(f"len of front:{len(front)}")

# Reference point for the 2-objective hypervolume
ref = [5, 5]
hypervolume_v = hypervolume(pop, ref=ref)
print(f"{name} : {hypervolume_v}")

write_front(front, hypervolume_v,
            f'{os.path.dirname(__file__)}/output/{test_func_name}/',
            f"{name}.txt")
# Plot the front
for ind in front:
    plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'r.', ms=2)
plt.xlabel('f1')
plt.ylabel('f2')
plt.title(f"{test_func_name}: {name}")
plt.tight_layout()
plt.show()
예제 #30
0
def main(cfg):
    """Main workflow of NSGA-II based Scenario analysis."""
    random.seed()
    scoop_log('Population: %d, Generation: %d' % (cfg.opt.npop, cfg.opt.ngens))

    # Initial timespan variables
    stime = time.time()
    plot_time = 0.
    allmodels_exect = list()  # execute time of all model runs

    # create reference point for hypervolume
    ref_pt = numpy.array(worse_objects) * multi_weight * -1

    stats = tools.Statistics(lambda sind: sind.fitness.values)
    stats.register('min', numpy.min, axis=0)
    stats.register('max', numpy.max, axis=0)
    stats.register('avg', numpy.mean, axis=0)
    stats.register('std', numpy.std, axis=0)
    logbook = tools.Logbook()
    logbook.header = 'gen', 'evals', 'min', 'max', 'avg', 'std'

    # read observation data from MongoDB
    cali_obj = Calibration(cfg)

    # Read observation data just once
    model_cfg_dict = cali_obj.model.ConfigDict
    model_obj = MainSEIMS(args_dict=model_cfg_dict)
    obs_vars, obs_data_dict = model_obj.ReadOutletObservations(object_vars)

    # Initialize population
    param_values = cali_obj.initialize(cfg.opt.npop)
    pop = list()
    for i in range(cfg.opt.npop):
        ind = creator.Individual(param_values[i])
        ind.gen = 0
        ind.id = i
        ind.obs.vars = obs_vars[:]
        ind.obs.data = deepcopy(obs_data_dict)
        pop.append(ind)
    param_values = numpy.array(param_values)

    # Write calibrated values to MongoDB
    # TODO: extract this function, which is the same as `Sensitivity::write_param_values_to_mongodb`.
    write_param_values_to_mongodb(cfg.model.host, cfg.model.port,
                                  cfg.model.db_name, cali_obj.ParamDefs,
                                  param_values)
    # get the low and up bound of calibrated parameters
    bounds = numpy.array(cali_obj.ParamDefs['bounds'])
    low = bounds[:, 0]
    up = bounds[:, 1]
    low = low.tolist()
    up = up.tolist()
    pop_select_num = int(cfg.opt.npop * cfg.opt.rsel)
    init_time = time.time() - stime

    def check_validation(fitvalues):
        """Check the validation of the fitness values of an individual."""
        flag = True
        for condidx, condstr in enumerate(conditions):
            if condstr is None:
                continue
            if not eval('%f%s' % (fitvalues[condidx], condstr)):
                flag = False
        return flag

    def evaluate_parallel(invalid_pops):
        """Evaluate model by SCOOP or map, and set fitness of individuals
         according to calibration step."""
        popnum = len(invalid_pops)
        labels = list()
        try:  # parallel on multi-processors or clusters using SCOOP
            from scoop import futures
            invalid_pops = list(
                futures.map(toolbox.evaluate, [cali_obj] * popnum,
                            invalid_pops))
        except (ImportError, ImportWarning):  # fall back to the Python built-in map (serial)
            invalid_pops = list(
                toolbox.map(toolbox.evaluate, [cali_obj] * popnum,
                            invalid_pops))
        for tmpind in invalid_pops:
            tmpfitnessv = list()
            for k, v in list(multiobj.items()):
                tmpvalues, tmplabel = tmpind.cali.efficiency_values(
                    k, object_names[k])
                tmpfitnessv += tmpvalues[:]
                labels += tmplabel[:]
            tmpind.fitness.values = tuple(tmpfitnessv)

        # Filter for a valid solution
        if filter_ind:
            invalid_pops = [
                tmpind for tmpind in invalid_pops
                if check_validation(tmpind.fitness.values)
            ]
            if len(invalid_pops) < 2:
                print(
                    'The initial population should be greater than or equal to 2. '
                    'Please check the parameter ranges or change the sampling strategy!'
                )
                exit(2)
        return invalid_pops, labels  # Currently, `invalid_pops` contains evaluated individuals

    # Record the count and execute timespan of model runs during the optimization
    modelruns_count = {0: len(pop)}
    modelruns_time = {
        0: 0.
    }  # Total time counted according to evaluate_parallel()
    modelruns_time_sum = {
        0: 0.
    }  # Summarize time of every model runs according to pop

    # Generation 0 before optimization
    stime = time.time()
    pop, plotlables = evaluate_parallel(pop)
    modelruns_time[0] = time.time() - stime
    for ind in pop:
        allmodels_exect.append(
            [ind.io_time, ind.comp_time, ind.simu_time, ind.runtime])
        modelruns_time_sum[0] += ind.runtime

    # Currently, len(pop) may be less than pop_select_num
    pop = toolbox.select(pop, pop_select_num)
    # Output simulated data to json or pickle files for future use.
    output_population_details(pop, cfg.opt.simdata_dir, 0)

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(pop), **record)
    scoop_log(logbook.stream)

    # Begin the generational process
    output_str = '### Generation number: %d, Population size: %d ###\n' % (
        cfg.opt.ngens, cfg.opt.npop)
    scoop_log(output_str)
    UtilClass.writelog(cfg.opt.logfile, output_str, mode='replace')

    modelsel_count = {
        0: len(pop)
    }  # type: Dict[int, int] # newly added Pareto fronts

    for gen in range(1, cfg.opt.ngens + 1):
        output_str = '###### Generation: %d ######\n' % gen
        scoop_log(output_str)

        offspring = [toolbox.clone(ind) for ind in pop]
        # method1: use crowding distance (normalized as 0~1) as eta
        # tools.emo.assignCrowdingDist(offspring)
        # method2: use the index of individual at the sorted offspring list as eta
        if len(offspring) >= 2:  # mating is possible when the offspring size is at least 2
            for i, ind1, ind2 in zip(range(len(offspring) // 2),
                                     offspring[::2], offspring[1::2]):
                if random.random() > cfg.opt.rcross:
                    continue
                eta = i
                toolbox.mate(ind1, ind2, eta, low, up)
                toolbox.mutate(ind1, eta, low, up, cfg.opt.rmut)
                toolbox.mutate(ind2, eta, low, up, cfg.opt.rmut)
                del ind1.fitness.values, ind2.fitness.values
        else:
            toolbox.mutate(offspring[0], 1., low, up, cfg.opt.rmut)
            del offspring[0].fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_inds = [ind for ind in offspring if not ind.fitness.valid]
        valid_inds = [ind for ind in offspring if ind.fitness.valid]
        if len(invalid_inds) == 0:  # No need to continue
            scoop_log(
                'Note: No invalid individuals available, the NSGA2 will be terminated!'
            )
            break

        # Write new calibrated parameters to MongoDB
        param_values = list()
        for idx, ind in enumerate(invalid_inds):
            ind.gen = gen
            ind.id = idx
            param_values.append(ind[:])
        param_values = numpy.array(param_values)
        write_param_values_to_mongodb(cfg.model.host, cfg.model.port,
                                      cfg.model.db_name, cali_obj.ParamDefs,
                                      param_values)
        # Count the model runs, and execute models
        invalid_ind_size = len(invalid_inds)
        modelruns_count.setdefault(gen, invalid_ind_size)
        stime = time.time()
        invalid_inds, plotlables = evaluate_parallel(invalid_inds)
        curtimespan = time.time() - stime
        modelruns_time.setdefault(gen, curtimespan)
        modelruns_time_sum.setdefault(gen, 0.)
        for ind in invalid_inds:
            allmodels_exect.append(
                [ind.io_time, ind.comp_time, ind.simu_time, ind.runtime])
            modelruns_time_sum[gen] += ind.runtime

        # Select the next generation population
        # The previous version may result in duplicates of the same scenario in one Pareto front,
        #   thus, check and remove the duplicates first.
        # pop = toolbox.select(pop + valid_inds + invalid_inds, pop_select_num)
        tmppop = pop + valid_inds + invalid_inds
        pop = list()
        unique_sces = dict()
        for tmpind in tmppop:
            if tmpind.gen in unique_sces and tmpind.id in unique_sces[
                    tmpind.gen]:
                continue
            if tmpind.gen not in unique_sces:
                unique_sces.setdefault(tmpind.gen, [tmpind.id])
            elif tmpind.id not in unique_sces[tmpind.gen]:
                unique_sces[tmpind.gen].append(tmpind.id)
            pop.append(tmpind)
        pop = toolbox.select(pop, pop_select_num)

        output_population_details(pop, cfg.opt.simdata_dir, gen)
        hyper_str = 'Gen: %d, New model runs: %d, ' \
                    'Execute timespan: %.4f, Sum of model run timespan: %.4f, ' \
                    'Hypervolume: %.4f\n' % (gen, invalid_ind_size,
                                             curtimespan, modelruns_time_sum[gen],
                                             hypervolume(pop, ref_pt))
        scoop_log(hyper_str)
        UtilClass.writelog(cfg.opt.hypervlog, hyper_str, mode='append')

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_inds), **record)
        scoop_log(logbook.stream)

        # Count the newly generated individuals in the near-optimal Pareto front
        new_count = 0
        for ind in pop:
            if ind.gen == gen:
                new_count += 1
        modelsel_count.setdefault(gen, new_count)

        # Plot 2D near optimal pareto front graphs,
        #   i.e., (NSE, RSR), (NSE, PBIAS), and (RSR,PBIAS)
        # And 3D near optimal pareto front graphs, i.e., (NSE, RSR, PBIAS)
        stime = time.time()
        front = numpy.array([ind.fitness.values for ind in pop])
        plot_pareto_front_single(front, plotlables, cfg.opt.out_dir, gen,
                                 'Near Pareto optimal solutions')
        plot_time += time.time() - stime

        # save in file
        # Header information
        output_str += 'generation\tcalibrationID\t'
        for kk, vv in list(object_names.items()):
            output_str += pop[0].cali.output_header(kk, vv, 'Cali')
        if cali_obj.cfg.calc_validation:
            for kkk, vvv in list(object_names.items()):
                output_str += pop[0].vali.output_header(kkk, vvv, 'Vali')

        output_str += 'gene_values\n'
        for ind in pop:
            output_str += '%d\t%d\t' % (ind.gen, ind.id)
            for kk, vv in list(object_names.items()):
                output_str += ind.cali.output_efficiency(kk, vv)
            if cali_obj.cfg.calc_validation:
                for kkk, vvv in list(object_names.items()):
                    output_str += ind.vali.output_efficiency(kkk, vvv)
            output_str += str(ind)
            output_str += '\n'
        UtilClass.writelog(cfg.opt.logfile, output_str, mode='append')

        # TODO: Figure out if we should terminate the evolution

    # Plot hypervolume and newly executed model count
    plot_hypervolume_single(cfg.opt.hypervlog, cfg.opt.out_dir)

    # Save newly added Pareto fronts of each generations
    new_fronts_count = numpy.array(list(modelsel_count.items()))
    numpy.savetxt('%s/new_pareto_fronts_count.txt' % cfg.opt.out_dir,
                  new_fronts_count,
                  delimiter=str(','),
                  fmt=str('%d'))

    # Save and print timespan information
    allmodels_exect = numpy.array(allmodels_exect)
    numpy.savetxt('%s/exec_time_allmodelruns.txt' % cfg.opt.out_dir,
                  allmodels_exect,
                  delimiter=str(' '),
                  fmt=str('%.4f'))
    scoop_log('Running time of all SEIMS models:\n'
              '\tIO\tCOMP\tSIMU\tRUNTIME\n'
              'MAX\t%s\n'
              'MIN\t%s\n'
              'AVG\t%s\n'
              'SUM\t%s\n' %
              ('\t'.join('%.3f' % t for t in allmodels_exect.max(0)),
               '\t'.join('%.3f' % t
                         for t in allmodels_exect.min(0)), '\t'.join(
                             '%.3f' % t
                             for t in allmodels_exect.mean(0)), '\t'.join(
                                 '%.3f' % t for t in allmodels_exect.sum(0))))

    exec_time = 0.
    for genid, tmptime in list(modelruns_time.items()):
        exec_time += tmptime
    exec_time_sum = 0.
    for genid, tmptime in list(modelruns_time_sum.items()):
        exec_time_sum += tmptime
    allcount = 0
    for genid, tmpcount in list(modelruns_count.items()):
        allcount += tmpcount

    scoop_log('Initialization timespan: %.4f\n'
              'Model execution timespan: %.4f\n'
              'Sum of model runs timespan: %.4f\n'
              'Plot Pareto graphs timespan: %.4f' %
              (init_time, exec_time, exec_time_sum, plot_time))

    return pop, logbook
예제 #31
0
def main():
    # The cma module uses the numpy random number generator
    # numpy.random.seed(128)

    MU, LAMBDA = 10, 10
    NGEN = 500
    verbose = True

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [
        creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))
    ]

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population,
                                          sigma=1.0,
                                          mu=MU,
                                          lambda_=LAMBDA)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # Update the strategy with the evaluated individuals
        toolbox.update(population)

        record = stats.compile(population) if stats is not None else {}
        logbook.record(gen=gen, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

    if verbose:
        print("Final population hypervolume is %f" %
              hypervolume(strategy.parents, [11.0, 11.0]))

    # import matplotlib.pyplot as plt

    # valid_front = numpy.array([ind.fitness.values for ind in strategy.parents if valid(ind)])
    # invalid_front = numpy.array([ind.fitness.values for ind in strategy.parents if not valid(ind)])

    # fig = plt.figure()

    # if len(valid_front) > 0:
    #     plt.scatter(valid_front[:,0], valid_front[:,1], c="g")

    # if len(invalid_front) > 0:
    #     plt.scatter(invalid_front[:,0], invalid_front[:,1], c="r")

    # plt.show()

    return strategy.parents
예제 #32
0
def search(toolbox, seed=None, gens=500, mu=200, verbose=False):
    random.seed(seed)

    CXPB = 0.9  # crossover probability

    stats = tools.Statistics(lambda ind: ind.fitness.values)

    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    # stats.register("min", np.min, axis=0)
    # stats.register("max", np.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "hypervolume"

    # Create the initial population
    pop = toolbox.population(n=mu)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # Choose worst solution as reference point for hypervolume calculation
    ref = np.max([x.fitness.values for x in pop], axis=0) + 1

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))
    record = stats.compile(pop)
    best_hypervolume = hypervolume(pop, ref)
    record['hypervolume'] = best_hypervolume
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    output(logbook.stream, verbose)

    best_hypervolume_population = pop

    # Begin the generational process
    for gen in range(1, gens):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, mu, nd='log')
        record = stats.compile(pop)

        current_hypervolume = hypervolume(pop, ref)

        record['hypervolume'] = current_hypervolume

        if current_hypervolume > best_hypervolume:
            best_hypervolume = current_hypervolume
            best_hypervolume_population = pop

        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        output(logbook.stream, verbose)

    return pop, logbook, best_hypervolume_population, best_hypervolume
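As the comment above notes, the hypervolume reference point is taken from the component-wise worst fitness in the initial population plus one, so every individual dominates it. A minimal standalone sketch of that idea (the helper name and offset parameter are illustrative, not part of the original code):

import numpy as np

def worst_point_reference(population, offset=1.0):
    """Component-wise worst fitness (maximum, for minimization) plus an offset,
    so every individual in the population dominates the reference point."""
    fits = np.array([ind.fitness.values for ind in population])
    return fits.max(axis=0) + offset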
Example #33
0
def main():
    # The cma module uses the numpy random number generator
    # numpy.random.seed(128)

    MU, LAMBDA = 10, 10
    NGEN = 500
    verbose = True
    create_plot = False

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [
        creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))
    ]

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population,
                                          sigma=1.0,
                                          mu=MU,
                                          lambda_=LAMBDA)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])

    fitness_history = []

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
            fitness_history.append(fit)

        # Update the strategy with the evaluated individuals
        toolbox.update(population)

        record = stats.compile(population) if stats is not None else {}
        logbook.record(gen=gen, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

    if verbose:
        print(("Final population hypervolume is %f" %
               hypervolume(strategy.parents, [11.0, 11.0])))

        # Note that we use a penalty to guide the search to feasible solutions,
        # but there is no guarantee that individuals are valid.
        # We expect the best individuals will be within bounds or very close.
        num_valid = 0
        for ind in strategy.parents:
            dist = distance(closest_feasible(ind), ind)
            if numpy.isclose(dist, 0.0, rtol=1.e-5, atol=1.e-5):
                num_valid += 1
        print(("Number of valid individuals is %d/%d" %
               (num_valid, len(strategy.parents))))

        print("Final population:")
        print(numpy.asarray(strategy.parents))

    if create_plot:
        interactive = 0
        if not interactive:
            import matplotlib as mpl_tmp
            mpl_tmp.use('Agg')  # Force matplotlib to not use any Xwindows backend.
        import matplotlib.pyplot as plt

        fig = plt.figure()
        plt.title("Multi-objective minimization via MO-CMA-ES")
        plt.xlabel("First objective (function) to minimize")
        plt.ylabel("Second objective (function) to minimize")

        # Limit the scale because our history values include the penalty.
        plt.xlim((-0.1, 1.20))
        plt.ylim((-0.1, 1.20))

        # Plot all history. Note the values include the penalty.
        fitness_history = numpy.asarray(fitness_history)
        plt.scatter(fitness_history[:, 0],
                    fitness_history[:, 1],
                    facecolors='none',
                    edgecolors="lightblue")

        valid_front = numpy.array([
            ind.fitness.values for ind in strategy.parents if close_valid(ind)
        ])
        invalid_front = numpy.array([
            ind.fitness.values for ind in strategy.parents
            if not close_valid(ind)
        ])

        if len(valid_front) > 0:
            plt.scatter(valid_front[:, 0], valid_front[:, 1], c="g")
        if len(invalid_front) > 0:
            plt.scatter(invalid_front[:, 0], invalid_front[:, 1], c="r")

        if interactive:
            plt.show()
        else:
            print("Writing cma_mo.png")
            plt.savefig("cma_mo.png")

    return strategy.parents
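The plotting branch above calls close_valid, which is not defined in this snippet. A plausible sketch, assuming [0, 1] box constraints like those in the related MO-CMA-ES test (the bounds, tolerance, and function body are assumptions):

import numpy

BOUND_LOW, BOUND_UP = 0.0, 1.0  # assumed box constraints

def close_valid(individual, tol=1.e-5):
    """True if the individual lies within, or numerically very close to, the bounds."""
    x = numpy.asarray(individual)
    clipped = numpy.clip(x, BOUND_LOW, BOUND_UP)
    return bool(numpy.allclose(x, clipped, rtol=tol, atol=tol))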
Example #34
0
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selRandom(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = pop + offspring
        fronts = toolbox.sort(pop, len(pop))
        chosen = []
        for i, front in enumerate(fronts):
            # Move this front into the chosen population until it is almost full
            if len(chosen) + len(front) <= MU:
                chosen.extend(front)
            else:
                # Assign a hypervolume contribution to individuals of the front
                # that cannot be completely moved over to the chosen individuals
                fitness_hv = hypervolume_contrib(front)
                for ind, fit_hv in zip(front, fitness_hv):
                    ind.fitness_hv.values = (fit_hv, )
                # Fill chosen with the best individuals of the current front
                # (based on hypervolume contribution)
                chosen.extend(toolbox.select(front, MU - len(chosen)))
                break

        pop = chosen

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
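hypervolume_contrib is used above but not defined in this snippet. Conceptually, an individual's contribution is the hypervolume lost when it is removed from the front; a minimal sketch of that idea using the hypervolume indicator from deap.benchmarks.tools (the helper name is an assumption, and practical implementations compute the exclusive contributions far more efficiently):

from deap.benchmarks.tools import hypervolume

def hypervolume_contributions(front, ref):
    """Exclusive hypervolume contribution of each individual in the front
    (assumes the front holds at least two individuals)."""
    total = hypervolume(front, ref)
    return [total - hypervolume(front[:i] + front[i + 1:], ref)
            for i in range(len(front))]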
Example #35
0
def main(seed=None):
   random.seed(seed)

   NGEN = 2
   MU = 4
   CXPB = 0.6
   pop = toolbox.population(n=MU)

   stats = tools.Statistics(lambda ind: ind.fitness.values)
   stats.register("min", numpy.min, axis=0)
   stats.register("max", numpy.max, axis=0)
   stats.register("pop", copy.deepcopy)

   history = tools.History()
   # Decorate the variation operators
   #toolbox.register("variate", variate, mate=toolbox.mate, mutate=toolbox.mutate)
   #toolbox.decorate("variate", history.decorator)
   toolbox.decorate("mate", history.decorator)
   toolbox.decorate("mutate", history.decorator)

   fitnesses = toolbox.map(toolbox.evaluate, pop)
   for ind, fit in zip(pop, fitnesses):
       ind.fitness.values = fit
   plt.figure(figsize=(10,4))
   plt.subplot(1,2,1)
   for ind in pop: plt.plot(ind[0], ind[1], 'k.', ms=3)
   plt.xlabel('$x_1$');plt.ylabel('$x_2$');plt.title('Decision space');
   plt.subplot(1,2,2)
   for ind in pop: plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'k.', ms=3)
   plt.xlabel(r'$f_1(\mathbf{x})$'); plt.ylabel(r'$f_2(\mathbf{x})$')
   plt.xlim((0.5,3.6));plt.ylim((0.5,3.6)); plt.title('Objective space');
   plt.savefig("objective.png", dpi=200)

   logbook = tools.Logbook()
   logbook.header = "gen", "evals", "fitness", "size", "pop","ind"
   pickle.dump(logbook, open('nsga_ii-results.pickle', 'wb'),
           pickle.HIGHEST_PROTOCOL)

   hof = tools.ParetoFront()

   # Evaluate the individuals with an invalid fitness
   invalid_ind = [ind for ind in pop if not ind.fitness.valid]
   fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
   for ind, fit in zip(invalid_ind, fitnesses):
       ind.fitness.values = fit

   #hof.update(pop)

   # This is just to assign the crowding distance to the individuals
   # no actual selection is done
   pop = toolbox.select(pop, len(pop))

   record = stats.compile(pop)
   logbook.record(gen=0, evals=len(invalid_ind), **record)
   print(logbook.stream)

   # Begin the generational process
   for gen in range(1, NGEN):
       # Vary the population
       offspring = tools.selTournamentDCD(pop, len(pop))
       offspring = [toolbox.clone(ind) for ind in offspring]

       for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
           if random.random() <= CXPB:
               toolbox.mate(ind1, ind2)

           toolbox.mutate(ind1)
           toolbox.mutate(ind2)
           del ind1.fitness.values, ind2.fitness.values

       # Evaluate the individuals with an invalid fitness
       invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
       fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
       for ind, fit in zip(invalid_ind, fitnesses):
           ind.fitness.values = fit

       print "Evaluated %i individuals" % len(invalid_ind)

       pop = toolbox.select(pop+offspring, len(offspring))
       hof.update(pop)

       # Select the next generation population
       pop = toolbox.select(pop + offspring, MU)
       record = stats.compile(pop)
       logbook.record(gen=gen, evals=len(invalid_ind), **record)
       print(logbook.stream)

       plt.close("all")
       front = numpy.array([ind.fitness.values for ind in pop])
       plt.figure(figsize=(10,10))
       #fig,ax = plt.subplots(1,gen)
       plt.scatter(front[:,0], front[:,1], c="b")
       #locals()["ax"+str(gen)]=plt.scatter(front[:,0], front[:,1], c="b")
       #plt.tight_layout()
       plt.xlabel("RT(Time)")
       plt.ylabel("Memory usage, Mb")
       plt.savefig("front_gen"+str(gen)+".png", dpi=200)

   print("Pareto individuals are:")
   for ind in hof:
       print(ind, ind.fitness.values)
   print("XXXXXXXXXX Making plots XXXXXXXXXXXXX")

   #fig = plt.figure(figsize=(10,10))
   #ax = fig.gca()
   #ax.set_xlabel('RT')
   #ax.set_ylabel('Memory')
   #anim = animation.FuncAnimation(fig, lambda i: animate(i, logbook),
    #                           frames=len(logbook), interval=1,
    #                           blit=True)
   #anim.save('nsgaii-geantv.mp4', fps=15, bitrate=-1, dpi=500)
   #anim.save('populations.gif', writer='imagemagick')

   #print("XXXXXXXXXXXXXXXXXXXXXXX")


   print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))

   print("XXXXXXXXXXX Making more plots XXXXXXXXXXXX")
   fronts_s = tools.emo.sortLogNondominated(pop, len(pop))
   plot_colors = ('b','r', 'g', 'm', 'y', 'k', 'c')
   fig, ax = plt.subplots(1, figsize=(10,10))
   for i,inds in enumerate(fronts_s):
       par = [toolbox.evaluate(ind) for ind in inds]
       df = pd.DataFrame(par)
       df.plot(ax=ax, kind='scatter', label='Front ' + str(i+1),
                 x=df.columns[0], y=df.columns[1],
                 color=plot_colors[i % len(plot_colors)])
   plt.xlabel(r'$f_1(\mathbf{x})$'); plt.ylabel(r'$f_2(\mathbf{x})$')
   plt.savefig("front.png", dpi=200)
Example #36
0
def main(s, e, parallel=True, save=True):

    random.seed()

    NGEN = 2
    MU = 100
    CXPB = 0.3
    MUTPB = 0.5
    print("\nEvolution Info:")
    if threeObjectives:
        print("Using three objectives: Profit, PC and Risk Exposure")
    else:
        print("Using two objectives: Profit and PC")
    print("Retrieving data from ", file)
    print("Number of generations: ", NGEN)
    print("Population size: ", MU)
    print("CXPB: ", CXPB)
    print("MUTPB: ", MUTPB, "\n")
    print("Training on data from ", s, " to ", e, "\n")

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "min", "avg", "max"

    # multiprocessing
    if parallel:
        pool = multiprocessing.Pool()
        toolbox.register("map", pool.map)
        # Note: this second registration overrides pool.map, so SCOOP's
        # futures.map is what toolbox.map actually uses when parallel is True.
        toolbox.register("map", futures.map)

    pop = toolbox.population(n=MU)

    paretofront = tools.ParetoFront()
    all = []
    hypers = {}

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        # offspring = tools.selTournamentDCD(pop, len(pop))
        # offspring = [toolbox.clone(ind) for ind in offspring]
        offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population from parents and offspring
        pop = toolbox.select(pop + offspring, MU)

        paretofront.update(pop)
        for ind in pop:
            if ind not in all:
                all.append(ind)

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)

        if threeObjectives:
            hypers[gen] = hypervolume(pop, [1.0, 1.0, 50])
        else:
            hypers[gen] = hypervolume(pop, [1.0, 1.0])

        print(logbook.stream)

    if save:
        cp = dict(population=pop,
                  generation=gen,
                  pareto=paretofront,
                  logbook=logbook,
                  all=all,
                  rndstate=random.getstate())

        with open("SavedOutput.pkl", "wb") as cp_file:
            pickle.dump(cp, cp_file)

    if parallel:
        pool.close()

    if notification:
        url = 'https://www.pushsafer.com/api'  # Set destination URL here
        post_fields = {  # Set POST fields here
            "t": "Trading Strategy Evolution Complete",
            "m": "Please attend your laptop to sort and evaluate your data.",
            "s": "1",
            "v": "2",
            "i": "",
            "c": "",
            "d": "",
            "u": "",
            "ut": "",
            "k": "ar8KrxDHmzlniBs6MUlf"
        }

        request = Request(url, urlencode(post_fields).encode())
        json = urlopen(request).read().decode()

    if threeObjectives:
        allValues = []
        for i in all:
            allValues.append(i.fitness.values)
        threeScatterPlot(allValues)
        threeDimensionalPlot(allValues, dominates)
    else:
        plot_pop_pareto_front(all, paretofront, "Fitness of all individuals")

    plot_hypervolume(hypers)

    for tree in paretofront:
        showTree(tree, tree.fitness.values)

    #print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0, 11.0]))

    return paretofront, logbook
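Because "map" is registered twice above, SCOOP's futures.map silently replaces pool.map even though the pool is still created and closed. A sketch that makes the choice explicit (the helper name and use_scoop flag are illustrative):

import multiprocessing

def register_parallel_map(toolbox, use_scoop=False):
    """Register exactly one parallel map on the toolbox; return the
    multiprocessing pool (or None) so the caller can close it afterwards."""
    if use_scoop:
        from scoop import futures  # requires launching the script via `python -m scoop`
        toolbox.register("map", futures.map)
        return None
    pool = multiprocessing.Pool()
    toolbox.register("map", pool.map)
    return pool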
Example #37
0
File: main_nsga2.py Project: jx-qqq/SEIMS
def main(sceobj):
    # type: (SUScenario) -> ()
    """Main workflow of NSGA-II based Scenario analysis."""
    if sceobj.cfg.eval_info['BASE_ENV'] < 0:
        run_base_scenario(sceobj)
        print('The environment effectiveness value of the '
              'base scenario is %.2f' % sceobj.cfg.eval_info['BASE_ENV'])

    random.seed()

    # Initial timespan variables
    stime = time.time()
    plot_time = 0.
    allmodels_exect = list()  # execute time of all model runs

    pop_size = sceobj.cfg.opt.npop
    gen_num = sceobj.cfg.opt.ngens
    cx_rate = sceobj.cfg.opt.rcross
    mut_perc = sceobj.cfg.opt.pmut
    mut_rate = sceobj.cfg.opt.rmut
    sel_rate = sceobj.cfg.opt.rsel
    pop_select_num = int(pop_size * sel_rate)

    ws = sceobj.cfg.opt.out_dir
    cfg_unit = sceobj.cfg.bmps_cfg_unit
    cfg_method = sceobj.cfg.bmps_cfg_method
    worst_econ = sceobj.worst_econ
    worst_env = sceobj.worst_env
    # available gene value list
    possible_gene_values = list(sceobj.bmps_params.keys())
    if 0 not in possible_gene_values:
        possible_gene_values.append(0)
    units_info = sceobj.cfg.units_infos
    suit_bmps = sceobj.suit_bmps
    gene_to_unit = sceobj.cfg.gene_to_unit
    unit_to_gene = sceobj.cfg.unit_to_gene
    updown_units = sceobj.cfg.updown_units

    scoop_log('Population: %d, Generation: %d' % (pop_size, gen_num))
    scoop_log('BMPs configure unit: %s, configuration method: %s' % (cfg_unit, cfg_method))

    # create reference point for hypervolume
    ref_pt = numpy.array([worst_econ, worst_env]) * multi_weight * -1

    stats = tools.Statistics(lambda sind: sind.fitness.values)
    stats.register('min', numpy.min, axis=0)
    stats.register('max', numpy.max, axis=0)
    stats.register('avg', numpy.mean, axis=0)
    stats.register('std', numpy.std, axis=0)

    logbook = tools.Logbook()
    logbook.header = 'gen', 'evals', 'min', 'max', 'avg', 'std'

    # Initialize population
    initialize_byinputs = False
    if sceobj.cfg.initial_byinput and sceobj.cfg.input_pareto_file is not None and \
        sceobj.cfg.input_pareto_gen > 0:  # Initial by input Pareto solutions
        inpareto_file = sceobj.modelcfg.model_dir + os.sep + sceobj.cfg.input_pareto_file
        if os.path.isfile(inpareto_file):
            inpareto_solutions = read_pareto_solutions_from_txt(inpareto_file,
                                                                sce_name='scenario',
                                                                field_name='gene_values')
            if sceobj.cfg.input_pareto_gen in inpareto_solutions:
                pareto_solutions = inpareto_solutions[sceobj.cfg.input_pareto_gen]
                pop = toolbox.population_byinputs(sceobj.cfg, pareto_solutions)  # type: List
                initialize_byinputs = True
    if not initialize_byinputs:
        pop = toolbox.population(sceobj.cfg, n=pop_size)  # type: List

    init_time = time.time() - stime

    def delete_fitness(new_ind):
        """Delete the fitness and other information of new individual."""
        del new_ind.fitness.values
        new_ind.gen = -1
        new_ind.id = -1
        new_ind.io_time = 0.
        new_ind.comp_time = 0.
        new_ind.simu_time = 0.
        new_ind.runtime = 0.

    def check_validation(fitvalues):
        """Check the validation of the fitness values of an individual."""
        flag = True
        for condidx, condstr in enumerate(conditions):
            if condstr is None:
                continue
            if not eval('%f%s' % (fitvalues[condidx], condstr)):
                flag = False
        return flag

    def evaluate_parallel(invalid_pops):
        """Evaluate model by SCOOP or map, and get fitness of individuals."""
        popnum = len(invalid_pops)
        try:
            # parallel on multiprocesor or clusters using SCOOP
            from scoop import futures
            invalid_pops = list(futures.map(toolbox.evaluate, [sceobj.cfg] * popnum, invalid_pops))
        except (ImportError, ImportWarning):
            # serial
            invalid_pops = list(map(toolbox.evaluate, [sceobj.cfg] * popnum, invalid_pops))

        # Filter for a valid solution
        if filter_ind:
            invalid_pops = [tmpind for tmpind in invalid_pops
                            if check_validation(tmpind.fitness.values)]
            if len(invalid_pops) < 2:
                print('The number of valid individuals should be greater than or equal to 2. '
                      'Please check the parameter ranges or change the sampling strategy!')
                exit(2)
        return invalid_pops  # Currently, `invalid_pops` contains evaluated individuals

    # Record the count and execute timespan of model runs during the optimization
    modelruns_count = {0: len(pop)}
    modelruns_time = {0: 0.}  # Total time counted according to evaluate_parallel()
    modelruns_time_sum = {0: 0.}  # Summarize time of every model runs according to pop

    # Generation 0 before optimization
    stime = time.time()
    pop = evaluate_parallel(pop)
    modelruns_time[0] = time.time() - stime
    for ind in pop:
        ind.gen = 0
        allmodels_exect.append([ind.io_time, ind.comp_time, ind.simu_time, ind.runtime])
        modelruns_time_sum[0] += ind.runtime

    # Currently, len(pop) may be less than pop_select_num
    pop = toolbox.select(pop, pop_select_num)
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(pop), **record)
    scoop_log(logbook.stream)
    front = numpy.array([ind.fitness.values for ind in pop])
    # save front for further possible use
    numpy.savetxt(sceobj.scenario_dir + os.sep + 'pareto_front_gen0.txt',
                  front, delimiter=str(' '), fmt=str('%.4f'))

    # Begin the generational process
    output_str = '### Generation number: %d, Population size: %d ###\n' % (gen_num, pop_size)
    scoop_log(output_str)
    UtilClass.writelog(sceobj.cfg.opt.logfile, output_str, mode='replace')

    modelsel_count = {0: len(pop)}  # type: Dict[int, int] # newly added Pareto fronts

    for gen in range(1, gen_num + 1):
        output_str = '###### Generation: %d ######\n' % gen
        scoop_log(output_str)
        offspring = [toolbox.clone(ind) for ind in pop]
        if len(offspring) >= 2:  # when offspring size greater than 2, mate can be done
            for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
                old_ind1 = toolbox.clone(ind1)
                old_ind2 = toolbox.clone(ind2)
                if random.random() <= cx_rate:
                    if cfg_method == BMPS_CFG_METHODS[3]:  # SLPPOS method
                        toolbox.mate_slppos(ind1, ind2, sceobj.cfg.hillslp_genes_num)
                    elif cfg_method == BMPS_CFG_METHODS[2]:  # UPDOWN method
                        toolbox.mate_updown(updown_units, gene_to_unit, unit_to_gene, ind1, ind2)
                    else:
                        toolbox.mate_rdm(ind1, ind2)

                if cfg_method == BMPS_CFG_METHODS[0]:
                    toolbox.mutate_rdm(possible_gene_values, ind1, perc=mut_perc, indpb=mut_rate)
                    toolbox.mutate_rdm(possible_gene_values, ind2, perc=mut_perc, indpb=mut_rate)
                else:
                    tagnames = None
                    if sceobj.cfg.bmps_cfg_unit == BMPS_CFG_UNITS[3]:
                        tagnames = sceobj.cfg.slppos_tagnames
                    toolbox.mutate_rule(units_info, gene_to_unit, unit_to_gene,
                                        suit_bmps, ind1,
                                        perc=mut_perc, indpb=mut_rate,
                                        unit=cfg_unit, method=cfg_method,
                                        tagnames=tagnames,
                                        thresholds=sceobj.cfg.boundary_adaptive_threshs)
                    toolbox.mutate_rule(units_info, gene_to_unit, unit_to_gene,
                                        suit_bmps, ind2,
                                        perc=mut_perc, indpb=mut_rate,
                                        unit=cfg_unit, method=cfg_method,
                                        tagnames=tagnames,
                                        thresholds=sceobj.cfg.boundary_adaptive_threshs)
                if check_individual_diff(old_ind1, ind1):
                    delete_fitness(ind1)
                if check_individual_diff(old_ind2, ind2):
                    delete_fitness(ind2)

        # Evaluate the individuals with an invalid fitness
        invalid_inds = [ind for ind in offspring if not ind.fitness.valid]
        valid_inds = [ind for ind in offspring if ind.fitness.valid]
        invalid_ind_size = len(invalid_inds)
        if invalid_ind_size == 0:  # No need to continue
            scoop_log('Note: No invalid individuals available, the NSGA2 will be terminated!')
            break
        modelruns_count.setdefault(gen, invalid_ind_size)
        stime = time.time()
        invalid_inds = evaluate_parallel(invalid_inds)
        curtimespan = time.time() - stime
        modelruns_time.setdefault(gen, curtimespan)
        modelruns_time_sum.setdefault(gen, 0.)
        for ind in invalid_inds:
            ind.gen = gen
            allmodels_exect.append([ind.io_time, ind.comp_time, ind.simu_time, ind.runtime])
            modelruns_time_sum[gen] += ind.runtime

        # Select the next generation population
        # The previous version could produce duplicates of the same scenario in one
        #   Pareto front, so duplicates are checked and removed first.
        # pop = toolbox.select(pop + valid_inds + invalid_inds, pop_select_num)
        tmppop = pop + valid_inds + invalid_inds
        pop = list()
        unique_sces = dict()
        for tmpind in tmppop:
            if tmpind.gen in unique_sces and tmpind.id in unique_sces[tmpind.gen]:
                continue
            if tmpind.gen not in unique_sces:
                unique_sces.setdefault(tmpind.gen, [tmpind.id])
            elif tmpind.id not in unique_sces[tmpind.gen]:
                unique_sces[tmpind.gen].append(tmpind.id)
            pop.append(tmpind)
        pop = toolbox.select(pop, pop_select_num)

        hyper_str = 'Gen: %d, New model runs: %d, ' \
                    'Execute timespan: %.4f, Sum of model run timespan: %.4f, ' \
                    'Hypervolume: %.4f\n' % (gen, invalid_ind_size,
                                             curtimespan, modelruns_time_sum[gen],
                                             hypervolume(pop, ref_pt))
        scoop_log(hyper_str)
        UtilClass.writelog(sceobj.cfg.opt.hypervlog, hyper_str, mode='append')

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_inds), **record)
        scoop_log(logbook.stream)

        # Count how many newly generated individuals entered the near-optimal Pareto front
        new_count = 0
        for ind in pop:
            if ind.gen == gen:
                new_count += 1
        modelsel_count.setdefault(gen, new_count)

        # Plot 2D near optimal pareto front graphs
        stime = time.time()
        front = numpy.array([ind.fitness.values for ind in pop])
        # save front for further possible use
        numpy.savetxt(sceobj.scenario_dir + os.sep + 'pareto_front_gen%d.txt' % gen,
                      front, delimiter=str(' '), fmt=str('%.4f'))

        # Comment out the following plot code if matplotlib does not work.
        try:
            from scenario_analysis.visualization import plot_pareto_front_single
            pareto_title = 'Near Pareto optimal solutions'
            xlabel = 'Economy'
            ylabel = 'Environment'
            if sceobj.cfg.plot_cfg.plot_cn:
                xlabel = r'经济净投入'  # net economic input
                ylabel = r'环境效益'  # environmental benefit
                pareto_title = r'近似最优Pareto解集'  # near-optimal Pareto solutions
            plot_pareto_front_single(front, [xlabel, ylabel],
                                     ws, gen, pareto_title,
                                     plot_cfg=sceobj.cfg.plot_cfg)
        except Exception as e:
            scoop_log('Exception caught: %s' % str(e))
        plot_time += time.time() - stime

        # save in file
        output_str += 'generation\tscenario\teconomy\tenvironment\tgene_values\n'
        for indi in pop:
            output_str += '%d\t%d\t%f\t%f\t%s\n' % (indi.gen, indi.id, indi.fitness.values[0],
                                                    indi.fitness.values[1], str(indi))
        UtilClass.writelog(sceobj.cfg.opt.logfile, output_str, mode='append')

    # Plot hypervolume and newly executed model count
    # Comment out the following plot code if matplotlib does not work.
    try:
        from scenario_analysis.visualization import plot_hypervolume_single
        plot_hypervolume_single(sceobj.cfg.opt.hypervlog, ws, plot_cfg=sceobj.cfg.plot_cfg)
    except Exception as e:
        scoop_log('Exception caught: %s' % str(e))

    # Save newly added Pareto fronts of each generations
    new_fronts_count = numpy.array(list(modelsel_count.items()))
    numpy.savetxt('%s/new_pareto_fronts_count.txt' % ws,
                  new_fronts_count, delimiter=str(','), fmt=str('%d'))

    # Save and print timespan information
    allmodels_exect = numpy.array(allmodels_exect)
    numpy.savetxt('%s/exec_time_allmodelruns.txt' % ws, allmodels_exect,
                  delimiter=str(' '), fmt=str('%.4f'))
    scoop_log('Running time of all SEIMS models:\n'
              '\tIO\tCOMP\tSIMU\tRUNTIME\n'
              'MAX\t%s\n'
              'MIN\t%s\n'
              'AVG\t%s\n'
              'SUM\t%s\n' % ('\t'.join('%.3f' % v for v in allmodels_exect.max(0)),
                             '\t'.join('%.3f' % v for v in allmodels_exect.min(0)),
                             '\t'.join('%.3f' % v for v in allmodels_exect.mean(0)),
                             '\t'.join('%.3f' % v for v in allmodels_exect.sum(0))))

    exec_time = 0.
    for genid, tmptime in list(modelruns_time.items()):
        exec_time += tmptime
    exec_time_sum = 0.
    for genid, tmptime in list(modelruns_time_sum.items()):
        exec_time_sum += tmptime
    allcount = 0
    for genid, tmpcount in list(modelruns_count.items()):
        allcount += tmpcount

    scoop_log('Initialization timespan: %.4f\n'
              'Model execution timespan: %.4f\n'
              'Sum of model runs timespan: %.4f\n'
              'Plot Pareto graphs timespan: %.4f' % (init_time, exec_time,
                                                     exec_time_sum, plot_time))

    return pop, logbook
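The unique_sces bookkeeping above keeps only the first individual seen for each (generation, scenario id) pair before NSGA-II selection. A compact equivalent sketch (the helper name is illustrative; it assumes individuals carry gen and id attributes as in this workflow):

def remove_duplicate_scenarios(individuals):
    """Keep the first individual seen for each (generation, scenario id) pair."""
    seen = set()
    unique = []
    for ind in individuals:
        key = (ind.gen, ind.id)
        if key in seen:
            continue
        seen.add(key)
        unique.append(ind)
    return unique

# e.g. pop = toolbox.select(remove_duplicate_scenarios(pop + valid_inds + invalid_inds),
#                           pop_select_num)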
Example #38
0
def test_mo_cma_es():
    def distance(feasible_ind, original_ind):
        """A distance function to the feasibility region."""
        return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))

    def closest_feasible(individual):
        """A function returning a valid individual from an invalid one."""
        feasible_ind = numpy.array(individual)
        feasible_ind = numpy.maximum(BOUND_LOW, feasible_ind)
        feasible_ind = numpy.minimum(BOUND_UP, feasible_ind)
        return feasible_ind

    def valid(individual):
        """Determines if the individual is valid or not."""
        if any(individual < BOUND_LOW) or any(individual > BOUND_UP):
            return False
        return True

    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU, LAMBDA = 10, 10
    NGEN = 500

    numpy.random.seed(128)

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [
        creator.__dict__[INDCLSNAME](x)
        for x in numpy.random.uniform(BOUND_LOW, BOUND_UP, (MU, NDIM))
    ]

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.decorate(
        "evaluate",
        tools.ClosestValidPenalty(valid, closest_feasible, 1.0e+6, distance))

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population,
                                          sigma=1.0,
                                          mu=MU,
                                          lambda_=LAMBDA)

    toolbox.register("generate", strategy.generate,
                     creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # Update the strategy with the evaluated individuals
        toolbox.update(population)

    # Note that we use a penalty to guide the search to feasible solutions,
    # but there is no guarantee that individuals are valid.
    # We expect the best individuals will be within bounds or very close.
    num_valid = 0
    for ind in strategy.parents:
        dist = distance(closest_feasible(ind), ind)
        if numpy.isclose(dist, 0.0, rtol=1.e-5, atol=1.e-5):
            num_valid += 1
    assert num_valid >= len(strategy.parents)

    # Note that NGEN=500 is enough to get consistent hypervolume > 116,
    # but not 119. More generations would help but would slow down testing.
    hv = hypervolume(strategy.parents, [11.0, 11.0])
    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (
        hv, HV_THRESHOLD)
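tools.ClosestValidPenalty in the test above scores an out-of-bounds candidate at a repaired (clipped) copy and adds a distance-weighted penalty. A hand-rolled sketch of that general idea for minimization problems (this is not DEAP's exact implementation; the names and signature are illustrative):

def penalized_evaluate(evaluate, valid, closest_feasible, distance, alpha):
    """Wrap an evaluation function so invalid individuals are scored at their
    closest feasible point plus alpha times the distance to that point."""
    def wrapper(individual):
        if valid(individual):
            return evaluate(individual)
        feasible = closest_feasible(individual)
        penalty = alpha * distance(feasible, individual)
        return tuple(f + penalty for f in evaluate(feasible))
    return wrapper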
Example #39
0
def main(function, NGEN, MU, refPoint):

    # Problem definition
    # Functions zdt1, zdt2, zdt3, zdt6 have bounds [0, 1]
    # Functions zdt4 has bounds x1 = [0, 1], xn = [-5, 5], with n = 2, ..., 10
    # Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10
    if (function == 'ZDT4'):
        NDIM = 10
        BOUND_LOW = [0.0] + [-5.0] * (NDIM - 1)
        BOUND_UP = [1.0] + [5.0] * (NDIM - 1)
    else:
        NDIM = 30
        if (function == 'ZDT6'):
            NDIM = 10
        BOUND_LOW, BOUND_UP = [0.0] * NDIM, [1.0] * NDIM


##    print 'refPoint =' ,refPoint
##    print 'NDIM=',NDIM
##    print 'BOUNDS=',BOUND_LOW,BOUND_UP
##    print 'NGEN=', NGEN
##    print 'MU=',MU

    toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", eval(''.join(['benchmarks.zdt',
                                               function[3]])))
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=30.0)
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     low=BOUND_LOW,
                     up=BOUND_UP,
                     eta=20.0,
                     indpb=1.0 / NDIM)
    toolbox.register("select", tools.selNSGA2)

    CXPB = 1.0

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    hvValues = []

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    hvValues.append(hypervolume(pop, refPoint))

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    #print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        #print(logbook.stream)

        hvValues.append(hypervolume(pop, refPoint))

    print('Hypervolume =', hvValues[-1])

    return pop, logbook, hvValues
Example #40
0
    def evolve_population(self, n=NUM_POP, CXPB=CXPB, MUTPB=MUTPB, NGEN=NGEN):
        toolbox = self.toolbox

        start_gen = 0

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", numpy.mean, axis=0)
        stats.register("std", numpy.std, axis=0)
        stats.register("min", numpy.min, axis=0)
        stats.register("max", numpy.max, axis=0)

        logbook = tools.Logbook()
        logbook.header = "gen", "evals", "std", "min", "avg", "max"

        pop = toolbox.population(n)

        if os.path.isfile('saved_population.pkl'):
            with open('saved_population.pkl', 'rb') as f:
                p = pickle.load(f)
                pop = list(
                    map(lambda x: self.creator.Individual(set(x)),
                        p.population))
                start_gen = p.gen_number
                print("start from {}th generation".format(start_gen))

        self.population = Population(pop, start_gen)
        # print(pop)

        fitnesses = map(toolbox.evaluate, pop)

        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        def show_plot(g):
            fronts = sortNondominated(self.archive, k=len(self.archive))
            fronts_to_print = []

            for n in range(min(5, len(fronts))):
                dominating_group = sorted(
                    [self.archive[i] for i in fronts[n]],
                    key=lambda individual: individual.fitness.values[0])

                print(
                    "{}: ".format(n),
                    list(
                        map(lambda i: list(map(lambda e: self.elements[e], i)),
                            dominating_group)))
                print('\n')
                print(
                    "fitness values(scores): ",
                    list(map(lambda i: i.fitness.values[0], dominating_group)))
                print(
                    "fitness values(lengths): ",
                    list(map(lambda i: i.fitness.values[1], dominating_group)))
                print("{}: ".format(n), dominating_group)
                for ind in dominating_group:
                    print(self.count_indivudial_refatorings(ind))

                fronts_to_print.append(
                    numpy.array(
                        [ind.fitness.values for ind in dominating_group]))

            fig = plt.figure(figsize=(6, 6))

            colors = ['p', 'b', 'g', 'y', 'r']

            for n in range(min(5, len(fronts_to_print))):
                plt.plot(fronts_to_print[n][:, 1],
                         fronts_to_print[n][:, 0],
                         colors[n],
                         marker='o',
                         markersize=6,
                         linestyle='-')

            plt.title('Pareto Fronts of the {} Generation'.format(g))
            plt.xlabel('number of refactorings')
            plt.ylabel('similarity score(%)')
            try:
                plt.savefig('figs/{}_gen{}.png'.format(id(self.archive), g))
            except FileNotFoundError:
                os.makedirs('figs')
                plt.savefig('figs/{}_gen{}.png'.format(id(self.archive), g))

        for g in range(start_gen, NGEN):

            # Select the next generation individuals
            offspring = toolbox.select(pop, len(pop))
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, offspring))

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    toolbox.mate(child1, child2, 1)
                    del child1.fitness.values
                    del child2.fitness.values

            new_offspring = list()
            for mutant in offspring:
                if random.random() < MUTPB:
                    mutant = toolbox.mutate(mutant)
                    del mutant.fitness.values
                new_offspring.append(mutant)

            # Evaluate the individuals with an invalid fitness
            fitnesses = map(toolbox.evaluate, offspring)

            for ind, fit in zip(offspring, fitnesses):
                ind.fitness.values = fit

            # The population is entirely replaced by the offspring
            pop[:] = offspring
            self.population = Population(pop, g + 1)

            fronts = sortNondominated(pop, k=len(pop))
            best_front = [pop[i] for i in fronts[0]]
            self.archive.extend(best_front)
            if (g + 1) % 10 == 0:
                show_plot('{}th'.format(g + 1))
            # print(pop)

            record = stats.compile(pop)
            logbook.record(gen=g + 1, evals=len(offspring), **record)

            with open('logbook_{}.pkl'.format(id(logbook)), 'wb') as f:
                pickle.dump(logbook, f)

            # TODO: currently, save result of arbitrary individual in current population
            self.fit.save_current_refactoring('gen_{}'.format(g + 1))

            with open('hypervolume.txt', 'a') as f:
                f.write("%f\n" % hypervolume(pop, [100.0, 100.0]))

            average_fitness = sum(
                map(lambda x: self.toolbox.evaluate(x)[0], pop)) / len(pop)

            if str(g)[-1] == '0':
                print('Result of {}st generation: {}'.format(
                    g + 1, average_fitness))
                if g < NGEN - 1:
                    print('Start of {}nd generation'.format(g + 2))
                else:
                    print('End of Evolution')
            elif str(g)[-1] == '1':
                print('Result of {}nd generation: {}'.format(
                    g + 1, average_fitness))
                if g < NGEN - 1:
                    print('Start of {}rd generation'.format(g + 2))
                else:
                    print('End of Evolution')
            elif str(g)[-1] == '2':
                print('Result of {}rd generation: {}'.format(
                    g + 1, average_fitness))
                if g < NGEN - 1:
                    print('Start of {}th generation'.format(g + 2))
                else:
                    print('End of Evolution')
            else:
                print('Result of {}th generation: {}'.format(
                    g + 1, average_fitness))
                if g < NGEN - 1:
                    print('Start of {}th generation'.format(g + 2))
                else:
                    print('End of Evolution')

        self.archive.extend(pop)

        show_plot('last')

        return pop
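The four print branches at the end of evolve_population differ only in the ordinal suffix (and they key on g rather than g + 1, so the suffix can drift). A small helper such as the following could collapse them; the function name is illustrative, and it also handles 11th-13th correctly:

def ordinal(n):
    """1 -> '1st', 2 -> '2nd', 3 -> '3rd', 11 -> '11th', 23 -> '23rd', ..."""
    if 10 <= n % 100 <= 20:
        suffix = 'th'
    else:
        suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '%d%s' % (n, suffix)

# e.g. print('Result of {} generation: {}'.format(ordinal(g + 1), average_fitness))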
Example #41
0
File: nsga2.py Project: gaohr/SEIMS
def main(num_Gens, size_Pops, cx, seed=None):

    # toolbox.register("attr_float", iniPops)
    # toolbox.register("individual", tools.initIterate, creator.Individuals, toolbox.attr_float)
    # toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # toolbox.register("evaluate", calBenefitandCost)
    # toolbox.register("mate", tools.cxOnePoint)
    # toolbox.register("mutate", mutModel, indpb=MutateRate)
    # toolbox.register("select", tools.selNSGA2)

    random.seed(seed)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "min", "max"
    
    pop = toolbox.population(n=size_Pops)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]

    try:
        # parallel on multiprocesor or clusters using SCOOP
        from scoop import futures
        fitnesses = futures.map(toolbox.evaluate, invalid_ind)
        # print "parallel-fitnesses: ",fitnesses
    except (ImportError, ImportWarning):
        # serial
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        # print "serial-fitnesses: ",fitnesses

    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, size_Pops)
    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, num_Gens):
        printInfo("###### Iteration: %d ######" % gen)
        # Vary the population
        offspring = tools.selTournamentDCD(pop, int(size_Pops * SelectRate))
        offspring = [toolbox.clone(ind) for ind in offspring]
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= cx:
                toolbox.mate(ind1, ind2)
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values
        
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        try:
            # parallel on multiprocesor or clusters using SCOOP
            from scoop import futures
            fitnesses = futures.map(toolbox.evaluate, invalid_ind)
        except (ImportError, ImportWarning):
            # serial
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)

        # invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        # fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, size_Pops)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        # print "\nlogbook.stream: ", logbook.stream

        if gen % 1 == 0:
            # Create plot
            createPlot(pop, model_Workdir, num_Gens, size_Pops, gen)
            # save in file
            outputStr = "### Generation number: %d, Population size: %d ###" % (num_Gens, size_Pops) + LF
            outputStr += "### Generation_%d ###" % gen + LF
            outputStr += "cost\tbenefit\tscenario" + LF
            for indi in pop:
                outputStr += str(indi.fitness.values[0]) + "\t" + str(indi.fitness.values[1]) + "\t" \
                             + str(indi) + LF
            outfilename = model_Workdir + os.sep + "NSGAII_OUTPUT" + os.sep + "Gen_" \
                           + str(GenerationsNum) + "_Pop_" + str(PopulationSize) + os.sep + "Gen_" \
                           + str(GenerationsNum) + "_Pop_" + str(PopulationSize) + "_resultLog.txt"
            WriteLog(outfilename, outputStr, MODE='append')
    printInfo("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))
    return pop, logbook
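The try-SCOOP-then-serial evaluation pattern appears in several of these snippets. A reusable sketch of it (the helper name is illustrative):

def scoop_or_serial_map(func, *iterables):
    """Use SCOOP's futures.map when available (i.e. the script was launched
    with `python -m scoop`), otherwise fall back to the builtin serial map."""
    try:
        from scoop import futures
    except (ImportError, ImportWarning):
        return list(map(func, *iterables))
    return list(futures.map(func, *iterables))

# e.g. fitnesses = scoop_or_serial_map(toolbox.evaluate, invalid_ind)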