def run(num_gen, n, mutpb, cxpb):
    """
    Runs multiple episodes, evolving the RNN parameters using a GA
    """
    history = tools.History()
    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    pool = multiprocessing.Pool(processes=12)
    toolbox.register("map", pool.map)

    pop = toolbox.population(n=n)
    history.update(pop)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=cxpb,
                                   mutpb=mutpb,
                                   ngen=num_gen,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)

    return pop, log, hof, history
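# Hedged sketch (not from the source): a minimal module-level setup that the
# run() example above assumes. IND_SIZE and the evaluate() body are
# illustrative placeholders; only the DEAP calls themselves are standard.
import multiprocessing
import random

import numpy as np
from deap import algorithms, base, creator, tools

IND_SIZE = 64  # hypothetical length of the flattened RNN parameter vector

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, -1.0, 1.0)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, n=IND_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)


def evaluate(individual):
    # Placeholder fitness; the real example would run RNN episodes here.
    return (sum(individual),)


toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0.0, sigma=0.2, indpb=0.1)
toolbox.register("select", tools.selTournament, tournsize=3)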
Example #2
def parallel_simple(toolbox, pop_sz, cxpb, mutpb, ngen, n_jobs=4):
    # Process pool of n_jobs workers (4 by default)
    print("Beginning parallel processing")
    pool = multiprocessing.Pool(processes=n_jobs)
    toolbox.register("map", pool.map)

    history = tools.History()
    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    # Create the population and populate the history
    pop = toolbox.population(n=pop_sz)
    history.update(pop)
    # create a hall of fame record
    hof = tools.HallOfFame(1000)

    # initialize stats
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, logbook = algorithms.eaSimple(pop,
                                       toolbox,
                                       cxpb=cxpb,
                                       mutpb=mutpb,
                                       ngen=ngen,
                                       stats=stats,
                                       halloffame=hof,
                                       verbose=False)
    pool.close()
    return pop, logbook, hof, history
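# Hedged usage sketch: because parallel_simple() creates a multiprocessing
# pool, it should be called from under an `if __name__ == "__main__":` guard
# on platforms that spawn worker processes (e.g. Windows), otherwise the
# workers re-import the module and try to create pools of their own. The
# module-level toolbox is assumed to be configured as in the sketch above.
if __name__ == "__main__":
    pop, logbook, hof, history = parallel_simple(
        toolbox, pop_sz=200, cxpb=0.5, mutpb=0.2, ngen=40, n_jobs=4)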
Example #3
def run(num_gen=10,
        n=100,
        mutpb=0.8,
        cxpb=0.5):
    np.random.seed(0)
    history = tools.History()
    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    pop = toolbox.population(n=n)
    history.update(pop)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log, best = eaSimpleModified(pop,
                                      toolbox,
                                      cxpb=cxpb,
                                      mutpb=mutpb,
                                      ngen=num_gen,
                                      stats=stats,
                                      halloffame=hof,
                                      verbose=True)

    return pop, log, hof, history, best
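# Hedged sketch (an assumption, not the project's code): eaSimpleModified used
# above is presumably DEAP's algorithms.eaSimple with one extra return value,
# a per-generation list of the best individuals.
from deap import algorithms, tools


def eaSimpleModified(population, toolbox, cxpb, mutpb, ngen,
                     stats=None, halloffame=None, verbose=__debug__):
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
    best = []

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    if halloffame is not None:
        halloffame.update(population)
    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)

    for gen in range(1, ngen + 1):
        # Select parents, then apply crossover and mutation
        offspring = toolbox.select(population, len(population))
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)

        # Re-evaluate only the individuals whose fitness was invalidated
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        if halloffame is not None:
            halloffame.update(offspring)
        population[:] = offspring

        # Extra bookkeeping compared to eaSimple: remember this generation's best
        best.append(toolbox.clone(tools.selBest(population, 1)[0]))

        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)

    return population, logbook, best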
Example #4
    def optimizeClf(self, population=10, generations=3):
        '''
        Searches for the best classifier using a genetic algorithm

        :param int population: Number of members of the first generation
        :param int generations: Number of generations
        :return: Trained classifier
        '''
        print("Optimizing accuracy:\n")
        # Using DEAP, customized for the decision tree
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)

        # Creation of individual and population
        toolbox = base.Toolbox()
        toolbox.register("individual", self.initIndividual, creator.Individual)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        # Methods for genetic algorithm
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate",
                         tools.mutPolynomialBounded,
                         eta=0.5,
                         low=[x.minValue for x in self.params],
                         up=[x.maxValue for x in self.params],
                         indpb=0.2)
        toolbox.register("select", tools.selTournament, tournsize=2)
        toolbox.register("evaluate", self.evaluateClf)

        # Tools
        pop = toolbox.population(n=population)
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("min", np.min)
        stats.register("max", np.max)

        # History
        hist = tools.History()
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        hist.update(pop)

        fpop, logbook = algorithms.eaSimple(pop,
                                            toolbox,
                                            cxpb=0.5,
                                            mutpb=0.2,
                                            ngen=generations,
                                            stats=stats,
                                            halloffame=hof)

        best_score = hof[0].fitness.values[:]

        print("Best accuracy: " + str(best_score[0]))
        print("Best classifier: " + str(self.getClf(hof[0])))

        self.plotLogbook(logbook=logbook)
        return self.getClf(hof[0])
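    # Hedged sketch (an assumption, not the project's code): the initIndividual
    # helper registered by optimizeClf above could draw each gene uniformly
    # between the bounds declared in self.params (requires `import random`).
    def initIndividual(self, icls):
        return icls(random.uniform(p.minValue, p.maxValue)
                    for p in self.params)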
Example #5
    def _init_toolbox(self):
        # Initialize population
        self._toolbox = base.Toolbox()
        # noinspection PyUnresolvedReferences
        self._toolbox.register('individual',
                               _init_individual,
                               creator.Individual,
                               hparam_space=self.hparam_space)
        # noinspection PyUnresolvedReferences
        self._toolbox.register('population', tools.initRepeat, list,
                               self._toolbox.individual)
        if self.n_jobs != 1:

            def _map_func(*_args, **_kwargs):
                return map_jobs(*_args,
                                n_jobs=self.n_jobs,
                                verbose=self.verbose,
                                **_kwargs)

            self._toolbox.register('map', _map_func)

        self._toolbox.register('mate',
                               _cx_individual_grid,
                               indpb=self.gene_crossover_prob,
                               hparam_space=self.hparam_space)

        self._toolbox.register('mutate',
                               _mut_individual_grid,
                               indpb=self.gene_mutation_prob,
                               hparam_space=self.hparam_space)

        self._toolbox.register('select',
                               tools.selTournament,
                               tournsize=self.tournament_size)

        # noinspection PyUnresolvedReferences
        self._pop = self._toolbox.population(n=self.population_size)
        self._hof = tools.HallOfFame(1)

        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register('avg', np.nanmean)
        stats.register('min', np.nanmin)
        stats.register('max', np.nanmax)
        stats.register('std', np.nanstd)
        stats.register('median', np.nanmedian)
        self._stats = stats

        # History
        hist = tools.History()
        self._toolbox.decorate('mate', hist.decorator)
        self._toolbox.decorate('mutate', hist.decorator)
        hist.update(self._pop)
        self._hist = hist
Example #6
def main(seed=None):
    random.seed(seed)

    NGEN = 2
    MU = 4
    CXPB = 0.6
    pop = toolbox.population(n=MU)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    stats.register("pop", copy.deepcopy)

    history = tools.History()
    # Decorate the variation operators
    #toolbox.register("variate", variate, mate=toolbox.mate, mutate=toolbox.mutate)
    #toolbox.decorate("variate", history.decorator)
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    plt.figure(figsize=(10, 4))
    plt.subplot(1, 2, 1)
    for ind in pop:
        plt.plot(ind[0], ind[1], 'k.', ms=3)
    plt.xlabel('$x_1$')
    plt.ylabel('$x_2$')
    plt.title('Decision space')
    plt.subplot(1, 2, 2)
    for ind in pop:
        plt.plot(ind.fitness.values[0], ind.fitness.values[1], 'k.', ms=3)
    plt.xlabel(r'$f_1(\mathbf{x})$')
    plt.ylabel(r'$f_2(\mathbf{x})$')
    plt.xlim((0.5, 3.6))
    plt.ylim((0.5, 3.6))
    plt.title('Objective space')
    plt.savefig("objective.png", dpi=200)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "fitness", "size", "pop", "ind"
    pickle.dump(logbook, open('nsga_ii-results.pickle', 'wb'),
                pickle.HIGHEST_PROTOCOL)

    hof = tools.ParetoFront()

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    #hof.update(pop)

    # This is just to assign the crowding distance to the individuals;
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print("Evaluated %i individuals" % len(invalid_ind))

        pop = toolbox.select(pop + offspring, len(offspring))
        hof.update(pop)

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

        plt.close("all")
        front = numpy.array([ind.fitness.values for ind in pop])
        plt.figure(figsize=(10, 10))
        #fig,ax = plt.subplots(1,gen)
        plt.scatter(front[:, 0], front[:, 1], c="b")
        #locals()["ax"+str(gen)]=plt.scatter(front[:,0], front[:,1], c="b")
        #plt.tight_layout()
        plt.xlabel("RT(Time)")
        plt.ylabel("Memory usage, Mb")
        plt.savefig("front_gen" + str(gen) + ".png", dpi=200)

    print("Pareto individuals are:")
    for ind in hof:
        print(ind, ind.fitness.values)
    print("XXXXXXXXXX Making plots XXXXXXXXXXXXX")

    #fig = plt.figure(figsize=(10,10))
    #ax = fig.gca()
    #ax.set_xlabel('RT')
    #ax.set_ylabel('Memory')
    #anim = animation.FuncAnimation(fig, lambda i: animate(i, logbook),
    #                           frames=len(logbook), interval=1,
    #                           blit=True)
    #anim.save('nsgaii-geantv.mp4', fps=15, bitrate=-1, dpi=500)
    #anim.save('populations.gif', writer='imagemagick')

    #print("XXXXXXXXXXXXXXXXXXXXXXX")

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    print("XXXXXXXXXXX Making more plots XXXXXXXXXXXX")
    fronts_s = tools.emo.sortLogNondominated(pop, len(pop))
    plot_colors = ('b', 'r', 'g', 'm', 'y', 'k', 'c')
    fig, ax = plt.subplots(1, figsize=(10, 10))
    for i, inds in enumerate(fronts_s):
        par = [toolbox.evaluate(ind) for ind in inds]
        df = pd.DataFrame(par)
        df.plot(ax=ax,
                kind='scatter',
                label='Front ' + str(i + 1),
                x=df.columns[0],
                y=df.columns[1],
                color=plot_colors[i % len(plot_colors)])
    plt.xlabel(r'$f_1(\mathbf{x})$')
    plt.ylabel(r'$f_2(\mathbf{x})$')
    plt.savefig("front.png", dpi=200)
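# Hedged sketch (an assumption, not the project's code) of the module-level
# toolbox this NSGA-II main() relies on. BOUND_LOW, BOUND_UP, NDIM and the
# zdt1 benchmark are placeholders; selTournamentDCD above requires the
# crowding distance that selNSGA2 assigns.
import random

from deap import base, benchmarks, creator, tools

BOUND_LOW, BOUND_UP, NDIM = 0.0, 1.0, 2

creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, BOUND_LOW, BOUND_UP)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, n=NDIM)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.register("mate", tools.cxSimulatedBinaryBounded,
                 low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded,
                 low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0 / NDIM)
toolbox.register("select", tools.selNSGA2)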
Example #7
    def get_toolbox(self, predictors, response, pset, variable_type_indices,
                    variable_names):
        subset_size = int(
            math.floor(predictors.shape[0] * self.subset_proportion))
        creator.create("ErrorAgeSizeComplexity",
                       base.Fitness,
                       weights=(-1.0, -1.0, -1.0, -1.0))
        creator.create("Individual",
                       sp.SimpleParametrizedPrimitiveTree,
                       fitness=creator.ErrorAgeSizeComplexity,
                       age=int)
        toolbox = base.Toolbox()
        toolbox.register(
            "expr", sp.generate_parametrized_expression,
            partial(gp.genHalfAndHalf,
                    pset=pset,
                    min_=self.min_depth_init,
                    max_=self.max_depth_init), variable_type_indices,
            variable_names)
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         toolbox.expr)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("compile", gp.compile, pset=pset)
        toolbox.register("select", tools.selRandom)
        toolbox.register("koza_node_selector",
                         operators.internally_biased_node_selector,
                         bias=self.internal_node_selection_bias)
        self.history = tools.History()
        toolbox.register("mate",
                         operators.one_point_xover_biased,
                         node_selector=toolbox.koza_node_selector)
        toolbox.decorate(
            "mate",
            operators.static_limit(key=operator.attrgetter("height"),
                                   max_value=self.max_height))
        toolbox.decorate(
            "mate", operators.static_limit(key=len, max_value=self.max_size))
        toolbox.decorate("mate", self.history.decorator)
        toolbox.register(
            "grow", sp.generate_parametrized_expression,
            partial(gp.genGrow,
                    pset=pset,
                    min_=self.min_gen_grow,
                    max_=self.max_gen_grow), variable_type_indices,
            variable_names)
        toolbox.register("mutate",
                         operators.mutation_biased,
                         expr=toolbox.grow,
                         node_selector=toolbox.koza_node_selector)
        toolbox.decorate(
            "mutate",
            operators.static_limit(key=operator.attrgetter("height"),
                                   max_value=self.max_height))
        toolbox.decorate(
            "mutate", operators.static_limit(key=len, max_value=self.max_size))
        toolbox.decorate("mutate", self.history.decorator)

        def generate_randoms(individuals):
            return individuals

        toolbox.register("generate_randoms",
                         generate_randoms,
                         individuals=[
                             toolbox.individual()
                             for i in range(self.num_randoms)
                         ])
        toolbox.decorate("generate_randoms", self.history.decorator)
        toolbox.register("error_func", self.error_function)
        expression_dict = cachetools.LRUCache(maxsize=1000)
        subset_selection_archive = subset_selection.RandomSubsetSelectionArchive(
            frequency=self.subset_change_frequency,
            predictors=predictors,
            response=response,
            subset_size=subset_size,
            expression_dict=expression_dict)
        evaluate_function = partial(
            subset_selection.fast_numpy_evaluate_subset,
            get_node_semantics=sp.get_node_semantics,
            context=pset.context,
            subset_selection_archive=subset_selection_archive,
            error_function=toolbox.error_func,
            expression_dict=expression_dict)
        toolbox.register("evaluate_error", evaluate_function)
        toolbox.register("assign_fitness",
                         afpo.assign_age_fitness_size_complexity)
        self.multi_archive = utils.get_archive(100)
        if self.log_mutate:
            mutation_stats_archive = archive.MutationStatsArchive(
                evaluate_function)
            toolbox.decorate(
                "mutate",
                operators.stats_collector(archive=mutation_stats_archive))
            self.multi_archive.archives.append(mutation_stats_archive)
        self.multi_archive.archives.append(subset_selection_archive)
        self.mstats = reports.configure_parametrized_inf_protected_stats()
        self.pop = toolbox.population(n=self.pop_size)
        toolbox.register("run",
                         afpo.pareto_optimization,
                         population=self.pop,
                         toolbox=toolbox,
                         xover_prob=self.xover_prob,
                         mut_prob=self.mut_prob,
                         ngen=self.ngen,
                         tournament_size=self.tournament_size,
                         num_randoms=self.num_randoms,
                         stats=self.mstats,
                         archive=self.multi_archive,
                         calc_pareto_front=False,
                         verbose=False,
                         reevaluate_population=True,
                         history=self.history)
        toolbox.register("save", reports.save_log_to_csv)
        toolbox.decorate("save", reports.save_archive(self.multi_archive))
        return toolbox
Example #8
    def __init__(self,
                 genomeSize,
                 islePop,
                 hofSize,
                 evaluate=defaultEvalMax,
                 sel=defaultSelTournament,
                 net=networks.createIslands(10),
                 subroutine=defaultAlgorithmEaSimple,
                 mate=defaultTwoPoint,
                 mut=defaultMutFlipBit,
                 mapping=map,
                 beforeMigration=lambda x: None,
                 afterMigration=lambda x: None,
                 verbose=False,
                 dbconn=None,
                 jsonFile="init.json",
                 loadFromFile=False,
                 fixedLigands=-1):

        self.toolbox = base.Toolbox()
        self.history = tools.History()

        self.jsonFile = jsonFile
        self.loadFromFile = loadFromFile

        # Attribute generator
        self.toolbox.register("attr_bool", random.randint, 0, 1)

        # Structure initializers
        self.toolbox.register("individual", tools.initRepeat,
                              creator.Individual, self.toolbox.attr_bool,
                              genomeSize)  # @UndefinedVariable (for PyDev)
        self.toolbox.register("population", tools.initRepeat, list,
                              self.toolbox.individual)

        self.toolbox.register("individual_guess", self.initIndividual,
                              creator.Individual)
        self.toolbox.register("population_guess", self.initPopulation, list,
                              self.toolbox.individual_guess, self.jsonFile)

        self.toolbox.register("evaluate", evaluate)
        self.toolbox.register("mate", mate)
        self.toolbox.register("mutate", mut)
        self.toolbox.register("select", sel)
        self.toolbox.register("map", mapping)

        self.toolbox.decorate("mate", self.history.decorator)
        self.toolbox.decorate("mutate", self.history.decorator)

        self.net = net
        self.subroutine = subroutine
        self.islePop = islePop
        self.beforeMigration = beforeMigration
        self.afterMigration = afterMigration
        self.hof = self.buildHOF(hofSize)
        self.verbose = verbose
        self.dbconn = dbconn
        self.gen = 0
        self.novelty = []
        self.metrics = []
        self.islands = (self.toolbox.population_guess() if self.loadFromFile
                        else [self.toolbox.population(n=self.islePop)
                              for i in range(len(self.net))])

        if fixedLigands > -1:
            print("overriding population based on fixed ligand count of " +
                  str(fixedLigands))
            for isle in self.islands:
                for ind in range(len(isle)):
                    isle[ind] = operators.fixActivation(
                        isle[ind], fixedLigands)
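# Hedged sketches (assumptions, not the project's code) of a few of the default
# operators referenced in the constructor signature above, in the spirit of
# DEAP's OneMax example. The real defaults may differ.
from functools import partial

from deap import tools


def defaultEvalMax(individual):
    # OneMax-style placeholder: count the 1-bits in the binary genome.
    return (sum(individual),)


defaultSelTournament = partial(tools.selTournament, tournsize=3)
defaultTwoPoint = tools.cxTwoPoint
defaultMutFlipBit = partial(tools.mutFlipBit, indpb=0.05)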
Example #9
    def _fit(self, X, y, parameter_dict):
        self._cv_results = None
        # self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        if _num_samples(y) != n_samples:
            raise ValueError('Target [y] and data [X] have mismatched lengths')
        #self.cv = check_cv(self.cv, y=y, classifier=is_classifier(self.estimator))
        # self.cv = KFold(n_splits=10, shuffle=True, random_state=self.seed) # Just for
        toolbox = base.Toolbox()
        name_values, self.gene_type, maxints = _get_param_types_maxint(parameter_dict)
        if self.verbose:
            print("Types: %s, ranges: %s" % (self.gene_type, maxints))
        # register the individual-creation function
        toolbox.register("individual", _initIndividual, creator.Individual, maxints=maxints)
        # register the population-creation function
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        # parallelism: create the worker pool
        if not isinstance(self.n_jobs, int):
            self.n_jobs=1
        pool = Pool(self.n_jobs)
        toolbox.register("map", pool.map)
        # register the evaluation function
        toolbox.register("evaluate", _evalFunction,
                         name_values=name_values, X=X, y=y,
                         scorer=self.scorer_, cv=self.cv, uniform=self.uniform, verbose=self.verbose,
                         error_score=self.error_score, fit_params=self.fit_params,
                         score_cache=self.score_cache, result_cache=self.result_cache)
        # register the crossover function
        toolbox.register("mate", _cxIndividual, prob_cruce=self.gene_crossover_prob,
                         gene_type=self.gene_type)
        # register the mutation function
        toolbox.register("mutate", _mutIndividual, prob_mutacion=self.gene_mutation_prob, maxints=maxints)
        # register the selection function
        toolbox.register("select", tools.selTournament, tournsize=self.tournament_size)
        # create the population
        pop = toolbox.population(n=self.population_size)
        # best individual ever seen
        hof = tools.HallOfFame(1)
        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.nanmean)
        stats.register("min", np.nanmin)
        stats.register("max", np.nanmax)
        stats.register("std", np.nanstd)
        # genealogy
        hist = tools.History()
        # decorate the variation operators
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        hist.update(pop)
        # number of possible parameter combinations
        if self.verbose:
            print('--- Evolve in {0} possible combinations ---'.format(np.prod(np.array(maxints) + 1)))
        pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=self.gene_crossover_prob,
                                           mutpb=self.gene_mutation_prob,
                                           ngen=self.generations_number, stats=stats,
                                           halloffame=hof, verbose=self.verbose)
        # Save History
        self.all_history_ = hist
        self.all_logbooks_ = logbook
        # best score and parameters
        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)
        if self.verbose:
            print("Best individual is: %s\nwith fitness: %s" % (
                current_best_params_, current_best_score_))
        if current_best_score_ > self.best_mem_score_:
            self.best_mem_score_ = current_best_score_
            self.best_mem_params_ = current_best_params_
        # end of parallel section: close the pool
        pool.close()
        pool.join()
        self.best_score_ = current_best_score_
        self.best_params_ = current_best_params_
Example #10
    def _fit(self, X, y, parameter_dict):
        self._cv_results = None  # To indicate to the property the need to update
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)' %
                                 (len(y), n_samples))
        cv = check_cv(self.cv, y=y, classifier=is_classifier(self.estimator))

        toolbox = base.Toolbox()

        name_values, gene_type, maxints = _get_param_types_maxint(
            parameter_dict)
        if self.gene_type is None:
            self.gene_type = gene_type

        if self.verbose:
            print("Types %s and maxint %s detected" %
                  (self.gene_type, maxints))

        toolbox.register("individual",
                         _initIndividual,
                         creator.Individual,
                         maxints=maxints)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        # If n_jobs is an int, greater than 1 or less than 0 (indicating to use as
        # many jobs as possible) then we are going to create a default pool.
        # Windows users need to be warned of this feature as it only works properly
        # on linux. They need to encapsulate their pool in an if __name__ == "__main__"
        # wrapper so that pools are not recursively created when the module is reloaded in each map
        if isinstance(self.n_jobs, int):
            if self.n_jobs > 1 or self.n_jobs < 0:
                from multiprocessing import Pool  # Only imports if needed
                if os.name == 'nt':  # Checks if we are on Windows
                    warnings.warn((
                        "Windows requires Pools to be declared from within "
                        "an \'if __name__==\"__main__\":\' structure. In this "
                        "case, n_jobs will accept map functions as well to "
                        "facilitate custom parallelism. Please check to see "
                        "that all code is working as expected."))
                pool = Pool(self.n_jobs)
                toolbox.register("map", pool.map)

        # If it's not an int, we are going to pass it as the map directly
        else:
            try:
                toolbox.register("map", self.n_jobs)
            except Exception:
                raise TypeError(
                    "n_jobs must be either an integer or map function. Received: {}"
                    .format(type(self.n_jobs)))

        toolbox.register("evaluate",
                         _evalFunction,
                         name_values=name_values,
                         X=X,
                         y=y,
                         scorer=self.scorer_,
                         cv=cv,
                         iid=self.iid,
                         verbose=self.verbose,
                         error_score=self.error_score,
                         fit_params=self.fit_params,
                         score_cache=self.score_cache)

        toolbox.register("mate",
                         _cxIndividual,
                         indpb=self.gene_crossover_prob,
                         gene_type=self.gene_type)

        toolbox.register("mutate",
                         _mutIndividual,
                         indpb=self.gene_mutation_prob,
                         up=maxints)
        toolbox.register("select",
                         tools.selTournament,
                         tournsize=self.tournament_size)

        pop = toolbox.population(n=self.population_size)
        hof = tools.HallOfFame(1)

        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.nanmean)
        stats.register("min", np.nanmin)
        stats.register("max", np.nanmax)
        stats.register("std", np.nanstd)

        # History
        hist = tools.History()
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        hist.update(pop)

        if self.verbose:
            print('--- Evolve in {0} possible combinations ---'.format(
                np.prod(np.array(maxints) + 1)))

        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=0.5,
                                           mutpb=0.2,
                                           ngen=self.generations_number,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=self.verbose)

        # Save History
        self.all_history_.append(hist)
        self.all_logbooks_.append(logbook)
        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)
        if self.verbose:
            print("Best individual is: %s\nwith fitness: %s" %
                  (current_best_params_, current_best_score_))

        if current_best_score_ > self.best_mem_score_:
            self.best_mem_score_ = current_best_score_
            self.best_mem_params_ = current_best_params_

        # Check memoization, potentially unknown bug
        # assert str(hof[0]) in self.score_cache, "Best individual not stored in score_cache for cv_results_."

        # Close your pools if you made them
        if isinstance(self.n_jobs, int) and (self.n_jobs > 1
                                             or self.n_jobs < 0):
            pool.close()
            pool.join()

        self.best_score_ = current_best_score_
        self.best_params_ = current_best_params_
Example #11
    def _fit(self, cvs, parameter_dict):
        self._cv_results = None  # To indicate to the property the need to update
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        toolbox = base.Toolbox()
        toolbox1 = base.Toolbox()

        name_values, gene_type, maxints = _get_param_types_maxint(
            parameter_dict)
        if self.gene_type is None:
            self.gene_type = gene_type

        if self.verbose:
            print("Types %s and maxint %s detected" %
                  (self.gene_type, maxints))

        toolbox.register("individual",
                         _initIndividual,
                         creator.Individual,
                         maxints=maxints)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        toolbox.register("evaluate", _evalFunction)

        toolbox.register("mate",
                         _cxIndividual,
                         indpb=self.gene_crossover_prob,
                         gene_type=self.gene_type)

        toolbox.register("mutate",
                         _mutIndividual,
                         indpb=self.gene_mutation_prob,
                         up=maxints)
        toolbox.register("select",
                         tools.selTournament,
                         tournsize=self.tournament_size)

        pop = toolbox.population(n=self.population_size)
        if len(self.init_params) > 0:
            for param in self.init_params:
                param_ind = creator.Individual(param)
                pop.pop()
                pop.insert(0, param_ind)
        hof = tools.HallOfFame(1, similar=lambda x, y: (x == y).all())

        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.nanmean)
        stats.register("min", np.nanmin)
        stats.register("max", np.nanmax)
        stats.register("std", np.nanstd)

        # History
        hist = tools.History()
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        hist.update(pop)

        if self.verbose:
            print('--- Evolve in {0} possible combinations ---'.format(
                np.prod(np.array(maxints) + 1)))
        with threadpool_limits(limits=1):
            pop, logbook = eaSimple(pop,
                                    toolbox,
                                    cxpb=0.5,
                                    mutpb=0.5,
                                    ngen=self.generations_number,
                                    stats=stats,
                                    halloffame=hof,
                                    verbose=self.verbose,
                                    njobs=self.n_jobs,
                                    inner_jobs=self.inner_jobs,
                                    name_values=name_values,
                                    scorer=self.scorer_,
                                    cvs=cvs,
                                    rated=self.rated,
                                    iid=self.iid,
                                    verbose1=self.verbose,
                                    error_score=self.error_score,
                                    fit_params=self.fit_params,
                                    score_cache=self.score_cache,
                                    estimator=self.estimator,
                                    path_group=self.path_group)

        # Save History
        self.all_history_.append(hist)
        self.all_logbooks_.append(logbook)
        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)
        if self.verbose:
            print("Best individual is: %s\nwith fitness: %s" %
                  (current_best_params_, current_best_score_))

        if current_best_score_ > self.best_mem_score_:
            self.best_mem_score_ = current_best_score_
            self.best_mem_params_ = current_best_params_

        self.best_score_ = current_best_score_
        self.best_params_ = current_best_params_
Example #12
def evolution_obstacle_avoidance():
    print('Evolutionary program started!')
    # Just in case, close all opened connections
    vrep.simxFinish(-1)

    settings.CLIENT_ID = vrep.simxStart(
        '127.0.0.1',
        settings.PORT_NUM,
        True,
        True,
        5000,
        5)  # Connect to V-REP

    if settings.CLIENT_ID == -1:
        print('Failed connecting to remote API server')
        print('Program ended')
        return

    print('Connected to remote API server')

    robot = EvolvedRobot(
        None,
        client_id=settings.CLIENT_ID,
        id=None,
        op_mode=settings.OP_MODE)

    dump_config(settings.POPULATION,
                settings.N_GENERATIONS,
                settings.RUNTIME,
                settings.CXPB,
                settings.MUTPB)

    # Creating the appropriate type of the problem
    creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
    creator.create('FitnessMax', base.Fitness, weights=(1.0,))
    creator.create('Individual', list, fitness=creator.FitnessMax)

    # Deap Initialization
    toolbox = base.Toolbox()

    history = tools.History()

    # Attribute generator random
    toolbox.register('attr_float', random.uniform, settings.MIN, settings.MAX)
    # Structure initializers; instantiate an individual or population
    toolbox.register(
        'individual',
        tools.initRepeat,
        creator.Individual,
        toolbox.attr_float,
        n=robot.chromosome_size)

    toolbox.register('population', tools.initRepeat, list, toolbox.individual)
    toolbox.register('map', map)

    # Register genetic operators
    # register the goal / fitness function
    toolbox.register('evaluate', partial(eval_robot, robot))
    # register the crossover operator
    toolbox.register('mate', tools.cxTwoPoint)
    # register a mutation operator with a probability to
    # flip each attribute/gene
    toolbox.register('mutate', tools.mutFlipBit, indpb=settings.MUTPB)
    # operator for selecting individuals for breeding the next
    # generation: each individual of the current generation
    # is replaced by the 'fittest' (best) of three individuals
    # drawn randomly from the current generation.
    toolbox.register('select', tools.selTournament, tournsize=3)

    # Decorate the variation operators
    toolbox.decorate('mate', history.decorator)
    toolbox.decorate('mutate', history.decorator)

    # instantiate the population
    # create an initial population of N individuals
    pop = toolbox.population(n=settings.POPULATION)
    history.update(pop)

    # object that contain the best individuals
    hof = tools.HallOfFame(20)
    # maintain stats of the evolution
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register('avg', np.mean)
    stats.register('std', np.std)
    stats.register('min', np.min)
    stats.register('max', np.max)

    # very basic evolutionary algorithm
    pop, log = algorithms.eaSimple(pop, toolbox,
                                   cxpb=settings.CXPB,
                                   mutpb=settings.MUTPB,
                                   ngen=settings.N_GENERATIONS,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)

    # plot the best individuals genealogy
    gen_best = history.getGenealogy(hof[0])
    graph = networkx.DiGraph(gen_best).reverse()
    colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
    networkx.draw(graph, node_color=colors, node_size=100)
    plt.savefig(settings.PATH_EA + 'genealogy_tree.pdf')

    # log Statistics
    with open(settings.PATH_EA + 'ea_statistics.txt', 'w') as s:
        s.write(log.__str__())

    # save the best genome
    with open(settings.PATH_EA + 'best.pkl', 'wb') as fp:
        pickle.dump(hof, fp)

    # Evolution records as a chronological list of dictionaries
    gen = log.select('gen')
    fit_mins = log.select('min')
    fit_avgs = log.select('avg')
    fit_maxs = log.select('max')
    
    plot_single_run(
        gen,
        fit_mins,
        fit_avgs,
        fit_maxs,
        ratio=0.35,
        save=settings.PATH_EA + 'evolved-obstacle.pdf')

    if (vrep.simxFinish(settings.CLIENT_ID) == -1):
        print('Evolutionary program failed to exit\n')
        return
Example #13
def maximize(func,
             parameter_dict,
             args={},
             verbose=False,
             population_size=50,
             gene_mutation_prob=0.1,
             gene_crossover_prob=0.5,
             tournament_size=3,
             generations_number=10,
             gene_type=None,
             n_jobs=1,
             pre_dispatch='2*n_jobs',
             error_score='raise'):
    """ Same as _fit in EvolutionarySearchCV but without fitting data. More similar to scipy.optimize.

        Returns
        ------------------
        best_params_ : dict
            A list of parameters for the best learner.

        best_score_ : float
            The score of the learner described by best_params_

        score_results : tuple of 2-tuples ((dict, float), ...)
            The score of every individual evaluation indexed by its parameters.

        hist : deap.tools.History object.
            Use to get the genealogy data of the search.

        logbook: deap.tools.Logbook object.
            Includes the statistics of the evolution.
    """

    _check_param_grid(parameter_dict)
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    if n_jobs > 1:
        pool = Pool(processes=n_jobs)
        toolbox.register("map", pool.map)

    name_values, gene_type, maxints = _get_param_types_maxint(parameter_dict)

    if verbose:
        print("Types %s and maxint %s detected" % (gene_type, maxints))

    toolbox.register("individual",
                     _initIndividual,
                     creator.Individual,
                     maxints=maxints)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate",
                     _evalFunction,
                     func,
                     name_values=name_values,
                     verbose=verbose,
                     error_score=error_score,
                     args=args)

    toolbox.register("mate",
                     _cxIndividual,
                     indpb=gene_crossover_prob,
                     gene_type=gene_type)

    toolbox.register("mutate",
                     _mutIndividual,
                     indpb=gene_mutation_prob,
                     up=maxints)
    toolbox.register("select", tools.selTournament, tournsize=tournament_size)

    # Tools
    pop = toolbox.population(n=population_size)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.nanmean)
    stats.register("min", np.nanmin)
    stats.register("max", np.nanmax)
    stats.register("std", np.nanstd)

    # History
    hist = tools.History()
    toolbox.decorate("mate", hist.decorator)
    toolbox.decorate("mutate", hist.decorator)
    hist.update(pop)

    if verbose:
        print('--- Evolve in {0} possible combinations ---'.format(
            np.prod(np.array(maxints) + 1)))

    pop, logbook = algorithms.eaSimple(pop,
                                       toolbox,
                                       cxpb=0.5,
                                       mutpb=0.2,
                                       ngen=generations_number,
                                       stats=stats,
                                       halloffame=hof,
                                       verbose=verbose)

    current_best_score_ = hof[0].fitness.values[0]
    current_best_params_ = _individual_to_params(hof[0], name_values)

    # Generate score_cache with real parameters
    _, individuals, each_scores = zip(
        *[(idx, indiv, np.mean(indiv.fitness.values))
          for idx, indiv in list(hist.genealogy_history.items()) if
          indiv.fitness.valid and not np.all(np.isnan(indiv.fitness.values))])
    unique_individuals = {
        str(indiv): (indiv, score)
        for indiv, score in zip(individuals, each_scores)
    }
    score_results = tuple([(_individual_to_params(indiv, name_values), score)
                           for indiv, score in unique_individuals.values()])

    if verbose:
        print("Best individual is: %s\nwith fitness: %s" %
              (current_best_params_, current_best_score_))

    if n_jobs > 1:
        pool.close()
        pool.join()

    return current_best_params_, current_best_score_, score_results, hist, logbook
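# Hedged usage sketch (the objective and grid below are illustrative, not from
# the source): maximize() evolves over a discrete grid of parameter values,
# assuming _evalFunction ultimately calls the target with the grid keys as
# keyword arguments.
def shifted_parabola(x=0.0, y=0.0):
    # Maximum at x=1, y=-2
    return -((x - 1.0) ** 2) - ((y + 2.0) ** 2)


param_grid = {'x': [-2.0, -1.0, 0.0, 1.0, 2.0],
              'y': [-3.0, -2.0, -1.0, 0.0, 1.0]}

best_params, best_score, score_results, hist, logbook = maximize(
    shifted_parabola, param_grid, verbose=True,
    population_size=20, generations_number=5)
print(best_params, best_score)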
Example #14
def startOptimization(populationSize, archiveSize, nOfGenerations,
                      crossoverProbability, mutationProbability, arguments,
                      finalPath):

    # list of parameters, each with its value range and discretization step
    attributeRange = [
        (0.0, 119.0, '1.'),
        (3.0, 30.0, '1.'),
        (3.0, 30.0, '1.'),
        (3.0, 30.0, '1.'),
        (3.0, 30.0, '1.'),
        (0.1, 2.0, '.01'),
        (0.1, 2.0, '.01'),
        (0.1, 2.0, '.01'),
        (0.1, 2.0, '.01'),
        (0.1, 1.0, '.01'),
        (0.1, 1.0, '.01'),
        (1.0, 15.0, '.1'),
        (1.0, 15.0, '.1'),
        (1.0, 15.0, '.1'),
        (1.0, 15.0, '.1'),
        (5.0, 30.0, '.1'),
    ]
    # standard deviation for each parameter, used by the Gaussian mutation
    sigma = [
        1, 1, 1, 1, 1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0, 1.0, 1.0
    ]

    creator.create("FitnessPrecRec", base.Fitness, weights=(0.8, 1.2))
    creator.create("Individual",
                   array.array,
                   typecode='f',
                   fitness=creator.FitnessPrecRec)

    history = tools.History()
    logbook = tools.Logbook()
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    toolbox = base.Toolbox()
    toolbox.register("individual", initFromRange, creator.Individual,
                     attributeRange)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("populationFromFile", initFromFile, creator.Individual,
                     "seed.txt")
    toolbox.register("check", checkCondition)
    toolbox.register("evaluate", testAllMatch)
    toolbox.register("selectEnvironment", tools.selSPEA2, k=archiveSize)
    toolbox.register("selectMating",
                     tools.selTournament,
                     k=populationSize,
                     tournsize=2)
    toolbox.register("mate", tools.cxBlend, alpha=0.5)
    toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=sigma, indpb=1.0)
    toolbox.decorate("mate", checkBounds(attributeRange))
    toolbox.decorate("mutate", checkBounds(attributeRange))
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)
    toolbox.register("logFront", logStats)

    print('Initializing algorithm')
    startAllTime = time()

    # print('Initializing population')
    # startTime=time()
    population = toolbox.population(n=populationSize)
    toolbox.populationFromFile(population)
    # elapsedTime = time()-startTime
    # print('Elapsed {} s'.format(elapsedTime))

    history.update(population)

    output = None
    archive = []
    condition = False
    currentGeneration = 0
    while not (condition):
        print("Generation {}".format(currentGeneration))
        startTime = time()

        toolbox.evaluate(population, *arguments, test=False)

        # print('Starting environment selection')
        # startTime=time()
        archive = toolbox.selectEnvironment(population + archive)
        # elapsedTime = time()-startTime
        # print('Elapsed {} s\n'.format(elapsedTime))

        # Save the statistics and create a plot for the current generation
        record = stats.compile(archive)
        logbook.record(gen=currentGeneration, **record)
        avgV, maxV, minV, stdV = logbook.select('avg', 'max', 'min', 'std')
        a = currentGeneration
        print(
            "Gen: {}\nAverage precision: {}\nMaximum precision: {}\nAverage recall: {}\nMaximum recall: {}\nStandard precision: {}\nStandard recall: {}\n\n"
            .format(a, avgV[a][0], maxV[a][0], avgV[a][1], maxV[a][1],
                    stdV[a][0], stdV[a][1]))
        toolbox.logFront(population, archive, currentGeneration, logbook)

        condition = toolbox.check(archive)
        if (currentGeneration >= nOfGenerations) or condition:
            toolbox.evaluate(archive,
                             *arguments,
                             finalPath=finalPath,
                             test=True)
            output = archive
            break
        else:
            # print('Starting mating selection')
            # startTime=time()
            offspring = map(toolbox.clone, toolbox.selectMating(archive))
            # elapsedTime = time()-startTime
            # print('Elapsed {} s\n'.format(elapsedTime))

            # print('Starting crossover and mutation')
            # startTime=time()
            offspring = algorithms.varAnd(offspring, toolbox,
                                          crossoverProbability,
                                          mutationProbability)
            # elapsedTime = time()-startTime
            # print('Elapsed {} s\n'.format(elapsedTime))

            population[:] = offspring

            for ind in population:
                del ind.fitness.values
            currentGeneration += 1

            elapsedTime = time() - startTime
            print('Elapsed {} s\n'.format(elapsedTime))
    elapsedAllTime = time() - startAllTime
    print('Algorithm terminated in {} h\n'.format(elapsedAllTime / 3600))
    if (output is None):
        raise Exception("Algorithm terminated without returning values!")
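# Hedged sketch (an assumption, not the project's code) of the checkBounds
# decorator applied to mate/mutate above: it clips every gene of each returned
# child back into the (min, max) range declared in attributeRange. The
# discretization step stored as the third tuple element is ignored here.
def checkBounds(attributeRange):
    def decorator(func):
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
            for child in offspring:
                for i, (low, high, _step) in enumerate(attributeRange):
                    if child[i] < low:
                        child[i] = low
                    elif child[i] > high:
                        child[i] = high
            return offspring
        return wrapper
    return decorator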
Example #15
def geneticModel(timeLimit,
                 distanceMatrix,
                 individualSize,
                 populationSize,
                 crossoverPB,
                 mutationPB,
                 nrGenerations,
                 notImprovingLimit,
                 keepHistory=False):
    toolbox = base.Toolbox()
    INDIVIDUAL_SIZE = individualSize

    # CREATE BASE TYPES
    # Fitness (= path length) has a negative weight because it is minimized
    # (a minimizing fitness uses negative weights, a maximizing fitness positive weights)
    creator.create('FitnessMin', base.Fitness, weights=(-1.0, ))
    # create the individual class
    # An individual is a list of genes, each gene being the id of a node to visit
    creator.create('Individual', list, fitness=creator.FitnessMin)
    # 'indices' produces a random permutation of the node ids for one individual.
    # Sampling without replacement avoids duplicates within an individual
    # (each hole/gene is visited exactly once)
    toolbox.register('indices', random.sample, range(INDIVIDUAL_SIZE),
                     INDIVIDUAL_SIZE)
    toolbox.register('individual', tools.initIterate, creator.Individual,
                     toolbox.indices)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)

    # SETUP GENETIC STEPS
    # The following steps are performed in the order: mate, mutate, select
    #toolbox.register('mate', tools.cxPartialyMatched)
    toolbox.register('mate', orderedCrossover)
    #toolbox.register('mutate', tools.mutShuffleIndexes, indpb=0.05)
    toolbox.register('mutate', twoOptMutation)
    # tournsize is the number of individuals drawn at random for each tournament,
    # here 5% of the population. The fittest individual of each tournament is kept,
    # and tournaments are repeated until populationSize parents have been selected
    # for the next generation.
    toolbox.register('select',
                     tools.selTournament,
                     tournsize=int(round(populationSize * 0.05)))
    toolbox.register('evaluate', evalTSP, distanceMatrix=distanceMatrix)

    # LAUNCH OPTIMIZATION
    history = tools.History()
    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    pop = toolbox.population(n=populationSize)
    history.update(pop)

    # Hall of fame will store only one best individual of each generation
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    sta = time.time()
    #pop, logb = algorithms.eaSimple(pop, toolbox, 0.7, 0.2, 30, stats=stats, halloffame=hof)
    pop, logb, generationLog = eaSimple(pop,
                                        toolbox,
                                        crossoverPB,
                                        mutationPB,
                                        nrGenerations,
                                        stats=stats,
                                        halloffame=hof,
                                        keepHistory=keepHistory,
                                        timeLimit=timeLimit,
                                        notImprovingLimit=notImprovingLimit,
                                        verbose=False)
    return pop, logb, hof, generationLog
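# Hedged sketch (an assumption, not the project's code) of the evalTSP helper
# registered above: the fitness is the closed-tour length over the permutation.
def evalTSP(individual, distanceMatrix):
    # Close the loop back from the last node to the first
    distance = distanceMatrix[individual[-1]][individual[0]]
    for gene1, gene2 in zip(individual[0:-1], individual[1:]):
        distance += distanceMatrix[gene1][gene2]
    return (distance,)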
Example #16
def run_one_app(strategy_with_runner_name: str) -> bool:
    app_path = RequiredFeature('app_path').request()
    repetitions = RequiredFeature('repetitions').request()
    repetitions_offset = RequiredFeature('repetitions_offset').request()
    budget_manager = RequiredFeature('budget_manager').request()
    test_suite_evaluator = RequiredFeature('test_suite_evaluator').request()
    verbose_level = RequiredFeature('verbose_level').request()

    continue_on_repetition_failure = RequiredFeature(
        'continue_on_repetition_failure').request()

    app_name = os.path.basename(app_path)

    try:
        there_was_a_failed_repetition = False
        for repetition in range(repetitions_offset, repetitions):
            try:
                os.chdir(settings.WORKING_DIR)

                logbook = tools.Logbook()
                logbook.header = ['gen']
                features.provide('logbook', logbook)

                history = tools.History()
                features.provide('history', history)

                hall_of_fame = test_suite_evaluator.new_hall_of_fame()
                features.provide('hall_of_fame', hall_of_fame)

                result_dir = prepare_result_dir(app_name, repetition,
                                                strategy_with_runner_name)

                get_emulators_running(result_dir)

                test_generator = Evolutiz()

                budget_manager.start_budget()

                logger.log_progress(
                    f"\n-----> Starting repetition: {str(repetition)} for app: {app_name}, "
                    f"initial timestamp is: {str(budget_manager.start_time)}")
                test_generator.run()

                logger.log_progress(f"\nEvolutiz finished for app: {app_name}")

                time_budget_used = budget_manager.get_time_budget_used()
                if time_budget_used is not None:
                    logger.log_progress(
                        f"\nTime budget used: {time_budget_used:.2f} seconds\n"
                    )

                evaluations_budget_used = budget_manager.get_evaluations_budget_used()
                if evaluations_budget_used is not None:
                    logger.log_progress(
                        f"\nEvaluations budget used: {evaluations_budget_used:d}\n"
                    )

                # wait for all MultipleQueueConsumerThread to terminate
                wait_for_working_threas_to_finish()
            except Exception as e:
                logger.log_progress(
                    f"\nThere was an error running repetition {repetition} on app: {app_name}"
                )
                if verbose_level > 0:
                    logger.log_progress(f"\n{str(e)}")
                if verbose_level > 1:
                    logger.log_progress(f"\n{traceback.format_exc()}")
                traceback.print_exc()

                there_was_a_failed_repetition = True
                if not continue_on_repetition_failure:
                    # there was a problem during current repetition, halt further executions of this subject
                    raise e
                # otherwise, keep running the remaining repetitions

        return not there_was_a_failed_repetition
    except Exception as e:
        logger.log_progress(
            f"\nThere was an error running evolutiz on app: {app_name}")
        if verbose_level > 0:
            logger.log_progress(f"\n{str(e)}")
        if verbose_level > 1:
            logger.log_progress(f"\n{traceback.format_exc()}")
        traceback.print_exc()
        return False
Example #17
    def optimize(self, mutate_p=0.3, mate_p=0.3, population_size=12):
        """Run evolutionary algorithm, return best set of params found."""
        toolbox = base.Toolbox()
        toolbox.register('individual', self.init_individual,
                         creator.Individual)
        toolbox.register('population', tools.initRepeat, list,
                         toolbox.individual)

        toolbox.register('mate', self.crossover, indpb=0.3)
        toolbox.register('mutate', self.mutate_individual, indpb=0.3)
        toolbox.register('select', tools.selTournament, tournsize=3)
        toolbox.register('evaluate', self.evaluate)

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register('avg', np.nanmean)
        stats.register('min', np.nanmin)
        stats.register('max', np.nanmax)
        stats.register('std', np.nanstd)
        stats.register('cache_size', self.get_cache_size)
        stats.register('cache_hits', self.get_cache_hits)
        stats.register('cache_misses', self.get_cache_misses)

        if not self.initialized:
            logger.info('Initializing from scratch!')
            self.population = toolbox.population(n=population_size)
            self.hof = tools.HallOfFame(3)
            self.logbook = tools.Logbook()
            self.logbook.header = ['gen', 'nevals'
                                   ] + (stats.fields if stats else [])
            self.initialized = True

        hist = tools.History()
        toolbox.decorate('mate', hist.decorator)
        toolbox.decorate('mutate', hist.decorator)
        hist.update(self.population)

        self.save_checkpoint()

        # evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in self.population if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        self.hof.update(self.population)

        record = stats.compile(self.population) if stats else {}
        self.logbook.record(gen=self.generation,
                            nevals=len(invalid_ind),
                            **record)
        logger.info(self.logbook.stream)

        logger.info('Begin the generational process!')
        self.save_checkpoint()
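        # NOTE: the loop below has no exit condition; it keeps evolving and
        # checkpointing until the process is stopped externally, so the final
        # return statement is effectively unreachable.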
        while True:
            self.generation += 1
            # select the next generation individuals
            offspring = toolbox.select(self.population, len(self.population))

            # vary the pool of individuals
            offspring = algorithms.varAnd(offspring, toolbox, mate_p, mutate_p)

            # evaluate the individuals with an invalid fitness
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # update the hall of fame with the generated individuals
            self.hof.update(offspring)
            self.log_halloffame()

            # Replace the current population by the offspring
            self.population[:] = offspring

            # Append the current generation statistics to the logbook
            record = stats.compile(self.population) if stats else {}
            self.logbook.record(gen=self.generation,
                                nevals=len(invalid_ind),
                                **record)
            logger.info(self.logbook.stream)
            self.cache_hits = self.cache_misses = 0

            self.save_checkpoint()

        return self.individual_to_params(self.hof[0])
Beispiel #18
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--population_size', type=int, default=50)
    parser.add_argument('--hof_size', type=int, default=5)
    parser.add_argument('--ngen', type=int, default=40)
    parser.add_argument('--mutpb', type=float, default=0.2)
    parser.add_argument('--cxpb', type=float, default=0.5)
    args = parser.parse_args()

    # create types
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", numpy.ndarray, fitness=creator.FitnessMin)

    # initialize bag population containing random (valid) individuals
    toolbox = base.Toolbox()
    toolbox.register("individual",
                     init_individual,
                     creator.Individual,
                     shape=(10, 10))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # initialize operators and evaluation function
    toolbox.register("mate", cxTwoPointCopy)
    toolbox.register("mutate", mutate)
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("evaluate", evaluate)

    # create history object, which is used to get nice prints of the overall evolution process
    history = tools.History()
    # decorate the variation operators so that the history of the evolution can be retrieved
    # TODO history also presents conflicts with np.array
    # toolbox.decorate("mate", history.decorator)
    # toolbox.decorate("mutate", history.decorator)

    # create an initial population
    pop = toolbox.population(n=args.population_size)
    history.update(pop)
    # create a list of statistics to be retrieved during the evolution process (they are shown in the logbook)
    stats = tools.Statistics()
    # TODO change min function to accommodate the 2-dimensional np.array
    stats.register('min', lambda x: min(evaluate(ind) for ind in x))
    # create a hall of fame, which contains the best individual/s that ever lived in the population during the evolution
    hof = tools.HallOfFame(maxsize=args.hof_size, similar=numpy.array_equal)
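    # similar=numpy.array_equal is required here: the default equality operator on
    # numpy arrays is element-wise and would raise an ambiguous truth-value error
    # inside HallOfFame.update.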

    # simplest evolutionary algorithm as presented in chapter 7 of Back, Fogel and Michalewicz, “Evolutionary Computation 1 : Basic Algorithms and Operators”, 2000.
    final_population, logbook = algorithms.eaSimple(population=pop,
                                                    toolbox=toolbox,
                                                    cxpb=args.cxpb,
                                                    mutpb=args.mutpb,
                                                    ngen=args.ngen,
                                                    stats=stats,
                                                    halloffame=hof,
                                                    verbose=True)

    # output results of the evolutionary algorithm
    print('*' * 100)
    print('FINAL POPULATION\n')
    print(final_population)
    print('*' * 100)
    print('HALL OF FAME\n')
    print(hof)
    print('*' * 100)
    print('BEST INDIVIDUAL')
    print(hof[0])
    print('\nEVALUATION')
    print(evaluate(hof[0]))

    plot_genetic(logbook)
def evolutionary_run(**kwargs):
    """ Conduct an evolutionary run using DEAP.  
    
    Args:
        gens: generations of evolution
        pop_size: population size
        mut_prob: mutation probability
    """

    # Establish name of the output files and write appropriate headers.
    out_fit_file = args.output_path + str(args.run_num) + "_fitnesses.dat"
    #out_hof_file = args.output_path+str(args.run_num)+"_hof.dat"
    out_time_file = args.output_path + str(args.run_num) + "_timing.dat"
    geneaology_file = args.output_path + str(args.run_num) + "_geneaology.dat"
    writeHeaders(out_fit_file)

    creator.create("Fitness", base.Fitness, weights=(
        1.0,
        -1.0,
        1.0,
        1.0,
    ))  # Maximize distance
    creator.create("Individual", kwargs['exp_class'], fitness=creator.Fitness)

    # Create the toolbox for setting up DEAP functionality.
    toolbox = base.Toolbox()

    # Create the history object used to track the genealogy.
    history = tools.History()

    # Define an individual for use in constructing the population.
    toolbox.register("individual", hopper_utils.initIndividual,
                     creator.Individual)
    toolbox.register("mutate", hopper_utils.mutate)
    toolbox.register("mate", tools.cxTwoPoint)

    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    # Create a population as a list.
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Register the evaluation function.
    toolbox.register("evaluate", evaluate_individual)

    # Register the selection function.
    toolbox.register("select", tools.selTournament, tournsize=2)

    # Create the Hall-of-Fame
    #hof = tools.HallOfFame(maxsize=100)

    # Multiprocessing component.
    cores = mpc.cpu_count()
    pool = mpc.Pool(processes=cores - 2)
    toolbox.register("map", pool.map)

    # Crossover and mutation probability
    cxpb, mutpb = 0.5, 0.05

    # Set the mutation value for hopper utils
    hopper_utils.mutate_chance = mutpb

    # Setup the population.
    pop = toolbox.population(n=kwargs['pop_size'])
    history.update(pop)

    # Request new id's for the population.
    # for ind in pop:
    #     ind.get_new_id()

    # Run the first set of evaluations.
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    # Log the progress of the population. (For Generation 0)
    writeGeneration(out_fit_file, 0, pop)

    # writeTimeInformationHeaders(out_time_file)

    for g in range(1, args.gens):
        # select_time = time.time()
        # Pull out the elite individual to save for later.
        elite = tools.selBest(pop, k=1)

        pop = toolbox.select(pop, k=len(pop) - 1)
        pop = [toolbox.clone(ind) for ind in pop]
        # select_time = time.time() - select_time

        # Update the Hall of Fame
        #hof.update(pop)

        # id_time = time.time()
        # Request new id's for the population.
        for ind in pop:
            ind.get_new_id()
        # id_time = time.time() - id_time

        # cross_time = time.time()
        for child1, child2 in zip(pop[::2], pop[1::2]):
            if random.random() < cxpb:
                # Must serialize and deserialize due to the type of object.
                # child1_serialized, child2_serialized = toolbox.mate(child1.serialize(), child2.serialize())
                child1, child2 = toolbox.mate(child1, child2)
                #child1.deserialize(child1_serialized)
                #child2.deserialize(child2_serialized)
                del child1.fitness.values, child2.fitness.values
        # cross_time = time.time() - cross_time

        # mut_time = time.time()
        #for mutant in pop:
        #    toolbox.mutate(mutant)
        #    del mutant.fitness.values
        for i in range(len(pop)):
            #for ind in pop:
            pop[i] = toolbox.mutate(pop[i])[0]
            del pop[i].fitness.values
        # mut_time = time.time() - mut_time

        # eval_time = time.time()
        invalids = [ind for ind in pop if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalids)
        for ind, fit in zip(invalids, fitnesses):
            ind.fitness.values = fit
        # eval_time = time.time() - eval_time

        # Check to see if we have a new elite individual.
        new_elite = tools.selBest(pop, k=1)
        elite = tools.selBest([elite[0], new_elite[0]], k=1)

        # Add the elite individual back into the population.
        pop = elite + pop

        print("Generation " + str(g))
        # Log the progress of the population.
        writeGeneration(out_fit_file, g, pop)
        #history.update(pop)

    writeGeneaology(geneaology_file, history.genealogy_tree)
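    # Optional visualization sketch (not part of the original run): the recorded
    # genealogy can be drawn as a graph, following the pattern from the DEAP
    # History documentation. It assumes networkx and matplotlib are installed, and
    # the node colouring re-evaluates every recorded individual, which may be slow.
    # import matplotlib.pyplot as plt
    # import networkx
    # graph = networkx.DiGraph(history.genealogy_tree)
    # graph = graph.reverse()  # draw the genealogy top-down
    # colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
    # networkx.draw(graph, node_color=colors)
    # plt.show()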
Beispiel #20
0
def maximize(func,
             parameter_dict,
             args={},
             verbose=False,
             population_size=50,
             gene_mutation_prob=0.1,
             gene_crossover_prob=0.5,
             tournament_size=3,
             generations_number=10,
             gene_type=None,
             n_jobs=1,
             error_score='raise'):

    toolbox = base.Toolbox()

    _check_param_grid(parameter_dict)
    if isinstance(n_jobs, int):
        # If n_jobs is an int greater than 1, or negative (meaning "use as many
        # workers as possible"), create a default multiprocessing pool.
        # Windows users need to be warned about this: they must wrap the call in an
        # 'if __name__ == "__main__":' guard so that pools are not created
        # recursively when the module is re-imported by each worker process.
        if isinstance(n_jobs, (int, float)):
            if n_jobs > 1 or n_jobs < 0:
                from multiprocessing import Pool  # Only imports if needed
                if os.name == 'nt':  # Checks if we are on Windows
                    warnings.warn((
                        "Windows requires Pools to be declared from within "
                        "an \'if __name__==\"__main__\":\' structure. In this "
                        "case, n_jobs will accept map functions as well to "
                        "facilitate custom parallelism. Please check to see "
                        "that all code is working as expected."))
                pool = Pool(n_jobs)
                toolbox.register("map", pool.map)
                warnings.warn(
                    "Need to create a creator. Run optimize.compile()")
            else:
                compile()

    # If it's not an int, we are going to pass it as the map directly
    else:
        try:
            toolbox.register("map", n_jobs)
        except Exception:
            raise TypeError(
                "n_jobs must be either an integer or map function. Received: {}"
                .format(type(n_jobs)))

    name_values, gene_type, maxints = _get_param_types_maxint(parameter_dict)

    if verbose:
        print("Types %s and maxint %s detected" % (gene_type, maxints))

    toolbox.register("individual",
                     _initIndividual,
                     creator.Individual,
                     maxints=maxints)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate",
                     _evalFunction,
                     func,
                     name_values=name_values,
                     verbose=verbose,
                     error_score=error_score,
                     args=args)

    toolbox.register("mate",
                     _cxIndividual,
                     indpb=gene_crossover_prob,
                     gene_type=gene_type)

    toolbox.register("mutate",
                     _mutIndividual,
                     indpb=gene_mutation_prob,
                     up=maxints)
    toolbox.register("select", tools.selTournament, tournsize=tournament_size)

    # Tools
    pop = toolbox.population(n=population_size)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.nanmean)
    stats.register("min", np.nanmin)
    stats.register("max", np.nanmax)
    stats.register("std", np.nanstd)

    # History
    hist = tools.History()
    toolbox.decorate("mate", hist.decorator)
    toolbox.decorate("mutate", hist.decorator)
    hist.update(pop)

    if verbose:
        print('--- Evolve in {0} possible combinations ---'.format(
            np.prod(np.array(maxints) + 1)))

    pop, logbook = algorithms.eaSimple(pop,
                                       toolbox,
                                       cxpb=0.5,
                                       mutpb=0.2,
                                       ngen=generations_number,
                                       stats=stats,
                                       halloffame=hof,
                                       verbose=verbose)

    current_best_score_ = hof[0].fitness.values[0]
    current_best_params_ = _individual_to_params(hof[0], name_values)

    # Generate score_cache with real parameters
    _, individuals, each_scores = zip(
        *[(idx, indiv, np.mean(indiv.fitness.values))
          for idx, indiv in list(hist.genealogy_history.items()) if
          indiv.fitness.valid and not np.all(np.isnan(indiv.fitness.values))])
    unique_individuals = {
        str(indiv): (indiv, score)
        for indiv, score in zip(individuals, each_scores)
    }
    score_results = tuple([(_individual_to_params(indiv, name_values), score)
                           for indiv, score in unique_individuals.values()])

    if verbose:
        print("Best individual is: %s\nwith fitness: %s" %
              (current_best_params_, current_best_score_))

    # Close your pools if you made them
    if isinstance(n_jobs, int) and (n_jobs > 1 or n_jobs < 0):
        pool.close()
        pool.join()

    return current_best_params_, current_best_score_, score_results, hist, logbook
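# Usage sketch (not part of the original example): one way maximize() might be
# called. It assumes the search space is a dict mapping parameter names to lists
# of candidate values and that func receives those parameters as keyword
# arguments (mirroring how _individual_to_params decodes individuals above);
# adjust if the real API differs. The __main__ guard matches the Windows warning
# emitted when n_jobs > 1.
if __name__ == "__main__":
    def booth(x, y):
        # Negated Booth function: its maximum value (0) is at x=1, y=3.
        return -((x + 2 * y - 7) ** 2 + (2 * x + y - 5) ** 2)

    search_space = {'x': list(range(-10, 11)), 'y': list(range(-10, 11))}
    best_params, best_score, score_results, hist, logbook = maximize(
        booth, search_space, verbose=True, population_size=30,
        generations_number=5, n_jobs=1)
    print(best_params, best_score)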
Beispiel #21
0
def GP_param_tuning(param):
    class HallOfFame(object):
        """The hall of fame contains the best individual that ever lived in the
        population during the evolution. It is lexicographically sorted at all
        time so that the first element of the hall of fame is the individual that
        has the best first fitness value ever seen, according to the weights
        provided to the fitness at creation time.
        The insertion is made so that old individuals have priority on new
        individuals. A single copy of each individual is kept at all time, the
        equivalence between two individuals is made by the operator passed to the
        *similar* argument.
        :param maxsize: The maximum number of individual to keep in the hall of
                        fame.
        :param similar: An equivalence operator between two individuals, optional.
                        It defaults to operator :func:`operator.eq`.
        The class :class:`HallOfFame` provides an interface similar to a list
        (without being one completely). It is possible to retrieve its length, to
        iterate on it forward and backward and to get an item or a slice from it.
        """
        def __init__(self, maxsize, similar=eq):
            self.maxsize = maxsize
            self.keys = list()
            self.items = list()
            self.similar = similar

        def update(self, population):
            """Update the hall of fame with the *population* by replacing the
            worst individuals in it by the best individuals present in
            *population* (if they are better). The size of the hall of fame is
            kept constant.

            :param population: A list of individual with a fitness attribute to
                               update the hall of fame with.
            """
            for ind in population:
                if len(self) == 0 and self.maxsize != 0:
                    # Working on an empty hall of fame is problematic for the
                    # "for else"
                    self.insert(population[0])
                    continue
                if (np.array(ind.fitness.wvalues) > np.array(
                        self[-1].fitness.wvalues)
                    ).all() or len(self) < self.maxsize:
                    for hofer in self:
                        # Loop through the hall of fame to check for any
                        # similar individual
                        if self.similar(ind, hofer):
                            break
                    else:
                        # The individual is unique and strictly better than
                        # the worst
                        if len(self) >= self.maxsize:
                            self.remove(-1)
                        self.insert(ind)

        def insert(self, item):
            """Insert a new individual in the hall of fame using the
            :func:`~bisect.bisect_right` function. The inserted individual is
            inserted on the right side of an equal individual. Inserting a new
            individual in the hall of fame also preserves the hall of fame's order.
            This method **does not** check the size of the hall of fame: inserting
            a new individual into a full hall of fame will not remove the worst
            individual to keep the size constant.
            :param item: The individual with a fitness attribute to insert in the
                         hall of fame.
            """
            item = deepcopy(item)
            i = bisect_right(self.keys, item.fitness)
            self.items.insert(len(self) - i, item)
            self.keys.insert(i, item.fitness)

        def remove(self, index):
            """Remove the specified *index* from the hall of fame.
            :param index: An integer giving which item to remove.
            """
            del self.keys[len(self) - (index % len(self) + 1)]
            del self.items[index]

        def clear(self):
            """Clear the hall of fame."""
            del self.items[:]
            del self.keys[:]

        def __len__(self):
            return len(self.items)

        def __getitem__(self, i):
            return self.items[i]

        def __iter__(self):
            return iter(self.items)

        def __reversed__(self):
            return reversed(self.items)

        def __str__(self):
            return str(self.items)

    def eaMuPlusLambdaTol(population,
                          toolbox,
                          mu,
                          lambda_,
                          ngen,
                          cxpb,
                          mutpb,
                          tol,
                          stats=None,
                          halloffame=None,
                          verbose=__debug__):
        global cxpb_orig, mutpb_orig
        """This is the :math:`(\mu + \lambda)` evolutionary algorithm.

        :param population: A list of individuals.
        :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                        operators.
        :param mu: The number of individuals to select for the next generation.
        :param lambda\_: The number of children to produce at each generation.
        :param cxpb: The probability that an offspring is produced by crossover.
        :param mutpb: The probability that an offspring is produced by mutation.
        :param ngen: The number of generation.
        :param stats: A :class:`~deap.tools.Statistics` object that is updated
                      inplace, optional.
        :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
                           contain the best individuals, optional.
        :param verbose: Whether or not to log the statistics.
        :returns: The final population
        :returns: A class:`~deap.tools.Logbook` with the statistics of the
                  evolution.

        The algorithm takes in a population and evolves it in place using the
        :func:`varOr` function. It returns the optimized population and a
        :class:`~deap.tools.Logbook` with the statistics of the evolution. The
        logbook will contain the generation number, the number of evaluations for
        each generation and the statistics if a :class:`~deap.tools.Statistics` is
        given as argument. The *cxpb* and *mutpb* arguments are passed to the
        :func:`varOr` function. The pseudocode goes as follows ::

            evaluate(population)
            for g in range(ngen):
                offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
                evaluate(offspring)
                population = select(population + offspring, mu)

        First, the individuals having an invalid fitness are evaluated. Second,
        the evolutionary loop begins by producing *lambda_* offspring from the
        population, the offspring are generated by the :func:`varOr` function. The
        offspring are then evaluated and the next generation population is
        selected from both the offspring **and** the population. Finally, when
        *ngen* generations are done, the algorithm returns a tuple with the final
        population and a :class:`~deap.tools.Logbook` of the evolution.

        This function expects :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
        :meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
        registered in the toolbox. This algorithm uses the :func:`varOr`
        variation.
        """
        logbook = tools.Logbook()
        logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in population if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        if halloffame is not None:
            halloffame.update(population)

        record = stats.compile(population) if stats is not None else {}
        logbook.record(gen=0, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)

        min_fit = np.array(logbook.chapters["fitness"].select("min"))
        # Begin the generational process
        flag_change = False
        flag_limit = False
        gen = 1
        while gen < ngen + 1 and not (min_fit[-1] <= tol).all():
            # Vary the population
            offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Update the hall of fame with the generated individuals
            if halloffame is not None:
                halloffame.update(offspring)

            # Select the next generation population
            population[:] = toolbox.select(population + offspring, mu)

            # Update the statistics with the new population
            record = stats.compile(population) if stats is not None else {}
            logbook.record(gen=gen, nevals=len(invalid_ind), **record)
            min_fit = np.array(logbook.chapters["fitness"].select("min"))
            min_actual = np.array(
                logbook.chapters["fitness"].select("min"))[-1]
            min_old = np.array(logbook.chapters["fitness"].select("min"))[-2]
            if verbose:
                print(logbook.stream)
            if (abs(min_actual - min_old) <
                    0.01).all() and flag_limit is False:
                cxpb = cxpb - 0.01
                mutpb = mutpb + 0.01
                print("change")
                flag_change = True
                if cxpb < 0.4:
                    cxpb = 0.4
                    mutpb = 0.5
                    print("limits")
                    flag_limit = True

            else:
                cxpb = cxpb_orig
                mutpb = mutpb_orig
                print("back to orig")
                if flag_change is True:
                    cxpb = cxpb_orig + 0.1
                    mutpb = mutpb_orig - 0.15
                    flag_change = False
                    print("orig after change")
                flag_limit = False

            gen += 1

        return population, logbook

    def Minn(ind):
        # global min_fit_log
        min_fit_log = ind[0]
        for i in range(len(ind)):
            if (np.array(ind[i]) < np.array(min_fit_log)).all():
                min_fit_log = ind[i]
        return min_fit_log

    def TriAdd(x, y, z):
        return x + y + z

    def Eph():
        return round(random.uniform(-1000, 1000), 6)

    def Abs(x):
        return abs(x)

    def Div(left, right):
        global flag
        try:
            x = left / right
            return x
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError,
                FloatingPointError, OverflowError):
            flag = True
            return 0.0

    def Mul(left, right):
        global flag
        try:
            # np.seterr(invalid='raise')
            return left * right
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError,
                FloatingPointError, OverflowError):
            flag = True
            return left

    def Sqrt(x):
        global flag
        try:
            if x > 0:
                return np.sqrt(x)
            else:
                return abs(x)
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError):
            flag = True
            return 0

    def Log(x):
        global flag
        try:
            if x > 0:
                return np.log(x)
            else:
                return abs(x)
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError):
            flag = True
            return 0

    def Exp(x):
        try:
            return np.exp(x)
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError):
            return 0

    def Sin(x):
        global flag
        try:
            return np.sin(x)
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError):
            flag = True
            return 0

    def Cos(x):
        global flag
        try:
            return np.cos(x)
        except (RuntimeError, RuntimeWarning, TypeError, ArithmeticError,
                BufferError, BaseException, NameError, ValueError):
            flag = True
            return 0

    def xmate(ind1, ind2):
        i1 = random.randrange(len(ind1))
        i2 = random.randrange(len(ind2))
        ind1[i1], ind2[i2] = gp.cxOnePoint(ind1[i1], ind2[i2])
        return ind1, ind2

    def xmut(ind, expr, strp):

        i1 = random.randrange(len(ind))
        i2 = random.randrange(len(ind[i1]))
        choice = random.random()
        if choice < strp:
            indx = gp.mutUniform(ind[i1], expr, pset=pset)
            ind[i1] = indx[0]
            return ind,
        else:
            '''this part executes the mutation on a random constant'''
            indx = gp.mutEphemeral(ind[i1], "one")
            ind[i1] = indx[0]
            return ind,

    # Direct copy from tools - modified for individuals with GP trees in an array
    def xselDoubleTournament(individuals, k, fitness_size, parsimony_size,
                             fitness_first):
        assert (1 <= parsimony_size <=
                2), "Parsimony tournament size has to be in the range [1, 2]."

        def _sizeTournament(individuals, k, select):
            chosen = []
            for i in range(k):
                # Select two individuals from the population
                # The first individual has to be the shortest
                prob = parsimony_size / 2.
                ind1, ind2 = select(individuals, k=2)

                lind1 = sum([len(gpt) for gpt in ind1])
                lind2 = sum([len(gpt) for gpt in ind2])
                if lind1 > lind2:
                    ind1, ind2 = ind2, ind1
                elif lind1 == lind2:
                    # random selection in case of a tie
                    prob = 0.5

                # Since size1 <= size2 then ind1 is selected
                # with a probability prob
                chosen.append(ind1 if random.random() < prob else ind2)

            return chosen

        def _fitTournament(individuals, k, select):
            chosen = []
            for i in range(k):
                aspirants = select(individuals, k=fitness_size)
                chosen.append(
                    max(aspirants, key=operator.attrgetter("fitness")))
            return chosen

        if fitness_first:
            tfit = partial(_fitTournament, select=tools.selRandom)
            return _sizeTournament(individuals, k, tfit)
        else:
            tsize = partial(_sizeTournament, select=tools.selRandom)
            return _fitTournament(individuals, k, tsize)

    ###############################  S Y S T E M - P A R A M E T E R S  ####################################################

    class Rocket:
        def __init__(self):
            self.GMe = 3.986004418 * 10**14  # Earth gravitational constant [m^3/s^2]
            self.Re = 6371.0 * 1000  # Earth Radius [m]
            self.Vr = np.sqrt(self.GMe / self.Re)  # m/s
            self.H0 = 10.0  # m
            self.V0 = 0.0
            self.M0 = 100000.0  # kg
            self.Mp = self.M0 * 0.99
            self.Cd = 0.6
            self.A = 4.0  # m2
            self.Isp = 300.0  # s
            self.g0 = 9.80665  # m/s2
            self.Tmax = self.M0 * self.g0 * 1.5
            self.MaxQ = 14000.0  # Pa
            self.MaxG = 8.0  # G
            self.Htarget = 400.0 * 1000  # m
            self.Rtarget = self.Re + self.Htarget  # m
            self.Vtarget = np.sqrt(self.GMe / self.Rtarget)  # m/s
            self.cosOld = 0
            self.eqOld = np.zeros((0))
            self.ineqOld = np.zeros((0))
            self.varOld = np.zeros((0))

        @staticmethod
        def air_density(h):
            beta = 1 / 8500.0  # scale factor [1/m]
            rho0 = 1.225  # kg/m3
            return rho0 * np.exp(-beta * h)

    Nstates = 5
    Ncontrols = 2

    size_pop = 100  # int(param["pop"])  # Pop size

    size_gen = 15  # Gen size
    Mu = int(size_pop)
    Lambda = int(size_pop * param["lambda"])
    cxpb = param["cxpb"]
    mutpb = (1 - param["cxpb"] - 0.1)
    mutpb_orig = mutpb
    cxpb_orig = cxpb
    limit_height = 20  # Max height (complexity) of the controller law
    limit_size = 400  # Max size (complexity) of the controller law
    nEph = param["nEph"]

    ii = 0

    ################################# M A I N ###############################################

    def main(size_gen, size_pop, Mu, Lambda, mutpb, cxpb, mutpb_orig,
             cxpb_orig):

        global Rfun, Thetafun, Vrfun, Vtfun, mfun
        global tfin, ii

        best_fit = sum([1e3, 1e3, 1e3, 1e3, 1e3])
        Rref = np.load("R.npy")
        Thetaref = np.load("Theta.npy")
        Vrref = np.load("Vr.npy")
        Vtref = np.load("Vt.npy")
        mref = np.load("m.npy")
        tref = np.load("time.npy")
        tfin = tref[-1]

        Rfun = PchipInterpolator(tref, Rref)
        Thetafun = PchipInterpolator(tref, Thetaref)
        Vrfun = PchipInterpolator(tref, Vrref)
        Vtfun = PchipInterpolator(tref, Vtref)
        mfun = PchipInterpolator(tref, mref)

        del Rref, Thetaref, Vrref, Vtref, mref, tref

        #pool = multiprocessing.Pool(nbCPU)

        toolbox.register("map", map)

        print("INITIAL POP SIZE: %d" % size_pop)

        print("GEN SIZE: %d" % size_gen)

        print("\n")

        random.seed()

        pop = toolbox.population(n=size_pop)
        history.update(pop)
        # hof = tools.HallOfFame(size_gen) ### OLD ###
        hof = HallOfFame(100)  ### NEW ###
        stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
        # stats_size = tools.Statistics(len)
        # stats_height = tools.Statistics(operator.attrgetter("height"))
        mstats = tools.MultiStatistics(fitness=stats_fit)

        mstats.register("avg", np.mean, axis=0)
        mstats.register("min", Minn)
        mstats.register("max", np.max, axis=0)
        pset = 1

        ####################################   EVOLUTIONARY ALGORITHM   -  EXECUTION   #####################################

        # pop, log = algorithms.eaMuPlusLambda(pop, toolbox, Mu, Lambda, 0.6, 0.2, size_gen, stats=mstats, halloffame=hof,
        # verbose=True)  ### OLD ###
        try:
            pop, log = eaMuPlusLambdaTol(pop,
                                         toolbox,
                                         Mu,
                                         Lambda,
                                         size_gen,
                                         cxpb,
                                         mutpb,
                                         10,
                                         stats=mstats,
                                         halloffame=hof,
                                         verbose=True)  ### NEW ###
            fit = np.zeros((size_gen))
            for i in range(size_gen):
                fit[i] = sum(np.array(
                    log.chapters["fitness"].select("min")[i]))
            best_fit = min(fit)
            print(best_fit)
            return pop, log, hof, best_fit
        except (RuntimeWarning, RuntimeError, OverflowError, TypeError,
                ZeroDivisionError):
            return 1, 1, 1, best_fit

    ####################################################################################################################

    ##################################  F I T N E S S    F U N C T I O N    ################################################

    def evaluate(individual):
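        # Compiles the two GP trees into radial/tangential thrust laws, integrates
        # the closed-loop ascent dynamics with solve_ivp, and returns the integral
        # of absolute tracking error (IAE) for each of the five states; constraint
        # violations set `flag` and add penalty terms to the returned fitness.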
        global flag
        global pas
        global fitnnesoldvalue, fitness_old1, fitness_old2, fitness_old3, fitness_old4, fitness_old5
        global Rfun, Thetafun, Vrfun, Vtfun, mfun, Trfun
        global tfin

        penalty = np.zeros((17))

        flag = False
        pas = False

        # Transform the tree expression in a callable function

        fTr = toolbox.compile(expr=individual[0])
        fTt = toolbox.compile(expr=individual[1])
        x_ini = [obj.Re, 0.0, 0.0, 0.0, obj.M0]  # initial conditions

        def sys(t, x):
            global flag

            # State Variables

            R = x[0]
            theta = x[1]
            Vr = x[2]
            Vt = x[3]
            m = x[4]

            if R < obj.Re or np.isnan(R):
                penalty[0] = penalty[0] + abs(R - obj.Re) / obj.Htarget
                R = obj.Re
                flag = True
            if R > obj.Rtarget or np.isinf(R):
                penalty[1] = penalty[1] + abs(R - obj.Rtarget) / obj.Htarget
                R = obj.Rtarget
                flag = True
            if m < obj.M0 - obj.Mp or np.isnan(m):
                penalty[2] = penalty[2] + abs(m - (obj.M0 - obj.Mp)) / obj.M0
                m = obj.M0 - obj.Mp
                flag = True
            elif m > obj.M0 or np.isinf(m):
                penalty[3] = penalty[3] + abs(m - obj.M0) / obj.M0
                m = obj.M0
                flag = True
            if abs(Vr) > 1e3 or np.isinf(Vr):
                penalty[4] = penalty[4] + abs(Vr - 1e3) / 1e3
                if Vr > 0:
                    Vr = 1e3
                else:
                    Vr = -1e3
                flag = True
            if abs(Vt) > 1e4 or np.isinf(Vt):
                penalty[5] = penalty[5] + abs(Vt - 1e4) / 1e4
                if Vt > 0:
                    Vt = 1e4
                else:
                    Vt = -1e4
                flag = True
            if theta < 0 or np.isnan(theta):
                penalty[6] = penalty[6] + abs(np.rad2deg(theta))
                theta = 0
                flag = True
            elif np.rad2deg(theta) > 60 or np.isinf(theta):
                penalty[7] = penalty[7] + abs(np.rad2deg(theta) - 60)
                theta = np.deg2rad(60)
                flag = True

            r = Rfun(t)
            th = Thetafun(t)
            vr = Vrfun(t)
            vt = Vtfun(t)
            mf = mfun(t)

            er = r - R
            et = th - theta
            evr = vr - Vr
            evt = vt - Vt
            em = mf - m
            dxdt = np.zeros(Nstates)
            #print("Ft: ", fTt(er, et, evr, evt, em))
            #print("Fr: ", fTr(er, et, evr, evt, em))
            rho = obj.air_density(R - obj.Re)
            Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 +
                                          Vt**2) * obj.Cd * obj.A  # [N]
            Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 +
                                          Vt**2) * obj.Cd * obj.A  # [N]
            g = obj.g0 * (obj.Re / R)**2  # [m/s2]
            g0 = obj.g0
            Isp = obj.Isp

            Tr = fTr(er, et, evr, evt, em)
            Tt = fTt(er, et, evr, evt, em)

            if Tr < 0.0 or np.isnan(Tr):
                penalty[10] = 10000  # penalty[10] + abs(Tr)/obj.Tmax
                Tr = 0.0
                flag = True

            elif Tr > obj.Tmax or np.isinf(Tr):
                penalty[11] = 10000  # penalty[7] + abs(Tr - obj.Tmax)/obj.Tmax
                Tr = obj.Tmax
                flag = True

            if Tt < 0.0 or np.isnan(Tt):
                penalty[12] = 10000  # penalty[12] + abs(Tt)/obj.Tmax
                Tt = 0.0
                flag = True

            elif Tt > obj.Tmax or np.isinf(Tt):
                penalty[13] = 10000  # penalty[9] + abs(Tt - obj.Tmax)/obj.Tmax
                Tt = obj.Tmax
                flag = True

            dxdt[0] = Vr
            dxdt[1] = Vt / R
            dxdt[2] = Tr / m - Dr / m - g + Vt**2 / R
            dxdt[3] = Tt / m - Dt / m - (Vr * Vt) / R
            dxdt[4] = -np.sqrt(Tr**2 + Tt**2) / g0 / Isp

            return dxdt

        sol = solve_ivp(sys, [0.0, tfin], x_ini)
        y1 = sol.y[0, :]
        y2 = sol.y[1, :]
        y3 = sol.y[2, :]
        y4 = sol.y[3, :]
        y5 = sol.y[4, :]
        tt = sol.t
        if sol.t[-1] < tfin:
            flag = True
        pp = 0
        r = np.zeros(len(tt), dtype='float')
        theta = np.zeros(len(tt), dtype='float')
        vr = np.zeros(len(tt), dtype='float')
        vt = np.zeros(len(tt), dtype='float')
        m = np.zeros(len(tt), dtype='float')
        for i in tt:
            r[pp] = Rfun(i)
            theta[pp] = Thetafun(i)
            vr[pp] = Vrfun(i)
            vt[pp] = Vtfun(i)
            m[pp] = mfun(i)
            pp += 1

        err1 = (r - y1) / obj.Htarget
        err2 = np.rad2deg(theta - y2) / 60
        err3 = (vr - y3) / 1e3
        err4 = (vt - y4) / 1e4
        err5 = (m - y5) / obj.M0

        # STEP TIME SIZE
        i = 0
        pp = 1
        step = np.zeros(len(y1), dtype='float')
        step[0] = 0.0001
        while i < len(tt) - 1:
            step[pp] = tt[i + 1] - tt[i]
            i = i + 1
            pp = pp + 1

        # INTEGRAL OF ABSOLUTE ERROR (PERFORMANCE INDEX)
        IAE = np.zeros((5, len(err1)))
        j = 0
        for a, b, c, d, e, n in zip(err1, err2, err3, err4, err5, step):
            IAE[0][j] = n * abs(a)
            IAE[1][j] = n * abs(b)
            IAE[2][j] = n * abs(c)
            IAE[3][j] = n * abs(d)
            IAE[4][j] = n * abs(e)
            j = j + 1

        fitness = [
            sum(IAE[0]),
            sum(IAE[1]),
            sum(IAE[2]),
            sum(IAE[3]),
            sum(IAE[4])
        ]

        if flag is True:
            x = np.array([
                fitness[0] +
                (sum(penalty[0:2]) + sum(penalty[8:16])) / fitness[0],
                fitness[1] + (sum(penalty[6:15]) + penalty[-1]) / fitness[1],
                fitness[2] + (penalty[4] + sum(penalty[8:15])) / fitness[2],
                fitness[3] + (penalty[5] + sum(penalty[8:15])) / fitness[3],
                fitness[4] +
                (sum(penalty[2:4]) + sum(penalty[8:15])) / fitness[4]
            ])
        fitness = [
            sum(IAE[0]),
            sum(IAE[1]),
            sum(IAE[2]),
            sum(IAE[3]),
            sum(IAE[4])
        ]

        return x if flag is True else fitness

    pset = gp.PrimitiveSet("MAIN", 5)
    if param["TriAdd"] == 1:
        pset.addPrimitive(TriAdd, 3)
    if param["Add"] == 1:
        pset.addPrimitive(operator.add, 2, name="Add")
    if param["sub"] == 1:
        pset.addPrimitive(operator.sub, 2, name="Sub")
    if param["mul"] == 1:
        pset.addPrimitive(operator.mul, 2, name="Mul")
    if param["div"] == 1:
        pset.addPrimitive(Div, 2)
    if param["pow"] == 1:
        pset.addPrimitive(operator.pow, 2, name="Pow")
    if param["abs"] == 1:
        pset.addPrimitive(Abs, 1)
    if param["sqrt"] == 1:
        pset.addPrimitive(Sqrt, 1)
    if param["log"] == 1:
        pset.addPrimitive(Log, 1)
    if param["exp"] == 1:
        pset.addPrimitive(Exp, 1)
    if param["sin"] == 1:
        pset.addPrimitive(Sin, 1)
    if param["cos"] == 1:
        pset.addPrimitive(Cos, 1)
    if param["pi"] == 1:
        pset.addTerminal(np.pi, "pi")
    if param["e"] == 1:
        pset.addTerminal(np.e, name="nap")  # Euler's number e (Napier's constant)

    for i in range(nEph):
        pset.addEphemeralConstant("rand{}{}".format(i, sum(param.values())),
                                  Eph)
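    # The ephemeral constant name embeds the loop index and the sum of the
    # parameter values so that repeated calls register distinct names; DEAP
    # raises an error when an ephemeral constant name is reused with a different
    # generator function.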

    pset.renameArguments(ARG0='errR')
    pset.renameArguments(ARG1='errTheta')
    pset.renameArguments(ARG2='errVr')
    pset.renameArguments(ARG3='errVt')
    pset.renameArguments(ARG4='errm')

    ################################################## TOOLBOX #############################################################

    creator.create(
        "Fitness",
        base.Fitness,
        weights=(-param['fit1'], -param['fit2'], -param['fit3'],
                 -param['fit4'],
                 -param['fit5']))  # MINIMIZATION OF THE FITNESS FUNCTION

    creator.create("Individual", list, fitness=creator.Fitness, height=1)

    creator.create("SubIndividual", gp.PrimitiveTree, fitness=creator.Fitness)

    toolbox = base.Toolbox()
    # toolbox.register("expr", gp.genFull, pset=pset, min_=1, max_=2)   #### OLD ####
    toolbox.register("expr",
                     gp.genFull,
                     pset=pset,
                     type_=pset.ret,
                     min_=2,
                     max_=5)  ### NEW ###

    toolbox.register("leg", tools.initIterate, creator.SubIndividual,
                     toolbox.expr)  ### NEW ###
    toolbox.register("legs", tools.initRepeat, list, toolbox.leg,
                     n=Ncontrols)  ### NEW ###

    # toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)  #### OLD ####
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.legs)  ### NEW ###

    # toolbox.register("population", tools.initRepeat, list, toolbox.individual)  #### OLD ####
    toolbox.register("population", tools.initRepeat, list,
                     toolbox.individual)  ### NEW ###

    # toolbox.register("lambdify", gp.compile, pset=pset) ### NEW ###
    # toolbox.register("stringify", gp.compile, pset=pset) ### NEW ###

    toolbox.register("compile", gp.compile, pset=pset)

    toolbox.register("evaluate", evaluate)  ### OLD ###
    # toolbox.register('evaluate', evaluate, toolbox=toolbox, sourceData=data, minTrades=minTrades, log=False) ###NEW ###

    # toolbox.register("select", tools.selDoubleTournament, fitness_size=3, parsimony_size=1, fitness_first=True) ### OLD ###
    toolbox.register("select", tools.selNSGA2)  ### NEW ###

    toolbox.register("mate", xmate)  ### NEW ###
    toolbox.register("expr_mut", gp.genFull, min_=2, max_=5)  ### NEW ###
    toolbox.register("mutate", xmut, expr=toolbox.expr_mut,
                     strp=param["strp"])  ### NEW ###

    # toolbox.register("mate", gp.cxOnePointLeafBiased,termpb=0.1) ### OLD ###
    # toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr, pset=pset) ### OLD ###

    toolbox.decorate(
        "mate",
        gp.staticLimit(key=operator.attrgetter("height"),
                       max_value=limit_height))
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"),
                       max_value=limit_height))

    toolbox.decorate("mate", gp.staticLimit(key=len, max_value=limit_size))
    # toolbox.decorate("mutate", gp.staticLimit(key=len, max_value=limit_size))

    history = tools.History()
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    #if __name__ == "__main__":
    obj = Rocket()
    print(param)
    pop, log, hof, best_fit = main(size_gen, size_pop, Mu, Lambda, mutpb, cxpb,
                                   mutpb_orig, cxpb_orig)

    return best_fit
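# Usage sketch (not part of the original): one way to assemble the `param`
# configuration read by GP_param_tuning above. Every key below is used somewhere
# in the function; the values are illustrative only, and the reference
# trajectories (R.npy, Theta.npy, Vr.npy, Vt.npy, m.npy, time.npy) must exist on
# disk for the call to run.
example_param = {
    "lambda": 1.2, "cxpb": 0.65, "nEph": 2, "strp": 0.7,
    "TriAdd": 1, "Add": 1, "sub": 1, "mul": 1, "div": 1, "pow": 0,
    "abs": 1, "sqrt": 1, "log": 0, "exp": 0, "sin": 0, "cos": 0,
    "pi": 1, "e": 0,
    "fit1": 1.0, "fit2": 1.0, "fit3": 1.0, "fit4": 1.0, "fit5": 1.0,
}
# best_fit = GP_param_tuning(example_param)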
Beispiel #22
0
    def _fit(self, X, y, parameter_dict):
        self._cv_results = None  # To indicate to the property the need to update
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)' %
                                 (len(y), n_samples))
        cv = check_cv(self.cv, y=y, classifier=is_classifier(self.estimator))

        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual",
                       list,
                       est=clone(self.estimator),
                       fitness=creator.FitnessMax)

        toolbox = base.Toolbox()

        name_values, gene_type, maxints = _get_param_types_maxint(
            parameter_dict)
        if self.gene_type is None:
            self.gene_type = gene_type

        if self.verbose:
            print("Types %s and maxint %s detected" %
                  (self.gene_type, maxints))

        toolbox.register("individual",
                         _initIndividual,
                         creator.Individual,
                         maxints=maxints)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        if self.n_jobs > 1:
            pool = Pool(processes=self.n_jobs)
            toolbox.register("map", pool.map)

        toolbox.register("evaluate",
                         _evalFunction,
                         name_values=name_values,
                         X=X,
                         y=y,
                         scorer=self.scorer_,
                         cv=cv,
                         iid=self.iid,
                         verbose=self.verbose,
                         error_score=self.error_score,
                         fit_params=self.fit_params,
                         score_cache=self.score_cache)

        toolbox.register("mate",
                         _cxIndividual,
                         indpb=self.gene_crossover_prob,
                         gene_type=self.gene_type)

        toolbox.register("mutate",
                         _mutIndividual,
                         indpb=self.gene_mutation_prob,
                         up=maxints)
        toolbox.register("select",
                         tools.selTournament,
                         tournsize=self.tournament_size)

        pop = toolbox.population(n=self.population_size)
        hof = tools.HallOfFame(1)

        # Stats
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.nanmean)
        stats.register("min", np.nanmin)
        stats.register("max", np.nanmax)
        stats.register("std", np.nanstd)

        # History
        hist = tools.History()
        toolbox.decorate("mate", hist.decorator)
        toolbox.decorate("mutate", hist.decorator)
        hist.update(pop)

        if self.verbose:
            print('--- Evolve in {0} possible combinations ---'.format(
                np.prod(np.array(maxints) + 1)))

        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=0.5,
                                           mutpb=0.2,
                                           ngen=self.generations_number,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=self.verbose)

        # Save History
        self.all_history_.append(hist)
        self.all_logbooks_.append(logbook)
        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)
        if self.verbose:
            print("Best individual is: %s\nwith fitness: %s" %
                  (current_best_params_, current_best_score_))

        if current_best_score_ > self.best_mem_score_:
            self.best_mem_score_ = current_best_score_
            self.best_mem_params_ = current_best_params_

        # Check memoization, potentially unknown bug
        assert str(
            hof[0]
        ) in self.score_cache, "Best individual not stored in score_cache for cv_results_."

        if self.n_jobs > 1:
            pool.close()
            pool.join()

        self.best_score_ = current_best_score_
        self.best_params_ = current_best_params_
Beispiel #23
0
def run(cxpb, mutpb, n, tour, termpb, popu, ngen):
    global run_i

    pset = gp.PrimitiveSetTyped("main", [array], float)
    pset.addPrimitive(SMA, [array, int, int], float)
    pset.addPrimitive(operator.add, [float, float], float)
    pset.addPrimitive(part, [array, int], float)
    pset.addPrimitive(shift, [array, int], float)
    pset.addEphemeralConstant("randI{i}".format(i=run_i),
                              lambda: random.randint(0, n - 1), int)
    pset.addEphemeralConstant("randF{i}".format(i=run_i),
                              lambda: random.uniform(-1, 1), float)
    pset.addPrimitive(operator.sub, [float, float], float)
    pset.addPrimitive(operator.mul, [float, float], float)
    pset.addPrimitive(protectedDiv, [float, float], float)
    pset.addPrimitive(IF2, [float, float, float, float], float)
    run_i += 1
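    # run_i keeps the ephemeral constant names unique across successive calls to
    # run(); reusing a name with a different generator lambda would make DEAP
    # raise an error.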

    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual",
                   gp.PrimitiveTree,
                   fitness=creator.FitnessMax,
                   pset=pset)

    toolbox.register("expr", genGrow_edit, pset=pset, min_=1, max_=15)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)

    toolbox.register("evaluate", fitness_predictor, arg=errors, n=n)

    toolbox.register("select", tools.selTournament, tournsize=tour)
    toolbox.register("mate", gp.cxOnePointLeafBiased, termpb=termpb)
    toolbox.register("expr_mut", genGrow_edit, min_=0, max_=5)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
    toolbox.decorate(
        "mate", gp.staticLimit(key=operator.attrgetter("height"),
                               max_value=17))
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    #HISTORY
    #HISTORY
    #HISTORY
    history = tools.History()
    # Decorate the variation operators
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    if parallel == 1:
        from scoop import futures
        toolbox.register("map", futures.map)  #PARALLELIZATION
    elif parallel == 2:
        import multiprocessing
        pool = multiprocessing.Pool(6)
        toolbox.register("map", pool.map)  #PARALLELIZATION

    pop = toolbox.population(n=popu)

    #HISTORY
    #HISTORY
    #HISTORY
    history.update(pop)

    hof = tools.HallOfFame(1)
    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb,
                                   mutpb,
                                   ngen,
                                   stats=mstats,
                                   halloffame=hof,
                                   verbose=True)

    # return hof[0].fitness.values[0]   #FOR CMA-ES
    # return log                        #FOR PLOT
    return history  #FOR HISTORY
Beispiel #24
0
    def __init__(self,
                 kernel,
                 bin,
                 profile,
                 mutop,
                 timeout=30,
                 fitness='time',
                 popsize=128,
                 llvm_src_filename='cuda-device-only-kernel.ll',
                 use_fitness_map=True,
                 combine_positive_epistasis=False,
                 CXPB=0.8,
                 MUPB=0.1,
                 err_rate='0.01',
                 global_seed=None):
        self.CXPB = CXPB
        self.MUPB = MUPB
        self.err_rate = err_rate
        self.kernels = kernel
        self.appBinary = bin
        self.timeout = timeout
        self.fitness_function = fitness
        self.use_fitness_map = use_fitness_map
        self.combine_positive_epistasis = combine_positive_epistasis
        self.popsize = popsize
        self.mutop = mutop.split(',')
        self.rng = {}
        if global_seed is not None:
            random.seed(global_seed)

        try:
            with open(llvm_src_filename, 'r') as f:
                self.initSrcEnc = f.read().encode()
        except IOError:
            print("File {} does not exist".format(llvm_src_filename))
            exit(1)

        self.verifier = profile['verify']

        # Tools initialization
        # Detect GPU property
        cuda.init()
        # TODO: check if there are multiple GPUs.
        SM_MAJOR, SM_MINOR = cuda.Device(0).compute_capability()
        self.mgpu = 'sm_' + str(SM_MAJOR) + str(SM_MINOR)
        print(f'[Initializing GEVO] GPU compute capability: {self.mgpu}')

        # check Nvidia Profiler exists
        self.nvprof_path = shutil.which('nvprof')
        if self.nvprof_path is None:
            raise Exception('nvprof cannot be found')
        print(f'[Initializing GEVO] nvprof detected: {self.nvprof_path}')

        # Minimize both performance and error
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
        creator.create("Individual",
                       irind.llvmIRrep,
                       fitness=creator.FitnessMin)
        self.history = tools.History()
        self.toolbox = base.Toolbox()
        self.toolbox.register('mutate', self.mutLLVM)
        self.toolbox.register('mate', self.cxOnePointLLVM)
        # self.toolbox.register('select', tools.selDoubleTournament, fitness_size=2, parsimony_size=1.4, fitness_first=True)
        self.toolbox.register('select', tools.selNSGA2)
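        # NSGA-II selection ranks candidates by Pareto non-domination and
        # crowding distance over the two minimized objectives (runtime, error).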
        self.toolbox.register('individual',
                              creator.Individual,
                              srcEnc=self.initSrcEnc,
                              mgpu=self.mgpu)
        self.toolbox.register('population', tools.initRepeat, list,
                              self.toolbox.individual)
        # Decorate the variation operators
        self.toolbox.decorate("mate", self.history.decorator)
        self.toolbox.decorate("mutate", self.history.decorator)

        self.stats = tools.Statistics(lambda ind: ind.fitness.values)
        self.stats.register("min", min)
        self.stats.register("max", max)

        self.logbook = tools.Logbook()
        self.paretof = tools.ParetoFront()
        self.logbook.header = "gen", "evals", "min", "max"

        # Set up testcase
        self.origin = creator.Individual(self.initSrcEnc, self.mgpu)
        self.origin.ptx(self.cudaPTX)
        arg_array = [[]]
        for i, arg in enumerate(profile['args']):
            if arg.get('bond', None) is None:
                arg_array_next = [
                    e[:] for e in arg_array for _ in range(len(arg['value']))
                ]
                arg_array = arg_array_next
                for e1, e2 in zip(arg_array, cycle(arg['value'])):
                    e1.append(e2)
            else:
                for e in arg_array:
                    bonded_arg = arg['bond'][0]
                    bonded_idx = profile['args'][bonded_arg]['value'].index(
                        e[bonded_arg])
                    e.append(arg['value'][bonded_idx])

        arg_array = [[str(e) for e in args] for args in arg_array]

        self.testcase = []
        for i in range(len(arg_array)):
            self.testcase.append(
                self._testcase(self, i, kernel, bin, profile['verify']))
        with Progress(
                "[Initializing GEVO] Evaluate original program with test cases",
                "({task.completed} / {task.total})",
                rich.progress.TimeElapsedColumn()) as progress:
            task = progress.add_task("", total=len(arg_array))
            for tc, arg in zip(self.testcase, arg_array):
                tc.args = arg
                tc.evaluate()
                progress.update(task, advance=1)

        self.ofits = [tc.fitness[0] for tc in self.testcase]
        self.oerrs = [tc.fitness[1] for tc in self.testcase]
        self.origin.fitness.values = (sum(self.ofits) / len(self.ofits),
                                      max(self.oerrs))
        self.editFitMap[tuple()] = self.origin.fitness.values
        print(
            f"Average fitness of the original program: ({self.origin.fitness.values[0]:.2f}, {self.origin.fitness.values[1]:.2f})"
        )
        print("Individual test cases:")
        for fit, err in zip(self.ofits, self.oerrs):
            print(f"\t({fit:.2f}, {err:.2f})")
        self.positive_epistasis = {}
        self.negative_epistasis = {}
        self.need_discussion = {}
Beispiel #25
0
def main():
    global POPULATION_SIZE
    global GENERATIONS_LIMIT

    print("[*] Genetic algorithm: One Max Problem")

    # Create classes for evolution
    # Inherit from the Fitness class, adding the weights attribute
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    # Create the Individual class, inheriting from list, with a FitnessMax instance as its fitness attribute
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    history = tools.History()

    # toolbox.register() creates aliases to functions
    # Register the attribute generator: a random integer, either 0 or 1
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Register individual initialization function
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, 100)
    # Register population initialization function
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Register evaluation function
    toolbox.register("evaluate", evaluate_ones)

    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    # For plotting
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)

    # Instantiate a population (list) of POPULATION_SIZE members
    pop = toolbox.population(n=POPULATION_SIZE)
    # Update history
    history.update(pop)

    # The underlying algorithm
    '''
    cross_probability = 0.5
    mutation_probability = 0.2

    # Evaluate the population
    fitnesses = map(toolbox.evaluate, pop)

    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    # Obtain fitness values for individuals
    fits = [ind.fitness.values[0] for ind in pop]

    generations = 0
    target = 100

    while (max(fits) < target and generations < GENERATIONS_LIMIT):
        generations += 1

        print("[+] Generation: " + str(generations))

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        # Individuals are modified inplace, ensures references aren't used
        offspring = list(map(toolbox.clone, offspring))

        # Crossover offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < cross_probability:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        # Mutate the offspring
        for mutant in offspring:
            if random.random() < mutation_probability:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Replace population with current offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        sum2 = sum(x*x for x in fits)

        print("[+] Min %s" % min(fits))
        print("[+] Max %s" % max(fits))
    # End while

    mean = sum(fits) / length
    std = abs(sum2 / length - mean**2)**0.5
    print("[+] Mean %s" % mean)
    print("[+] Std dev %s" % std)
    '''
    # End of the underlying algorithm

    # '''
    # Simpler & cleaner implementation using algorithms.eaSimple
    # Per-generation statistics are printed in columns

    # The hall of fame contains the best individual that ever lived
    # in the population during the evolution.
    hof = tools.HallOfFame(1)  # Only one individual
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("Mean", numpy.mean)
    stats.register("Std dev", numpy.std)
    stats.register("Min", numpy.min)
    stats.register("Max", numpy.max)

    # cxpb  - Crossover probability
    # mutpb - Mutation probability
    # ngen  - Number of generations; eaSimple always runs all of them
    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=0.5,
                                   mutpb=0.2,
                                   ngen=GENERATIONS_LIMIT,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)
    # '''

    #'''
    # Takes some time to plot
    mplt.title("Genetic Algorithm, one max")
    graph = nx.DiGraph(history.genealogy_tree)
    # Make the graph top-down
    graph = graph.reverse()
    colours = [
        toolbox.evaluate(history.genealogy_history[i])[0] for i in graph
    ]
    nx.draw(graph, node_color=colours)
    mplt.show()
Beispiel #26
0
def main():
   global gr
   # random.seed(64)
   hist = tools.History()
   i = 0
   ifile = ''
   ofile = ''
   pfile = ''
   total = len(sys.argv)
   myopts, args = getopt.getopt(sys.argv[1:],"p:n:s:c:m:i:o:")
   #
   # print ("Call: %s " % str(myopts))
   # o == option
   # a == argument for the option o
   for o, a in myopts:
      if o == '-s':
         size = int(a)
      elif o == '-c':
         pcrs = float(a)
      elif o == '-m':
         pmut = float(a)
      elif o == '-n':
         niter = int(a)
      elif o == '-p':
         pfile = a
      elif o == '-i':
         ifile = a
      elif o == '-o':
         ofile = a
      else:
         print("Usage: %s -s popsize -c pcross -m pmut -n niter -p pfile -i ifile -o ofile" % sys.argv[0])
      
   # Start reading the csv file
   f = open(ifile, 'r')
   njobs = int((leerlineacsv(f))[0])    # Number of jobs to process
   lista = leerlineacsv(f)
   if len(lista) < njobs:
      print("Not enough components for DJ", lista)
   else:
      dj = [int(x) for x in lista]  # Nominal duration of each job

   T = int((leerlineacsv(f))[0])    # Read the total number of time steps
   lista = leerlineacsv(f)
   if len(lista) < T:
      print("Not enough components for Et", lista)
   else:
      et = [float(x) for x in lista]  # Cost at each time step

   nest = int((leerlineacsv(f))[0])    # Read the number of states
   mce = []
   for i in range(nest):
      lista = leerlineacsv(f)
      if len(lista) < nest:
         print("Not enough components for Est", lista)
      else:
         mce.append([float(x) for x in lista])

   mcd = []
   for i in range(nest):
      lista = leerlineacsv(f)
      if len(lista) < nest:
         print("Not enough components for Est", lista)
      else:
         mcd.append([int(x) for x in lista])

   gr = graph(mcd)  # GR will be used in the evaluations
   nv = int((leerlineacsv(f))[0])  # Read the number of speed factors
   lista = leerlineacsv(f)
   if len(lista) < nv:
      print("Not enough components for Fv", lista)
   else:
      fv = [float(x) for x in lista]

   lista = leerlineacsv(f)
   if len(lista) < nv:
      print("Not enough components for Fp", lista)
   else:
      fp = [float(x) for x in lista]

   ntr = int((leerlineacsv(f))[0])    # Read the number of transitions
   vtr = []
   for i in range(ntr):
      lista = leerlineacsv(f)
      if len(lista) < 4:
         print("Not enough components for TrVar", lista)
      else:
         vtr.append([lista[0], lista[1], int(lista[2]), float(lista[3])])

   npy = int((leerlineacsv(f))[0])    # Read the number of penalties
   pty = []
   for i in range(npy):
      lista = leerlineacsv(f)
      if len(lista) < 2:
         print("Not enough components for Pnlty", lista)
      else:
         pty.append([int(lista[0]), float(lista[1])])

   trns = [mce, mcd, vtr]  # Transitions: energy-per-timestep matrix + duration matrix
   fvp = [fv, fp, pty]
   # print(njobs, T, nest, nv)
   # print("DJ:", dj)
   # print("T :", et)
   print("Margin:", T - sum(dj))
   #
   creator.create("FitnessMin",base.Fitness,weights=(-1.0,)) 
   creator.create("Individual",list,fitness=creator.FitnessMin)
   toolbox = base.Toolbox()
   #
   toolbox.register("attr_wait",random.randint,0,(T-sum(dj)-1))
   toolbox.register("indices",random.sample,range(njobs),njobs)
   toolbox.register("attr_pow",random.randint,0,nv-1)
   toolbox.register("waiting",tools.initRepeat, list,toolbox.attr_wait,njobs)
   toolbox.register("speed",tools.initRepeat, list,toolbox.attr_pow,njobs)
   toolbox.register("individual", tools.initCycle, creator.Individual,(toolbox.waiting,toolbox.indices,toolbox.speed), 1)
   toolbox.register("population", tools.initRepeat, list, toolbox.individual)
   # Operator registering
   toolbox.register("evaluate", evaluate,et,dj,trns,fvp,0)
   toolbox.register("mate", crossover,pcrs)
   toolbox.register("mutate", tools.mutUniformInt,0,(T-sum(dj)), indpb=pmut)
   toolbox.register("select", tools.selTournament, tournsize=3)
   toolbox.decorate("mate", checkBounds(T,dj,fvp))
   toolbox.decorate("mutate", checkBounds(T,dj,fvp))
   #
   pop = toolbox.population(n=size)
   fitnesses = map(functools.partial(evaluate, cost=et,dj=dj,trns=trns,fvp=fvp,out=0),pop)
   for ind, fit in zip(pop, fitnesses):
       ind.fitness.values = fit
   #
   for g in range(niter):
        print("-- Generation %i --" % g)

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals (list() is needed because map is lazy)
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < pcrs:
                crossover(child1, child2, pcrs)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < pmut:
                mutation(mutant, mu=0.0, sigma=5., pr=pmut, ll=0, ul=nv-1)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(functools.partial(evaluate, cost=et, dj=dj, trns=trns, fvp=fvp, out=0), invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print("  Evaluated %i individuals" % len(invalid_ind))
        # Elitism: keep the 10 best individuals and fill the rest of the
        # population with offspring, so the population size stays constant
        pop[0:10] = tools.selBest(pop, 10)
        pop[10:] = offspring[0:size - 10]
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        print("  Min %s" % min(fits))
    
   print "-- End of (successful) evolution --"
   best_ind = tools.selBest(pop, 1)[0]
   length = len(pop)
   mean = sum(fits) / length
   sum2 = sum(x*x for x in fits)
   std = abs(sum2 / length - mean**2)**0.5
   print " ======================== "
   print "  Min %s" % min(fits)
   print "  Max %s" % max(fits)
   print "  Avg %s" % mean
   print "  Std %s" % std
   print " ======================== "
   print "Best individual is %s, %s" % (best_ind, best_ind.fitness.values)
   print "Phenotype: %s \n" % evaluate(best_ind,et,dj,trns,fvp,1)
def _history():
    history = tools.History()
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)
    return history
Beispiel #28
0
def search(search_method,
           n,
           ngen,
           ref_point,
           cxpb,
           cxindpb,
           mutindpb,
           log_dir,
           channel,
           request_queue,
           callback_queue,
           worker_args,
           reference_file,
           initial_cache_file,
           verbose=False):

    if not isinstance(search_method, SearchMethod):
        raise ValueError('search_method should be: ' + str(', '.join(map(str, list(SearchMethod)))))

    reference_inds = read_reference_file(representation, reference_file)

    if reference_inds:
        print("Loaded reference file:", reference_file)

    toolbox = base.Toolbox()

    toolbox.register("individual", representation.create_random_individual, creator.Individual)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    if search_method == SearchMethod.GENETIC:
        toolbox.register("mate", representation.mate, indpb=cxindpb)
        toolbox.register("mutate", representation.mutate, indpb=mutindpb)

    toolbox.register("select", tools.selNSGA2)

    brief_base_path = get_brief_base_path(log_dir)
    pop_writer = SummaryWriter(brief_base_path + '/population')
    hof_writer = SummaryWriter(brief_base_path + '/hall_of_fame')

    external_cache = read_external_cache_file(initial_cache_file)

    checkpoint_base_path = get_checkpoint_base_path(log_dir)
    latest_checkpoint = get_latest_checkpoint(checkpoint_base_path)

    if latest_checkpoint:
        # A checkpoint file has been found, then load the data from the file
        print('Restoring search from checkpoint:', latest_checkpoint[1], ' ...')
        cp = restore_checkpoint(latest_checkpoint[1])

        if search_method != cp['search_method']:
            raise ValueError('Cannot mix search_method in the same directory')

        random.setstate(cp['rndstate'])
        startArchIndex = cp['lastArchIndex'] + 1
        current_gen = cp['generation']
        start_gen = current_gen + 1

        history = cp['history']

        if search_method == SearchMethod.GENETIC:
            # Decorate the variation operators
            toolbox.decorate("mate", history.decorator)
            toolbox.decorate("mutate", history.decorator)

        samples = cp['samples']
        hypervolumes = cp['hypervolumes']
        hof_hypervolumes = cp['hof_hypervolumes']

        pop = cp['population']

        if len(pop) != cp['population_count']:
            raise ValueError('Cannot change population count for restored search')

        hof = cp['hof']

        # This is just to assign the crowding distance to the individuals
        # no actual selection is done
        pop = toolbox.select(pop, len(pop))

        # Restore cache from checkpoint
        cache = cp['cache']
        print('Restored', len(cache), 'entries in cache')

        if len(external_cache) > 0:
            print('Merging', len(external_cache), 'entries from external cache in actual cache with', len(cache), 'entries in it.')
            cache = merge_caches(external_cache, cache)
            print('cache has now', len(cache), 'entries.')

        print('Normalizing cache...')
        start = time.time()
        cache = normalize_cache(cache)
        end = time.time()
        print('Cache normalization completed in:', end-start, 's')

        evaluateIndividuals = getEvaluateIndividuals(cache=cache,
                                                     channel=channel,
                                                     request_queue=request_queue,
                                                     callback_queue=callback_queue,
                                                     worker_args=worker_args,
                                                     start=startArchIndex)

        # Evaluate reference points
        if reference_inds:
            print('='*80)
            print("Starting evaluation for reference points:")
            print('='*80)
            reference_inds, _, _ = evaluateIndividuals(reference_inds, 'references')

            hof_writer.add_text('References sorted by accuracy',
                                representation.generate_markdown_table(sorted(reference_inds, key=lambda x: x.fitness.values[1], reverse=True)),
                                0)

    else:
        # start a new evolution
        print('Starting new %s search...'%(search_method))
        startArchIndex = 0
        current_gen = 0
        start_gen = 1

        history = tools.History()

        if search_method == SearchMethod.GENETIC:
            # Decorate the variation operators
            toolbox.decorate("mate", history.decorator)
            toolbox.decorate("mutate", history.decorator)

        samples = []
        hypervolumes = []
        hof_hypervolumes = []

        hof = tools.ParetoFront()

        cache = {}

        if len(external_cache) > 0:
            print('Merging', len(external_cache), 'entries from external cache in actual cache with', len(cache), 'entries in it.')
            cache = merge_caches(external_cache, cache)
            print('cache has now', len(cache), 'entries.')

        evaluateIndividuals = getEvaluateIndividuals(cache=cache,
                                                     channel=channel,
                                                     request_queue=request_queue,
                                                     callback_queue=callback_queue,
                                                     worker_args=worker_args,
                                                     start=startArchIndex)

        # Evaluate reference points
        if reference_inds:
            print('='*80)
            print("Starting evaluation for reference points:")
            print('='*80)
            reference_inds, _, _ = evaluateIndividuals(reference_inds, 'references')

            hof_writer.add_text('References sorted by accuracy',
                                representation.generate_markdown_table(sorted(reference_inds, key=lambda x: x.fitness.values[1], reverse=True)),
                                0)


        # Evaluate the initial individuals
        print('='*80)
        print("Starting evaluation for generation: %d/%d"%(current_gen, ngen-1))
        print('='*80)
        pop, lastArchIndex, metrics = evaluateIndividuals(toolbox.population(n=n), current_gen)

        samples.append(samples_count(current_gen, n))

        # This is just to assign the crowding distance to the individuals
        # no actual selection is done
        pop = toolbox.select(pop, len(pop))

        hof.update(pop)
        history.update(pop)

        hypervolumes.append(hypervolume(pop, ref_point))
        hof_hypervolumes.append(hypervolume(hof, ref_point))
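        # Both hypervolumes measure the objective-space region dominated by the
        # front up to ref_point; with minimized objectives, larger is better.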

        # SUMMARIES
        # Scalar: Hypervolume
        pop_writer.add_scalar('hypervolume', hypervolumes[-1], samples[-1])
        hof_writer.add_scalar('hypervolume', hof_hypervolumes[-1], samples[-1])

        # Scalar: cxindpb
        pop_writer.add_scalar('pb/cxindpb', cxindpb, samples[-1])
        hof_writer.add_scalar('pb/cxindpb', cxindpb, samples[-1])

        # Scalar: mutindpb
        pop_writer.add_scalar('pb/mutindpb', mutindpb, samples[-1])
        hof_writer.add_scalar('pb/mutindpb', mutindpb, samples[-1])

        # Scalar: train_batch_size
        pop_writer.add_scalar('batch_size/train', worker_args['train_batch_size'], samples[-1])
        hof_writer.add_scalar('batch_size/train', worker_args['train_batch_size'], samples[-1])

        # Scalar: eval_batch_size
        pop_writer.add_scalar('batch_size/eval', worker_args['eval_batch_size'], samples[-1])
        hof_writer.add_scalar('batch_size/eval', worker_args['eval_batch_size'], samples[-1])

        # Scalar: metrics
        writeMetricsInTB(pop_writer, metrics, samples[-1])
        writeMetricsInTB(hof_writer, metrics, samples[-1])

        # Text: Pareto front
        pop_pareto_front = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]

        pop_writer.add_text('Pareto front sorted by accuracy',
                            representation.generate_markdown_table(sorted(pop_pareto_front, key=lambda x: x.fitness.values[1], reverse=True)),
                            samples[-1])
        hof_writer.add_text('Pareto front sorted by accuracy',
                            representation.generate_markdown_table(sorted(hof, key=lambda x: x.fitness.values[1], reverse=True)),
                            samples[-1])

        # Image: Pareto front
        pop_writer.add_image('Pareto_front', get_pareto_image(pop, reference_inds, current_gen), samples[-1])
        hof_writer.add_image('Pareto_front', get_pareto_image(hof, reference_inds, current_gen), samples[-1])

        # save checkpoint for gen = 0
        save_checkpoint(checkpoint_base_path + '/' + checkpoint_name(current_gen),
                        search_method=search_method,
                        rndstate=random.getstate(),
                        lastArchIndex = lastArchIndex,
                        generation=current_gen,
                        history=history,
                        samples=samples,
                        hypervolumes=hypervolumes,
                        hof_hypervolumes=hof_hypervolumes,
                        logbook=None,
                        population=pop,
                        hof=hof,
                        cache=cache)

    # Begin the generational process
    for gen in range(start_gen, ngen):
        # Vary the population
        if search_method == SearchMethod.GENETIC:
            offspring = tools.selTournamentDCD(pop, len(pop))
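            # selTournamentDCD falls back on crowding distance when neither
            # individual dominates the other; that distance is available because
            # the population was passed through selNSGA2 earlier, and the
            # population size must be a multiple of four for this operator.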
            offspring = [toolbox.clone(ind) for ind in offspring]

            for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
                if random.random() <= cxpb:
                    toolbox.mate(ind1, ind2)

                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values, ind2.fitness.values
                if hasattr(ind1, 'archIndex'):
                    del ind1.archIndex
                if hasattr(ind2, 'archIndex'):
                    del ind2.archIndex


        elif search_method == SearchMethod.RANDOM:
            offspring = toolbox.population(n=n)

            for ind in offspring:
                del ind.fitness.values
        else:
            raise ValueError('Use of undefined SearchMethod: ' + str(search_method))

        print('='*80)
        print("Starting evaluation for generation: %d/%d"%(gen, ngen-1))
        print('='*80)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        valid_ind = [ind for ind in offspring if ind.fitness.valid]

        invalid_ind, lastArchIndex, metrics = evaluateIndividuals(invalid_ind, gen)

        offspring = valid_ind + invalid_ind

        samples.append(samples_count(gen, n))

        # Update the hall of fame with the generated individuals
        hof.update(offspring)

        # Select the next generation population
        pop = toolbox.select(pop + offspring, n)
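        # (mu + lambda)-style survival: NSGA-II keeps the n best individuals
        # from the union of parents and offspring, so elite solutions persist.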

        hypervolumes.append(hypervolume(pop, ref_point))
        hof_hypervolumes.append(hypervolume(hof, ref_point))

        # SUMMARIES
        # Scalar: Hypervolume
        pop_writer.add_scalar('hypervolume', hypervolumes[-1], samples[-1])
        hof_writer.add_scalar('hypervolume', hof_hypervolumes[-1], samples[-1])

        # Scalar: cxindpb
        pop_writer.add_scalar('pb/cxindpb', cxindpb, samples[-1])
        hof_writer.add_scalar('pb/cxindpb', cxindpb, samples[-1])

        # Scalar: mutindpb
        pop_writer.add_scalar('pb/mutindpb', mutindpb, samples[-1])
        hof_writer.add_scalar('pb/mutindpb', mutindpb, samples[-1])

        # Scalar: train_batch_size
        pop_writer.add_scalar('batch_size/train', worker_args['train_batch_size'], samples[-1])
        hof_writer.add_scalar('batch_size/train', worker_args['train_batch_size'], samples[-1])

        # Scalar: eval_batch_size
        pop_writer.add_scalar('batch_size/eval', worker_args['eval_batch_size'], samples[-1])
        hof_writer.add_scalar('batch_size/eval', worker_args['eval_batch_size'], samples[-1])

        # Scalar: metrics
        writeMetricsInTB(pop_writer, metrics, samples[-1])
        writeMetricsInTB(hof_writer, metrics, samples[-1])

        # Text: Pareto front
        pop_pareto_front = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]

        pop_writer.add_text('Pareto front sorted by accuracy',
                            representation.generate_markdown_table(sorted(pop_pareto_front, key=lambda x: x.fitness.values[1], reverse=True)),
                            samples[-1])
        hof_writer.add_text('Pareto front sorted by accuracy',
                            representation.generate_markdown_table(sorted(hof, key=lambda x: x.fitness.values[1], reverse=True)),
                            samples[-1])

        # Image: Pareto front
        pop_writer.add_image('Pareto_front', get_pareto_image(pop, reference_inds, gen), samples[-1])
        hof_writer.add_image('Pareto_front', get_pareto_image(hof, reference_inds, gen), samples[-1])

        # save checkpoint for gen
        save_checkpoint(checkpoint_base_path + '/' + checkpoint_name(gen),
                        search_method=search_method,
                        rndstate=random.getstate(),
                        lastArchIndex = lastArchIndex,
                        generation=gen,
                        history=history,
                        samples=samples,
                        hypervolumes=hypervolumes,
                        hof_hypervolumes=hof_hypervolumes,
                        logbook=None,
                        population=pop,
                        hof=hof,
                        cache=cache)

        print("hypervolume is %f" % hypervolumes[-1])

    print(len(cache), 'entries in cache')
Beispiel #29
0
toolbox.register("population", initRepeatParallel.initPopulation, list, toolbox.individual)

toolbox.register("evaluate", evaluate_suite)
# mate crossover two suites
toolbox.register("mate", tools.cxUniform, indpb=0.5)
# mutate should change seq order in the suite as well
toolbox.register("mutate", mut_suite, indpb=0.5)
# toolbox.register("select", tools.selTournament, tournsize=5)
toolbox.register("select", select_without_duplicates)

toolbox.register("select_most_diverse", select_most_diverse)

toolbox.register("pareto_front", tools.sortNondominated)

# log the history
history = tools.History()
# Decorate the variation operators
toolbox.decorate("mate", history.decorator)
toolbox.decorate("mutate", history.decorator)


def get_package_name(path):
    apk_path = None
    if path.endswith(".apk"):
        apk_path = path
    else:
        for file_name in os.listdir(path + "/bin"):
            if file_name == "bugroid-instrumented.apk":
                apk_path = path + "/bin/bugroid-instrumented.apk"
                break
            elif file_name.endswith("-debug.apk"):
Beispiel #30
0
    def GAEvolve(popSize, ma, m, n, rsi, dfFit, s, cashbal, yearnow):

        ###############################################################################
        def evaluateInd(individual):
            # To test the GA without using AssetFuzzy, uncomment the 2 lines below and comment out the 3rd and 4th lines below
            # s = str(individual)
            # result = len(s)
            result = self.fitness(ma, m, n, rsi, dfFit, s, cashbal, yearnow)
            #Fitness function calls fuzzy
            return result,

        ###############################################################################

        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        toolbox = base.Toolbox()

        MAMethod = ['SMA', 'AMA', 'TFMA', 'TMA']
        MValue = ['10', '20', '50', '100', '200']
        NValue = ['1', '3', '5', '10', '15', '20']
        RSIPeriod = ['5', '10', '14', '20', '25']

        N_CYCLES = 1

        toolbox.register("attr_mamethod", random.choice, MAMethod)
        toolbox.register("attr_mvalue", random.choice, MValue)
        toolbox.register("attr_nvalue", random.choice, NValue)
        toolbox.register("attr_rsiperiod", random.choice, RSIPeriod)

        toolbox.register("individual",
                         tools.initCycle,
                         creator.Individual,
                         (toolbox.attr_mamethod, toolbox.attr_mvalue,
                          toolbox.attr_nvalue, toolbox.attr_rsiperiod),
                         n=N_CYCLES)

        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)

        # Operator registering
        toolbox.register("evaluate", evaluateInd)
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0)
        toolbox.register("select", tools.selBest)

        history = tools.History()
        # Decorate the variation operators
        toolbox.decorate("mate", history.decorator)
        toolbox.decorate("mutate", history.decorator)
        MU, LAMBDA = popSize, 20
        pop = toolbox.population(n=MU)
        history.update(pop)
        print('\n%d elems in the History' % len(pop))
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(pop)

        # hof = tools.ParetoFront()
        hof = tools.HallOfFame(10)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", numpy.mean, axis=0)
        stats.register("std", numpy.std, axis=0)
        stats.register("min", numpy.min, axis=0)
        stats.register("max", numpy.max, axis=0)

        pop, logbook = algorithms.eaMuPlusLambda(pop,
                                                 toolbox,
                                                 mu=MU,
                                                 lambda_=LAMBDA,
                                                 cxpb=0.7,
                                                 mutpb=0.3,
                                                 ngen=40,
                                                 stats=stats,
                                                 halloffame=hof)
        ## The best set of rules should be returned by the fitness function and
        ## incorporated into the next generation (TODO: chekit / bharat)

        print('\n%d elems in the HallOfFame' % len(hof))
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(pop)

        return pop