def test_consecutive_callback():
    callback = ConsecutiveStopping(generations=3)
    assert check_callback(callback) == [callback]

    logbook = Logbook()

    logbook.record(fitness=0.9)
    logbook.record(fitness=0.8)
    logbook.record(fitness=0.83)

    # Not enough records to decide
    assert not callback(logbook=logbook)

    logbook.record(fitness=0.85)
    logbook.record(fitness=0.81)

    # Current record is better than at least one of the previous 3 records
    assert not callback(logbook=logbook)

    logbook.record(fitness=0.8)

    # Current record is worse than all of the previous 3 records
    assert callback(logbook=logbook)
    assert callback(logbook=logbook, record={"fitness": 0.8})

    with pytest.raises(Exception) as excinfo:
        callback()
    assert str(excinfo.value) == "logbook parameter must be provided"
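For reference, a minimal sketch (not the library's implementation) of the stopping rule this test exercises, assuming DEAP's Logbook.select is available:

def consecutive_stop(logbook, generations=3):
    # Stop once the newest fitness is no better than every one of the
    # previous `generations` records.
    fitness = logbook.select("fitness")
    if len(fitness) <= generations:
        return False  # not enough records to decide
    current, previous = fitness[-1], fitness[-generations - 1:-1]
    return all(current <= past for past in previous)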
Example No. 2
    def _search(self):
        """Apply the search algorithm.

        :return: The best individuals found
        :rtype: :py:class:`~deap.tools.HallOfFame` of individuals
        :return: A logbook with the statistics of the evolution
        :rtype: :py:class:`~deap.tools.Logbook`
        :return: The runtime of the algorithm
        :rtype: :py:class:`float`
        """
        # The population will be a list of individuals
        self._toolbox.register("population", initRepeat, list,
                               self._toolbox.individual)

        # Try to load the state of the last checkpoint
        try:
            pop, start_gen, logbook, runtime = self.__load_checkpoint()
        # If a checkpoint can't be loaded, start a new execution
        except Exception:
            # Create the initial population
            pop = self._toolbox.population(n=self.pop_size)

            # First generation
            start_gen = 0

            # Computing runtime
            runtime = 0

            # Create the logbook
            logbook = Logbook()
            logbook.header = list(self.stats_names) + \
                (self._stats.fields if self._stats else [])

            # Evaluate the individuals with an invalid fitness
            num_evals = self._eval(pop)

            # Compile statistics about the population
            self._do_stats(pop, start_gen, num_evals, logbook)

        # Run all the generations
        for gen in range(start_gen + 1, self.n_gens + 1):
            start_time = time.perf_counter()
            self._do_generation(pop, gen, logbook)
            end_time = time.perf_counter()
            runtime += end_time - start_time

            # Save the wrapper state at each checkpoint
            if gen % self.checkpoint_freq == 0:
                self.__save_checkpoint(pop, gen, logbook, runtime)

        # Save the last state
        self.__save_checkpoint(pop, self.n_gens, logbook, runtime)

        return self._select_best(pop), logbook, runtime
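The checkpointing helpers referenced above (__load_checkpoint / __save_checkpoint) are not part of this fragment. A minimal pickle-based sketch of what they might look like, shown as they would appear inside the wrapper class and assuming a hypothetical self.checkpoint_file attribute:

import pickle

def __save_checkpoint(self, pop, gen, logbook, runtime):
    # Persist everything needed to resume the evolution later.
    state = {"pop": pop, "gen": gen, "logbook": logbook, "runtime": runtime}
    with open(self.checkpoint_file, "wb") as handle:
        pickle.dump(state, handle)

def __load_checkpoint(self):
    # Raises (e.g. FileNotFoundError) when no checkpoint exists,
    # which the caller above treats as "start a new execution".
    with open(self.checkpoint_file, "rb") as handle:
        state = pickle.load(handle)
    return state["pop"], state["gen"], state["logbook"], state["runtime"]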
def test_delta_callback():
    callback = DeltaThreshold(0.001)
    assert check_callback(callback) == [callback]

    logbook = Logbook()

    logbook.record(fitness=0.9)

    # Not enough records to decide
    assert not callback(logbook=logbook)

    logbook.record(fitness=0.923)
    logbook.record(fitness=0.914)

    # Abs difference is still bigger than the threshold, so the callback does not stop
    assert not callback(logbook=logbook)

    logbook.record(fitness=0.9141)

    # Abs difference is now smaller than the threshold, so the callback stops
    assert callback(logbook=logbook)
    assert callback(logbook=logbook, record={"fitness": 0.9141})

    with pytest.raises(Exception) as excinfo:
        callback()
    assert str(excinfo.value) == "logbook parameter must be provided"
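Likewise, a minimal sketch of the delta rule being tested (not the library code): stop once the absolute change between the last two recorded fitness values drops below the threshold.

def delta_stop(logbook, threshold=0.001):
    fitness = logbook.select("fitness")
    if len(fitness) < 2:
        return False  # not enough records to decide
    return abs(fitness[-1] - fitness[-2]) < threshold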
Example No. 4
def plotStats(logbook: tools.Logbook):
    # plot statistics:
    minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
    plt.figure(1)
    sns.set_style("whitegrid")
    plt.plot(minFitnessValues, color='red')
    plt.plot(meanFitnessValues, color='green')
    plt.xlabel('Generation')
    plt.ylabel('Min / Average Fitness')
    plt.title('Min and Average fitness over Generations')
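plotStats only draws onto the current figure; the caller is expected to display or save it afterwards, e.g. (assuming a logbook compiled with "min" and "avg" statistics):

plotStats(logbook)
plt.show()   # or plt.savefig("fitness.png")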
Example No. 5
def eaNigel(population, toolbox, ngen, goal=0, stats=None,
             halloffame=None, history=None, verbose=__debug__):
    """This algorithm is a simple evolutionary algorithm.

    :param population: A list of individuals.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param ngen: The number of generations.
    :param goal: Stop once the best score in the hall of fame is less than this value.
    :param stats: A :class:`~deap.tools.Statistics` object that is updated
                  inplace, optional.
    :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
                       contain the best individuals; required here, since the
                       stopping condition reads its best entry.
    :param verbose: Whether or not to log the statistics.
    :returns: The final population
    :returns: A :class:`~deap.tools.Logbook` with the statistics of the
              evolution.
    """
    logbook = Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    if history is not None:
        history.update(population)

    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)

    # Begin the generational process
    gen = 0
    while halloffame[0].fitness.values[0] > goal and gen < ngen:
        # Select the next generation individuals
        offspring = toolbox.procreate(population)

        # Replace the current population by the offspring
        population[:] = offspring

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(population)
#            population[-1] = halloffame[0]  # always include the best ever in the offspring

        if history is not None:
            history.update(population)

        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        gen += 1
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        print(logbook.stream)

    return population, logbook
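A sketch of the bookkeeping objects eaNigel expects; the problem-specific evaluate and procreate operators are assumed to be registered elsewhere, and a HallOfFame must be supplied because the stopping condition reads its best entry:

import numpy as np
from deap import base, tools

toolbox = base.Toolbox()
# toolbox.register("evaluate", ...)   # problem-specific, assumed
# toolbox.register("procreate", ...)  # produces the next generation, assumed

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", np.min)
stats.register("avg", np.mean)
hof = tools.HallOfFame(1)  # required: the loop condition reads halloffame[0]

# population, logbook = eaNigel(population, toolbox, ngen=50, goal=0.01,
#                               stats=stats, halloffame=hof)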
def test_threshold_callback():
    callback = ThresholdStopping(threshold=0.8)
    assert check_callback(callback) == [callback]
    assert not callback(record={"fitness": 0.5})
    assert callback(record={"fitness": 0.9})

    # Test the callback using a Logbook instead of a record
    logbook = Logbook()
    logbook.record(fitness=0.93)
    logbook.record(fitness=0.4)

    assert not callback(logbook=logbook)

    logbook.record(fitness=0.95)

    assert callback(logbook=logbook)

    with pytest.raises(Exception) as excinfo:
        callback()
    assert (str(excinfo.value) ==
            "At least one of record or logbook parameters must be provided")
Example No. 7
    def __init__(self, bset):
        self.bset = bset
        pop = [Population.INDIVIDUAL_CLASS(self.bset) for _ in range(self.POPULATION_SIZE)]
        super(Population, self).__init__(pop)

        self.stats = Statistics(lambda ind: ind.fitness.values)
        self.stats.register("avg", np.mean)
        self.stats.register("std", np.std)
        self.stats.register("min", np.min)
        self.stats.register("max", np.max)

        self.logbook = Logbook()
        self.logbook.header = ['gen'] + self.stats.fields

        self.hof = HallOfFame(1)
        self.generation = 0

        # do an initial evaluation
        for ind in self:
            ind.fitness.values = ind.evaluate()
Example No. 8
def plot_evolution(evolution: Logbook):
    args = ['gen', 'max', 'mean', 'surface', 'count']
    ev_it, ev_max, ev_mean, ev_surface, ev_count = evolution.select(*args)

    ev_online = np.array(ev_mean).cumsum() / np.arange(1, len(ev_mean) + 1)
    ev_offline = np.array(ev_max).cumsum() / np.arange(1, len(ev_max) + 1)

    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex='all', figsize=(12, 4))

    # convergence
    ax1.plot(ev_it, ev_max, label='max')
    ax1.plot(ev_it, ev_mean, label='mean')
    ax1.plot(ev_it, ev_offline, label='offline')
    ax1.plot(ev_it, ev_online, label='online')
    ax1.set_title('Algorithm convergence')
    ax1.set_xlabel('Iteration')
    ax1.set_ylabel('Fitness')
    ax1.grid()
    ax1.legend()

    # surface
    ax2.plot(ev_it, np.array(ev_surface))
    ax2.set_title('Total surface of the best individual')
    ax2.set_xlabel('Iteration')
    ax2.set_ylabel('Surface')
    ax2.grid()

    # count
    ax3.plot(ev_it, ev_count)
    ax3.set_title('Number of boxes of the best individual')
    ax3.set_xlabel('Iteration')
    ax3.set_ylabel('Number of boxes')
    ax3.grid()

    plt.tight_layout()

    return fig
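A usage sketch for plot_evolution; the logbook must carry the 'gen', 'max', 'mean', 'surface' and 'count' columns selected above (dummy values are used here purely for illustration):

import random

evo_log = Logbook()
for gen in range(1, 51):
    evo_log.record(gen=gen, max=random.random(), mean=random.random(),
                   surface=100 * random.random(), count=random.randint(1, 10))

fig = plot_evolution(evo_log)
fig.savefig("evolution.png")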
toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator)
toolbox.register("estimate_force", compound_force)
toolbox.register("update", compound_update, W, C)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
logbook.header = ["gen", "G", "kbest"] + stats.fields





def do_exp():
    pop, _logbook, best = run_gsa(toolbox, stats, logbook, pop_size, 0, iter_number, None, kbest, ginit, **{"w":W, "c":C})

    schedule = build_schedule(_wf, rm, estimator,  best)
    Utility.validate_static_schedule(_wf, schedule)
    makespan = Utility.makespan(schedule)
    print("Final makespan: {0}".format(makespan))
    print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
    return makespan
Example No. 10
toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
toolbox.register("force_vector_matrix", force_vector_matrix)
toolbox.register("velocity_and_position", velocity_and_position, beta=0.0)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
logbook.header = ("gen", "G", "kbest", "min", "avr", "max", "std")



pop_size = 40
iter_number = 200
kbest = pop_size
ginit = 2

def do_exp():
    pop, _logbook, best = run_gsa(toolbox, stats, logbook, pop_size, iter_number, kbest, ginit)
    solution = {MAPPING_SPECIE: list(best.entity.items()), ORDERING_SPECIE: sorted_tasks}
    schedule = build_schedule(_wf, estimator, rm, solution)
    Utility.validate_static_schedule(_wf, schedule)
    makespan = Utility.makespan(schedule)
Example No. 11
def eaSimple(population, toolbox, cxpb, mutpb, ngen, stats=None,
             halloffame=None, verbose=__debug__, pset=None, store=True):
    """

    Parameters
    ----------
    population : list
        Initial list of individuals.
    toolbox : deap.base.Toolbox
        Toolbox with the registered evolution operators.
    cxpb : float
        Crossover probability.
    mutpb : float
        Mutation probability.
    ngen : int
        Number of generations.
    stats : optional
        Statistics object compiled each generation.
    halloffame : deap.tools.HallOfFame, optional
        Archive of the best individuals; its members are also re-injected into the offspring.
    verbose : bool
        Whether to print the logbook stream.
    pset : optional
        Symbol set used to map internal names back to readable names when storing.
    store : bool
        Whether to dump the per-generation hall-of-fame entries to a text file.

    Returns
    -------
    population, logbook
    """
    len_pop = len(population)
    logbook = Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    random_seed = random.randint(1, 1000)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]

    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    # fitnesses = parallelize(n_jobs=4, func=toolbox.evaluate, iterable=invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit[0],
        ind.expr = fit[1]

    if halloffame is not None:
        halloffame.update(population)
    random.seed(random_seed)
    record = stats.compile_(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)
    data_all = {}
    # Begin the generational process
    for gen in range(1, ngen + 1):

        if store:
            if pset:
                subp = partial(sub, subed=pset.rep_name_list, subs=pset.name_list)
                data = [{"score": i.fitness.values[0], "expr": subp(i.expr)} for i in halloffame.items[-5:]]
            else:
                data = [{"score": i.fitness.values[0], "expr": i.expr} for i in halloffame.items[-5:]]
            data_all['gen%s' % gen] = data
        # select_gs the next generation individuals
        offspring = toolbox.select_gs(population, len_pop)

        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)
        if halloffame is not None:
            offspring.extend(halloffame)

        random_seed = random.randint(1, 1000)
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        # fitnesses = parallelize(n_jobs=4, func=toolbox.evaluate, iterable=invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit[0],
            ind.expr = fit[1]

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)

            if halloffame.items[-1].fitness.values[0] >= 0.95:
                print(halloffame.items[-1])
                print(halloffame.items[-1].fitness.values[0])
                break
        random.seed(random_seed)
        # Replace the current population by the offspring
        population[:] = offspring

        # Append the current generation statistics to the logbook
        record = stats.compile_(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)
    store = Store()
    store.to_txt(data_all)
    return population, logbook
Example No. 12
class Population(list):
    """
    A collection of individuals
    """
    INDIVIDUAL_CLASS = Individual
    POPULATION_SIZE = 100
    CLONE_BEST = 5
    MAX_MATE_ATTEMPTS = 10
    MATE_MUTATE_CLONE = (80, 18, 2)

    def __init__(self, bset):
        self.bset = bset
        pop = [Population.INDIVIDUAL_CLASS(self.bset) for _ in range(self.POPULATION_SIZE)]
        super(Population, self).__init__(pop)

        self.stats = Statistics(lambda ind: ind.fitness.values)
        self.stats.register("avg", np.mean)
        self.stats.register("std", np.std)
        self.stats.register("min", np.min)
        self.stats.register("max", np.max)

        self.logbook = Logbook()
        self.logbook.header = ['gen'] + self.stats.fields

        self.hof = HallOfFame(1)
        self.generation = 0

        # do an initial evaluation
        for ind in self:
            ind.fitness.values = ind.evaluate()

    def select(self, k):
        """Probablistic select *k* individuals among the input *individuals*. The
        list returned contains references to the input *individuals*.

        :param k: The number of individuals to select.
        :returns: A list containing k individuals.

        The individuals returned are randomly selected from individuals according
        to their fitness such that the more fit the individual the more likely
        that individual will be chosen.  Less fit individuals are less likely, but
        still possibly, selected.
        """
        # adjusted pop is a list of tuples (adjusted fitness, individual)
        adjusted_pop = [(1.0 / (1.0 + i.fitness.values[0]), i) for i in self]

        # normalised_pop is a list of tuples (float, individual) where the float indicates
        # a 'share' of 1.0 that the individual deserves based on its fitness relative to
        # the other individuals. It is sorted so the best chances are at the front of the list.
        denom = sum([fit for fit, ind in adjusted_pop])
        normalised_pop = [(fit / denom, ind) for fit, ind in adjusted_pop]
        normalised_pop = sorted(normalised_pop, key=lambda i: i[0], reverse=True)

        # randomly select with a fitness bias
        # FIXME: surely this can be optimized?
        selected = []
        for x in range(k):
            rand = random.random()
            accumulator = 0.0
            for share, ind in normalised_pop:
                accumulator += share
                if rand <= accumulator:
                    selected.append(ind)
                    break
        if len(selected) == 1:
            return selected[0]
        else:
            return selected

    def evolve(self):
        """
        Evolve this population by one generation
        """
        self.logbook.record(gen=self.generation, **self.stats.compile(self))
        self.hof.update(self)
        print(self.logbook.stream)

        # the best x of the population are cloned directly into the next generation
        offspring = self[:self.CLONE_BEST]

        # rest of the population clone, mate, or mutate at random
        for idx in range(len(self) - self.CLONE_BEST):

            # decide how to alter this individual
            rand = random.randint(0,100)

            for _ in range(0, self.MAX_MATE_ATTEMPTS):
                try:
                    if rand < self.MATE_MUTATE_CLONE[0]:  # MATE/CROSSOVER
                        receiver, contributor = self.select(2)
                        child = receiver.clone()
                        child.mate(contributor)
                        break
                    elif rand < (self.MATE_MUTATE_CLONE[0] + self.MATE_MUTATE_CLONE[1]):  # MUTATE
                        ind = self.select(1)
                        child = ind.clone()
                        child.mutate()
                        break
                    else:
                        child = self.select(1).clone()
                        break
                except BirthError:
                    continue
            # generate new blood when reproduction fails so badly
            else:
                child = Population.INDIVIDUAL_CLASS(self.bset)

            offspring.append(child)
        self[:] = offspring
        self.generation += 1

        # evaluate every individual and sort
        for ind in self:
            if not len(ind.fitness.values):
                ind.fitness.values = ind.evaluate()
        self.sort(key=lambda i: i.fitness.values[0])
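A brief usage sketch for this Population class, assuming a primitive set bset and an Individual class compatible with the constructor:

pop = Population(bset)            # bset: the primitive/terminal set, assumed to exist
for _ in range(50):               # evolve for a fixed number of generations
    pop.evolve()

best = pop.hof[0]                 # best individual recorded in the hall of fame
print(best.fitness.values[0])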
Example No. 13
def eaSimple(population,
             toolbox,
             cxpb,
             mutpb,
             ngen,
             stats=None,
             halloffame=None,
             verbose=__debug__,
             pset=None,
             store=True):
    """

    Parameters
    ----------
    population
    toolbox
    cxpb
    mutpb
    ngen
    stats
    halloffame
    verbose
    pset
    store
    Returns
    -------

    """
    rst = random.getstate()
    len_pop = len(population)
    logbook = Logbook()
    logbook.header = ['gen', 'pop'] + (stats.fields if stats else [])
    data_all = {}
    random.setstate(rst)

    for gen in range(1, ngen + 1):
        "评价"
        rst = random.getstate()
        """score"""
        invalid_ind = [ind for ind in population if not ind.fitness.valid]
        fitnesses = toolbox.parallel(iterable=population)
        for ind, fit, in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit[0],
            ind.expr = fit[1]
            ind.dim = fit[2]
            ind.withdim = fit[3]
        random.setstate(rst)

        rst = random.getstate()
        """elite"""
        add_ind = []
        add_ind1 = toolbox.select_kbest_target_dim(population,
                                                   K_best=0.01 * len_pop)
        add_ind2 = toolbox.select_kbest_dimless(population,
                                                K_best=0.01 * len_pop)
        add_ind3 = toolbox.select_kbest(population, K_best=5)
        add_ind += add_ind1
        add_ind += add_ind2
        add_ind += add_ind3
        elite_size = len(add_ind)
        random.setstate(rst)

        rst = random.getstate()
        """score"""
        if store:
            subp = functools.partial(sub,
                                     subed=pset.rep_name_list,
                                     subs=pset.real_name_list)
            data = {
                "gen{}_pop{}".format(gen, n): {
                    "gen": gen,
                    "pop": n,
                    "score": i.fitness.values[0],
                    "expr": str(subp(i.expr)),
                }
                for n, i in enumerate(population) if i is not None
            }
            data_all.update(data)
        random.setstate(rst)

        rst = random.getstate()
        """record"""
        if halloffame is not None:
            halloffame.update(add_ind3)
            if len(halloffame.items
                   ) > 0 and halloffame.items[-1].fitness.values[0] >= 0.95:
                print(halloffame.items[-1])
                print(halloffame.items[-1].fitness.values[0])
                break
        random.setstate(rst)

        rst = random.getstate()
        """Dynamic output"""

        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, pop=len(population), **record)

        if verbose:
            print(logbook.stream)
        random.setstate(rst)
        """crossover, mutate"""
        offspring = toolbox.select_gs(population, len_pop - elite_size)
        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)

        rst = random.getstate()
        """re-run"""
        offspring.extend(add_ind)
        population[:] = offspring
        random.setstate(rst)

    store = Store()
    store.to_csv(data_all)
    return population, logbook
Example No. 14
toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator)
toolbox.register("estimate_force", compound_force)
toolbox.register("update", compound_update, W, C)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
logbook.header = ["gen", "G", "kbest"] + stats.fields





def do_exp():
    pop, _logbook, best = run_gsa(toolbox, stats, logbook, pop_size, 0, iter_number, None, kbest, ginit, **{"w":W, "c":C})

    schedule = build_schedule(_wf, rm, estimator,  best)
    Utility.validate_static_schedule(_wf, schedule)
    makespan = Utility.makespan(schedule)
    print("Final makespan: {0}".format(makespan))
    print("Heft makespan: {0}".format(Utility.makespan(heft_schedule)))
    return makespan
Example No. 15
def do_inherited_pop_exp(saver, alg_builder, chromosome_cleaner_builder,
                         wf_name, **params):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r(params["resource_set"]["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(**params["estimator_settings"])
    chromosome_cleaner = chromosome_cleaner_builder(_wf, rm, estimator)
    dynamic_heft = DynamicHeft(_wf, rm, estimator)

    logbook = Logbook()

    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    ga = alg_builder(_wf,
                     rm,
                     estimator,
                     params["init_sched_percent"],
                     log_book=logbook,
                     stats=stats,
                     alg_params=params["alg_params"])

    machine = GaHeftOldPopExecutor(heft_planner=dynamic_heft,
                                   wf=_wf,
                                   resource_manager=rm,
                                   estimator=estimator,
                                   stat_saver=None,
                                   ga_builder=lambda: ga,
                                   chromosome_cleaner=chromosome_cleaner,
                                   **params["executor_params"])

    machine.init()
    print("Executor start")
    machine.run()
    print("Executor stop")

    resulted_schedule = machine.current_schedule
    stat_data = machine.executor_stat_data

    Utility.validate_dynamic_schedule(_wf, resulted_schedule)

    data = {
        "wf_name": wf_name,
        "params": params,
        "result": {
            "random_init_logbook":
            stat_data["random_init_logbook"],
            "inherited_init_logbook":
            stat_data["inherited_init_logbook"],
            "makespan":
            Utility.makespan(resulted_schedule),
            ## TODO: this function should be remade to adapt under conditions of dynamic env
            #"overall_transfer_time": Utility.overall_transfer_time(resulted_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(resulted_schedule),
            "overall_failed_tasks_count":
            Utility.overall_failed_tasks_count(resulted_schedule)
        }
    }

    if saver is not None:
        saver(data)

    return data
Example No. 16
    def __init__(self, pset, pop=500, gen=20, mutate_prob=0.5, mate_prob=0.8, hall=1, re_hall=1,
                 re_Tree=None, initial_min=None, initial_max=3, max_value=5,
                 scoring=(r2_score,), score_pen=(1,), filter_warning=True, cv=1,
                 add_coef=True, inter_add=True, inner_add=False, vector_add=False, out_add=False, flat_add=False,
                 cal_dim=False, dim_type=None, fuzzy=False, n_jobs=1, batch_size=40,
                 random_state=None, stats=None, verbose=True, migrate_prob=0,
                 tq=True, store=False, personal_map=False, stop_condition=None, details=False, classification=False,
                 score_object="y", sub_mu_max=1, limit_type="h_bgp", batch_para=False):
        """

        Parameters
        ----------
        pset:SymbolSet
            the feature x and target y and others should have been added.
        pop: int
            number of population.
        gen: int
            number of generation.
        mutate_prob:float
            probability of mutate.
        mate_prob:float
            probability of mate(crossover).
        initial_max:int
            max initial size of expression when first producing.
        initial_min : None,int
            min initial size of expression when first producing.
        max_value:int
            max size of expression.
        limit_type: "height" or "length",","h_bgp"
            limitation type for max_value, but don't affect initial_max, initial_min.
        hall:int,>=1
            number of HallOfFame (elite) to maintain.
        re_hall:None or int>=2
            Notes: only valid when hall
            number of HallOfFame to add to next generation.
        re_Tree: int
            number of new features to add to next generation.
            0 is false to add.
        personal_map:bool or "auto"
            "auto" is using 'premap' and with auto refresh the 'premap' with individual.\n
            True is just using constant 'premap'.\n
            False is just use the prob of terminals.
        scoring: list of Callable, default is [sklearn.metrics.r2_score,]
            See Also ``sklearn.metrics``
        score_pen: tuple of 1, -1, or float, but not 0.
            >0 : maximization problem; best is positive, worst is -np.inf.
            <0 : minimization problem; best is negative, worst is np.inf.

            Notes:
                if multiple scoring methods are used, the scores must be brought to the same
                dimension in preprocessing or weighted by score_pen, because all selection
                is based on mean(w_i * score_i).

            Examples::

                scoring = [r2_score,]
                score_pen= [1,]

        cv:sklearn.model_selection._split._BaseKFold or int
            the shuffle option must be False;
            default=1 means no cross-validation.
        filter_warning:bool
            filter warning or not.
        add_coef:bool
            add coef in expression or not.
        inter_add:bool
            add intercept constant or not.
        inner_add:bool
            add inner coefficients or not.
        out_add:bool
            add out coefficients or not.
        flat_add:bool
            add flat coefficients or not.
        n_jobs:int
            default 1, advise 6.
        batch_size:int
            default 40, depend of machine.
        random_state:int
            None,int.
        cal_dim:bool
            whether to perform the dimension calculation.
        dim_type:Dim or None or list of Dim
            "coef": af(x)+b. a,b have dimension,f(x)'s dimension is not dnan. \n
            "integer": af(x)+b. f(x) is with integer dimension. \n
            [Dim1,Dim2]: f(x)'s dimension in list. \n
            Dim: f(x) ~= Dim. (see fuzzy) \n
            Dim: f(x) == Dim. \n
            None: f(x) == pset.y_dim
        fuzzy:bool
            accept dims with the same base as dim_type, such as m, m^2, m^3.
        stats:dict
            details of logbook to show. \n
            Map:\n
            values
                = {"max": np.max, "mean": np.mean, "min": np.mean, "std": np.std, "sum": np.sum}
            keys
                = {\n
                   "fitness": just see fitness[0], \n
                   "fitness_dim_max": max problem, see fitness with demand dim,\n
                   "fitness_dim_min": min problem, see fitness with demand dim,\n
                   "dim_is_target": demand dim,\n
                   "coef":  dim is True, coef have dim, \n
                   "integer":  dim is integer, \n
                   ...
                   }

            if stats is None, default is:

            for cal_dim=True:
                stats = {"fitness_dim_max": ("max",), "dim_is_target": ("sum",)}

            for cal_dim=False:
                stats = {"fitness": ("max",)}

            if self-definition, the key is func to get attribute of each ind.

            Examples::

                def func(ind):
                    return ind.fitness[0]
                stats = {func: ("mean",), "dim_is_target": ("sum",)}

        verbose:bool
            print verbose logbook or not.
        tq:bool
            print progress bar or not.
        store:bool or path
            bool or path.
        stop_condition:callable
            stop condition evaluated on the best individual of the hall of fame; it returns
            a bool, and True stops the loop.

            Examples::

                def func(ind):
                    c = ind.fitness.values[0]>=0.90
                    return c

        details:bool
            return expr and predict_y or not.

        classification: bool
            classification or not.

        score_object:
            score by y or delta y (for implicit function).
        """
        super(BaseLoop, self).__init__()

        assert initial_max <= max_value, "the initial size of the expression should be no larger than the max_value limitation"
        if cal_dim:
            assert all(
                [isinstance(i, Dim) for i in pset.dim_ter_con.values()]), \
                "all import dim of pset should be Dim object."

        self.details = details
        self.max_value = max_value
        self.pop = pop
        self.gen = gen
        self.mutate_prob = mutate_prob
        self.mate_prob = mate_prob
        self.migrate_prob = migrate_prob
        self.verbose = verbose
        self.cal_dim = cal_dim
        self.re_hall = re_hall
        self.re_Tree = re_Tree
        self.store = store
        self.limit_type = limit_type
        self.data_all = []
        self.personal_map = personal_map
        self.stop_condition = stop_condition
        self.population = []
        self.rand_state = None
        self.random_state = random_state
        self.sub_mu_max = sub_mu_max
        self.population_next = []

        self.cpset = CalculatePrecisionSet(pset, scoring=scoring, score_pen=score_pen,
                                           filter_warning=filter_warning, cal_dim=cal_dim,
                                           add_coef=add_coef, inter_add=inter_add, inner_add=inner_add,
                                           vector_add=vector_add, out_add=out_add, flat_add=flat_add, cv=cv,
                                           n_jobs=n_jobs, batch_size=batch_size, tq=tq,
                                           fuzzy=fuzzy, dim_type=dim_type, details=details,
                                           classification=classification, score_object=score_object,
                                           batch_para=batch_para
                                           )

        Fitness_ = newclass.create("Fitness_", Fitness, weights=score_pen)
        self.PTree = newclass.create("PTrees", SymbolTree, fitness=Fitness_)
        # def produce
        if initial_min is None:
            initial_min = 2
        self.register("genGrow", genGrow, pset=self.cpset, min_=initial_min, max_=initial_max + 1,
                      personal_map=self.personal_map)
        self.register("genFull", genFull, pset=self.cpset, min_=initial_min, max_=initial_max + 1,
                      personal_map=self.personal_map)
        self.register("genHalf", genGrow, pset=self.cpset, min_=initial_min, max_=initial_max + 1,
                      personal_map=self.personal_map)
        self.register("gen_mu", genGrow, min_=1, max_=self.sub_mu_max + 1, personal_map=self.personal_map)
        # def selection

        self.register("select", selTournament, tournsize=2)

        self.register("selKbestDim", selKbestDim,
                      dim_type=self.cpset.dim_type, fuzzy=self.cpset.fuzzy)
        self.register("selBest", selBest)

        self.register("mate", cxOnePoint)
        # def mutate

        self.register("mutate", mutUniform, expr=self.gen_mu, pset=self.cpset)

        self.decorate("mate", staticLimit(key=operator.attrgetter(limit_type), max_value=self.max_value))
        self.decorate("mutate", staticLimit(key=operator.attrgetter(limit_type), max_value=self.max_value))

        if stats is None:
            if cal_dim:
                if score_pen[0] > 0:
                    stats = {"fitness_dim_max": ("max",), "dim_is_target": ("sum",)}
                else:
                    stats = {"fitness_dim_min": ("min",), "dim_is_target": ("sum",)}
            else:
                if score_pen[0] > 0:
                    stats = {"fitness": ("max",)}
                else:
                    stats = {"fitness": ("min",)}

        self.stats = Statis_func(stats=stats)
        logbook = Logbook()
        logbook.header = ['gen'] + (self.stats.fields if self.stats else [])
        self.logbook = logbook

        if hall is None:
            hall = 1
        self.hall = HallOfFame(hall)

        if re_hall is None:
            self.re_hall = None
        else:
            if re_hall == 1 or re_hall == 0:
                print("re_hall should more than 1")
                re_hall = 2
            assert re_hall >= hall, "re_hall should be no less than hall"
            self.re_hall = HallOfFame(re_hall)
Example No. 17
File: srwc.py  Project: boliqq07/BGP
def eaSimple(population,
             toolbox,
             cxpb,
             mutpb,
             ngen,
             stats=None,
             halloffame=None,
             verbose=__debug__,
             pset=None,
             store=True):
    """

    Parameters
    ----------
    population
    toolbox
    cxpb
    mutpb
    ngen
    stats
    halloffame
    verbose
    pset
    store
    Returns
    -------

    """
    rst = random.getstate()
    len_pop = len(population)
    logbook = Logbook()
    logbook.header = ['gen', 'pop'] + (stats.fields if stats else [])
    data_all = {}
    random.setstate(rst)

    for gen in range(1, ngen + 1):
        "评价"
        rst = random.getstate()
        """score"""
        invalid_ind = [ind for ind in population if not ind.fitness.valid]
        fitnesses = toolbox.parallel(iterable=population)
        for ind, fit, in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit[0],
            ind.expr = fit[1]
            ind.y_dim = fit[2]
            ind.withdim = fit[3]
        random.setstate(rst)

        rst = random.getstate()
        """elite"""
        add_ind = []
        add_ind1 = toolbox.select_kbest_target_dim(population,
                                                   K_best=0.05 * len_pop)
        add_ind += add_ind1
        elite_size = len(add_ind)
        random.setstate(rst)

        rst = random.getstate()
        """score"""

        random.setstate(rst)

        rst = random.getstate()
        """record"""
        if halloffame is not None:
            halloffame.update(add_ind1)
            if len(halloffame.items
                   ) > 0 and halloffame.items[-1].fitness.values[0] >= 0.9999:
                print(halloffame.items[-1])
                print(halloffame.items[-1].fitness.values[0])
                break
        random.setstate(rst)

        rst = random.getstate()
        """Dynamic output"""

        record = stats.compile_(population) if stats else {}
        logbook.record(gen=gen, pop=len(population), **record)

        if verbose:
            print(logbook.stream)
        random.setstate(rst)
        """crossover, mutate"""
        offspring = toolbox.select_gs(population, len_pop - elite_size)
        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)

        rst = random.getstate()
        """re-run"""
        offspring.extend(add_ind)
        population[:] = offspring
        random.setstate(rst)

    store = Store()
    store.to_csv(data_all)
    return population, logbook
Example No. 18
def eaMuPlusLambda_hack(population, toolbox, mu, lambda_, cxpb, mutpb, ngen,
                   stats=None, halloffame=None, verbose=__debug__):
    """This is the :math:`(\mu + \lambda)` evolutionary algorithm.
    :param population: A list of individuals.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param mu: The number of individuals to select for the next generation.
    :param lambda\_: The number of children to produce at each generation.
    :param cxpb: The probability that an offspring is produced by crossover.
    :param mutpb: The probability that an offspring is produced by mutation.
    :param ngen: The number of generations.
    :param stats: A :class:`~deap.tools.Statistics` object that is updated
                  inplace, optional.
    :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
                       contain the best individuals, optional.
    :param verbose: Whether or not to log the statistics.
    :returns: The final population
    :returns: A :class:`~deap.tools.Logbook` with the statistics of the
              evolution.
    The algorithm takes in a population and evolves it in place using the
    :func:`varOr` function. It returns the optimized population and a
    :class:`~deap.tools.Logbook` with the statistics of the evolution. The
    logbook will contain the generation number, the number of evaluations for
    each generation and the statistics if a :class:`~deap.tools.Statistics` is
    given as argument. The *cxpb* and *mutpb* arguments are passed to the
    :func:`varOr` function. The pseudocode goes as follows ::
        evaluate(population)
        for g in range(ngen):
            offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
            evaluate(offspring)
            population = select(population + offspring, mu)
    First, the individuals having an invalid fitness are evaluated. Second,
    the evolutionary loop begins by producing *lambda_* offspring from the
    population, the offspring are generated by the :func:`varOr` function. The
    offspring are then evaluated and the next generation population is
    selected from both the offspring **and** the population. Finally, when
    *ngen* generations are done, the algorithm returns a tuple with the final
    population and a :class:`~deap.tools.Logbook` of the evolution.
    This function expects :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
    :meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
    registered in the toolbox. This algorithm uses the :func:`varOr`
    variation.
    """
    logbook = Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    record = stats.compile(population) if stats is not None else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)
        
    # --- Hack ---
    all_generations = {}
    # ------------

    # Begin the generational process
    for gen in range(1, ngen + 1):
        # Vary the population
        offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)
            
        # --- Hack ---
        all_generations[gen] = population + offspring
        # ------------

        # Select the next generation population
        population[:] = toolbox.select(population + offspring, mu)
        

        # Update the statistics with the new population
        record = stats.compile(population) if stats is not None else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)

    return population, logbook, all_generations
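A usage sketch for the hacked variant; in addition to the usual population and logbook it returns every generation's combined parent and offspring pool, which can be mined afterwards, for example to recover the best individual of each generation with DEAP's selBest. The population, toolbox, stats and hof objects are assumed to be set up as for the standard eaMuPlusLambda:

from deap import tools

final_pop, logbook, all_generations = eaMuPlusLambda_hack(
    population, toolbox, mu=50, lambda_=100, cxpb=0.6, mutpb=0.3, ngen=40,
    stats=stats, halloffame=hof)

best_per_gen = {gen: tools.selBest(inds, 1)[0]
                for gen, inds in all_generations.items()}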
Example No. 19
def eaSimple(population,
             toolbox,
             cxpb,
             mutpb,
             ngen,
             stats=None,
             halloffame=None,
             verbose=__debug__,
             pset=None,
             store=True):
    """

    Parameters
    ----------
    population
    toolbox
    cxpb
    mutpb
    ngen
    stats
    halloffame
    verbose
    pset
    store
    Returns
    -------

    """
    rst = random.getstate()
    len_pop = len(population)
    logbook = Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]

    # fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    fitnesses = toolbox.parallel(iterable=population)
    for ind, fit, in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit[0],
        ind.expr = fit[1]
        ind.dim = fit[2]
        ind.withdim = fit[3]

    add_ind = toolbox.select_kbest_target_dim(population, K_best=0.1 * len_pop)
    if halloffame is not None:
        halloffame.update(add_ind)

    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(population), **record)
    if verbose:
        print(logbook.stream)
    data_all = {}

    # Begin the generational process
    random.setstate(rst)
    for gen in range(1, ngen + 1):
        rst = random.getstate()

        if store:
            rst = random.getstate()
            target_dim = toolbox.select_kbest_target_dim.keywords['dim_type']
            subp = functools.partial(sub,
                                     subed=pset.rep_name_list,
                                     subs=pset.real_name_list)
            data = {
                "gen{}_pop{}".format(gen, n): {
                    "gen": gen,
                    "pop": n,
                    "score": i.fitness.values[0],
                    "expr": str(subp(i.expr)),
                    "with_dim": 1 if i.withdim else 0,
                    "dim_is_target_dim": 1 if i.dim in target_dim else 0,
                    "gen_dim": "{}{}".format(gen, 1 if i.withdim else 0),
                    "gen_target_dim": "{}{}".format(gen, 1 if i.dim in target_dim else 0),
                    "score_dim": i.fitness.values[0] if i.withdim else 0,
                    "score_target_dim": i.fitness.values[0] if i.dim in target_dim else 0,
                }
                for n, i in enumerate(population) if i is not None
            }
            data_all.update(data)
        random.setstate(rst)
        # select_gs the next generation individuals
        offspring = toolbox.select_gs(population, len_pop)

        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)

        rst = random.getstate()

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        # fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        # fitnesses = parallelize(n_jobs=3, func=toolbox.evaluate, iterable=invalid_ind,  respective=False)
        fitnesses = toolbox.parallel(iterable=invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit[0],
            ind.expr = fit[1]
            ind.dim = fit[2]
            ind.withdim = fit[3]

        add_ind = toolbox.select_kbest_target_dim(population,
                                                  K_best=0.1 * len_pop)
        add_ind2 = toolbox.select_kbest_dimless(population,
                                                K_best=0.2 * len_pop)
        add_ind3 = toolbox.select_kbest(population, K_best=5)
        offspring += add_ind
        offspring += add_ind2
        offspring += add_ind3

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(add_ind)

            if len(halloffame.items
                   ) > 0 and halloffame.items[-1].fitness.values[0] >= 0.95:
                print(halloffame.items[-1])
                print(halloffame.items[-1].fitness.values[0])
                break
        # Replace the current population by the offspring
        population[:] = offspring

        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

        random.setstate(rst)

    store = Store()
    store.to_csv(data_all)
    return population, logbook
Example No. 20
def multiEaSimple(population, toolbox, cxpb, mutpb, ngen, stats=None,
                  halloffame=None, verbose=__debug__, pset=None, store=True, alpha=1):
    """

    Parameters
    ----------
    population
    toolbox
    cxpb
    mutpb
    ngen
    stats
    halloffame
    verbose
    pset
    store
    alpha

    Returns
    -------

    """
    logbook = Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    random_seed = random.randint(1, 1000)
    # fitnesses = list(toolbox.map(toolbox.evaluate, [str(_) for _ in invalid_ind]))
    # fitnesses2 = toolbox.map(toolbox.evaluate2, [str(_) for _ in invalid_ind])
    fitnesses = parallelize(n_jobs=6, func=toolbox.evaluate, iterable=[str(_) for _ in invalid_ind])
    fitnesses2 = parallelize(n_jobs=6, func=toolbox.evaluate2, iterable=[str(_) for _ in invalid_ind])

    def funcc(a, b):
        """Combine the two scores into a single fitness as (alpha * a + b) / 2."""
        return (alpha * a + b) / 2

    for ind, fit, fit2 in zip(invalid_ind, fitnesses, fitnesses2):
        ind.fitness.values = funcc(fit[0], fit2[0]),
        ind.values = (fit[0], fit2[0])
        ind.expr = (fit[1], fit2[1])
    if halloffame is not None:
        halloffame.update(population)
    random.seed(random_seed)
    record = stats.compile_(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)
    data_all = {}
    # Begin the generational process
    for gen in range(1, ngen + 1):
        # select_gs the next generation individuals
        offspring = toolbox.select_gs(population, len(population))
        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)
        if halloffame is not None:
            offspring.extend(halloffame.items[-2:])

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        random_seed = random.randint(1, 1000)
        # fitnesses = toolbox.map(toolbox.evaluate, [str(_) for _ in invalid_ind])
        # fitnesses2 = toolbox.map(toolbox.evaluate2, [str(_) for _ in invalid_ind])
        fitnesses = parallelize(n_jobs=6, func=toolbox.evaluate, iterable=[str(_) for _ in invalid_ind])
        fitnesses2 = parallelize(n_jobs=6, func=toolbox.evaluate2, iterable=[str(_) for _ in invalid_ind])

        for ind, fit, fit2 in zip(invalid_ind, fitnesses, fitnesses2):
            ind.fitness.values = funcc(fit[0], fit2[0]),
            ind.values = (fit[0], fit2[0])
            ind.expr = (fit[1], fit2[1])

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)
            if halloffame.items[-1].fitness.values[0] >= 0.95:
                print(halloffame.items[-1])
                print(halloffame.items[-1].fitness.values[0])
                print(halloffame.items[-1].values[0])
                print(halloffame.items[-1].values[1])
                break

        if store:
            if pset:
                subp = partial(sub, subed=pset.rep_name_list, subs=pset.name_list)
                data = [{"score": i.values[0], "expr": subp(i.expr[0])} for i in halloffame.items[-2:]]
                data2 = [{"score": i.values[1], "expr": subp(i.expr[1])} for i in halloffame.items[-2:]]
            else:
                data = [{"score": i.values[0], "expr": i.expr} for i in halloffame.items[-2:]]
                data2 = [{"score": i.values[1], "expr": i.expr[2]} for i in halloffame.items[-2:]]
            data_all['gen%s' % gen] = list(zip(data, data2))
        random.seed(random_seed)
        # Replace the current population by the offspring
        population[:] = offspring
        # Append the current generation statistics to the logbook
        record = stats.compile_(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)
    if store:
        store1 = Store()
        store1.to_txt(data_all)

    return population, logbook