Example #1
def sizeOfMovementSelection(individuals, k, tournsize, removal_constant=100, fit_attr="fitness"):
    # Add removal_constant to the raw fitness of above-average-length individuals
    # (a penalty under a minimising fitness), then run a k-fold tournament selection.
    size_avg = sum(len(ind) for ind in individuals) / len(individuals)
    for ind in individuals:
        if len(ind) > size_avg:
            ind.fitness.values = (ind.fitness.values[0] + removal_constant,)
    chosen = []
    for i in range(k):
        aspirants = selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=attrgetter(fit_attr)))
    return chosen
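A minimal usage sketch for the selector above. It assumes the snippet's module imports selRandom and attrgetter from deap.tools and operator; the minimising fitness, variable-length individuals and dummy evaluation below are illustrative, not part of the original code. With a negative weight, DEAP Fitness comparisons use the weighted value, so adding removal_constant pushes oversized individuals towards losing the tournament.

import random
from operator import attrgetter          # needed by sizeOfMovementSelection
from deap import base, creator, tools
from deap.tools import selRandom         # needed by sizeOfMovementSelection

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
# Variable-length individuals so the size penalty can actually trigger.
toolbox.register("individual", lambda: creator.Individual(
    random.random() for _ in range(random.randint(3, 8))))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("select", sizeOfMovementSelection, tournsize=3, removal_constant=100)

pop = toolbox.population(n=20)
for ind in pop:
    ind.fitness.values = (sum(ind),)     # dummy evaluation, just to make fitnesses valid
selected = toolbox.select(pop, k=10)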
Example #2
def select_tournament(individuals, k, tournsize):
    calc_crowd(individuals)
    sorted_ind = sort_by_domination(individuals, k)

    # The last (overflowing) front is set aside; earlier fronts are taken whole, and the
    # remaining slots up to k are filled from it by crowding-based tournaments.
    barage = sorted_ind[-1]
    del sorted_ind[-1]
    chosen = []
    flattened_sorted = list(itertools.chain(*sorted_ind))
    for i in range(k - len(flattened_sorted)):
        aspirants = selection.selRandom(barage, tournsize)
        chosen.append(max(aspirants, key=cmp_to_key(crowd_cmp)))

    flattened_sorted.extend(chosen)
    return flattened_sorted
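The helpers calc_crowd, sort_by_domination and crowd_cmp are not shown in this listing. As a rough orientation only, a comparator compatible with the cmp_to_key call above could look like the sketch below, assuming calc_crowd stores a crowding distance on each individual under a hypothetical crowding_dist attribute.

def crowd_cmp(ind1, ind2):
    # Prefer the less crowded individual: a larger crowding distance compares as
    # "greater", so max(..., key=cmp_to_key(crowd_cmp)) picks it.
    if ind1.crowding_dist > ind2.crowding_dist:
        return 1
    if ind1.crowding_dist < ind2.crowding_dist:
        return -1
    return 0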
Example #3
def tigerMosquitoSelection(individuals, k, tournsize, useFitness):
    """Select the best individual/s according to some method. The list returned contains
    references to the input *individuals*.
    Required
    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    Defined by the user
    :param tournsize: The number of individuals participating in each tournament.
    :param useFitness: If Fitness or pheromone should be used
    :returns: A list of selected individuals.
    """
    chosen = []
    for i in range(k):
        # Code to do selection here, using parameter `tournsize`
        sel_individuals = selRandom(individuals, tournsize)
        if useFitness:
            # use fitness to decide
            best_individual = max(sel_individuals, key=attrgetter('fitness'))
        else:
            # use pheromone to decide
            best_individual = max(sel_individuals, key=attrgetter('pheromone'))

        chosen.append(best_individual)
    return chosen
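A hedged registration sketch for this selector. The pheromone attribute and the maximising fitness are assumptions about the surrounding project, chosen only so that max(..., key=attrgetter(...)) ranks higher values as better; selRandom and attrgetter are again assumed to be imported at module level.

import random
from deap import base, creator, tools

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Mosquito", list, fitness=creator.FitnessMax, pheromone=0.0)

toolbox = base.Toolbox()
toolbox.register("individual", tools.initRepeat, creator.Mosquito, random.random, n=4)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# Switch useFitness to True to rank aspirants by fitness instead of pheromone.
toolbox.register("select", tigerMosquitoSelection, tournsize=3, useFitness=False)

pop = toolbox.population(n=30)
for ind in pop:
    ind.fitness.values = (sum(ind),)   # dummy evaluation
    ind.pheromone = random.random()    # illustrative pheromone values
selected = toolbox.select(pop, k=10)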
Example #4
def pso_ga(func, pmin, pmax, smin, smax, int_idx, params, ga, dv):
    # Setting params
    c1, c2, wmin, wmax, ga_iter_min, ga_iter_max, iter_gamma, ga_num_min, ga_num_max, num_beta,\
    tourn_size, cxpb, mutpb, indpd, eta,\
    pso_iter, swarm_size = \
    params['c1'], params['c2'], params['wmin'], params['wmax'],\
    params['ga_iter_min'], params['ga_iter_max'], params['iter_gamma'],\
    params['ga_num_min'], params['ga_num_max'], params['num_beta'],\
    params['tourn_size'], params['cxpd'], params['mutpd'], params['indpd'], params['eta'],\
    params['pso_iter'], params['swarm_size']

    # int_idx must be a list. If a single number is given, convert to list.
    if isinstance(int_idx,int):
        int_idx = [int_idx]

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))  # Minimization of a single scalar value
    creator.create("Particle", list, fitness=creator.FitnessMin, speed=list,
                   smin=None, smax=None, best=None, int_idx=None)

    toolbox = base.Toolbox()
    toolbox.register("particle", generate_part, dim=len(pmin), pmin=pmin, pmax=pmax, smin=smin, smax=smax,
                     int_idx=int_idx)
    toolbox.register("population", tools.initRepeat, list, toolbox.particle)
    toolbox.register("update", updateParticle, c1=c1, c2=c2)
    toolbox.register("evaluate", func)

    toolbox.register("mate", tools.cxTwoPoint)
    #toolbox.register("mutate", ga_hybrid_polymutate, low=pmin, up=pmax, indpb=indpd, eta=eta)
    toolbox.register("mutate", ga_hybrid_gaussianmutate, low=pmin, up=pmax, indpb=indpd, sigma=smax)

    pop = toolbox.population(n=swarm_size)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    best = None
    pso_hof_num = max(1,round(ga_num_min*0.2))
    pso_hof = tools.HallOfFame(pso_hof_num)

    for g in range(pso_iter):
        # PSO segment first
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            # Note: comparing Fitness objects uses the weighted values; with a negative weight
            # that ordering is the reverse of the raw values, so .values[0] is compared explicitly.
            if not part.best or part.best.fitness.values[0] > part.fitness.values[0]:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness.values[0] > part.fitness.values[0]:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values

        for part in pop:
            # Linear annealing for inertia velocity coefficient (the w weights)
            toolbox.update(part, best=best, w=wmax - (wmax-wmin)*(g/pso_iter)**iter_gamma)

        if ga:
            # GA segment
            # GA population size grows from ga_num_min towards ga_num_max as the PSO run progresses
            ga_pop = round(ga_num_min + (g/pso_iter)**iter_gamma*(ga_num_max-ga_num_min))
            ga_gen = round(ga_iter_min + (g/pso_iter)**num_beta*(ga_iter_max-ga_iter_min))
            if len(pso_hof) == 0:
                ga_mask = [1 for _ in range(ga_pop)] + [0 for _ in range(swarm_size-ga_pop)]
                random.shuffle(ga_mask)
                population = [x for x,mask in zip(pop, ga_mask) if mask == 1]
            else:
                ga_pop -= pso_hof_num
                ga_mask = [1 for _ in range(ga_pop)] + [0 for _ in range(swarm_size - ga_pop)]
                random.shuffle(ga_mask)
                population = [x for x, mask in zip(pop, ga_mask) if mask == 1] + pso_hof.items

            halloffame = tools.HallOfFame(ga_pop)
            halloffame.update(population)
            ga_eval = 0
            # Begin the generational process
            for gen in range(ga_gen):
                # Select the next generation individuals. The built-in tournament selector
                # does not work for multi-objective fitness, so run the tournament manually.
                # offspring = toolbox.select(population, len(population))
                chosen = []
                for _ in range(ga_pop):
                    aspirants = selRandom(population, tourn_size)
                    # Minimisation: pick the aspirant with the smallest raw fitness value.
                    chosen.append(min(aspirants, key=lambda ind: ind.fitness.values[0]))
                offspring = chosen

                # Vary the pool of individuals
                offspring = varAnd(offspring, toolbox, cxpb, mutpb)

                # Evaluate the individuals with an invalid fitness
                invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
                ga_eval += len(invalid_ind)
                fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
                for ind, fit in zip(invalid_ind, fitnesses):
                    ind.fitness.values = fit

                # Update the hall of fame with the generated individuals
                halloffame.update(offspring)

                # Replace the current population by the offspring
                population[:] = offspring

            counter = 0
            if best.fitness.values[0] > halloffame[0].fitness.values[0]:
                best = creator.Particle(halloffame[0])
                best.fitness.values = halloffame[0].fitness.values
            for idx, mask in enumerate(ga_mask):
                if mask == 1:
                    try:
                        if pop[idx].fitness.values[0] > halloffame[counter].fitness.values[0]:
                            pop[idx] = halloffame[counter]
                            pop[idx].best = creator.Particle(halloffame[counter])
                            pop[idx].best.fitness.values = halloffame[counter].fitness.values
                        counter += 1
                    except IndexError:
                        break
        pso_hof.update(pop)

        # Gather all the fitnesses in one list and print the stats
        try:
            logbook.record(gen=g, evals=len(pop) + ga_eval, **stats.compile(pop))
        except UnboundLocalError:
            # Means ga=False and ga_eval is not assigned
            logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
        #print(best)
        print(logbook.stream)

    print(best.fitness.values)

    # Printing to excel
    write_excel = create_excel_file('./results/pso_ga_results.xlsx')
    wb = openpyxl.load_workbook(write_excel)
    ws = wb[wb.sheetnames[-1]]

    ws.cell(1, 1).value = 'Optimal Decision Values'
    print_array_to_excel(dv, (2, 1), ws=ws, axis=1)
    print_array_to_excel(best, (3, 1), ws=ws, axis=1)

    genfit = logbook.select("gen")
    avgfit = logbook.select("avg")
    stdfit = logbook.select("std")
    minfit = logbook.select("min")
    maxfit = logbook.select("max")

    ws.cell(5, 1).value = 'gen'
    ws.cell(6, 1).value = 'avg'
    ws.cell(7, 1).value = 'std'
    ws.cell(8, 1).value = 'min'
    ws.cell(9, 1).value = 'max'

    print_array_to_excel(genfit, (5, 2), ws=ws, axis=1)
    print_array_to_excel(avgfit, (6, 2), ws=ws, axis=1)
    print_array_to_excel(stdfit, (7, 2), ws=ws, axis=1)
    print_array_to_excel(minfit, (8, 2), ws=ws, axis=1)
    print_array_to_excel(maxfit, (9, 2), ws=ws, axis=1)

    wb.save(write_excel)


    return pop, logbook, best
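The params dict that pso_ga unpacks must contain the keys below (note the 'cxpd' and 'mutpd' spellings used in the lookups). The values, objective, bounds and labels are illustrative placeholders; the helpers referenced inside pso_ga (generate_part, updateParticle, ga_hybrid_gaussianmutate, create_excel_file, print_array_to_excel) are assumed to be available in its module, and the run writes its results to ./results/pso_ga_results.xlsx.

params = {
    'c1': 1.5, 'c2': 1.5,                  # PSO cognitive / social coefficients
    'wmin': 0.4, 'wmax': 0.9,              # inertia weight annealing range
    'ga_iter_min': 2, 'ga_iter_max': 10, 'iter_gamma': 1.0,
    'ga_num_min': 5, 'ga_num_max': 20, 'num_beta': 1.0,
    'tourn_size': 3, 'cxpd': 0.5, 'mutpd': 0.2, 'indpd': 0.1, 'eta': 0.5,
    'pso_iter': 50, 'swarm_size': 30,
}

def sphere(x):                              # illustrative objective, returns a 1-tuple
    return (sum(v * v for v in x),)

pmin, pmax = [-5.0, -5.0, 0], [5.0, 5.0, 10]    # decision-variable bounds
smin, smax = [-1.0, -1.0, -1], [1.0, 1.0, 1]    # particle speed bounds

pop, logbook, best = pso_ga(sphere, pmin, pmax, smin, smax,
                            int_idx=2,                 # third variable is integer-valued
                            params=params, ga=True,
                            dv=['x1', 'x2', 'x3'])     # labels written to the Excel sheet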
Example #5
def own_ea(population,
           toolbox,
           tourn_size,
           cxpb,
           mutpb,
           ngen,
           stats=None,
           halloffame=None,
           verbose=__debug__):
    """
    This is modified from the DEAP package's eaSimple algorithm. Below is the official documentation from DEAP.

    This algorithm reproduces the simplest evolutionary algorithm as
    presented in chapter 7 of [Back2000]_.

    :param population: A list of individuals.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param tourn_size: The number of individuals participating in each tournament.
    :param cxpb: The probability of mating two individuals.
    :param mutpb: The probability of mutating an individual.
    :param ngen: The number of generations.
    :param stats: A :class:`~deap.tools.Statistics` object that is updated
                  inplace, optional.
    :param halloffame: A :class:`~deap.tools.HallOfFame` object that will
                       contain the best individuals, optional.
    :param verbose: Whether or not to log the statistics.
    :returns: The final population
    :returns: A :class:`~deap.tools.Logbook` with the statistics of the
              evolution

    The algorithm takes in a population and evolves it in place using the
    :meth:`varAnd` method. It returns the optimized population and a
    :class:`~deap.tools.Logbook` with the statistics of the evolution. The
    logbook will contain the generation number, the number of evaluations for
    each generation and the statistics if a :class:`~deap.tools.Statistics` is
    given as argument. The *cxpb* and *mutpb* arguments are passed to the
    :func:`varAnd` function. The pseudocode goes as follows ::

        evaluate(population)
        for g in range(ngen):
            population = select(population, len(population))
            offspring = varAnd(population, toolbox, cxpb, mutpb)
            evaluate(offspring)
            population = offspring

    As stated in the pseudocode above, the algorithm goes as follows. First, it
    evaluates the individuals with an invalid fitness. Second, it enters the
    generational loop where the selection procedure is applied to entirely
    replace the parental population. The 1:1 replacement ratio of this
    algorithm **requires** the selection procedure to be stochastic and to
    select the same individual multiple times, for example,
    :func:`~deap.tools.selTournament` and :func:`~deap.tools.selRoulette`.
    Third, it applies the :func:`varAnd` function to produce the next
    generation population. Fourth, it evaluates the new individuals and
    computes the statistics on this population. Finally, when *ngen*
    generations are done, the algorithm returns a tuple with the final
    population and a :class:`~deap.tools.Logbook` of the evolution.

    .. note::

        Using a non-stochastic selection method will result in no selection as
        the operator selects *n* individuals from a pool of *n*.

    This function expects the :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
    :meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
    registered in the toolbox.

    .. [Back2000] Back, Fogel and Michalewicz, "Evolutionary Computation 1 :
       Basic Algorithms and Operators", 2000.
    """

    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)

    population_store = [[list(pop) for pop in population]]
    population_stats = [class_counter(population)]

    start_gen = 1

    k = len(population)
    # Begin the generational process
    for gen in range(start_gen, ngen + 1):
        # Select the next generation individuals. The built-in tournament selector
        # does not work for multi-objective fitness, so run the tournament manually.
        #offspring = toolbox.select(population, len(population))
        chosen = []
        for _ in range(k):
            aspirants = selRandom(population, tourn_size)
            # Multi-objective: rank aspirants by the sum of their weighted fitness values.
            chosen.append(max(aspirants, key=lambda ind: sum(ind.fitness.wvalues)))
        offspring = chosen

        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)

        # Replace the current population by the offspring
        population[:] = offspring

        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)

        # Log population and population statistics for 0, 1 (ESD), 2 (SP)
        population_store.append(([list(pop) for pop in population],
                                 [pop.fitness.values for pop in population]))
        population_stats.append(class_counter(population))

        if verbose:
            print(logbook.stream)

    return population, logbook
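A hedged sketch of how own_ea could be driven on a toy two-objective minimisation problem. The operators and objectives are illustrative; class_counter, selRandom and varAnd, which own_ea uses internally, are assumed to be defined or imported in its module.

import random
import numpy as np
from deap import base, creator, tools

creator.create("FitnessMulti", base.Fitness, weights=(-1.0, -1.0))
creator.create("IndividualMO", list, fitness=creator.FitnessMulti)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, 0.0, 1.0)
toolbox.register("individual", tools.initRepeat, creator.IndividualMO, toolbox.attr_float, n=6)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", lambda ind: (sum(ind), max(ind) - min(ind)))   # toy objectives
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0.0, sigma=0.1, indpb=0.2)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean, axis=0)

pop = toolbox.population(n=40)
final_pop, logbook = own_ea(pop, toolbox, tourn_size=3, cxpb=0.5, mutpb=0.2,
                            ngen=10, stats=stats, halloffame=tools.HallOfFame(5))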
Example #6
def pso_ga(func,
           pmin,
           pmax,
           smin,
           smax,
           int_idx,
           params,
           ga,
           initial_guess=None):
    '''
    :param func: Objective function to minimise; must return the fitness as a 1-tuple.
    :param pmin: List of lower bounds for the decision variables.
    :param pmax: List of upper bounds for the decision variables.
    :param smin: List of lower bounds for the particle speeds.
    :param smax: List of upper bounds for the particle speeds.
    :param int_idx: Index (or list of indices) of the integer-valued decision variables.
    :param params: Dict of PSO/GA hyperparameters; see the unpacking below for the expected keys.
    :param ga: Whether to run the GA segment after each PSO iteration.
    :param initial_guess: List of lists, where each nested list is one initial guess vector you want to use.
    :return: The final swarm, the statistics logbook, and the best particle found.
    '''
    # Setting params
    c1, c2, wmin, wmax, ga_iter_min, ga_iter_max, iter_gamma, ga_num_min, ga_num_max, num_beta,\
    tourn_size, cxpb, mutpb, indpd, eta,\
    pso_iter, swarm_size = \
    params['c1'], params['c2'], params['wmin'], params['wmax'],\
    params['ga_iter_min'], params['ga_iter_max'], params['iter_gamma'],\
    params['ga_num_min'], params['ga_num_max'], params['num_beta'],\
    params['tourn_size'], params['cxpd'], params['mutpd'], params['indpd'], params['eta'],\
    params['pso_iter'], params['swarm_size']

    # int_idx must be a list. If a single number is given, convert to list.
    if isinstance(int_idx, int):
        int_idx = [int_idx]

    creator.create("FitnessMin", base.Fitness,
                   weights=(-1.0, ))  # Minimization of a single scalar value
    creator.create("Particle",
                   list,
                   fitness=creator.FitnessMin,
                   speed=list,
                   smin=None,
                   smax=None,
                   best=None,
                   int_idx=None)

    toolbox = base.Toolbox()
    toolbox.register("particle",
                     generate_part,
                     dim=len(pmin),
                     pmin=pmin,
                     pmax=pmax,
                     smin=smin,
                     smax=smax,
                     int_idx=int_idx)
    toolbox.register("population", tools.initRepeat, list, toolbox.particle)
    toolbox.register("update", updateParticle, c1=c1, c2=c2)
    toolbox.register("evaluate", func)

    toolbox.register("mate", tools.cxTwoPoint)
    #toolbox.register("mutate", ga_hybrid_polymutate, low=pmin, up=pmax, indpb=indpd, eta=eta)
    toolbox.register("mutate",
                     ga_hybrid_gaussianmutate,
                     low=pmin,
                     up=pmax,
                     indpb=indpd,
                     sigma=[(u - l) * 5 for u, l in zip(pmax, pmin)])

    pop = toolbox.population(n=swarm_size)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    best = None
    pso_hof_num = max(1, round(ga_num_min * 0.2))
    pso_hof = tools.HallOfFame(pso_hof_num)

    # Evaluate boundary points first
    boundary_points = [
        list(x)
        for x in itertools.product(*[[x, y] for x, y in zip(pmin, pmax)])
    ]
    boundary_evals = [func(point) for point in boundary_points]

    if initial_guess:
        initial_guess += boundary_points
    else:
        initial_guess = boundary_points

    if len(initial_guess) <= len(pop):
        for ig, single_p in zip(initial_guess, pop):
            single_p[:] = ig
    else:
        print(
            'Warning: More initial guesses given than the swarm population size!'
        )
        for ig, single_p in zip(initial_guess[-len(pop):], pop):
            single_p[:] = ig

    for g in range(pso_iter):
        # PSO segment first
        for part in pop:
            try:
                idx = boundary_points.index(part)
                part.fitness.values = boundary_evals[idx]
            except ValueError:
                # Means current part is not a boundary point
                part.fitness.values = toolbox.evaluate(part)

            # Note: comparing Fitness objects uses the weighted values; with a negative weight
            # that ordering is the reverse of the raw values, so .values[0] is compared explicitly.
            if not part.best or part.best.fitness.values[
                    0] > part.fitness.values[0]:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness.values[0] > part.fitness.values[0]:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values
        for part in pop:
            # Linear annealing for inertia velocity coefficient (the w weights)
            toolbox.update(part,
                           best=best,
                           w=wmax - (wmax - wmin) * g / pso_iter)
        if ga:
            # GA segment
            # Start at min and approach max to ensure diversity towards end of PSO
            ga_pop = round(ga_num_min + (g / pso_iter)**num_beta *
                           (ga_num_max - ga_num_min))
            ga_gen = round(ga_iter_min + (g / pso_iter)**iter_gamma *
                           (ga_iter_max - ga_iter_min))
            if len(pso_hof) == 0:
                ga_mask = [1 for _ in range(ga_pop)
                           ] + [0 for _ in range(swarm_size - ga_pop)]
                random.shuffle(ga_mask)
                population = [x for x, mask in zip(pop, ga_mask) if mask == 1]
            else:
                ga_pop -= pso_hof_num
                ga_mask = [1 for _ in range(ga_pop)
                           ] + [0 for _ in range(swarm_size - ga_pop)]
                random.shuffle(ga_mask)
                population = [x for x, mask in zip(pop, ga_mask) if mask == 1
                              ] + pso_hof.items

            halloffame = tools.HallOfFame(ga_pop)
            halloffame.update(population)
            ga_eval = 0
            # Begin the generational process
            for gen in range(ga_gen):
                # Select the next generation individuals. The built-in tournament selector
                # does not work for multi-objective fitness, so run the tournament manually.
                # offspring = toolbox.select(population, len(population))
                chosen = []
                for _ in range(ga_pop):
                    aspirants = selRandom(population, tourn_size)
                    # Minimisation: pick the aspirant with the smallest raw fitness value.
                    chosen.append(min(aspirants, key=lambda ind: ind.fitness.values[0]))
                offspring = chosen

                # Vary the pool of individuals
                offspring = varAnd(offspring, toolbox, cxpb, mutpb)

                # Evaluate the individuals with an invalid fitness
                invalid_ind = [
                    ind for ind in offspring if not ind.fitness.valid
                ]
                ga_eval += len(invalid_ind)
                fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
                for ind, fit in zip(invalid_ind, fitnesses):
                    ind.fitness.values = fit

                # Update the hall of fame with the generated individuals
                halloffame.update(offspring)

                # Replace the current population by the offspring
                population[:] = offspring

            counter = 0
            if best.fitness.values[0] > halloffame[0].fitness.values[0]:
                best = creator.Particle(halloffame[0])
                best.fitness.values = halloffame[0].fitness.values
            for idx, mask in enumerate(ga_mask):
                if mask == 1:
                    try:
                        if pop[idx].fitness.values[0] > halloffame[
                                counter].fitness.values[0]:
                            pop[idx] = halloffame[counter]
                            pop[idx].best = creator.Particle(halloffame[counter])
                            pop[idx].best.fitness.values = halloffame[
                                counter].fitness.values
                        counter += 1
                    except IndexError:
                        break

        pso_hof.update(pop)

        # Gather all the fitnesses in one list and print the stats
        try:
            logbook.record(gen=g,
                           evals=len(pop) + ga_eval,
                           **stats.compile(pop))
        except UnboundLocalError:
            # Means ga=False and ga_eval is not assigned
            logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
        #print(best)
        print(logbook.stream)

    print(best.fitness.values)
    return pop, logbook, best
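Compared with the earlier variant, this version also seeds the swarm with caller-supplied starting vectors plus the 2**d corner points of the [pmin, pmax] box, all of which are evaluated before the PSO loop. A hedged call sketch, reusing the hypothetical sphere objective, bounds and params dict from the earlier pso_ga sketch:

initial_guess = [[0.5, -0.5, 1],      # one nested list per starting vector
                 [1.0, 1.0, 3]]

pop, logbook, best = pso_ga(sphere, pmin, pmax, smin, smax,
                            int_idx=[2], params=params, ga=True,
                            initial_guess=initial_guess)
print(best, best.fitness.values)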