Example 1
    def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
        self.domain = domain_factory()

        tournament_ratio = self.params_gphh.tournament_ratio
        pop_size = self.params_gphh.pop_size
        n_gen = self.params_gphh.n_gen
        min_tree_depth = self.params_gphh.min_tree_depth
        max_tree_depth = self.params_gphh.max_tree_depth
        crossover_rate = self.params_gphh.crossover_rate
        mutation_rate = self.params_gphh.mutation_rate

        creator.create("FitnessMin", Fitness, weights=(self.weight,))
        creator.create("Individual", PrimitiveTree, fitness=creator.FitnessMin)

        self.toolbox = Toolbox()
        self.toolbox.register("expr", genHalfAndHalf, pset=self.pset, min_=min_tree_depth, max_=max_tree_depth)
        self.toolbox.register("individual", tools.initIterate, creator.Individual, self.toolbox.expr)
        self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
        self.toolbox.register("compile", gp.compile, pset=self.pset)

        if self.evaluation_method == EvaluationGPHH.SGS:
            self.toolbox.register("evaluate", self.evaluate_heuristic, domains=self.training_domains)
        # if self.evaluation_method == EvaluationGPHH.SGS_DEVIATION:
        #     self.toolbox.register("evaluate", self.evaluate_heuristic_sgs_deviation, domains=self.training_domains)
        elif self.evaluation_method == EvaluationGPHH.PERMUTATION_DISTANCE:
            self.toolbox.register("evaluate", self.evaluate_heuristic_permutation, domains=self.training_domains)
        # self.toolbox.register("evaluate", self.evaluate_heuristic, domains=[self.training_domains[1]])

        self.toolbox.register("select", tools.selTournament, tournsize=int(tournament_ratio * pop_size))
        self.toolbox.register("mate", gp.cxOnePoint)
        self.toolbox.register("expr_mut", gp.genFull, min_=0, max_=max_tree_depth)
        self.toolbox.register("mutate", gp.mutUniform, expr=self.toolbox.expr_mut, pset=self.pset)

        self.toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
        self.toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

        stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
        stats_size = tools.Statistics(len)
        mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
        mstats.register("avg", np.mean)
        mstats.register("std", np.std)
        mstats.register("min", np.min)
        mstats.register("max", np.max)

        pop = self.toolbox.population(n=pop_size)
        hof = tools.HallOfFame(1)
        self.hof = hof
        pop, log = algorithms.eaSimple(pop, self.toolbox, crossover_rate, mutation_rate, n_gen, stats=mstats,
                                       halloffame=hof, verbose=True)

        self.best_heuristic = hof[0]
        print('best_heuristic: ', self.best_heuristic)

        self.func_heuristic = self.toolbox.compile(expr=self.best_heuristic)
        self.policy = GPHHPolicy(self.domain, self.domain_model,
                                 self.func_heuristic,
                                 features=self.list_feature,
                                 params_gphh=self.params_gphh,
                                 recompute_cpm=True)
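
The best tree found by eaSimple is compiled into self.func_heuristic, an ordinary Python
callable whose parameters are the arguments declared on self.pset. A minimal, hypothetical
sketch of calling it directly (the number and meaning of the feature values are assumptions,
not taken from this code):

    # One positional value per argument declared on self.pset, in declaration order.
    feature_values = (3.0, 2, 0.5)    # hypothetical task features
    priority = self.func_heuristic(*feature_values)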
Example 2
    def alg(fixed_schedule_part, initial_schedule, current_time=0.0, initial_population=None, only_new_pops=False):

        n = alg_params["n"]

        ### generate heft_based population
        init_ind_count = int(n * init_sched_percent)
        heft_particle = initial_schedule if isinstance(initial_schedule, (CompoundParticle, GsaCompoundParticle)) \
            else generate_func(wf, rm, estimator, initial_schedule, fixed_schedule_part, current_time)
        init_arr = [deepcopy(heft_particle) for _ in range(init_ind_count)]
        generated_arr = [generate_func(wf, rm, estimator,
                                          schedule=None,
                                          fixed_schedule_part=fixed_schedule_part,
                                          current_time=current_time)
                                 for _ in range(n - init_ind_count)]
        heft_based_population = init_arr + generated_arr

        ### generate new population
        random_population = [generate_func(wf, rm, estimator,
                                          schedule=None,
                                          fixed_schedule_part=fixed_schedule_part,
                                          current_time=current_time)
                                 for _ in range(n)]

        populations = {
            "inherited": initial_population,
            "heft_based": heft_based_population,
            "random": random_population
        }

        # Drop the "inherited" slot when no usable population was passed in.
        if "inherited" in populations and (populations["inherited"] is None or len(populations["inherited"]) == 0):
            del populations["inherited"]

        def migration(populations, k):
            pops = [pop for name, pop in sorted(populations.items(), key=lambda x: x[0])]
            migRing(pops, k, selection=emigrant_selection)



        task_map = {task.id: task for task in wf.get_all_unique_tasks()}
        node_map = {node.name: node for node in rm.get_nodes()}
        schedule_builder = ParticleScheduleBuilder(wf, rm, estimator,
                                                   task_map, node_map,
                                                   fixed_schedule_part)
        pf_schedule = partial(schedule_builder, current_time=current_time)

        toolbox = Toolbox()
        toolbox.register("run_alg", algorithm(pf_schedule=pf_schedule, generate_=lambda n: None, **alg_params))
        toolbox.register("migration", migration)

        lb, st = deepcopy(log_book), deepcopy(stats)

        pop, logbook, best = run_mpga(toolbox=toolbox, logbook=lb, stats=st, initial_populations=populations, **alg_params)
        resulted_schedule = pf_schedule(best)
        result = (best, pop, resulted_schedule, None), logbook
        return result
Example 3
def run_dcga(wf, estimator, rm, heft_mapping, heft_ordering, **params):

    cxpb = params["cxpb"]  # 0.9
    mutpb = params["mutpb"]  # 0.9
    ngen = params["ngen"]  # 100
    pop_size = params["pop_size"]  # 100

    ctx = {'env': Env(wf, rm, estimator)}

    toolbox = Toolbox()
    toolbox.register("select", tools.selTournament, tournsize=2)
    toolbox.register("mate", _mate, ctx)
    toolbox.register("mutate", _mutate, ctx)
    toolbox.register("evaluate", lambda x: [fitness_mapping_and_ordering(ctx, {MAPPING_SPECIE: x[0], ORDERING_SPECIE: x[1]})])

    # heft_mapping = extract_mapping_from_ga_file("../../temp/heft_etalon_full_tr100_m100.json", rm)

    pop_mapping = mapping_heft_based_initialize(ctx, pop_size, heft_mapping, 3)
    pop_ordering = ordering_heft_based_initialize(ctx, pop_size, heft_ordering, 3)
    pop = [ListBasedIndividual(el) for el in zip(pop_mapping, pop_ordering)]
    for p in pop:
        p.fitness = Fitness(0)

    stat = tools.Statistics(key=lambda x: x.fitness)
    stat.register("solsstat", lambda pop: [{"best": numpy.max(pop).values[0]}])

    final_pop, logbook = deap.algorithms.eaSimple(pop, toolbox, cxpb, mutpb, ngen, stat)
    best = max(final_pop, key=lambda x: x.fitness)
    return best.fitness.values[0], logbook
Example 4
def add_seeded_individuals(toolbox: base.Toolbox, options: dict, ccl_objects: dict,
                           primitive_set: gp.PrimitiveSetTyped) -> List[gp.PrimitiveTree]:
    """Add individuals specified by user and their mutations"""
    pop = []
    codes = set()
    raw_codes = []
    with open(options['seeded_individuals']) as f:
        for line in f:
            raw_codes.append(line.strip())

    for no, ind in enumerate(raw_codes):
        try:
            x = creator.Individual(gp.PrimitiveTree.from_string(ind, primitive_set))
        except TypeError:
            raise RuntimeError(f'Incorrect seeded individual (probably incorrect symbol): {ind}')
        try:
            sympy_code = str(generate_sympy_expr(x, ccl_objects))
        except RuntimeError:
            raise RuntimeError(f'Initial individual causes problem: {ind}')
        print(f'[Seed {no:2d} No mutation]: {sympy_code}')
        x.sympy_code = sympy_code
        pop.append(x)
        i = 0
        codes.add(sympy_code)
        while i < options['initial_seed_mutations']:
            y = toolbox.clone(x)
            try:
                y, = toolbox.mutate(y)
            except IndexError:
                raise RuntimeError(f'Incorrect seeded individual (probably wrong arity): {ind}')
            if not check_symbol_counts(y, options):
                continue
            try:
                mut_sympy_expr = generate_sympy_expr(y, ccl_objects)
                mut_sympy_code = str(mut_sympy_expr)
            except RuntimeError:
                continue

            if mut_sympy_code in codes:
                continue

            if mut_sympy_expr.has(sympy.zoo, sympy.oo, sympy.nan, sympy.I):
                continue

            if options['max_constant_allowed'] is not None and not check_max_constant(mut_sympy_expr, options):
                continue

            codes.add(mut_sympy_code)
            i += 1
            print(f'[Seed {no:2d} Mutation {i:2d}]: {mut_sympy_code}')
            y.sympy_code = mut_sympy_code
            pop.append(y)
    return pop
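
add_seeded_individuals parses each line of the seed file with gp.PrimitiveTree.from_string
against primitive_set, so every line must be a prefix-notation expression over primitives that
are actually registered there. A hypothetical sketch of the inputs (the primitive names, the
option values and ccl_objects are assumptions, not taken from this code):

    # seeds.txt -- one expression per line, prefix notation over the registered primitives:
    #   add(q, mul(q, q))
    #   div(q, add(q, 1.0))
    options = {
        'seeded_individuals': 'seeds.txt',
        'initial_seed_mutations': 5,      # mutated variants generated per seed
        'max_constant_allowed': 1000,     # or None to skip the constant check
    }
    seeded_pop = add_seeded_individuals(toolbox, options, ccl_objects, primitive_set)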
Example 5
def create_pso_alg(pf_schedule, generate_, **params):
    def fit_converter(func):
        def wrap(*args, **kwargs):
            x = func(*args, **kwargs)
            m = Utility.makespan(x)
            return FitnessStd(values=(m, 0.0))

        return wrap

    def componoud_update(w, c1, c2, p, best, pop, min=-1, max=1):
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        ordering_update(w,
                        c1,
                        c2,
                        p.ordering,
                        best.ordering,
                        pop,
                        min=min,
                        max=max)

    toolbox = Toolbox()
    toolbox.register("population", generate_)
    toolbox.register("fitness", fit_converter(pf_schedule))
    toolbox.register("update", componoud_update)

    pso_alg = partial(run_pso, toolbox=toolbox, **params)
    return pso_alg
Example 6
    def toolbox(self, mapMatrix, rankList, ordFilter):

        _wf, rm, estimator = self.env()
        heft_schedule = self.heft_schedule()

        heft_particle = rd_order.generate(_wf, rm, estimator, mapMatrix,
                                          rankList, ordFilter, heft_schedule)

        heft_gen = lambda n: [
            deepcopy(heft_particle)
            if random.random() > 1.00 else rd_order.generate(
                _wf, rm, estimator, mapMatrix, rankList, ordFilter)
            for _ in range(n)
        ]

        #def componoud_update(w, c1, c2, p, best, pop, g):
        def componoud_update(w, c1, c2, p, best, pop):
            #if g%2 == 0:
            rd_map.update(w, c1, c2, p.mapping, best.mapping, pop)
            #else:
            rd_order.ordering_update(w, c1, c2, p.ordering, best.ordering, pop)

        toolbox = Toolbox()
        toolbox.register("population", heft_gen)
        toolbox.register("fitness", rd_order.fitness, _wf, rm, estimator)
        toolbox.register("update", componoud_update)
        return toolbox
Example 7
    def toolbox(self):

        _wf, rm, estimator = self.env()
        heft_schedule = self.heft_schedule()

        heft_particle = om_order.generate(_wf, rm, estimator, heft_schedule)

        heft_gen = lambda n: [
            deepcopy(heft_particle)
            if random.random() > 1.00 else om_order.generate(
                _wf, rm, estimator) for _ in range(n)
        ]

        def componoud_update(w, c1, c2, p, best, pop, min=-1, max=1):
            om_map.update(w, c1, c2, p.mapping, best.mapping, pop)
            om_order.ordering_update(w,
                                     c1,
                                     c2,
                                     p.ordering,
                                     best.ordering,
                                     pop,
                                     min=min,
                                     max=max)

        toolbox = Toolbox()
        toolbox.register("population", heft_gen)
        toolbox.register("fitness", om_order.fitness, _wf, rm, estimator)
        toolbox.register("update", componoud_update)
        return toolbox
Example 8
    def toolbox(self, transfer):

        _wf, rm, estimator = self.env()
        estimator.transfer_time = transfer
        heft_schedule = self.heft_schedule()

        heft_particle = generate(_wf, rm, estimator, heft_schedule)

        heft_gen = lambda n: ([
            deepcopy(heft_particle)
            if random.random() > 1.00 else generate(_wf, rm, estimator)
            for _ in range(n - 1)
        ] + [deepcopy(heft_particle)])

        #heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)])

        def componoud_update(w, c1, c2, p, best, pop, min=-1, max=1):
            #doMap = random.random()
            #if doMap < 0.5:
            mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
            ordering_update(w,
                            c1,
                            c2,
                            p.ordering,
                            best.ordering,
                            pop,
                            min=min,
                            max=max)

        toolbox = Toolbox()
        toolbox.register("population", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator)
        toolbox.register("update", componoud_update)
        return toolbox
Example 9
    def toolbox(self, mapMatrix, rankList, ordFilter):

        _wf, rm, estimator = self.env()
        estimator.transfer_time = 500
        heft_schedule = self.heft_schedule()

        heft_particle = generate(_wf, rm, estimator, mapMatrix, rankList,
                                 ordFilter, heft_schedule)
        heft_gen = lambda n: ([
            deepcopy(heft_particle) if random.random() > 1.00 else generate(
                _wf, rm, estimator, mapMatrix, rankList, ordFilter)
            for _ in range(n - 1)
        ] + [deepcopy(heft_particle)])

        #heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter) for _ in range(n)]

        def componoud_update(w, c1, c2, p, best, pop):
            #doMap = random.random()
            #if doMap < 0.1:
            mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
            #else:
            ordering_update(w, c1, c2, p.ordering, best.ordering, pop)

        toolbox = Toolbox()
        toolbox.register("population", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator)
        toolbox.register("update", componoud_update)
        return toolbox
Example 10
def generate_population(toolbox: base.Toolbox, ccl_objects: dict, options: dict) -> List[gp.PrimitiveTree]:
    """Generate initial population"""

    pop = []
    codes: Set[str] = set()
    pbar = tqdm.tqdm(total=options['population_size'])
    while len(pop) < options['population_size']:
        ind = toolbox.individual()
        if not check_symbol_counts(ind, options):
            continue
        try:
            sympy_expr = generate_sympy_expr(ind, ccl_objects)
            if sympy_expr.has(sympy.zoo, sympy.oo, sympy.nan, sympy.I):
                continue
            sympy_code = str(sympy_expr)
        except RuntimeError:
            continue

        if options['max_constant_allowed'] is not None and not check_max_constant(sympy_expr, options):
            continue

        if options['unique_population']:
            if sympy_code in codes:
                continue

        codes.add(sympy_code)
        ind.sympy_code = sympy_code
        pop.append(ind)
        pbar.update()

    pbar.close()
    return pop
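
generate_population only needs an 'individual' generator registered on the toolbox plus the
options keys read in the loop above. A minimal wiring sketch (the primitive set, the creator
classes and ccl_objects are assumptions, not taken from this code):

    options = {
        'population_size': 200,
        'unique_population': True,        # reject individuals whose sympy form was already seen
        'max_constant_allowed': 1000,     # or None to skip the constant check
    }
    toolbox.register('expr', gp.genHalfAndHalf, pset=primitive_set, min_=1, max_=4)
    toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
    pop = generate_population(toolbox, ccl_objects, options)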
Example 11
def genealogy_plot(history: tools.support.History,
                   toolbox: base.Toolbox) -> np.ndarray:
    '''
    Plotting function for the genealogical history of the GA run.
    :param history: tools.support.History, genealogy recorded during the run
    :param toolbox: base.Toolbox, provides the evaluate operator used to colour the nodes
    :return: ndarray, 3-channel RGB array rendered from the plot
    '''
    graph = networkx.DiGraph(history.genealogy_tree)
    graph = graph.reverse()  # Make the graph top-down

    fig, ax = plt.subplots(figsize=(8, 6), dpi=120)
    try:
        colors = [
            toolbox.evaluate(history.genealogy_history[i])[0] for i in graph
        ]
    except Exception:  # catch-all for evaluation failures
        colors = [i for i in range(history.genealogy_index)]

    positions = graphviz_layout(graph, prog="dot")
    networkx.draw(graph,
                  positions,
                  node_color=colors,
                  with_labels=True,
                  font_size=10,
                  alpha=0.75,
                  ax=ax)

    plt.title('Evolution: Genealogy Tree', fontsize='xx-large')
    fig.canvas.draw()
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
    image = image.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    plt.close(fig)
    return image
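
genealogy_plot expects a tools.History that was filled in while the algorithm ran. A minimal
sketch of the standard DEAP wiring (the rest of the toolbox and the population variable are
assumed to exist already):

    history = tools.History()
    # Decorate the variation operators so every offspring is recorded in the genealogy tree.
    toolbox.decorate("mate", history.decorator)
    toolbox.decorate("mutate", history.decorator)
    history.update(population)    # register the initial population as the roots
    # ... run the evolutionary loop ...
    image = genealogy_plot(history, toolbox)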
Example 12
def apply_mutation(
    population: list,
    toolbox: base.Toolbox,
    mut_neurons_prob: float,
    mut_layers_prob: float,
) -> int:
    """Mutate the population with probabilities.

    Mutate weights and bias elements for every individual. Then randomly mutate
    neuron count for ``mut_neurons_prob`` individuals and layer count for
    ``mut_layers_prob`` individuals.

    :param population: list of individuals.
    :param toolbox: object where the mutation operators are defined.
    :param mut_neurons_prob: probability to mutate a neuron (append/pop).
    :param mut_layers_prob: probability to mutate a layer (append/pop).
    :returns: the number of individuals mutated.

    """
    mutated_individuals = 0

    for index, mutant in enumerate(population):
        mut_bias_genes = mut_weights_genes = neuron_diff = layer_diff = 0

        mut_bias_genes = toolbox.mutate_bias(mutant)
        mut_weights_genes = toolbox.mutate_weights(mutant)
        mutated_individuals += 1
        del mutant.fitness.values

        # Ensure that we don't modify the hidden layers if they are constant
        if not mutant.constant_hidden_layers:
            if random.random() < mut_neurons_prob:
                neuron_diff = toolbox.mutate_neuron(mutant)

            if random.random() < mut_layers_prob:
                layer_diff = toolbox.mutate_layer(mutant)

        DGPLOGGER.debug(
            f"    For individual {index}:\n"
            f"        {mut_bias_genes} mutated bias genes\n"
            f"        {mut_weights_genes} mutated weights genes\n"
            f"        {neuron_diff} neuron changes\n"
            f"        {layer_diff} layer changes\n"
        )

    return mutated_individuals
Example 13
	def _initialize_toolbox(self, toolbox: base.Toolbox):
		toolbox.register(
			'individual',
			TrapDistribution,
			constrained_nodes = self._unique_constrained_nodes,
			trap_amount = self._trap_amount,
			random_initialization = True,
		)

		toolbox.register('evaluate', lambda d: self._constraint_set.score(d))
		toolbox.register('mate', mate_distributions, distributor=self)
		toolbox.register('mutate', mutate_trap_distribution)
Example 14
	def _initialize_toolbox(self, toolbox: base.Toolbox):
		toolbox.register(
			'individual',
			DistributionDelta,
			origin = self._origin_trap_distribution,
			added_nodes = self._added,
			removed_node_indexes = self._removed_trap_indexes,
			max_trap_difference = self._max_trap_delta,
			trap_amount_delta = self.trap_amount - self._origin_trap_distribution.trap_amount,
		)

		toolbox.register('evaluate', lambda d: self._constraint_set.score(d.trap_distribution))
		toolbox.register('mate', mate_distribution_deltas)
		toolbox.register('mutate', mutate_distribution_delta)
Example 15
    def create_set_ind(cls, toolbox: base.Toolbox, fitness_function: callable,
                       possible_values: list):
        """
        Method to create the genotype of the individuals represented by a Set from the list of
        possible values received as an argument.

        Parameters
        ----------
        toolbox: base.Toolbox
            DEAP Toolbox instance.

        fitness_function: callable
            DEAP fitness function.

        possible_values: list
            List of possible values to insert into the individual.
        """
        creator.create('Individual', set, fitness=fitness_function)
        toolbox.register('attr_set',
                         initSetGenotype,
                         possible_values=list(possible_values))
        toolbox.register('individual', tools.initIterate, creator.Individual,
                         toolbox.attr_set)
Example 16
def run_dcga(wf, estimator, rm, heft_mapping, heft_ordering, **params):

    cxpb = params["cxpb"]  #0.9
    mutpb = params["mutpb"]  #0.9
    ngen = params["ngen"]  #100
    pop_size = params["pop_size"]  #100

    ctx = {'env': Env(wf, rm, estimator)}

    toolbox = Toolbox()
    toolbox.register("select", tools.selTournament, tournsize=2)
    toolbox.register("mate", _mate, ctx)
    toolbox.register("mutate", _mutate, ctx)
    toolbox.register(
        "evaluate", lambda x: [
            fitness_mapping_and_ordering(ctx, {
                MAPPING_SPECIE: x[0],
                ORDERING_SPECIE: x[1]
            })
        ])

    # heft_mapping = extract_mapping_from_ga_file("../../temp/heft_etalon_full_tr100_m100.json", rm)

    pop_mapping = mapping_heft_based_initialize(ctx, pop_size, heft_mapping, 3)
    pop_ordering = ordering_heft_based_initialize(ctx, pop_size, heft_ordering,
                                                  3)
    pop = [ListBasedIndividual(el) for el in zip(pop_mapping, pop_ordering)]
    for p in pop:
        p.fitness = Fitness(0)

    stat = tools.Statistics(key=lambda x: x.fitness)
    stat.register("solsstat", lambda pop: [{"best": numpy.max(pop).values[0]}])

    final_pop, logbook = deap.algorithms.eaSimple(pop, toolbox, cxpb, mutpb,
                                                  ngen, stat)
    best = max(final_pop, key=lambda x: x.fitness)
    return best.fitness.values[0], logbook
Example 17
    def create_permutation_ind(cls,
                               toolbox: base.Toolbox,
                               fitness_function: callable,
                               initial_values: list = None,
                               individual_size: int = None):
        """
        Method that creates and registers (following the guidelines defined in DEAP) the
        genotype of the individuals (registered as 'Individual') and the function that
        generates individuals (registered as 'individual').

        Parameters
        ----------
        toolbox: base.Toolbox
            DEAP Toolbox instance.

        fitness_function: callable
            DEAP fitness function.

        initial_values: list, default=None
            List of lists with the initial genotypes used to create the initial population. This
            allows incorporating a priori knowledge about better solutions and usually gives better
            results than random initialisation of the genotypes.

            If this parameter is not provided, it will be necessary to provide the argument
            individual_size.

        individual_size: int, default=None
            Size of the individual genotype.

            If this parameter is not provided, it will be necessary to provide the argument
            initial_values.

        Notes
        -----
        Parameters initial_values and individual_size cannot be provided at the same time.
        """
        assert (initial_values is None or individual_size is None) and \
               not (initial_values is None and individual_size is None), \
               'Exactly one of initial_values or individual_size must be provided.'

        # Create from initial values
        if initial_values is not None:
            ind_generator = lambda initial_values: initial_values[
                random.randint(0,
                               len(initial_values) - 1)]
            toolbox.register('attr_permutation', ind_generator, initial_values)
        # Create randomly
        else:
            toolbox.register('attr_permutation', random.sample,
                             range(individual_size), individual_size)

        creator.create('Individual', list, fitness=fitness_function)
        toolbox.register('individual', tools.initIterate, creator.Individual,
                         toolbox.attr_permutation)
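
A hypothetical usage sketch showing both initialisation modes (the owning class name and the
fitness class are assumptions; the creator/toolbox calls are standard DEAP):

    creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
    toolbox = base.Toolbox()

    # Random permutations of size 10 ...
    IndividualFactory.create_permutation_ind(toolbox, creator.FitnessMin, individual_size=10)
    # ... or seeded from known-good genotypes (mutually exclusive with individual_size):
    # IndividualFactory.create_permutation_ind(toolbox, creator.FitnessMin,
    #                                          initial_values=[[2, 0, 1, 3], [1, 3, 0, 2]])

    ind = toolbox.individual()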
Example 18
    def alg(fixed_schedule_part, initial_schedule, current_time=0.0):
        def generate_(n):
            init_ind_count = int(n*init_sched_percent)
            res = []
            if initial_schedule is not None and init_ind_count > 0:
                heft_particle = pso_generate(wf, rm, estimator, initial_schedule)
                init_arr = [deepcopy(heft_particle) for _ in range(init_ind_count)]
                res = res + init_arr
            if n - init_ind_count > 0:
                generated_arr = [pso_generate(wf, rm, estimator,
                                          schedule=None,
                                          fixed_schedule_part=fixed_schedule_part,
                                          current_time=current_time)
                                 for _ in range(n - init_ind_count)]
                res = res + generated_arr
            return res

        def fit_converter(func):
            def wrap(*args, **kwargs):
                x = func(*args, **kwargs)
                m = Utility.makespan(x)
                return FitnessStd(values=(m, 0.0))
            return wrap

        def componoud_update(w, c1, c2, p, best, pop, min=-1, max=1):
            mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
            ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

        task_map = {task.id: task for task in wf.get_all_unique_tasks()}
        node_map = {node.name: node for node in rm.get_nodes()}

        schedule_builder = ParticleScheduleBuilder(wf, rm, estimator,
                                                   task_map, node_map,
                                                   fixed_schedule_part)
        pf_schedule = partial(schedule_builder, current_time=current_time)

        toolbox = Toolbox()
        toolbox.register("population", generate_)
        toolbox.register("fitness", fit_converter(pf_schedule))
        toolbox.register("update", componoud_update)

        pop, logbook, best = run_pso(toolbox=toolbox, **params)

        resulted_schedule = pf_schedule(best)
        result = (best, pop, resulted_schedule, None), logbook
        return result
Example 19
    def toolbox(self, rankList):
        _wf, rm, estimator = self.env()
        heft_schedule = self.heft_schedule()

        heft_particle = generate(_wf, rm, estimator, rankList, heft_schedule)

        #heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, rankList) for _ in range(n-1)] + [deepcopy(heft_particle)])
        heft_gen = lambda n: ([
            deepcopy(heft_particle) if random.random() > 1.00 else generate(
                _wf, rm, estimator, rankList) for _ in range(n)
        ])

        def componoud_update(w, c1, c2, p, best, pop):
            mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
            ordering_update(w, c1, c2, p.ordering, best.ordering, pop)

        toolbox = Toolbox()
        toolbox.register("population", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator)
        toolbox.register("update", componoud_update)
        return toolbox
Example 20
    def toolbox(self):

        _wf, rm, estimator = self.env()
        heft_schedule = self.heft_schedule()

        heft_particle = generate(_wf, rm, estimator, heft_schedule)

        heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)]

        def componoud_update(w, c1, c2, p, best, pop, min=-1, max=1):
            mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
            ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

        toolbox = Toolbox()
        toolbox.register("population", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator)
        toolbox.register("update", componoud_update)
        return toolbox
Example 21
def create_pso_alg(pf_schedule, generate_, **params):
    def fit_converter(func):
        def wrap(*args, **kwargs):
            x = func(*args, **kwargs)
            m = Utility.makespan(x)
            return FitnessStd(values=(m, 0.0))
        return wrap

    def componoud_update(w, c1, c2, p, best, pop, min=-1, max=1):
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)


    toolbox = Toolbox()
    toolbox.register("population", generate_)
    toolbox.register("fitness", fit_converter(pf_schedule))
    toolbox.register("update", componoud_update)

    pso_alg = partial(run_pso, toolbox=toolbox, **params)
    return pso_alg
Example 22
def create_gsa_alg(pf_schedule, generate_, **params):
    def fit_converter(func):
        def wrap(*args, **kwargs):
            x = func(*args, **kwargs)
            m = Utility.makespan(x)
            return FitnessStd(values=(m, 0.0))
        return wrap

    def compound_force(p, pop, kbest, G):
        mapping_force = force(p.mapping, (p.mapping for p in pop), kbest, G)
        ordering_force = force(p.ordering, (p.ordering for p in pop), kbest, G)
        return (mapping_force, ordering_force)

    def compound_update(w, c, p, min=-1, max=1):
        gsa_mapping_update(w, c, p.mapping)
        gsa_ordering_update(w, c,  p.ordering, min, max)
        pass

    W, C = params["w"], params["w"]

    if "generations_count_before_merge" in params and "generations_count_after_merge" in params:
        all_iterations_count = int(params["generations_count_before_merge"]) + int(params["generations_count_after_merge"])
    else:
        all_iterations_count = None

    toolbox = Toolbox()
    toolbox.register("generate", generate_)
    toolbox.register("fitness", fit_converter(pf_schedule))
    toolbox.register("estimate_force", compound_force)
    toolbox.register("update", compound_update, W, C)
    toolbox.register("G", partial(G, all_iter_number=all_iterations_count))
    toolbox.register("kbest", partial(Kbest, all_iter_number=all_iterations_count))

    pso_alg = partial(run_gsa, toolbox=toolbox, **params)
    return pso_alg
Example 23
def execute(toolbox: base.Toolbox, cases: int = 100) -> List[str]:
    population = toolbox.population(n=POPULATION_SIZE)
    hall_of_fame = tools.ParetoFront()

    stats = tools.Statistics(lambda i: i.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max", "best"

    # Evaluate every individuals
    for individual in population:
        individual.fitness.values = toolbox.evaluate(individual)

    hall_of_fame.update(population)
    record = stats.compile(population)
    logbook.record(gen=0, evals=len(population), **record)
    print(logbook.stream)

    generated_cases = []
    last_fitness = float('inf')
    current_fitness = None
    generation_count = 1
    while generation_count <= MAX_GENERATIONS and (
            last_fitness != current_fitness
            or current_fitness == float('inf')):
        last_fitness = current_fitness

        # Select the next generation individuals
        offspring = toolbox.select(population, floor(POPULATION_SIZE * 0.9))

        # Clone the selected individuals
        offspring = list(toolbox.map(toolbox.clone, offspring))

        # Add new individuals from the population
        offspring += toolbox.population(n=POPULATION_SIZE - len(offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if not random() < MATE_RATIO:
                continue
            toolbox.mate(child1, child2)
            del child1.fitness.values
            del child2.fitness.values

        for mutant in offspring:
            if not random() < MUTATION_RATIO:
                continue
            toolbox.mutate(mutant)
            del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [
            individual for individual in offspring
            if not individual.fitness.valid
        ]
        for individual in invalid_ind:
            individual.fitness.values = toolbox.evaluate(individual)

        generated_cases = tools.selBest(population, k=cases)
        current_fitness = sum(
            toolbox.map(op.itemgetter(0),
                        toolbox.map(toolbox.evaluate, generated_cases)))
        best = choice(generated_cases)
        word = "".join(best)

        # Select the next generation population
        population = toolbox.select(population + offspring, POPULATION_SIZE)
        record = stats.compile(population)
        logbook.record(gen=generation_count,
                       evals=len(invalid_ind),
                       best=word,
                       **record)
        print(logbook.stream)

        generation_count += 1

    return [''.join(case) for case in generated_cases]
Example 24
    def alg(fixed_schedule_part, initial_schedule, current_time=0.0, initial_population=None):



        ga_functions = GAFunctions2(wf, rm, estimator)
        generate = partial(ga_generate, ga_functions=ga_functions,
                           fixed_schedule_part=fixed_schedule_part,
                           current_time=current_time, init_sched_percent=init_sched_percent,
                           initial_schedule=initial_schedule)

        toolbox = Toolbox()
        toolbox.register("generate", generate)
        toolbox.register("evaluate", fit_converter(ga_functions.build_fitness(fixed_schedule_part, current_time)))
        toolbox.register("clone", deepcopy)
        toolbox.register("mate", ga_functions.crossover)
        toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
        toolbox.register("mutate", ga_functions.mutation)
        # toolbox.register("select_parents", )
        toolbox.register("select", tools.selRoulette)
        pop, logbook, best = run_ga(toolbox=toolbox, **alg_params)

        resulted_schedule = ga_functions.build_schedule(best, fixed_schedule_part, current_time)
        result = (best, pop, resulted_schedule, None), logbook
        return result
Example 25
    def __call__(self):
        _wf = wf(self.wf_name)
        rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
        estimator = ModelTimeEstimator(bandwidth=10)

        empty_fixed_schedule_part = Schedule({node: [] for node in rm.get_nodes()})

        heft_schedule = run_heft(_wf, rm, estimator)

        fixed_schedule = empty_fixed_schedule_part

        ga_functions = GAFunctions2(_wf, rm, estimator)

        generate = partial(ga_generate, ga_functions=ga_functions,
                           fixed_schedule_part=fixed_schedule,
                           current_time=0.0, init_sched_percent=0.05,
                           initial_schedule=heft_schedule)

        stats = tools.Statistics(lambda ind: ind.fitness.values[0])
        stats.register("avg", numpy.mean)
        stats.register("std", numpy.std)
        stats.register("min", numpy.min)
        stats.register("max", numpy.max)

        logbook = tools.Logbook()
        logbook.header = ["gen", "evals"] + stats.fields

        toolbox = Toolbox()
        toolbox.register("generate", generate)
        toolbox.register("evaluate", fit_converter(ga_functions.build_fitness(empty_fixed_schedule_part, 0.0)))
        toolbox.register("clone", deepcopy)
        toolbox.register("mate", ga_functions.crossover)
        toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
        toolbox.register("mutate", ga_functions.mutation)
        # toolbox.register("select_parents", )
        # toolbox.register("select", tools.selTournament, tournsize=4)
        toolbox.register("select", tools.selRoulette)
        pop, logbook, best = run_ga(toolbox=toolbox,
                                logbook=logbook,
                                stats=stats,
                                **self.GA_PARAMS)

        resulted_schedule = ga_functions.build_schedule(best, empty_fixed_schedule_part, 0.0)

        ga_makespan = Utility.makespan(resulted_schedule)
        return (ga_makespan, logbook)
Example 26
_wf = wf("Montage_100")
rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
                                            ideal_flops=20, transfer_time=100)
sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

heft_schedule = run_heft(_wf, rm, estimator)
heft_mapping = schedule_to_position(heft_schedule)
heft_mapping.velocity = Velocity({})

heft_gen = lambda n: [deepcopy(heft_mapping) if random.random() > 1.0 else generate(_wf, rm, estimator, 1)[0] for _ in range(n)]

W, C1, C2 = 0.9, 0.6, 0.2
GEN, N = 500, 30

toolbox = Toolbox()
toolbox.register("population", heft_gen)
toolbox.register("fitness", fitness,  _wf, rm, estimator, sorted_tasks)
toolbox.register("update", update)

stats = tools.Statistics(lambda ind: ind.fitness.values[0])
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)

logbook = tools.Logbook()
logbook.header = ["gen", "evals"] + stats.fields


Example 27
 def register_selection_operator(self, toolbox: Toolbox) -> None:
     # self.toolbox.register("select", tools.selTournament, tournsize=5)
     toolbox.register("select", tools.selNSGA2)
Example 28
 def register_crossover_operator(self, toolbox: Toolbox) -> None:
     # the crossover between individuals is a uniform crossover
     # that means each test case has 50-50 probability of ending up in each of the new individuals
     toolbox.register("mate", tools.cxUniform, indpb=0.5)
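
A standalone illustration of what tools.cxUniform does with indpb=0.5 (toy lists rather than
the suite's real individuals):

    from deap import tools
    a, b = list("AAAA"), list("BBBB")
    tools.cxUniform(a, b, indpb=0.5)    # each position is swapped between a and b with probability 0.5
    # a and b are modified in place, e.g. a -> ['A', 'B', 'A', 'B'], b -> ['B', 'A', 'B', 'A']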
Example 29
    def alg(fixed_schedule_part,
            initial_schedule,
            current_time=0.0,
            initial_population=None):

        ga_functions = GAFunctions2(wf, rm, estimator)
        generate = partial(ga_generate,
                           ga_functions=ga_functions,
                           fixed_schedule_part=fixed_schedule_part,
                           current_time=current_time,
                           init_sched_percent=init_sched_percent,
                           initial_schedule=initial_schedule)

        toolbox = Toolbox()
        toolbox.register("generate", generate)
        toolbox.register(
            "evaluate",
            fit_converter(
                ga_functions.build_fitness(fixed_schedule_part, current_time)))
        toolbox.register("clone", deepcopy)
        toolbox.register("mate", ga_functions.crossover)
        toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
        toolbox.register("mutate", ga_functions.mutation)
        # toolbox.register("select_parents", )
        toolbox.register("select", tools.selRoulette)
        pop, logbook, best = run_ga(toolbox=toolbox, **alg_params)

        resulted_schedule = ga_functions.build_schedule(
            best, fixed_schedule_part, current_time)
        result = (best, pop, resulted_schedule, None), logbook
        return result
Example 30
    def alg(fixed_schedule_part,
            initial_schedule,
            current_time=0.0,
            initial_population=None,
            only_new_pops=False):

        n = alg_params["n"]

        ### generate heft_based population
        init_ind_count = int(n * init_sched_percent)
        heft_particle = initial_schedule if isinstance(initial_schedule, (CompoundParticle, GsaCompoundParticle)) \
            else generate_func(wf, rm, estimator, initial_schedule, fixed_schedule_part, current_time)
        init_arr = [deepcopy(heft_particle) for _ in range(init_ind_count)]
        generated_arr = [
            generate_func(wf,
                          rm,
                          estimator,
                          schedule=None,
                          fixed_schedule_part=fixed_schedule_part,
                          current_time=current_time)
            for _ in range(n - init_ind_count)
        ]
        heft_based_population = init_arr + generated_arr

        ### generate new population
        random_population = [
            generate_func(wf,
                          rm,
                          estimator,
                          schedule=None,
                          fixed_schedule_part=fixed_schedule_part,
                          current_time=current_time) for _ in range(n)
        ]

        populations = {
            "inherited": initial_population,
            "heft_based": heft_based_population,
            "random": random_population
        }

        # Drop the "inherited" slot when no usable population was passed in.
        if "inherited" in populations and (populations["inherited"] is None
                                           or len(populations["inherited"]) == 0):
            del populations["inherited"]

        def migration(populations, k):
            pops = [
                pop for name, pop in sorted(populations.items(),
                                            key=lambda x: x[0])
            ]
            migRing(pops, k, selection=emigrant_selection)

        task_map = {task.id: task for task in wf.get_all_unique_tasks()}
        node_map = {node.name: node for node in rm.get_nodes()}
        schedule_builder = ParticleScheduleBuilder(wf, rm, estimator, task_map,
                                                   node_map,
                                                   fixed_schedule_part)
        pf_schedule = partial(schedule_builder, current_time=current_time)

        toolbox = Toolbox()
        toolbox.register(
            "run_alg",
            algorithm(pf_schedule=pf_schedule,
                      generate_=lambda n: None,
                      **alg_params))
        toolbox.register("migration", migration)

        lb, st = deepcopy(log_book), deepcopy(stats)

        pop, logbook, best = run_mpga(toolbox=toolbox,
                                      logbook=lb,
                                      stats=st,
                                      initial_populations=populations,
                                      **alg_params)
        resulted_schedule = pf_schedule(best)
        result = (best, pop, resulted_schedule, None), logbook
        return result
Example 31
def create_gsa_alg(pf_schedule, generate_, **params):
    def fit_converter(func):
        def wrap(*args, **kwargs):
            x = func(*args, **kwargs)
            m = Utility.makespan(x)
            return FitnessStd(values=(m, 0.0))

        return wrap

    def compound_force(p, pop, kbest, G):
        mapping_force = force(p.mapping, (p.mapping for p in pop), kbest, G)
        ordering_force = force(p.ordering, (p.ordering for p in pop), kbest, G)
        return (mapping_force, ordering_force)

    def compound_update(w, c, p, min=-1, max=1):
        gsa_mapping_update(w, c, p.mapping)
        gsa_ordering_update(w, c, p.ordering, min, max)
        pass

    W, C = params["w"], params["w"]

    if "generations_count_before_merge" in params and "generations_count_after_merge" in params:
        all_iterations_count = int(
            params["generations_count_before_merge"]) + int(
                params["generations_count_after_merge"])
    else:
        all_iterations_count = None

    toolbox = Toolbox()
    toolbox.register("generate", generate_)
    toolbox.register("fitness", fit_converter(pf_schedule))
    toolbox.register("estimate_force", compound_force)
    toolbox.register("update", compound_update, W, C)
    toolbox.register("G", partial(G, all_iter_number=all_iterations_count))
    toolbox.register("kbest",
                     partial(Kbest, all_iter_number=all_iterations_count))

    pso_alg = partial(run_gsa, toolbox=toolbox, **params)
    return pso_alg
Example 32
    def test_fixed_ordering(self):
        _wf = wf("Montage_25")
        rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
        estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
                                            ideal_flops=20, transfer_time=100)
        sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

        heft_schedule = run_heft(_wf, rm, estimator)
        heft_mapping = schedule_to_position(heft_schedule)

        heft_gen = lambda: heft_mapping if random.random() > 0.95 else generate(_wf, rm, estimator)


        toolbox = Toolbox()
        # toolbox.register("generate", generate, _wf, rm, estimator)
        toolbox.register("generate", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)

        toolbox.register("force_vector_matrix", force_vector_matrix, rm)
        toolbox.register("velocity_and_position", velocity_and_position, _wf, rm, estimator)
        toolbox.register("G", G)
        toolbox.register("kbest", Kbest)

        statistics = Statistics()
        statistics.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
        statistics.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
        statistics.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
        statistics.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

        logbook = Logbook()
        logbook.header = ("gen", "G", "kbest", "min", "avr", "max", "std")

        pop_size = 100
        iter_number = 100
        kbest = pop_size
        ginit = 5

        final_pop = run_gsa(toolbox, statistics, logbook, pop_size, iter_number, kbest, ginit)

        best = min(final_pop, key=lambda x: toolbox.fitness(x).mofit)
        solution = {MAPPING_SPECIE: list(zip(sorted_tasks, best)), ORDERING_SPECIE: sorted_tasks}
        schedule = build_schedule(_wf, estimator, rm, solution)
        Utility.validate_static_schedule(_wf, schedule)
        makespan = Utility.makespan(schedule)
        print("Final makespan: {0}".format(makespan))

        pass
Example 33
def do_exp(wf_name):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
                                                ideal_flops=20, transfer_time=100)

    empty_fixed_schedule_part = Schedule({node: [] for node in rm.get_nodes()})

    heft_schedule = run_heft(_wf, rm, estimator)

    ga_functions = GAFunctions2(_wf, rm, estimator)

    generate = partial(ga_generate, ga_functions=ga_functions,
                               fixed_schedule_part=empty_fixed_schedule_part,
                               current_time=0.0, init_sched_percent=0.05,
                               initial_schedule=heft_schedule)


    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    toolbox = Toolbox()
    toolbox.register("generate", generate)
    toolbox.register("evaluate", fit_converter(ga_functions.build_fitness(empty_fixed_schedule_part, 0.0)))
    toolbox.register("clone", deepcopy)
    toolbox.register("mate", ga_functions.crossover)
    toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
    toolbox.register("mutate", ga_functions.mutation)
    # toolbox.register("select_parents", )
    # toolbox.register("select", tools.selTournament, tournsize=4)
    toolbox.register("select", tools.selRoulette)
    pop, logbook, best = run_ga(toolbox=toolbox,
                                logbook=logbook,
                                stats=stats,
                                **GA_PARAMS)

    resulted_schedule = ga_functions.build_schedule(best, empty_fixed_schedule_part, 0.0)

    Utility.validate_static_schedule(_wf, resulted_schedule)

    ga_makespan = Utility.makespan(resulted_schedule)
    return ga_makespan
Example 34
File: srwc.py Project: boliqq07/BGP
def mainPart(x_,
             y_,
             pset,
             max_=5,
             pop_n=100,
             random_seed=2,
             cxpb=0.8,
             mutpb=0.1,
             ngen=5,
             tournsize=3,
             max_value=10,
             double=False,
             score=None,
             cal_dim=True,
             target_dim=None,
             inter_add=True,
             iner_add=True,
             random_add=False,
             store=True):
    """

    Parameters
    ----------
    target_dim
    max_
    inter_add
    iner_add
    random_add
    cal_dim
    score
    double
    x_
    y_
    pset
    pop_n
    random_seed
    cxpb
    mutpb
    ngen
    tournsize
    max_value

    Returns
    -------

    """
    if score is None:
        score = [r2_score, explained_variance_score]

    if cal_dim:
        assert all([isinstance(i, Dim) for i in pset.dim_list
                    ]), "all import dim of pset should be Dim object"

    random.seed(random_seed)
    toolbox = Toolbox()

    if isinstance(pset, ExpressionSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedSet):
        PTrees = FixedTree
        Generate = generate_index
        mutate = mutUniForm_index
        mate = partial(cxOnePoint_index, pset=pset)

    else:
        raise NotImplementedError("unsupported pset type")
    if double:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0, ))

    PTrees_ = creator.create("PTrees_",
                             PTrees,
                             fitness=Fitness_,
                             dim=dnan,
                             withdim=0)
    toolbox.register("generate", Generate, pset=pset, min_=1, max_=max_)
    toolbox.register("individual",
                     initIterate,
                     container=PTrees_,
                     generator=toolbox.generate)
    toolbox.register('population',
                     initRepeat,
                     container=list,
                     func=toolbox.individual)
    # def selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)
    toolbox.register("select_kbest_target_dim",
                     selKbestDim,
                     dim_type=target_dim,
                     fuzzy=True)
    toolbox.register("select_kbest_dimless", selKbestDim, dim_type="integer")
    toolbox.register("select_kbest", selKbestDim, dim_type='ignore')
    # def mate
    toolbox.register("mate", mate)
    # def mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, ExpressionSet):
        toolbox.decorate(
            "mate",
            staticLimit(key=operator.attrgetter("height"),
                        max_value=max_value))
        toolbox.decorate(
            "mutate",
            staticLimit(key=operator.attrgetter("height"),
                        max_value=max_value))
    # def evaluate
    toolbox.register("evaluate",
                     calculatePrecision,
                     pset=pset,
                     x=x_,
                     y=y_,
                     scoring=score[0],
                     cal_dim=cal_dim,
                     inter_add=inter_add,
                     iner_add=iner_add,
                     random_add=random_add)
    toolbox.register("evaluate2",
                     calculatePrecision,
                     pset=pset,
                     x=x_,
                     y=y_,
                     scoring=score[1],
                     cal_dim=cal_dim,
                     inter_add=inter_add,
                     iner_add=iner_add,
                     random_add=random_add)
    toolbox.register("parallel",
                     parallelize,
                     n_jobs=1,
                     func=toolbox.evaluate,
                     respective=False)
    toolbox.register("parallel2",
                     parallelize,
                     n_jobs=1,
                     func=toolbox.evaluate2,
                     respective=False)

    pop = toolbox.population(n=pop_n)

    haln = 10
    hof = HallOfFame(haln)

    stats1 = Statistics(lambda ind: ind.fitness.values[0]
                        if ind and ind.y_dim in target_dim else 0)
    stats1.register("max", np.max)

    stats2 = Statistics(lambda ind: ind.y_dim in target_dim if ind else 0)
    stats2.register("countable_number", np.sum)
    stats = MultiStatistics(score1=stats1, score2=stats2)

    population, logbook = eaSimple(pop,
                                   toolbox,
                                   cxpb=cxpb,
                                   mutpb=mutpb,
                                   ngen=ngen,
                                   stats=stats,
                                   halloffame=hof,
                                   pset=pset,
                                   store=store)

    return population, hof
Example 35
ginit = 10
W, C = 0.2, 0.5


def compound_force(p, pop, kbest, G):
    mapping_force = force(p.mapping, (p.mapping for p in pop), kbest, G)
    ordering_force = force(p.ordering, (p.ordering for p in pop), kbest, G)
    return (mapping_force, ordering_force)

def compound_update(w, c, p, min=-1, max=1):
    mapping_update(w, c, p.mapping)
    ordering_update(w, c,  p.ordering, min, max)
    pass


toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator)
toolbox.register("estimate_force", compound_force)
toolbox.register("update", compound_update, W, C)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
Example 36
from heft.core.environment.Utility import wf, Utility
from heft.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
from heft.experiments.cga.utilities.common import repeat

_wf = wf("Montage_40")
rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
                                            ideal_flops=20, transfer_time=100)
sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

heft_schedule = run_heft(_wf, rm, estimator)
heft_mapping = schedule_to_position(heft_schedule)

heft_gen = lambda n: [deepcopy(heft_mapping) if random.random() > 1.0 else generate(_wf, rm, estimator, 1)[0] for _ in range(n)]

toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
toolbox.register("force_vector_matrix", force_vector_matrix)
toolbox.register("velocity_and_position", velocity_and_position, beta=0.0)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
Example 37
 def register_mutation_operator(self, toolbox: Toolbox) -> None:
     toolbox.register("mutate", self.sapienz_mut_suite, indpb=0.5)
Esempio n. 39
0
    position = particleind.entity
    ordering = particleind.ordering
    solution = construct_solution(position, ordering)

    sched = build_schedule(_wf, estimator, rm, solution)
    makespan = Utility.makespan(sched)
    ## TODO: make a real estimation later
    fit = FitnessStd(values=(makespan, 0.0))
    ## TODO: make a normal multi-objective fitness estimation
    fit.mofit = makespan
    return fit

    # unreachable (dead code): this alternative fitness path is never executed
    # return basefitness(_wf, rm, estimator, solution)


toolbox = Toolbox()
# common functions
toolbox.register("map", map)
toolbox.register("clone", deepcopy)
toolbox.register("population", population)
toolbox.register("fitness", fitness)

# pso functions
toolbox.register("update", update)
# ga functions
toolbox.register("mutate", mutate)
toolbox.register("mate", mate)
toolbox.register("select", tools.selNSGA2)

ga = partial(run_nsga2,
             toolbox=toolbox,
Esempio n. 40
0
heft_schedule = run_heft(_wf, rm, estimator)
heft_mapping = schedule_to_position(heft_schedule)

heft_mapping.velocity = MappingParticle.Velocity({})

heft_gen = lambda n: [
    deepcopy(heft_mapping)
    if random.random() > 1.0 else generate(_wf, rm, estimator, 1)[0]
    for _ in range(n)
]

W, C1, C2 = 0.1, 0.6, 0.2
GEN, N = 300, 50
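# Conventional PSO naming: W is the inertia weight, C1/C2 the cognitive and social
# coefficients; a swarm of N particles is evolved for GEN iterations.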

toolbox = Toolbox()
toolbox.register("population", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
toolbox.register("update", update)

stats = tools.Statistics(lambda ind: ind.fitness.values[0])
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)

logbook = tools.Logbook()
logbook.header = ["gen", "evals"] + stats.fields


def do_exp():
Esempio n. 41
0
    def test_fixed_ordering(self):
        _wf = wf("Montage_25")
        rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
        estimator = SimpleTimeCostEstimator(comp_time_cost=0,
                                            transf_time_cost=0,
                                            transferMx=None,
                                            ideal_flops=20,
                                            transfer_time=100)
        sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

        heft_schedule = run_heft(_wf, rm, estimator)
        heft_mapping = schedule_to_position(heft_schedule)

        heft_gen = lambda: heft_mapping if random.random() > 0.95 else generate(_wf, rm, estimator)

        toolbox = Toolbox()
        # toolbox.register("generate", generate, _wf, rm, estimator)
        toolbox.register("generate", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)

        toolbox.register("force_vector_matrix", force_vector_matrix, rm)
        toolbox.register("velocity_and_position", velocity_and_position, _wf,
                         rm, estimator)
        toolbox.register("G", G)
        toolbox.register("kbest", Kbest)

        statistics = Statistics()
        statistics.register(
            "min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
        statistics.register(
            "avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
        statistics.register(
            "max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
        statistics.register(
            "std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

        logbook = Logbook()
        logbook.header = ("gen", "G", "kbest", "min", "avr", "max", "std")

        pop_size = 100
        iter_number = 100
        kbest = pop_size
        ginit = 5
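        # In standard GSA, kbest (the number of attracting agents) shrinks over the
        # iterations and the gravitational constant decays from ginit; they are passed
        # to run_gsa here only as starting values.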

        final_pop = run_gsa(toolbox, statistics, logbook, pop_size,
                            iter_number, kbest, ginit)

        best = min(final_pop, key=lambda x: toolbox.fitness(x).mofit)
        solution = {
            MAPPING_SPECIE: list(zip(sorted_tasks, best)),
            ORDERING_SPECIE: sorted_tasks
        }
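        # The mapping species pairs each HEFT-ranked task with the resource chosen by the
        # best particle, while the ordering stays fixed to the HEFT ranking (hence the
        # "fixed ordering" in the test name).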
        schedule = build_schedule(_wf, estimator, rm, solution)
        Utility.validate_static_schedule(_wf, schedule)
        makespan = Utility.makespan(schedule)
        print("Final makespan: {0}".format(makespan))

        pass
Esempio n. 42
0
    position = particleind.entity
    ordering = particleind.ordering
    solution = construct_solution(position, ordering)

    sched = build_schedule(_wf, estimator, rm, solution)
    makespan = Utility.makespan(sched)
    ## TODO: make a real estimation later
    fit = FitnessStd(values=(makespan, 0.0))
    ## TODO: make a normal multi-objective fitness estimation
    fit.mofit = makespan
    return fit

    # unreachable (dead code): this fallback fitness path is never executed
    # return basefitness(_wf, rm, estimator, solution)


toolbox = Toolbox()
# common functions
toolbox.register("map", map)
toolbox.register("clone", deepcopy)
toolbox.register("population", population)
toolbox.register("fitness", fitness)

# pso functions
toolbox.register("update", update)
# ga functions
toolbox.register("mutate", mutate)
toolbox.register("mate", mate)
toolbox.register("select", tools.selNSGA2)

ga = partial(run_nsga2, toolbox=toolbox, logbook=None, stats=None,
             n=N, crossover_probability=CXPB, mutation_probability=MU)
Esempio n. 43
0
    def toolbox(self):

        _wf, rm, estimator = self.env()
        heft_schedule = self.heft_schedule()

        heft_particle = generate(_wf, rm, estimator, heft_schedule)

        heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)]

        def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
            mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
            ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

        def compound_force_vector_matrix():
            raise NotImplementedError()

        def compound_velocity_and_position():
            raise NotImplementedError()
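        # NOTE: the two operators above are stubs; any driver that calls
        # "force_vector_matrix" or "velocity_and_position" on this toolbox will raise.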


        toolbox = Toolbox()
        toolbox.register("generate", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
        toolbox.register("force_vector_matrix", compound_force_vector_matrix)
        toolbox.register("velocity_and_position", compound_velocity_and_postion, beta=0.0)
        toolbox.register("G", G)
        toolbox.register("kbest", Kbest)
        return toolbox
Esempio n. 44
0
def run_opd_ga(toolbox: base.Toolbox,
               popsize: int,
               gens: int,
               cxpb: float,
               mutpb: float,
               elitism_k: int,
               new_inds_per_gen: int,
               target_lambda: int,
               verbose: int = 0):  # 0 = silent, 1 = progress only, 2 = per-generation stats

    if verbose:
        print('Starting GA...')

    fitness_history = []

    pop = toolbox.generate_population(n=popsize)

    fitnesses = toolbox.parallel_map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    if verbose:
        print('Initial evaluation done.')

    fits = [ind.fitness.values[0] for ind in pop]
    fitness_history += [fits.copy()]

    g = 0
    min_fits, max_fits, avg_fits, var_fits = [], [], [], []
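    # Evolve until the best (minimum) fitness reaches target_lambda or the generation
    # budget is exhausted.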
    while min(fits) > target_lambda and g < gens:
        g = g + 1

        elite = tools.selBest(pop, elitism_k)
        offspring = toolbox.select(
            pop,
            len(pop) - elitism_k - new_inds_per_gen) + elite
        offspring = list(
            map(toolbox.clone,
                offspring)) + toolbox.generate_population(n=new_inds_per_gen)
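        # The offspring pool now holds the selected parents, the elitism_k best individuals
        # (still subject to crossover/mutation below), and new_inds_per_gen freshly
        # generated individuals injected to keep diversity up.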

        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            if random.random() < cxpb:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < mutpb:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.parallel_map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        pop[:] = offspring
        fits = [ind.fitness.values[0] for ind in pop]
        fitness_history += [fits.copy()]

        if verbose == 2:
            min_fits += [min(fits)]
            max_fits += [max(fits)]
            avg_fits += [statistics.mean(fits)]
            var_fits += [statistics.variance(fits)]

        if verbose and (g % max(1, gens // 10) == 0 or g == 1):  # max(1, ...) guards against gens < 10
            print(f'Generation {g}')
            if verbose == 2:
                print('  Min %s' % min_fits[-1])
                print('  Max %s' % max_fits[-1])
                print('  Avg %s' % avg_fits[-1])
                print('  Var %s' % var_fits[-1])

    timeout = min(fits) == target_lambda

    return {'fitness_history': fitness_history, 'timeout': timeout}
Esempio n. 45
0
heft_schedule = run_heft(_wf, rm, estimator)
heft_mapping = schedule_to_position(heft_schedule).entity



initial_state = State()
initial_state.mapping = heft_mapping
# initial_state.mapping = generate(_wf, rm, estimator, 1)[0].entity
initial_state.ordering = sorted_tasks

T, N = 20, 1000



toolbox = Toolbox()
toolbox.register("energy", energy, _wf, rm, estimator)
toolbox.register("update_T", update_T, T)
toolbox.register("neighbor", mapping_neighbor, _wf, rm, estimator, 1)
toolbox.register("transition_probability", transition_probability)
# use a constant number of attempts at each temperature level
toolbox.register("attempts_count", lambda T: 100)

logbook = tools.Logbook()
logbook.header = ["gen", "T", "val"]

stats = tools.Statistics(lambda ind: ind.energy.values[0])
stats.register("val", lambda arr: arr[0])

def do_exp():
    best, log, current = run_sa(