Code example #1
# Relies on matplotlib.pyplot as plt, plus f, constraints and
# check_all_constraints defined elsewhere in the source module.
def plot_genetic_algorithm_metaheuristic(results, print_results=False):
    feasible_values = []
    infeasible_values = []
    data = results[0]

    for d in data:
        gen = d.get_gen()
        val = d.get_value()
        val_tuple = (gen, f(val))
        if (check_all_constraints(val, constraints)):
            feasible_values.append(val_tuple)
        else:
            infeasible_values.append(val_tuple)

    # Population / Generation scatter
    plt.title("Individual values per generation")
    plt.scatter([v[0] for v in feasible_values],
                [v[1] for v in feasible_values],
                marker=".",
                c="b",
                label="Feasible indiv's")
    plt.scatter([v[0] for v in infeasible_values],
                [v[1] for v in infeasible_values],
                marker=".",
                c="r",
                label="Infeasible indiv's")

    plt.legend(framealpha=1)
    plt.xlabel("Generation")
    plt.ylabel("Value")
    if (print_results):
        print("Feasible: ", len(feasible_values))
        print("Infeasible: ", len(infeasible_values))
    plt.show()
Code example #2
File: tabooSearch.py  Project: DTopping256/OT
def taboo_search(f,
                 is_maximisation,
                 constraints,
                 DEFAULTS,
                 s_0=None,
                 stopping_cond=None,
                 stop_args=None,
                 taboo_memory=None,
                 neighbourhood_func=None,
                 neighbourhood_args=None,
                 print_workings=False):
    # Default parameters
    s_0 = DEFAULTS.s_0 if s_0 is None else s_0
    stopping_cond = DEFAULTS.stopping_cond if stopping_cond is None else stopping_cond
    stop_args = DEFAULTS.stop_args if stop_args is None else stop_args
    taboo_memory = DEFAULTS.taboo_memory if taboo_memory is None else taboo_memory
    neighbourhood_func = DEFAULTS.neighbourhood_func if neighbourhood_func is None else neighbourhood_func
    neighbourhood_args = DEFAULTS.neighbourhood_args if neighbourhood_args is None else neighbourhood_args

    # Initial values of local variables
    taboo_list = [s_0]
    s = {k: v for k, v in s_0.items()}
    i = 1

    # Keeps track of where the algorithm has been (without affecting the answer)
    history = [s]
    viable_neighbours = None
    while True:
        neighbourhood = neighbourhood_func(s, **neighbourhood_args)
        neighbourhood = list(
            filter(lambda item: item not in taboo_list, neighbourhood))
        neighbourhood = list(
            filter(
                lambda item: check_all_constraints(
                    item, constraints, print_workings), neighbourhood))
        viable_neighbours = len(neighbourhood)
        if (viable_neighbours > 0):
            neighbourhood.sort(key=lambda item: f(item),
                               reverse=is_maximisation)
            best = neighbourhood[0]
            if ((is_maximisation and f(best) > f(s))
                    or (not is_maximisation and f(best) < f(s))):
                s = {k: v for k, v in best.items()}
                history.append(best)
            if (len(taboo_list) == taboo_memory):
                taboo_list.pop(0)
            taboo_list.append(best)
        if (print_workings == True):
            print(
                "Iteration: {},\tCurrent solution: {},\tTaboo list: {}".format(
                    i, s, taboo_list))
        if (stopping_cond(i, s, viable_neighbours, **stop_args)):
            break
        i += 1
    # Add final solution
    if (i == 1):
        return ([], False)
    history.append(s)
    return (history, s)
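
The solutions that taboo_search manipulates are plain dicts mapping dimension names to values, and neighbourhood_func is expected to return a list of such dicts given the current solution. A minimal sketch of what that callback might look like (a hypothetical example, not the project's DEFAULTS.neighbourhood_func):

def example_neighbourhood_func(s, step=0.5):
    # For each dimension, propose moving one step down and one step up.
    neighbours = []
    for d in s:
        for delta in (-step, step):
            candidate = {k: v for k, v in s.items()}
            candidate[d] = round(candidate[d] + delta, 3)
            neighbours.append(candidate)
    return neighbours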
Code example #3
def genetic_algorithm(fitness_function,
                      is_maximisation,
                      GA_defaults,
                      GA_funcs,
                      population_size=None,
                      epochs=None,
                      fitness_upper_bound=None,
                      selection_function=None,
                      cross_over_amount=None,
                      mutation_chance=None,
                      show_workings=False):
    # Default variable assignment
    population_size = GA_defaults.population_size if population_size is None else population_size
    epochs = GA_defaults.epochs if epochs is None else epochs
    fitness_upper_bound = GA_defaults.fitness_upper_bound if fitness_upper_bound is None else fitness_upper_bound
    selection_function = GA_defaults.selection_function if selection_function is None else selection_function
    cross_over_amount = GA_defaults.cross_over_amount if cross_over_amount is None else cross_over_amount
    mutation_chance = GA_defaults.mutation_chance if mutation_chance is None else mutation_chance

    # Initial population instantiation
    population = []
    for p in range(population_size):
        population.append(individual(GA_funcs))

    # Genetic algorithm loop
    cumulative_population = []
    result = None
    if (show_workings):
        print(
            "Genetic Algorithm (top 10 results per generation)\n\n\tPopulation size: {}\n\tEpochs/Generations: {}\n\tSelected population: {}%\n\tCrossover amount: {}\n\tMutation chance: {}%\n________________________________________________________________________________"
            .format(population_size, epochs, fitness_upper_bound * 100,
                    cross_over_amount, mutation_chance * 100))
    for e in range(epochs):
        # Where the next population will be stored
        next_gen_pop = []
        # Sort the population by fitness: best first (descending when
        # maximising, ascending when minimising)
        population.sort(key=lambda ind: fitness_function(ind.get_value()),
                        reverse=is_maximisation)
        # Put sorted population into cumulative population
        cumulative_population.extend(population)
        if (show_workings):
            print("Gen: {}  \tValues: [{}]".format(
                e + 1,
                ', '.join([str(population[i].get_value())
                           for i in range(10)])))
        # Keep only the fittest fraction of the population (fitness_upper_bound)
        population = population[0:int(fitness_upper_bound * population_size)]
        # Create next generation population
        for p in range(int(len(population) / 2) - 1):
            individuals = selection_function(p, population)
            # Breed enough offspring from this pair so that the next generation
            # is at least as large as the previous one
            for m in range(math.ceil(2 / fitness_upper_bound)):
                next_gen_pop.append(
                    breed_individuals(individuals[0], individuals[1],
                                      GA_defaults, GA_funcs, cross_over_amount,
                                      mutation_chance))
        # Replace current generation population with new generation population
        population = next_gen_pop[0:population_size]
    # After all the epochs pick the first individual (fittest) in the population and obtain their value.
    solution = population[0].get_value()
    if (not check_all_constraints(solution, constraints)):
        solution = False
    # Return the history of the population (cumulative population) and the solution.
    return (cumulative_population, solution)
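
genetic_algorithm delegates mate selection to the selection_function parameter, which is called as selection_function(p, population) and must return a pair of individuals to breed. A minimal sketch of such a callback (hypothetical, not the project's GA_defaults.selection_function):

import random

def example_selection_function(p, population):
    # Pair the p-th fittest survivor with another survivor chosen at random.
    partners = [ind for i, ind in enumerate(population) if i != p]
    return (population[p], random.choice(partners))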
Code example #4
# In[4]:

search_space_max = []
for d in ["x1", "x2", "x3", "x4"]:
    for i in range(10000):
        cx = {"x1": 0, "x2": 0, "x3": 0, "x4": 0}
        val = i / 1000
        cx[d] = val
        if (not check_all_constraints(cx, constraints)):
            search_space_max.append(val - 0.001)
            break
'''
encode and decode (in a quinary counting system, with 6 digits of memory) give my data structure a search space of 0.000 to 15.624 (to 3 d.p.)

This is suitable because:
    - I don't need negative numbers, since they lie outside the feasible region.
    - I don't need values on any axis greater than 5 or 6 (as shown by search_space_max)
'''

print(search_space_max)
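
# Illustration of the claim above (hypothetical helpers, not the notebook's own
# encode/decode): six base-5 digits cover the integers 0 .. 5**6 - 1 = 15624,
# which, scaled by 1/1000, gives the 0.000 - 15.624 range.
def to_quinary_digits(n, digits=6):
    # Fixed-width base-5 representation, most significant digit first.
    out = []
    for _ in range(digits):
        out.append(n % 5)
        n //= 5
    return list(reversed(out))

def from_quinary_digits(ds):
    n = 0
    for d in ds:
        n = n * 5 + d
    return n

assert from_quinary_digits(to_quinary_digits(15624)) == 15624  # i.e. 15.624 to 3 d.p.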

# In[5]:

Code example #5
def gradient_descent(pds,
                     constraints,
                     DEFAULTS,
                     is_maximisation=None,
                     x_0s=None,
                     max_i=None,
                     step_m=None,
                     e_g=None,
                     e_x=None,
                     print_workings=False):
    # Default parameters
    is_maximisation = DEFAULTS.is_maximisation if is_maximisation is None else is_maximisation
    x_0s = DEFAULTS.x_0s if x_0s is None else x_0s
    max_i = DEFAULTS.max_i if max_i is None else max_i
    step_m = DEFAULTS.step_m if step_m is None else step_m
    e_g = DEFAULTS.e_g if e_g is None else e_g
    e_x = DEFAULTS.e_x if e_x is None else e_x

    # Set the current x and create a way of storing the previous steps.
    current_xs = x_0s
    gs = {d: round(pds[d](v), 3) for d, v in current_xs.items()}
    step_array = []

    # Whether to print the workings
    if (print_workings == True):
        print(
            "--------------------------------------------------------------------------------------------\nIteration\tX\tg\tdiff\nStart (0)\t{}\t{}\t{}"
            .format({k: round(v, 3)
                     for k, v in current_xs.items()}, gs, "N/A"))

    # Loop for a maximum of max_i
    for i in range(max_i):
        # Check if constraints are satisfied before continuing
        if (not check_all_constraints(current_xs, constraints)):
            if (i == 0):
                return ([], False)
            current_xs = step_array[-1]
            step_array.pop()
            break
        # Set previous x
        step_array.append(current_xs)
        # Find the current x: step up the gradient when maximising, down it when
        # minimising (use a signed copy so step_m itself isn't flipped on every
        # iteration)
        signed_step = step_m if is_maximisation else -step_m
        current_xs = {
            d: round(v + signed_step * gs[d], 3)
            for d, v in current_xs.items()
        }
        # Get a new gradient
        gs = {d: round(pds[d](v), 3) for d, v in current_xs.items()}
        gavg = 0
        for d, v in gs.items():
            gavg += v
        gavg /= len(gs)
        # Find difference in x
        diff = 0
        for d, v in current_xs.items():
            diff += (v - step_array[i][d])**2
        diff = round(math.sqrt(diff), 3)
        # Whether to print the workings
        if (print_workings == True):
            print("{}\t\t{}\t{}\t{}".format(
                i + 1, {k: round(v, 3)
                        for k, v in current_xs.items()}, gs, diff))
        # Check if either of the tolerance conditions are met, if so stop the loop.
        if (abs(gavg) < e_g or abs(diff) < e_x):
            break

    # Add final x to step_array
    step_array.append(current_xs)
    # Return a tuple of all steps and final answer
    return (step_array, current_xs)
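
gradient_descent reads the gradient through the pds argument: one partial-derivative callable per dimension, each applied to that dimension's scalar value (pds[d](v)). A minimal sketch of the expected shape, using a hypothetical objective f(x1, x2) = x1**2 + x2**2 rather than the project's own function:

example_pds = {
    "x1": lambda x1: 2 * x1,  # partial derivative with respect to x1
    "x2": lambda x2: 2 * x2,  # partial derivative with respect to x2
}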
Code example #6
def simulated_annealing(f,
                        DEFAULTS,
                        is_maximisation=None,
                        s_0=None,
                        t_0=None,
                        neighbourhood_func=None,
                        step_size=None,
                        temp_reduc_func=None,
                        acc_prob_func=None,
                        stop_cond=None,
                        max_i=None,
                        max_epoch=None,
                        tolerance=None,
                        print_workings=False):
    # Default parameters
    is_maximisation = DEFAULTS.is_maximisation if is_maximisation is None else is_maximisation
    s_0 = DEFAULTS.s_0 if s_0 is None else s_0
    t_0 = DEFAULTS.t_0 if t_0 is None else t_0
    neighbourhood_func = DEFAULTS.neighbourhood_func if neighbourhood_func is None else neighbourhood_func
    step_size = DEFAULTS.step_size if step_size is None else step_size
    temp_reduc_func = DEFAULTS.temp_reduc_func if temp_reduc_func is None else temp_reduc_func
    acc_prob_func = DEFAULTS.acc_prob_func if acc_prob_func is None else acc_prob_func
    stop_cond = DEFAULTS.stop_cond if stop_cond is None else stop_cond
    max_i = DEFAULTS.max_i if max_i is None else max_i
    max_epoch = DEFAULTS.max_epoch if max_epoch is None else max_epoch
    tolerance = DEFAULTS.tolerance if tolerance is None else tolerance

    # Sets the initial value of s_n (when n = 0)
    solution = {k: v for k, v in s_0.items()}
    # Sets the initial value of s_n-1 (not used for the first iteration of the outer loop)
    prev_solution = {k: v for k, v in s_0.items()}
    # The current temperature
    temperature = t_0

    # The iterations of the outer (while) loop.
    iteration_counter = 0
    step_array = []
    while (iteration_counter == 0 or not stop_cond(
            iteration_counter, max_i, solution, prev_solution, tolerance)):
        if (not check_all_constraints(solution, constraints, print_workings)):
            if (iteration_counter > 0):
                solution = {k: v for k, v in step_array[-1][0].items()}
                temperature = step_array[-1][1]
                step_array = step_array[:-1]
            if (iteration_counter == 0):
                solution = False
            break
        if (print_workings == True):
            print(
                "--------------------------------------------------------------------------------------------\ns_{}: {}"
                .format(iteration_counter,
                        {k: round(v, 3)
                         for k, v in solution.items()}))
        prev_solution = {k: v for k, v in solution.items()}
        step_array.append((prev_solution, temperature))
        for epoch in range(1, max_epoch + 1):
            neighbourhood = neighbourhood_func(solution, step_size)
            possible_solution = neighbourhood[random.randrange(
                0, len(neighbourhood))]
            solution_eval_diff = f(possible_solution) - f(solution)
            # Accept the possible solution outright if it improves on the current
            # solution; otherwise accept it only if the acceptance probability of
            # the evaluation difference beats a uniform random draw.
            accepted = solution_eval_diff > 0 if is_maximisation else solution_eval_diff < 0
            if (accepted or acc_prob_func(solution_eval_diff, temperature) >
                    random.random()):
                # Set a new value of solution
                solution = {k: v for k, v in possible_solution.items()}
                accepted = True
            if (print_workings == True):
                print(
                    "Epoch: {}\ts: {}    \tt: {}\tAccepted: {}\tDiff: {}".
                    format(
                        epoch,
                        {k: round(v, 3)
                         for k, v in possible_solution.items()},
                        round(temperature, 3), accepted,
                        round(solution_eval_diff, 3)))
        # Reduce the temperature and increment the iteration counter
        temperature = temp_reduc_func(temperature)
        iteration_counter += 1
    if (iteration_counter > 0):
        step_array.append(({k: v for k, v in solution.items()}, temperature))
    return (step_array, solution)
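
simulated_annealing leaves the acceptance probability and the cooling schedule to the acc_prob_func and temp_reduc_func callbacks. A minimal sketch of typical choices (hypothetical examples, not the project's DEFAULTS): a Metropolis-style acceptance rule and geometric cooling.

import math

def example_acc_prob_func(diff, temperature):
    # Metropolis-style rule: a worsening move is accepted with probability
    # exp(-|diff| / T), which shrinks as the temperature cools.
    return math.exp(-abs(diff) / temperature)

def example_temp_reduc_func(temperature, alpha=0.9):
    # Geometric cooling: T_{k+1} = alpha * T_k.
    return alpha * temperature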