def taboo_search(f,
                 is_maximisation,
                 constraints,
                 DEFAULTS,
                 s_0=None,
                 stopping_cond=None,
                 stop_args=None,
                 taboo_memory=None,
                 neighbourhood_func=None,
                 neighbourhood_args=None,
                 print_workings=False):
    # Default parameters
    s_0 = DEFAULTS.s_0 if s_0 is None else s_0
    stopping_cond = DEFAULTS.stopping_cond if stopping_cond is None else stopping_cond
    stop_args = DEFAULTS.stop_args if stop_args is None else stop_args
    taboo_memory = DEFAULTS.taboo_memory if taboo_memory is None else taboo_memory
    neighbourhood_func = DEFAULTS.neighbourhood_func if neighbourhood_func is None else neighbourhood_func
    neighbourhood_args = DEFAULTS.neighbourhood_args if neighbourhood_args is None else neighbourhood_args

    # Initial values of local variables
    taboo_list = [s_0]
    s = {k: v for k, v in s_0.items()}
    i = 1
    # Keeps track of where the algorithm has been (without affecting the answer)
    history = [s]
    viable_neighbours = None

    while True:
        # Generate the neighbourhood, then drop taboo and infeasible candidates
        neighbourhood = neighbourhood_func(s, **neighbourhood_args)
        neighbourhood = list(
            filter(lambda item: item not in taboo_list, neighbourhood))
        neighbourhood = list(
            filter(
                lambda item: check_all_constraints(item, constraints,
                                                   print_workings),
                neighbourhood))
        viable_neighbours = len(neighbourhood)
        if viable_neighbours > 0:
            # Sort so the best candidate comes first
            neighbourhood.sort(key=lambda item: f(item), reverse=is_maximisation)
            best = neighbourhood[0]
            if ((is_maximisation and f(best) > f(s))
                    or (not is_maximisation and f(best) < f(s))):
                s = {k: v for k, v in best.items()}
                history.append(best)
            # Keep the taboo list at a fixed length (oldest entry dropped first)
            if len(taboo_list) == taboo_memory:
                taboo_list.pop(0)
            taboo_list.append(best)
        if print_workings:
            print("Iteration: {},\tCurrent solution: {},\tTaboo list: {}".format(
                i, s, taboo_list))
        if stopping_cond(i, s, viable_neighbours, **stop_args):
            break
        i += 1

    # Add final solution
    if i == 1:
        return ([], False)
    history.append(s)
    return (history, s)
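# A minimal sketch of the callables taboo_search expects, assuming solutions
# are dicts of variable names to floats. The names below (step_neighbourhood,
# iteration_limit_stop) are hypothetical, not part of the original modules;
# their signatures match how taboo_search calls neighbourhood_func and
# stopping_cond.


def step_neighbourhood(s, step_size=0.5):
    # For every variable, produce one neighbour with the value increased and
    # one with it decreased by step_size.
    neighbours = []
    for k in s:
        for delta in (step_size, -step_size):
            neighbour = {key: val for key, val in s.items()}
            neighbour[k] += delta
            neighbours.append(neighbour)
    return neighbours


def iteration_limit_stop(i, s, viable_neighbours, max_i=100):
    # Stop after max_i iterations or when no viable neighbours remain.
    return i >= max_i or viable_neighbours == 0


# Example call under these assumptions:
# results = taboo_search(f, True, constraints, DEFAULTS,
#                        neighbourhood_func=step_neighbourhood,
#                        neighbourhood_args={"step_size": 0.5},
#                        stopping_cond=iteration_limit_stop,
#                        stop_args={"max_i": 100})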
def plot_genetic_algorithm_metaheuristic(results, print_results=False):
    feasible_values = []
    infeasible_values = []
    data = results[0]
    data_length = len(data)
    for d in data:
        gen = d.get_gen()
        val = d.get_value()
        val_tuple = (gen, f(val))
        if check_all_constraints(val, constraints):
            feasible_values.append(val_tuple)
        else:
            infeasible_values.append(val_tuple)

    # Population / Generation scatter
    plt.title("Individual values per generation")
    plt.scatter([v[0] for v in feasible_values],
                [v[1] for v in feasible_values],
                marker=".",
                c="b",
                label="Feasible indiv's")
    plt.scatter([v[0] for v in infeasible_values],
                [v[1] for v in infeasible_values],
                marker=".",
                c="r",
                label="Infeasible indiv's")
    plt.legend(framealpha=1)
    plt.xlabel("Generation")
    plt.ylabel("Value")
    if print_results:
        print("Feasible: ", len(feasible_values))
        print("Infeasible: ", len(infeasible_values))
    plt.show()
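# plot_genetic_algorithm_metaheuristic assumes every element of results[0]
# exposes get_gen() and get_value(); a minimal sketch of such an individual
# (hypothetical class, shown only to document the expected interface):


class IndividualSketch:
    def __init__(self, gen, value):
        self.gen = gen      # generation this individual belongs to
        self.value = value  # decoded solution, e.g. {"x1": ..., "x4": ...}

    def get_gen(self):
        return self.gen

    def get_value(self):
        return self.value


# Example usage under that assumption:
# plot_genetic_algorithm_metaheuristic(results, print_results=True)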
def get_evals(approx):
    if approx:
        # Load the trained neural-network approximation of f
        model = MLP(params.neural_net_hiddens)
        model.load_state_dict(torch.load('./model.pth'))
        model.eval()

    # Make data: a symmetric grid of width 2 * delta_0 around x_0
    X = np.arange(params.x_0[0] - params.delta_0,
                  params.x_0[0] + params.delta_0, 0.05)
    Y = np.arange(params.x_0[1] - params.delta_0,
                  params.x_0[1] + params.delta_0, 0.05)
    X, Y = np.meshgrid(X, Y)
    Z = []
    for j in range(len(X)):
        for i in range(len(X[0])):
            point = np.array([[X[j][i], Y[j][i]]], dtype=np.float32)
            inp = torch.from_numpy(point)
            if approx:
                val = model(inp).data.numpy()
            else:
                val = f(point)
            Z.append(val)
    Z = np.array(Z).reshape(X.shape)
    return X, Y, Z
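# A short usage sketch (assumption, not from the original notebook): plot the
# grid returned by get_evals as a 3D surface, for either the true objective f
# (approx=False) or the trained neural-network approximation (approx=True).
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 - registers the "3d" projection

X_grid, Y_grid, Z_grid = get_evals(approx=False)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_surface(X_grid, Y_grid, Z_grid, cmap="viridis")
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_zlabel("f(x1, x2)")
plt.show()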
# In[13]:

dna1 = encode_dna({"x1": 0.0, "x2": 1.0, "x3": 2.0, "x4": 3.0})
dna2 = encode_dna({"x1": 0.05, "x2": 0.43, "x3": 0.2, "x4": 0.1})
print(decode_dna(cross_over(dna1, dna2, 2)))

# In[11]:

funcs = GA_functions(initial_val, encode_dna, decode_dna, mutate_dna, cross_over)

# In[12]:

fitness_function = lambda cx: f(cx) if check_all_constraints(cx, constraints) else 0

# In[65]:

results = genetic_algorithm(fitness_function, True, DEFAULTS, funcs, show_workings=True)

# In[66]:

print_all_constraints(results[1], constraints)
print("Profit: ", f(results[1]), "\n", results[1])
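# The DNA helpers (encode_dna, decode_dna, mutate_dna, cross_over) are defined
# elsewhere in the notebook. As an illustration only, a single-point crossover
# over list-encoded DNA could look like the sketch below; the real cross_over
# may interpret its third argument differently (e.g. as a bit index within the
# encoded genome).


def cross_over_sketch(dna_a, dna_b, point):
    # Take genes from parent A up to the crossover index, then from parent B.
    return dna_a[:point] + dna_b[point:]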
DEFAULTS = TS_defaults(s_0=s_0,
                       stopping_cond=stopping_cond,
                       stop_args=stopping_args,
                       taboo_memory=taboo_memory,
                       neighbourhood_func=neighbourhood_func,
                       neighbourhood_args=neighbourhood_args)

# In[4]:

results = taboo_search(f, True, constraints, DEFAULTS, print_workings=True)

# In[5]:

print_all_constraints(results[1], constraints)
print("Profit: ", f(results[1]))

# In[6]:

# Imports my plotting module
import batch_plotting as batch_plt
# Imports my spiral coordinate generating module
from utilities import n_dim_spiral

# In[7]:

# Batch testing start position
starting_point_results = []
end_point = []
spiral = n_dim_spiral({"x1": 0, "x2": 0, "x3": 0, "x4": 0}, 1000, 0.1)
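# A plausible batch loop (assumption, not part of the original notebook): run
# taboo_search from every coordinate on the spiral and record each run's final
# solution and profit, recording None for runs that return no solution.
for start in spiral:
    run = taboo_search(f, True, constraints, DEFAULTS, s_0=start)
    end_point.append(run[1])
    starting_point_results.append((start, f(run[1]) if run[1] else None))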
x_0s = {"x1": 0, "x2": 0, "x3": 0, "x4": 0} max_i = 1000 step_m = 0.1 e_g = 0.001 e_x = 0.001 DEFAULTS = GD_defaults(True, x_0s, max_i, step_m, e_g, e_x) # In[6]: results = gradient_descent(pds, constraints, DEFAULTS, print_workings=True) # In[7]: print_all_constraints(results[1], constraints) print("Profit: ", f(results[1])) # In[9]: # Imports my plotting module import batch_plotting as batch_plt # Imports my spiral coordinate generating module from utilities import n_dim_spiral # In[8]: # Batch testing start position starting_point_results = [] spiral = n_dim_spiral({"x1": 0, "x2": 0, "x3": 0, "x4": 0}, 2000, 0.05) for i in range(len(spiral)):
def simulated_annealing(f,
                        DEFAULTS,
                        is_maximisation=None,
                        s_0=None,
                        t_0=None,
                        neighbourhood_func=None,
                        step_size=None,
                        temp_reduc_func=None,
                        acc_prob_func=None,
                        stop_cond=None,
                        max_i=None,
                        max_epoch=None,
                        tolerance=None,
                        print_workings=False):
    # Default parameters
    is_maximisation = DEFAULTS.is_maximisation if is_maximisation is None else is_maximisation
    s_0 = DEFAULTS.s_0 if s_0 is None else s_0
    t_0 = DEFAULTS.t_0 if t_0 is None else t_0
    neighbourhood_func = DEFAULTS.neighbourhood_func if neighbourhood_func is None else neighbourhood_func
    step_size = DEFAULTS.step_size if step_size is None else step_size
    temp_reduc_func = DEFAULTS.temp_reduc_func if temp_reduc_func is None else temp_reduc_func
    acc_prob_func = DEFAULTS.acc_prob_func if acc_prob_func is None else acc_prob_func
    stop_cond = DEFAULTS.stop_cond if stop_cond is None else stop_cond
    max_i = DEFAULTS.max_i if max_i is None else max_i
    max_epoch = DEFAULTS.max_epoch if max_epoch is None else max_epoch
    tolerance = DEFAULTS.tolerance if tolerance is None else tolerance

    # Sets the initial value of s_n (when n = 0)
    solution = {k: v for k, v in s_0.items()}
    # Sets the initial value of s_n-1 (not used for the first iteration of the outer loop)
    prev_solution = {k: v for k, v in s_0.items()}
    # The current temperature
    temperature = t_0
    # The iterations of the outer (while) loop
    iteration_counter = 0
    step_array = []

    while (iteration_counter == 0 or not stop_cond(
            iteration_counter, max_i, solution, prev_solution, tolerance)):
        if not check_all_constraints(solution, constraints, print_workings):
            # Backtrack to the previous accepted step if the current solution
            # violates a constraint; give up if even the starting point is infeasible
            if iteration_counter > 0:
                solution = {k: v for k, v in step_array[-1][0].items()}
                temperature = step_array[-1][1]
                step_array = step_array[:-1]
            if iteration_counter == 0:
                solution = False
                break
        if print_workings:
            print(
                "--------------------------------------------------------------------------------------------\ns_{}: {}"
                .format(iteration_counter,
                        {k: round(v, 3) for k, v in solution.items()}))
        prev_solution = {k: v for k, v in solution.items()}
        step_array.append((prev_solution, temperature))
        for epoch in range(1, max_epoch + 1):
            neighbourhood = neighbourhood_func(solution, step_size)
            possible_solution = neighbourhood[random.randrange(
                0, len(neighbourhood))]
            solution_eval_diff = f(possible_solution) - f(solution)
            # Accept the candidate outright if it improves on the current solution;
            # otherwise accept it only if the acceptance probability of the change
            # at the current temperature beats a uniform random draw.
            accepted = (solution_eval_diff > 0) if is_maximisation else (solution_eval_diff < 0)
            if (accepted or acc_prob_func(solution_eval_diff, temperature) >
                    random.random()):
                # Set a new value of solution
                solution = {k: v for k, v in possible_solution.items()}
                accepted = True
            if print_workings:
                print("Epoch: {}\ts: {} \tt: {}\tAccepted: {}\tDiff: {}".format(
                    epoch,
                    {k: round(v, 3) for k, v in possible_solution.items()},
                    round(temperature, 3), accepted,
                    round(solution_eval_diff, 3)))
        # Reduce the temperature and increment the iteration counter
        temperature = temp_reduc_func(temperature)
        iteration_counter += 1

    if iteration_counter > 0:
        step_array.append(({k: v for k, v in solution.items()}, temperature))
    return (step_array, solution)
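# A minimal sketch of the helper callables simulated_annealing expects, using
# hypothetical names and a standard geometric-cooling / Metropolis-style setup
# (the actual defaults bundled in DEFAULTS may differ; the neighbourhood
# function follows the same (solution, step_size) signature as the step-based
# sketch after taboo_search above).
import math


def geometric_cooling(temperature, alpha=0.9):
    # Multiply the temperature by a constant factor after each outer iteration.
    return alpha * temperature


def metropolis_acceptance(eval_diff, temperature):
    # Probability of accepting a non-improving move; shrinks as the temperature
    # drops, so late iterations behave greedily.
    return math.exp(-abs(eval_diff) / temperature) if temperature > 0 else 0.0


def iteration_or_tolerance_stop(i, max_i, solution, prev_solution, tolerance):
    # Stop once the iteration budget is spent or the solution barely moved
    # between consecutive outer iterations.
    moved = max(abs(solution[k] - prev_solution[k]) for k in solution)
    return i >= max_i or moved < tolerance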