def Optimise_direction_vector(x_n, Lambda, N_Function, trust_region):
    a = np.zeros((N_Function, 1))
    b = np.zeros((N_Function, 1))
    jacobian = Jacobian(x_n, Lambda, N_Function)
    U, S, V_T = np.linalg.svd(jacobian)  # Singular Value Decomposition
    function = Function(x_n, Lambda, N_Function)
    g = U.T @ function
    S = S.reshape(N_Function, 1)
    for i in range(N_Function):
        b[i] = S[i]**4
        a[i] = b[i] * (g[i]**2)
    # Initial estimate of the Lagrange multiplier based on a_min and b_min
    Lagrange_multiplier_0 = abs(
        (np.amin(a))**2 / (2 * (np.amin(b))**2) * (1 / trust_region**2 - 1))
    # Use the rational (modified) Newton iteration to find the Lagrange multiplier
    Lagrange_multiplier = Newton_Modified(
        Lagrange_multiplier_0, trust_region**2, a, b, N_Function)
    w = np.zeros((N_Function, 1))
    for i in range(N_Function):
        w[i] = -S[i]**2 * g[i] / (S[i]**4 + Lagrange_multiplier)
    direction_vector = V_T.T @ w
    return direction_vector

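# A standalone numerical sketch (an illustration, not part of the original source):
# with the same w_i = -S_i^2 * g_i / (S_i^4 + lambda) formula used above, the step norm
# shrinks monotonically as the Lagrange multiplier grows, which is what lets
# Newton_Modified pick a multiplier that pulls the step back inside the trust region.
# The toy Jacobian J and residual f below are made up purely for illustration.
import numpy as np

J = np.array([[3.0, 1.0], [0.0, 2.0]])   # toy Jacobian
f = np.array([[1.0], [1.0]])             # toy residual vector
U, S, V_T = np.linalg.svd(J)
g = U.T @ f
S = S.reshape(-1, 1)

def step_norm(lam):
    w = -S**2 * g / (S**4 + lam)
    return float(np.linalg.norm(V_T.T @ w))

print([round(step_norm(lam), 4) for lam in (0.0, 1.0, 10.0, 100.0)])  # strictly decreasing
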
def createFunctions(self, script):
    """Find all top-level function definitions and create Function objects from those bits of text."""
    functions = []
    scriptArray = script.splitlines()
    # Go through each line looking for a "def" header
    for index, line in enumerate(scriptArray):
        if len(line) > 4:
            if line[0:3] == "def":
                # Look for the end of the definition: the next non-indented, non-empty line
                finishLine = None
                for index2, line2 in enumerate(scriptArray[index + 1:]):
                    if finishLine is None and len(line2) > 0 and line2[0] != " ":
                        finishLine = index + 1 + index2
                # Create a Function from the relevant code and append it to the functions list
                if finishLine is not None:
                    functions.append(Function("\n".join(scriptArray[index:finishLine])))
                else:
                    functions.append(Function("\n".join(scriptArray[index:])))
    return functions

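# A standalone sketch (illustration only, not from the original source) of the same
# top-level "def" slicing idea, printing each extracted block instead of wrapping it
# in the project's Function class. The sample script string is made up.
sample_script = "def a():\n    return 1\n\ndef b():\n    return 2\n"
sample_lines = sample_script.splitlines()
for index, line in enumerate(sample_lines):
    if line.startswith("def"):
        # end = absolute index of the next non-indented, non-empty line (or end of script)
        end = next((index + 1 + j for j, l in enumerate(sample_lines[index + 1:])
                    if l and l[0] != " "), len(sample_lines))
        print("\n".join(sample_lines[index:end]))
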
def Test_Step(x_n, direction_vector, residual, Lambda, N_Function):
    x_test = copy(x_n)
    x_test = x_test + direction_vector  # New approximation
    function_test = Function(x_test, Lambda, N_Function)
    residual_test = np.linalg.norm(function_test, 2)  # Residual at the new approximation
    # Accept the step if the residual is no larger than at the previous iteration, otherwise reject it.
    if residual_test <= residual:
        accept = 1
    else:
        accept = 0
    return accept

def setUp(self):
    Function.setup(self)
def tearDown(self):
    Function.teardown(self)

class SimplePerceptronLinear:
    weights = []

    def __init__(self, learning_grade, entries, output, steps, betha=0.5,
                 function=FunctionsType.TANH, isLinear=True):
        self.learning_grade = learning_grade
        self.steps = steps
        self.output = output
        self.entries = entries
        self.entry_cols = len(entries[0])
        self.weights_initializer()
        self.betha = betha
        self.function = Function(function)
        self.isLinear = isLinear

    def weights_initializer(self):
        # Initialise one small random weight per input column, with random sign
        for idx in range(self.entry_cols):
            multiplier = np.random.choice([-1, 1])
            SimplePerceptronLinear.weights.append(multiplier * round(random.random(), 5))

    def update_weights(self, weights, update, entry):
        if self.isLinear:
            delta_weights = np.dot(update, entry)
        else:
            # Non-linear case: learning_grade * (output - activation) * g'(h) * x_i
            delta = update * self.function.calculate_derivative(
                self.betha, self.get_excitement(entry, weights))
            delta_weights = np.dot(delta, entry)
        return np.add(weights, delta_weights)

    def get_excitement(self, entry, weights):
        total = 0
        for e, w in zip(entry, weights):
            total += e * w
        return total

    def get_activation(self, excitement):
        if self.isLinear:
            return excitement
        return self.function.calculate(self.betha, excitement)

    def predict(self, entry, _weights=None):
        if _weights is None:
            _weights = self.weights
        excitement = self.get_excitement(entry, _weights) + entry[-1]
        # excitement = self.get_excitement(entry, _weights)
        return self.get_activation(excitement)

    def calculate_error(self, error):
        return 0.5 * pow(error, 2)

    def perform(self, _entries=None, _output=None):
        if _entries is None:
            _entries = self.entries
        if _output is None:
            _output = self.output
        i = 0
        size = len(_entries)
        total_error = 100
        error_min = 2 * size
        last_errors = []
        predictions = []
        test_weights = self.weights.copy()
        while abs(total_error) > 0.001 and i < self.steps:
            total_error = 0
            for idx in range(size):
                prediction = self.predict(_entries[idx], test_weights)
                temp_error = _output[idx] - prediction
                update = self.learning_grade * temp_error
                test_weights = self.update_weights(test_weights, update, _entries[idx])
                if temp_error < error_min:
                    error_min = temp_error
                    min_weights = test_weights
                total_error += temp_error
            total_error = self.calculate_error(total_error / size)
            last_errors.append(total_error)
            predictions.append(prediction)
            i += 1
        Graph.graph_no_linear(last_errors, self.isLinear)
        return test_weights

    def pick_training_sets(self, test_size):
        # Draw test_size random row indices and build the corresponding subset
        indexes = np.random.randint(len(self.entries), size=test_size)
        training_entries = []
        training_output = []
        for idx in indexes:
            training_entries.append(self.entries[idx])
            training_output.append(self.output[idx])
        return [training_entries, training_output]

    def test(self, test_size=10):
        training_set = self.pick_training_sets(test_size)
        training_entries = training_set[0]
        training_output = training_set[1]
        print("Training...")
        weight_output = self.perform(training_entries, training_output)
        test_set = self.pick_training_sets(test_size)
        test_entries = test_set[0]
        test_output = test_set[1]
        print("Testing...")
        final_predictions = []
        for e in test_entries:
            final_predictions.append(self.predict(e, weight_output))
        print("-\t\t\tFile Output\t\t\t|\t\t\tPerceptron Output-")
        for idx in range(len(test_entries)):
            print("*\t" + str(test_output[idx]) + "\t|\t" + str(final_predictions[idx]) + "*")
        return

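# A standalone sketch (an illustration, not from the original source) of the delta-rule
# update that update_weights applies in the linear case: w <- w + eta * (y - y_hat) * x.
# The names eta, w, x and y here are illustrative stand-ins for learning_grade, the
# weight vector, one training entry and its target output.
import numpy as np

eta = 0.01                       # learning rate (learning_grade above)
w = np.zeros(3)                  # one weight per input column
x = np.array([1.0, 2.0, 1.0])    # one training entry
y = 3.0                          # target output

y_hat = np.dot(w, x)             # linear activation: the prediction equals the excitement
w = w + eta * (y - y_hat) * x    # delta-rule step, as in update_weights with isLinear=True
print(w)
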
def Newton_Hook(trust_region, trust_region_min, n_iteration_max, x_0, Lambda,
                N_Function, alpha, beta, tolerance_function, tolerance_variable):
    n_iteration = 0
    x_n = copy(x_0)
    converged = 0
    iteration_history = []  # Stores residual and error at each iteration.
    while not converged:
        if n_iteration > n_iteration_max:
            print('Maximum number of Newton-Hook iterations reached.')
            break
        if trust_region < trust_region_min:
            print('Size of trust region (%e) is smaller than minimum allowed (%e).'
                  % (trust_region, trust_region_min))
            break
        # Find the Newton step
        function = Function(x_n, Lambda, N_Function)
        jacobian = Jacobian(x_n, Lambda, N_Function)
        direction_vector = -np.linalg.solve(jacobian, function)
        Newton_step = np.linalg.norm(direction_vector, 2)
        residual = np.linalg.norm(function, 2)
        # Test the Newton step if it lies within the trust region
        if Newton_step < trust_region:
            if Test_Step(x_n, direction_vector, residual, Lambda, N_Function):
                x_n += direction_vector  # Update approximation if step accepted
                n_iteration += 1
            else:
                trust_region *= alpha  # Reduce trust region by factor alpha if step not accepted
        # Find the optimum direction vector and step if the Newton step is outside the trust region.
        else:
            direction_vector = Optimise_direction_vector(
                x_n, Lambda, N_Function, trust_region)  # Compute new direction vector
            if Test_Step(x_n, direction_vector, residual, Lambda, N_Function):  # Test new direction vector
                x_n += direction_vector  # Update approximation if step accepted
                trust_region *= beta  # Increase trust region by factor beta if the optimised direction vector is accepted
                n_iteration += 1
            else:
                trust_region *= alpha  # Reduce trust region by factor alpha if step not accepted
        function = Function(x_n, Lambda, N_Function)  # Evaluate f(x_n)
        residual = np.linalg.norm(function, 2)  # Residual = ||f(x_n)||_2
        error = np.linalg.norm(direction_vector, 2)  # Error = ||dx||_2
        iteration_history.append(
            [n_iteration, Newton_step, residual, error, trust_region])
        # Check for convergence
        if residual < tolerance_function and error < tolerance_variable:
            converged = 1
            break
    # If converged, plot the iteration history and return x_n
    if converged:
        iteration_history = np.asarray(iteration_history)
        fig, ax = plt.subplots()
        ax.loglog(iteration_history[:, 0], iteration_history[:, 1], color='red',
                  linestyle='dashed', marker='o', markersize=5.0, linewidth=1.5,
                  label='Newton step size')
        ax.loglog(iteration_history[:, 0], iteration_history[:, 2], color='green',
                  linestyle='dashed', marker='D', markersize=5.0, linewidth=1.5,
                  label='Residual')
        ax.loglog(iteration_history[:, 0], iteration_history[:, 3], color='blue',
                  linestyle='dashed', marker='v', markersize=5.0, linewidth=1.5,
                  label='Error')
        ax.loglog(iteration_history[:, 0], iteration_history[:, 4], color='black',
                  linestyle='dashed', marker='s', markersize=5.0, linewidth=1.5,
                  label='Trust region')
        plt.legend()
        plt.tight_layout()
        plt.show()
        return x_n
    # If not converged, ensure a graceful exit.
    else:
        sys.exit('Newton-Hook failed to converge after %d iterations.' % n_iteration)

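# A hedged usage sketch (not part of the original source). Newton_Hook relies on
# module-level Function(x, Lambda, N_Function) and Jacobian(x, Lambda, N_Function)
# being defined elsewhere in the project, plus numpy, copy, matplotlib and sys already
# imported. The toy definitions below are stand-ins with those call signatures, only to
# show how the solver is invoked for the 2x2 system f(x) = (x0^2 + x1^2 - Lambda, x0 - x1).
def Function(x, Lambda, N_Function):
    # Toy residual vector f(x); replace with the project's own Function.
    f = np.zeros((N_Function, 1))
    f[0, 0] = x[0, 0]**2 + x[1, 0]**2 - Lambda
    f[1, 0] = x[0, 0] - x[1, 0]
    return f

def Jacobian(x, Lambda, N_Function):
    # Toy Jacobian df/dx matching the residual above; replace with the project's own Jacobian.
    J = np.zeros((N_Function, N_Function))
    J[0, 0] = 2.0 * x[0, 0]
    J[0, 1] = 2.0 * x[1, 0]
    J[1, 0] = 1.0
    J[1, 1] = -1.0
    return J

x_root = Newton_Hook(trust_region=2.0, trust_region_min=1e-12, n_iteration_max=50,
                     x_0=np.array([[2.0], [0.5]]), Lambda=2.0, N_Function=2,
                     alpha=0.5, beta=2.0,
                     tolerance_function=1e-10, tolerance_variable=1e-10)
print(x_root)  # converges to approximately (1, 1) for this toy system
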
def run_test():
    Function.run_initialze()
    time.sleep(2)
    Function.htmlreport_Run()

class Graph(tk.Frame):
    # pixels from the left from where the diagram starts
    graph_x_start = 150
    # pixels from the top from where the diagram starts
    graph_y_start = 100
    # x, y points that are drawn for the player curve
    points = []

    def __init__(self, parent, controller, game):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.game = game
        self.function = Function()
        self.canvas_width = self.winfo_screenwidth()
        self.canvas_height = self.winfo_screenheight()
        self.canvas = Canvas(self, width=self.canvas_width, height=self.canvas_height)
        self.canvas.pack()
        self.graph_x_end = self.canvas_width - 100
        self.graph_y_end = self.canvas_height - 150
        self.x_span = self.graph_x_end - self.graph_x_start

    def new_point(self, new_point, color="red"):
        # calculate the x, y position in pixels
        x = self.graph_x_start + (new_point[0] * self.pixels_per_second)
        y = self.graph_y_end - ((new_point[1] - self.game.y_min) * self.pixels_per_cm)
        if len(self.points) > 0:
            self.canvas.create_line(self.points[-1][0], self.points[-1][1], x, y,
                                    fill=color, width=3)
        self.points.append([x, y])

    def draw_start_point(self, y):
        x = self.graph_x_start
        y = self.graph_y_end - ((y - self.game.y_min) * self.pixels_per_cm)
        self.canvas.delete('start_point')
        self.canvas.create_oval(x - 5, y - 5, x + 5, y + 5, fill="red", tags='start_point')

    def reset(self, randomize_function=False, draw_function=True):
        """Resets and clears the graph."""
        self.points.clear()
        self.canvas.delete("all")
        self.pixels_per_second = (self.graph_x_end - self.graph_x_start) / self.game.total_time
        self.pixels_per_cm = (self.graph_y_end - self.graph_y_start) / (
            self.game.y_max - self.game.y_min)
        draw_axis(self.canvas, self.graph_x_start, self.graph_y_start,
                  self.graph_x_end, self.graph_y_end, self.game.y_min, self.game.y_max)
        draw_button_info(self.canvas, "back", "replay", "right")
        if draw_function:
            if randomize_function:
                self.function.randomize_transformation()
            self.function.draw(self.canvas, self.graph_x_start, self.graph_y_start,
                               self.graph_x_end, self.graph_y_end, self.game.interval,
                               width=3, color="black")

    def draw_score(self, score):
        self.canvas.create_text(self.canvas_width / 2, self.canvas_height * 1 / 8,
                                text="Your score is " + str(score), font=("Times", 70))
        self.canvas.create_text(self.canvas_width / 2, self.canvas_height * 1 / 4,
                                text="All scores: ", font=("Times", 50))
        for i, score in enumerate(self.game.scores):
            self.canvas.create_text(self.canvas_width / 2,
                                    self.canvas_height * 1 / 3 + 50 * i,
                                    text=str(i + 1) + ". player " + str(score),
                                    font=("Times", 30))

    def draw_countdown(self, seconds):
        self.canvas.delete("countdown")
        self.canvas.create_text(self.canvas_width / 2, self.canvas_height / 2,
                                text=str(seconds), font=("Times", 80),
                                tags="countdown", fill="darkblue")

    def add_function(self, func):
        self.function.set_scale(self.game.y_min, self.game.y_max, self.game.total_time)
        self.function.set_func_type(func, rand_transform=True)
        self.function.draw(self.canvas, self.graph_x_start, self.graph_y_start,
                           self.graph_x_end, self.graph_y_end, self.game.interval,
                           width=3, color="black")

    def on_button_pressed(self, button_index):
        if button_index == 0:
            self.game.reset()
            self.controller.show_frame("Functions")
        if button_index == 1:
            self.game.restart()
        if button_index == 2:
            self.game.restart(randomize_function=True)

def __init__(self, parent, controller, game):
    self.game = game
    tk.Frame.__init__(self, parent)
    # get screen information for scaling and set up the canvas
    self.width = self.winfo_screenwidth()
    self.height = self.winfo_screenheight()
    self.canvas = Canvas(self, width=self.width, height=self.height)
    self.canvas.pack()
    self.canvas.create_text(self.width / 2, self.height / 4,
                            text="Select a function", font="Times 50 italic bold")
    self.num_buttons = 6
    # +2 to leave one button width of space on each side
    # +1 because there is one fewer gap between buttons than there are buttons
    # /4 because the space between buttons is 1/4 of the button width
    button_width = self.width / ((self.num_buttons + 2) + (self.num_buttons + 1) / 4)
    button_height = button_width
    button_gap = button_width / 4
    self.buttons = []
    function = Function()
    function.set_scale(y_min=0, y_max=1, total_time=1)
    for i in range(self.num_buttons):
        # (button_width*num_buttons + button_gap*(num_buttons-1))/2 gives the left-most coordinate
        # i*(button_width + button_gap) gives the current x coordinate
        x1 = self.width / 2 - (button_width * self.num_buttons
                               + button_gap * (self.num_buttons - 1)) / 2 + i * (button_width + button_gap)
        y1 = self.height / 2 - button_height / 2
        x2 = self.width / 2 - (button_width * self.num_buttons
                               + button_gap * (self.num_buttons - 1)) / 2 + (i + 1) * button_width + i * button_gap
        y2 = self.height / 2 + button_height / 2
        self.buttons.append(self.canvas.create_rectangle(x1, y1, x2, y2, fill="white"))
        # set the function that is displayed inside each menu button
        if i == 0:
            function.set_func_type("lin")
            function.set_transformations(0, 0.25, 1, 0.5)
        elif i == 1:
            function.set_func_type("step")
            function.set_step_transformations(0.25, 3 / 4, 3 / 4)
        elif i == 2:
            function.set_func_type("exp")
            function.set_transformations(0, 0, 5, 0.0234)
        elif i == 3:
            function.set_func_type("log")
            function.set_transformations(-1, 0, 30, 0.221)
        elif i == 4:
            function.set_func_type("quad")
            function.set_transformations(0.5, 0.25, 1, 2)
        elif i == 5:
            function.set_func_type("sin")
            function.set_transformations(0, 0.5, 6.3, 0.3)
        function.draw(self.canvas, x1, y1, x2, y2, interval=0.05, width=2)
    # colour the first button orange
    self.canvas.itemconfig(self.buttons[0], fill="orange")
    function.set_func_type("lin")
    function.set_transformations(0, 0.25, 1, 0.5)
    draw_button_info(self.canvas, "left", "select", "right")