def score_funct(coeff):
    """Score the given polynomial coefficients against the training data.

    NOTE(review): the original `def` line was lost in formatting; the
    parameter name `coeff` is taken from the body. Confirm against the
    upstream example.

    @param coeff: The polynomial coefficients to evaluate.
    @return: The score. We are trying to minimize this score.
    """
    global input_data
    global output_data
    # Calculate the actual output of the polynomial with the specified coefficients.
    actual_output = []
    for input_data in training_input:
        x = input_data[0]
        output_data = poly(coeff, x)
        actual_output.append(output_data)
    # BUG FIX: the original fell through without returning, so the trainer
    # would have received None for every candidate. Return the sum-of-squares
    # error, matching the sibling polynomial scorer in this project.
    return ErrorCalculation.sse(np.array(actual_output), training_ideal)


# Build the training set.
training_input, training_ideal = build_training_set()

# Extract the input and ideal training.
training_input = np.array(training_input)
training_ideal = np.array(training_ideal)

# Starting point for coefficients.
x0 = [0, 0, 0]

# Perform the train.
train = TrainGreedRandom(-10, 10)
train.stop_score = 100
train.display_iteration = True
result = train.train(x0, score_funct)

# Evaluate the polynomial.
print("Final polynomial")
print_poly(result)
def score_funct(x):
    """Score a candidate long-term memory vector.

    @param x: The long term memory that we are to score.
    @return: The MSE error.
    """
    # Install the candidate weights into the network under test.
    network.copy_memory(x)
    # Run every training input through the network and collect the outputs.
    actual_output = [network.compute_regression(row) for row in training_input]
    # The score is the MSE between produced and ideal outputs.
    return ErrorCalculation.mse(np.array(actual_output), training_ideal)


# Use the initial long term memory of the network as the starting state.
x0 = list(network.long_term_memory)

# Train with greedy random.
train = TrainGreedRandom(-1, 1)
train.display_iteration = True
train.max_iterations = 100000
train.stop_score = 0.05
result = train.train(x0, score_funct)

# Copy the final trained long-term memory to the network so we can use it for evaluation.
network.copy_memory(result)

# Display the output for the XOR. XOR will not be trained perfectly. You should see that
# the (0,1) and (1,0) inputs are both close to 1.0, whereas the (1,1) and (0,0) are
# close to 0.0.
for input_data in training_input:
    output_data = network.compute_regression(input_data)
    print(str(input_data) + " -> " + str(output_data))
def score_funct(x):
    """Evaluate one candidate solution.

    @param x: The long term memory that we are to score.
    @return: The MSE error.
    """
    # Setup the long-term memory that we would like to test.
    network.copy_memory(x)
    # Present all inputs to the network and accumulate the output for each.
    predictions = []
    for sample in training_input:
        predictions.append(network.compute_regression(sample))
    # Compare the actual output with the ideal expected output and
    # calculate the MSE error.
    return ErrorCalculation.mse(np.array(predictions), training_ideal)


# Start the search from the network's current long-term memory.
x0 = list(network.long_term_memory)

# Configure and run greedy-random training.
train = TrainGreedRandom(-1, 1)
train.max_iterations = 100000
train.stop_score = 0.05
train.display_iteration = True
result = train.train(x0, score_funct)

# Load the trained weights back into the network for evaluation.
network.copy_memory(result)

# Show the network's answers for each XOR case. Training is approximate:
# (0,1) and (1,0) should land near 1.0, (0,0) and (1,1) near 0.0.
for input_data in training_input:
    output_data = network.compute_regression(input_data)
    print(str(input_data) + " -> " + str(output_data))
def score_funct(coeff):
    """Score candidate polynomial coefficients.

    @param coeff: The polynomial coefficients to evaluate.
    @return: The score. We are trying to minimize this score.
    """
    # The loop below intentionally rebinds these module-level names,
    # matching the original's side effect.
    global input_data
    global output_data
    # Evaluate the polynomial at every training input, recording each output.
    actual_output = []
    for input_data in training_input:
        output_data = poly(coeff, input_data[0])
        actual_output.append(output_data)
    # Sum-of-squares error against the ideal outputs is the score.
    return ErrorCalculation.sse(np.array(actual_output), training_ideal)


# Build the training set, then convert both halves to numpy arrays.
training_input, training_ideal = build_training_set()
training_input = np.array(training_input)
training_ideal = np.array(training_ideal)

# All coefficients start at zero.
x0 = [0, 0, 0]

# Run greedy-random training over the coefficient space.
train = TrainGreedRandom(-10, 10)
train.display_iteration = True
train.stop_score = 100
result = train.train(x0, score_funct)

# Evaluate the polynomial.
print("Final polynomial")
print_poly(result)