# Assumed third-party imports for this section (they are likely already present
# at the top of the full module). Project-specific helpers and constants
# (Matrix, save_rows_to_csv, find_mesh_resistances, solve_linear_network,
# csv_to_network_branch_matrices, successive_over_relaxation, jacobi_relaxation,
# NeuralNetwork, stochastic_gradient_descent, the MNIST converters, and
# NETWORK_DIRECTORY / EPSILON / X_QUERY / Y_QUERY / NUM_H_ITERATIONS) are
# assumed to come from the project's own modules.
import random

import matplotlib.pyplot as plt
import mnist
import numpy as np
import numpy.polynomial.polynomial as poly
import sympy as sp
from matplotlib.ticker import MaxNLocator


def q2d(resistances):
    """
    Question 2(d): Plot a graph of R versus N. Find a function R(N) that fits
    the curve reasonably well and is asymptotically correct as N tends to
    infinity, as far as you can tell.

    :param resistances: a dictionary of resistance values for each N value
    """
    print('\n=== Question 2(d) ===')
    f = plt.figure()
    ax = f.gca()
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    x_range = [float(x) for x in resistances.keys()]
    y_range = [float(y) for y in resistances.values()]
    plt.plot(x_range, y_range, 'o', label='Data points')
    x_new = np.linspace(x_range[0], x_range[-1], num=len(x_range) * 10)
    # Fit R = a*log(N) + b; polyfit returns coefficients in increasing order,
    # so coeffs[0] is the intercept and coeffs[1] is the slope.
    coeffs = poly.polyfit(np.log(x_range), y_range, deg=1)
    polynomial_fit = poly.polyval(np.log(x_new), coeffs)
    plt.plot(x_new, polynomial_fit, 'C0-',
             label=r'${:.2f}\log(N) + {:.2f}$'.format(coeffs[1], coeffs[0]))
    plt.xlabel('N')
    plt.ylabel(r'R ($\Omega$)')
    plt.grid(True)
    plt.legend()
    f.savefig('report/plots/q2d.pdf', bbox_inches='tight')
    save_rows_to_csv('report/csv/q2a.csv',
                     zip(resistances.keys(), resistances.values()),
                     header=('N', 'R (Omega)'))

def q1d():
    """
    Question 1(d): Write a program that reads from a file a list of network
    branches (Jk, Rk, Ek) and a reduced incidence matrix, and finds the
    voltages at the nodes of the network. Use the code from part (a) to solve
    the matrix problem.
    """
    print('\n=== Question 1(d) ===')
    for i in range(1, 7):
        A = Matrix.csv_to_matrix('{}/incidence_matrix_{}.csv'.format(
            NETWORK_DIRECTORY, i))
        Y, J, E = csv_to_network_branch_matrices(
            '{}/network_branches_{}.csv'.format(NETWORK_DIRECTORY, i))
        # print('Y: {}'.format(Y))
        # print('J: {}'.format(J))
        # print('E: {}'.format(E))
        x = solve_linear_network(A, Y, J, E)
        print('Solved for x in network {}:'.format(i))  # TODO: Create my own test circuits here
        node_numbers = []
        voltage_values = []
        for j in range(len(x)):
            print('V{} = {:.3f} V'.format(j + 1, x[j][0]))
            node_numbers.append(j + 1)
            voltage_values.append('{:.3f}'.format(x[j][0]))
        save_rows_to_csv('report/csv/q1_circuit_{}.csv'.format(i),
                         zip(node_numbers, voltage_values),
                         header=('Node', 'Voltage (V)'))

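# Note on the assumed formulation (not spelled out in q1d itself):
# solve_linear_network is taken to assemble the reduced nodal system
#
#     (A Y A^T) v = A (J - Y E)
#
# from the reduced incidence matrix A, the diagonal branch admittance matrix
# Y = diag(1/Rk), and the source vectors J and E, and then to solve for the
# node voltages v with the Choleski routine from part (a). Sign conventions
# and the exact contract are defined by that helper; this sketch is only for
# orientation.
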
def q3b():
    """
    Question 3(b): With h = 0.02, explore the effect of varying omega.

    :return: the best omega value found for SOR
    """
    print('\n=== Question 3(b) ===')
    h = 0.02
    min_num_iterations = float('inf')
    best_omega = float('inf')
    omegas = []
    num_iterations = []
    potentials = []
    for omega_diff in range(10):
        omega = 1 + omega_diff / 10
        print('Omega: {}'.format(omega))
        iter_relaxer = successive_over_relaxation(omega, EPSILON, h)
        print('Quarter grid: {}'.format(iter_relaxer.phi.mirror_horizontal()))
        print('Num iterations: {}'.format(iter_relaxer.num_iterations))
        potential = iter_relaxer.get_potential(X_QUERY, Y_QUERY)
        print('Potential at ({}, {}): {:.3f} V'.format(X_QUERY, Y_QUERY, potential))
        if iter_relaxer.num_iterations < min_num_iterations:
            best_omega = omega
            min_num_iterations = iter_relaxer.num_iterations
        omegas.append(omega)
        num_iterations.append(iter_relaxer.num_iterations)
        potentials.append('{:.3f}'.format(potential))
    print('Best number of iterations: {}'.format(min_num_iterations))
    print('Best omega: {}'.format(best_omega))

    f = plt.figure()
    x_range = omegas
    y_range = num_iterations
    plt.plot(x_range, y_range, 'o-', label='Number of iterations')
    plt.xlabel(r'$\omega$')
    plt.ylabel('Number of Iterations')
    plt.grid(True)
    f.savefig('report/plots/q3b.pdf', bbox_inches='tight')

    save_rows_to_csv('report/csv/q3b_potential.csv', zip(omegas, potentials),
                     header=('Omega', 'Potential (V)'))
    save_rows_to_csv('report/csv/q3b_iterations.csv', zip(omegas, num_iterations),
                     header=('Omega', 'Iterations'))
    return best_omega

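def run_q3_sor_study():
    """Hedged convenience wrapper, not part of the original assignment code:
    feeds the omega found in q3b() straight into the mesh-refinement study in
    q3c(), assuming both functions keep their current signatures."""
    best_omega = q3b()
    return q3c(best_omega)
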
def q2c():
    """
    Question 2(c): Modify your program to exploit the sparse nature of the
    matrices to save computation time.

    :return: the mesh resistances and the timings for N = 2, 3 ... 10
    """
    print('\n=== Question 2(c) ===')
    resistances, runtimes, choleski_runtimes = find_mesh_resistances(banded=True)
    save_rows_to_csv('report/csv/q2c.csv',
                     zip(runtimes.keys(), runtimes.values()),
                     header=('N', 'Runtime (s)'))
    save_rows_to_csv('report/csv/q2c_choleski.csv',
                     zip(choleski_runtimes.keys(), choleski_runtimes.values()),
                     header=('N', 'Runtime (ms)'))
    return resistances, runtimes, choleski_runtimes

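def run_q2_mesh_study():
    """Hedged convenience wrapper, not part of the original assignment code:
    runs the banded solver from q2c() and hands its resistances to the q2d()
    plot, so parts (c) and (d) can be reproduced in one call."""
    resistances, runtimes, choleski_runtimes = q2c()
    q2d(resistances)
    return runtimes, choleski_runtimes
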
def q2ab():
    """
    Question 2(a): Using the program you developed in question 1, find the
    resistance, R, between the node at the bottom left corner of the mesh and
    the node at the top right corner of the mesh, for N = 2, 3, ..., 10.

    Question 2(b): Are the timings you observe for your practical
    implementation consistent with this?

    :return: the timings for finding the mesh resistance for N = 2, 3 ... 10
    """
    print('\n=== Question 2(a)(b) ===')
    _, runtimes, choleski_runtimes = find_mesh_resistances(banded=False)
    save_rows_to_csv('report/csv/q2b.csv',
                     zip(runtimes.keys(), runtimes.values()),
                     header=('N', 'Runtime (s)'))
    save_rows_to_csv('report/csv/q2b_choleski.csv',
                     zip(choleski_runtimes.keys(), choleski_runtimes.values()),
                     header=('N', 'Runtime (ms)'))
    return runtimes, choleski_runtimes

def random_predict_mnist(num_epochs=100, csv_filename='random'):
    """Baseline that guesses a uniformly random digit for every MNIST test
    example and records the resulting test accuracy per epoch."""
    test_output = mnist.test_labels()
    num_test_examples = test_output.size
    rows = []
    header = 'epoch,test_accuracy'
    print(header)
    for epoch in range(num_epochs):
        correct = 0
        for label in test_output:
            random_choice = random.randint(0, 9)
            if label == random_choice:
                correct += 1
        test_accuracy = correct / num_test_examples
        print('{},{:.6f}'.format(epoch, test_accuracy))
        rows.append((epoch, test_accuracy))
    save_rows_to_csv(csv_filename, rows, header.split(','))

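# Hedged usage note: the random baseline above should hover around 10% test
# accuracy and gives a floor to compare test_mnist_one_hot() against, e.g.
#
#     random_predict_mnist(num_epochs=10, csv_filename='report/csv/random.csv')
#
# The csv path in this example is illustrative, not one taken from the project.
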
def q3c(omega):
    """
    Question 3(c): With an appropriate value of omega, chosen from the above
    experiment, explore the effect of decreasing h on the potential.

    :param omega: the omega value to be used by SOR
    :return: the h values, potential values and number of iterations
    """
    print('\n=== Question 3(c): SOR ===')
    h = 0.04
    h_values = []
    potential_values = []
    iterations_values = []
    for i in range(NUM_H_ITERATIONS):
        h = h / 2
        print('h: {}'.format(h))
        print('1/h: {}'.format(1 / h))
        iter_relaxer = successive_over_relaxation(omega, EPSILON, h)
        # print(phi.mirror_horizontal())
        potential = iter_relaxer.get_potential(X_QUERY, Y_QUERY)
        num_iterations = iter_relaxer.num_iterations
        print('Num iterations: {}'.format(num_iterations))
        print('Potential at ({}, {}): {:.3f} V'.format(X_QUERY, Y_QUERY, potential))
        h_values.append(1 / h)
        potential_values.append('{:.3f}'.format(potential))
        iterations_values.append(num_iterations)

    f = plt.figure()
    x_range = h_values
    # The potentials are stored as formatted strings for the CSV output, so
    # convert back to floats before plotting.
    y_range = [float(p) for p in potential_values]
    plt.plot(x_range, y_range, 'o-', label='Data points')
    plt.xlabel('1 / h')
    plt.ylabel('Potential at [0.06, 0.04] (V)')
    plt.grid(True)
    f.savefig('report/plots/q3c_potential.pdf', bbox_inches='tight')

    f = plt.figure()
    x_range = h_values
    y_range = iterations_values
    x_new = np.linspace(x_range[0], x_range[-1], num=len(x_range) * 10)
    polynomial_coeffs = poly.polyfit(x_range, y_range, deg=3)
    polynomial_fit = poly.polyval(x_new, polynomial_coeffs)
    N = sp.symbols("1/h")
    poly_label = sum(
        sp.S("{:.5f}".format(v)) * N**i
        for i, v in enumerate(polynomial_coeffs))
    equation = '${}$'.format(sp.printing.latex(poly_label))
    plt.plot(x_new, polynomial_fit, 'C0-', label=equation)
    plt.plot(x_range, y_range, 'o', label='Data points')
    plt.xlabel('1 / h')
    plt.ylabel('Number of Iterations')
    plt.grid(True)
    plt.legend(fontsize='small')
    f.savefig('report/plots/q3c_iterations.pdf', bbox_inches='tight')

    save_rows_to_csv('report/csv/q3c_potential.csv',
                     zip(h_values, potential_values),
                     header=('1/h', 'Potential (V)'))
    save_rows_to_csv('report/csv/q3c_iterations.csv',
                     zip(h_values, iterations_values),
                     header=('1/h', 'Iterations'))
    return h_values, potential_values, iterations_values

def q3d():
    """
    Question 3(d): Use the Jacobi method to solve this problem for the same
    values of h used in part (c).

    :return: the h values, potential values and number of iterations
    """
    print('\n=== Question 3(d): Jacobi ===')
    h = 0.04
    h_values = []
    potential_values = []
    iterations_values = []
    for i in range(NUM_H_ITERATIONS):
        h = h / 2
        print('h: {}'.format(h))
        iter_relaxer = jacobi_relaxation(EPSILON, h)
        potential = iter_relaxer.get_potential(X_QUERY, Y_QUERY)
        num_iterations = iter_relaxer.num_iterations
        print('Num iterations: {}'.format(num_iterations))
        print('Potential at ({}, {}): {:.3f} V'.format(X_QUERY, Y_QUERY, potential))
        h_values.append(1 / h)
        potential_values.append('{:.3f}'.format(potential))
        iterations_values.append(num_iterations)

    f = plt.figure()
    x_range = h_values
    # The potentials are stored as formatted strings for the CSV output, so
    # convert back to floats before plotting.
    y_range = [float(p) for p in potential_values]
    plt.plot(x_range, y_range, 'C1o-', label='Data points')
    plt.xlabel('1 / h')
    plt.ylabel('Potential at [0.06, 0.04] (V)')
    plt.grid(True)
    f.savefig('report/plots/q3d_potential.pdf', bbox_inches='tight')

    f = plt.figure()
    x_range = h_values
    y_range = iterations_values
    plt.plot(x_range, y_range, 'C1o', label='Data points')
    plt.xlabel('1 / h')
    plt.ylabel('Number of Iterations')
    x_new = np.linspace(x_range[0], x_range[-1], num=len(x_range) * 10)
    polynomial_coeffs = poly.polyfit(x_range, y_range, deg=4)
    polynomial_fit = poly.polyval(x_new, polynomial_coeffs)
    N = sp.symbols("1/h")
    # Note: the legend label negates the coefficients of degree 3 and above;
    # the plotted fit itself uses polynomial_coeffs unchanged.
    poly_label = sum(
        sp.S("{:.5f}".format(v if i < 3 else -v)) * N**i
        for i, v in enumerate(polynomial_coeffs))
    equation = '${}$'.format(sp.printing.latex(poly_label))
    plt.plot(x_new, polynomial_fit, 'C1-', label=equation)
    plt.grid(True)
    plt.legend(fontsize='small')
    f.savefig('report/plots/q3d_iterations.pdf', bbox_inches='tight')

    save_rows_to_csv('report/csv/q3d_potential.csv',
                     zip(h_values, potential_values),
                     header=('1/h', 'Potential (V)'))
    save_rows_to_csv('report/csv/q3d_iterations.csv',
                     zip(h_values, iterations_values),
                     header=('1/h', 'Iterations'))
    return h_values, potential_values, iterations_values

def test_mnist_one_hot(num_train_examples=-1, num_test_examples=-1,
                       hidden_layers=(100,), sigmoid='tanh',
                       learning_rate=0.01, layer_decay=1.0, momentum=0.0,
                       batch_size=100, num_epochs=100, csv_filename=None,
                       return_test_accuracies=True):
    """Trains a one-hot MNIST classifier and reports per-epoch accuracy and loss."""
    # Collect and preprocess the data.
    if sigmoid == 'logistic':
        train_input = convert_mnist_images_logistic(
            mnist.train_images()[:num_train_examples])
        train_output = convert_mnist_labels_one_hot(
            mnist.train_labels()[:num_train_examples],
            positive=0.9, negative=0.1)
        test_input = convert_mnist_images_logistic(
            mnist.test_images()[:num_test_examples])
        test_output = convert_mnist_labels_one_hot(
            mnist.test_labels()[:num_test_examples],
            positive=0.9, negative=0.1)
    elif sigmoid == 'tanh':
        train_input, mean_shift, std_scale = convert_mnist_images_train_tanh(
            mnist.train_images()[:num_train_examples])
        train_output = convert_mnist_labels_one_hot(
            mnist.train_labels()[:num_train_examples],
            positive=1.0, negative=-1.0)
        test_input = convert_mnist_images_test_tanh(
            mnist.test_images()[:num_test_examples], mean_shift, std_scale)
        test_output = convert_mnist_labels_one_hot(
            mnist.test_labels()[:num_test_examples],
            positive=1.0, negative=-1.0)
    else:
        raise ValueError('Invalid sigmoid function.')

    # Create and train the neural network.
    layer_sizes = (784,) + hidden_layers + (10,)
    weight_decay = 0.0
    nn = NeuralNetwork(layer_sizes, sigmoid=sigmoid, weight_decay=weight_decay)
    num_examples = train_input.shape[0]
    num_iterations = (num_examples // batch_size) * num_epochs

    rows = None
    if csv_filename is not None:
        rows = []
    test_accuracies = None
    if return_test_accuracies:
        test_accuracies = []

    def callback(iteration):
        # Report accuracy and loss once per epoch.
        if iteration % (num_examples // batch_size) == 0:
            epoch = iteration // (num_examples // batch_size)
            training_prediction_accuracy = get_prediction_accuracy(
                nn, train_input, train_output)
            test_prediction_accuracy = get_prediction_accuracy(
                nn, test_input, test_output)
            training_loss = nn.get_loss(train_input, train_output)
            test_loss = nn.get_loss(test_input, test_output)
            print('{},{:.6f},{:.6f},{:.6f},{:.6f}'.format(
                epoch, training_prediction_accuracy, test_prediction_accuracy,
                training_loss, test_loss))
            if csv_filename is not None:
                rows.append((epoch, training_prediction_accuracy,
                             test_prediction_accuracy, training_loss, test_loss))
            if return_test_accuracies:
                test_accuracies.append(test_prediction_accuracy)

    print('Network Parameters')
    print('layer_sizes: {}, sigmoid: {}, weight_decay: {}'.format(
        layer_sizes, sigmoid, weight_decay))
    print('Training Parameters')
    print('num_iterations: {}, learning_rate: {}, layer_decay: {}, '
          'momentum: {}, batch_size: {}'.format(
              num_iterations, learning_rate, layer_decay, momentum, batch_size))
    print('')
    header = 'epoch,training_accuracy,test_accuracy,training_loss,test_loss'
    print(header)

    stochastic_gradient_descent(nn, train_input, train_output,
                                num_iterations=num_iterations,
                                learning_rate=learning_rate,
                                layer_decay=layer_decay,
                                momentum=momentum,
                                batch_size=batch_size,
                                callback=callback)

    if csv_filename is not None:
        save_rows_to_csv(csv_filename, rows, header.split(','))
    if return_test_accuracies:
        return test_accuracies
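

if __name__ == '__main__':
    # Hedged driver sketch, assumed rather than taken from the original file:
    # runs the assignment questions in dependency order. The full MNIST
    # training run is long, so only an illustrative call is left commented out,
    # and its csv path is a placeholder.
    q1d()
    q2ab()
    run_q2_mesh_study()
    run_q3_sor_study()
    q3d()
    random_predict_mnist(num_epochs=1)
    # test_mnist_one_hot(hidden_layers=(100,), num_epochs=10,
    #                    csv_filename='report/csv/mnist_one_hot.csv')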