def test_gradient():
    """Smoke-test the backprop gradient on the target y = x^2.

    Takes one gradient-descent step (learning rate 0.1) on a single sample
    and checks that the sample's absolute prediction error does not grow.
    Original version only printed the two errors; a test must assert.
    """
    nn = NeuralNetwork([2, 3, 1])
    X = np.arange(-1, 1, 0.01)
    y = np.power(X, 2)  # target function: y = x^2
    error1 = np.abs(compute_activations(X[0], nn)[-1] - y[0])
    derivs = network_gradient(X[0], y[0], nn)
    nn.weights = modify_weights(nn, derivs, 0.1)  # one step, lr = 0.1
    error2 = np.abs(compute_activations(X[0], nn)[-1] - y[0])
    print(error1, error2)
    # A correct gradient step on the same sample must not increase its error
    # (<= rather than < tolerates a zero gradient at initialization).
    assert error2 <= error1
def x_squared_plot():
    """Train a small network on y = x^2 over [-1, 1) and plot the fit.

    Scatters the true targets and overlays the network's predictions in red.
    """
    net = NeuralNetwork([2, 6, 6, 1])
    xs = np.arange(-1, 1, 0.05)
    targets = np.power(xs, 2)
    # SGD returns the trained network; printing=True logs progress.
    net = SGD(xs, targets, net, max_iter=1500, printing=True)
    predictions = [compute_activations(sample, net)[-1] for sample in xs]
    plt.scatter(xs, targets)
    plt.plot(xs, predictions, c='r')
    plt.grid()
    plt.show()
def test_backprop():
    """Regression-test forward activations and backprop deltas.

    Compares against hand-computed reference values for the module-level
    `network` (tanh activations, per the 0.76159416 = tanh(1) entries).
    """
    x = np.array([1, 2])
    y = -2
    activations = ne.compute_activations(x, network)
    expected_activations = [
        np.array([1, 1, 2]),
        np.array([0.76159416, 0.76159416, -0.76159416]),
        -0.90925167399694251,
    ]
    for got, want in zip(activations, expected_activations):
        assert np.allclose(got, want, atol=0.001)
    deltas = ne.compute_deltas(y, activations, network.weights, network.activation_deriv)
    expected_deltas = [
        np.array([0, -0, -1.37425893]),
        np.array([-0.45808631, 0.91617262, -0.45808631]),
        -1.0907483260030575,
    ]
    for got, want in zip(deltas, expected_deltas):
        assert np.allclose(got, want, atol=0.001)
def plot_decision_fn(X, network=None):
    """Return the network's output for each sample in X.

    Args:
        X: iterable of input samples accepted by compute_activations.
        network: network to evaluate; defaults to the module-level `nn`
            for backward compatibility. The original silently relied on
            that global, which raises NameError unless a caller happened
            to define it — passing the network explicitly is preferred.

    Returns:
        list of final-layer activations, one per sample.
    """
    net = network if network is not None else nn  # fall back to global `nn`
    return [compute_activations(x, net)[-1] for x in X]