def test_backward(self):
    layer = Sigmoid()
    x = np.random.rand(2)
    y = layer.forward(x)
    deriv_grad = layer.backward(np.ones(1))
    numerical_grad_matrix = numerical_gradient.calc(layer.forward, x)
    # the numerical grad in this case is a matrix made of zeros with
    # dJ/dx_i only in the diagonal
    num_grad = np.diagonal(numerical_grad_matrix)
    numerical_gradient.assert_are_similar(deriv_grad, num_grad)
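# The 'numerical_gradient' helper used by the test above is not shown in this snippet.
# The sketch below is one plausible finite-difference implementation, assuming calc()
# builds the full Jacobian d f(x)_i / d x_j and assert_are_similar() compares with a
# tolerance; the names mirror the test, but the bodies, eps, and tol are assumptions.
import numpy as np

class numerical_gradient:
    @staticmethod
    def calc(f, x, eps=1e-6):
        # central finite differences: jac[i, j] = d f(x)_i / d x_j
        x = x.astype(float)
        f0 = np.atleast_1d(f(x))
        jac = np.zeros((f0.size, x.size))
        for j in range(x.size):
            step = np.zeros_like(x)
            step[j] = eps
            jac[:, j] = (np.atleast_1d(f(x + step)) - np.atleast_1d(f(x - step))) / (2 * eps)
        return jac

    @staticmethod
    def assert_are_similar(a, b, tol=1e-4):
        assert np.allclose(a, b, atol=tol), f'{a} != {b}'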
# forward pass through both layers and the cost for this batch
layer1.forward(trainingData[batch])
activation1.forward(layer1.outputs)
layer2.forward(activation1.outputs)
activation2.forward(layer2.outputs)
cost.forward(activation2.outputs, labels[batch], 10)

# count correct predictions in the batch
for sample in range(activation2.outputs.shape[1]):
    if np.argmax(activation2.outputs[:, sample]) == np.argmax(labels[batch, sample]):
        correct += 1

# backward pass: compute each layer's local derivatives
cost.backward(activation2.outputs, labels[batch], 10)
activation2.backward(layer2.outputs, layer2.weights.shape[0], BATCH_SIZE)
layer2.backward(activation1.outputs)
activation1.backward(layer1.outputs)
layer1.backward(trainingData[batch])

# chain rule: combine the cost gradient with the output activation's Jacobian, per sample
delta1 = np.zeros((cost.prime.shape[0], cost.prime.shape[1]))
for i in range(cost.prime.shape[0]):
    delta1[i] = np.matmul(cost.prime[i], activation2.prime[i])

# propagate through layer2's input derivative and the hidden activation's Jacobian
delta1_wrt_L2 = np.matmul(delta1, layer2.input_prime)
delta2 = np.zeros((activation1.prime.shape[0], activation1.prime.shape[2]))
for i in range(activation1.prime.shape[2]):
    delta2[:, i] = np.matmul(delta1_wrt_L2[i], activation1.prime[:, :, i])

C_wrt_W2 = np.zeros((delta1.shape[0], delta1.shape[1], layer2.weights_prime.shape[1]))
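# The delta1 loop above contracts a gradient row with a per-sample Jacobian. Below is a
# minimal, self-contained sketch of that same contraction and its vectorized equivalent
# with np.einsum; the shapes (4 samples, 10 outputs) are illustrative assumptions only,
# inferred from the indexing in the loop rather than confirmed by the original code.
import numpy as np

cost_prime = np.random.rand(4, 10)             # dC/da for each sample
activation_prime = np.random.rand(4, 10, 10)   # output-activation Jacobian per sample

delta_loop = np.zeros((4, 10))
for i in range(4):
    delta_loop[i] = np.matmul(cost_prime[i], activation_prime[i])

# the same per-sample vector-Jacobian products in one vectorized call
delta_vec = np.einsum('ij,ijk->ik', cost_prime, activation_prime)
assert np.allclose(delta_loop, delta_vec)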
import numpy as np

try:
    from layers import Sigmoid
except ImportError:
    print('Library Module Can Not Found')

# Test1 (Vector)
layer = Sigmoid()
x = np.array([0.1, -0.2, 0.3, -0.4, 0.5])
print(x)

y = layer.forward(x)
print(y)
print(layer.out)

dout = np.array([-0.1, -0.2, -0.3, 0.4, -0.5])
dout = layer.backward(dout)
print(dout)

print('=========================================')

# Test2 (Matrix)
x = np.array([
    [0.1, -0.5, 1.0],
    [0.2, -0.6, 2.0],
    [0.3, -0.7, 3.0],
    [0.4, -0.8, 4.0]
])
y = layer.forward(x)
print(y)
print(layer.out)
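# The Sigmoid layer imported above is not shown here. A minimal sketch consistent with
# how the tests use it: forward() returns the output and caches it in self.out, and
# backward() scales dout by the sigmoid derivative out * (1 - out). The method and
# attribute names come from the test code; the bodies are an assumption.
import numpy as np

class Sigmoid:
    def __init__(self):
        self.out = None

    def forward(self, x):
        # element-wise logistic function; cache the output for the backward pass
        self.out = 1.0 / (1.0 + np.exp(-x))
        return self.out

    def backward(self, dout):
        # dL/dx = dL/dy * y * (1 - y), using the cached forward output
        return dout * self.out * (1.0 - self.out)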