Example #1
    def test_3(self):

        # Test the dimensions of the Jacobian matrices against the Theta matrices for the first architecture
        n_i1 = 4  # Number of input neurons
        n_h1 = 2  # Number of hidden layers
        n_o1 = 2  # Number of output neurons

        ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1)  # Create this architecture
        x1 = [1, 2, 3, 4]  # Input array for a single example
        y1 = [1, 0]  # Target output for that example
        J = ann1.backward(x1, y1)
        for l in range(0, ann1.L - 1):
            self.assertEqual(ann1.Thetas[l].shape, J[l].shape)

        # Test the dimensions of the Jacobian matrices against the Theta matrices for the second architecture
        n_i1 = 40  # Number of input neurons
        n_h1 = 3  # Number of hidden layers
        n_o1 = 10  # Number of output neurons

        ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1)  # Create this architecture
        x1 = 10 * [1, 2, 3, 4]  # Input array of length 40
        y1 = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]  # Target output with 10 entries
        J = ann1.backward(x1, y1)
        for l in range(0, ann1.L - 1):
            self.assertEqual(ann1.Thetas[l].shape, J[l].shape)

        # Test the dimensions of the Jacobian matrices against the Theta matrices for the third architecture (no hidden layers)
        n_i1 = 40  # Number of input neurons
        n_h1 = 0  # Number of hidden layers
        n_o1 = 10  # Number of output neurons

        ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1)  # Create this architecture
        x1 = 10 * [1, 2, 3, 4]  # Input array of length 40
        y1 = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]  # Target output with 10 entries
        J = ann1.backward(x1, y1)
        for l in range(0, ann1.L - 1):
            self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
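test_3 only checks shapes: backward is expected to return one gradient matrix per weight matrix, and each must have exactly the same dimensions as the corresponding entry of Thetas. Below is a minimal sketch of that invariant, assuming a fully connected layout in which Thetas[l] has shape (units in layer l+1, units in layer l plus one bias column); the layer sizes and internals are illustrative assumptions, not the actual Ann code.

import numpy as np

# Hypothetical layer sizes loosely matching the first architecture above:
# 4 inputs, two hidden layers of (say) 3 units each, 2 outputs.
layer_sizes = [4, 3, 3, 2]

# One weight matrix per layer transition; the extra column holds the bias weights.
Thetas = [np.random.randn(layer_sizes[l + 1], layer_sizes[l] + 1)
          for l in range(len(layer_sizes) - 1)]

# Backpropagation must produce one gradient matrix per weight matrix,
# shaped identically, since every weight gets its own partial derivative.
J = [np.zeros_like(T) for T in Thetas]
assert all(G.shape == T.shape for G, T in zip(J, Thetas))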
Example #3
    def test_4(self):
        # Gradient checking: verify that a numerical approximation of the gradient
        # (almost) equals the derivatives produced by backpropagation

        # First data-set with one example
        arrs = []
        labels = []
        arrs.append([1, 2, 4, 5, 5, 5])
        labels.append('cat')
        ann = Ann(arrs, labels, n_h=10)  # Create Ann with these train_examples and labels
        J = ann.backward(ann.train_examples[0].arr, ann.train_examples[0].y)
        T_original = copy.deepcopy(ann.Thetas)

        for l in range(0, ann.L - 1):
            shape_J = J[l].shape
            eps = 0.0001  # Epsilon for the numerical approximation of the gradient
            for i in range(0, shape_J[0]):
                for j in range(0, shape_J[1]):
                    T_e = np.zeros(shape_J)  # Perturbation matrix: eps at (i, j), zeros elsewhere
                    T_e[i][j] = eps
                    ann.Thetas[l] = T_original[l] + T_e
                    cost_e = ann.cost()  # Cost at Theta + eps
                    ann.Thetas[l] = T_original[l] - T_e
                    cost_minus_e = ann.cost()  # Cost at Theta - eps
                    P = (cost_e - cost_minus_e) / (2 * eps)  # Numerical approximation
                    J_ij = J[l].item(i, j)  # Backpropagation derivative

                    # print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))

                    # if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
                    #     self.fail()

                    self.assertAlmostEqual(P, J_ij, delta=0.001)
                    ann.Thetas = copy.deepcopy(T_original)  # Restore the original weights

        # Second data-set with several train_examples
        arrs = []
        labels = []
        classes = ('cat', 'dog')
        for m in range(0, 100):
            arr = [random.random() for x in range(0, 20)]
            label = classes[random.random() > 0.5]
            arrs.append(arr)
            labels.append(label)
        ann = Ann(arrs, labels, n_h=2)  # Create Ann with these train_examples and labels
        J = ann.backward_batch()  # L-1 matrices of partial derivatives of the batch cost
        T_original = copy.deepcopy(ann.Thetas)

        for l in range(0, ann.L - 1):
            shape_J = J[l].shape
            eps = 0.0001  # Epsilon for the numerical approximation of the gradient
            # Spot-check two random rows and two random columns of each layer's gradient
            a = random.sample(range(0, shape_J[0]), 2)
            b = random.sample(range(0, shape_J[1]), 2)
            for i in a:
                for j in b:
                    T_e = np.zeros(shape_J)  # Perturbation matrix: eps at (i, j), zeros elsewhere
                    T_e[i][j] = eps
                    ann.Thetas[l] = T_original[l] + T_e
                    cost_e = ann.cost()  # Cost at Theta + eps
                    ann.Thetas[l] = T_original[l] - T_e
                    cost_minus_e = ann.cost()  # Cost at Theta - eps
                    P = (cost_e - cost_minus_e) / (2 * eps)  # Numerical approximation
                    J_ij = J[l].item(i, j)  # Backpropagation derivative

                    self.assertAlmostEqual(P, J_ij, delta=0.001)
                    ann.Thetas = copy.deepcopy(T_original)  # Restore the original weights
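test_4 is the standard central-difference gradient check: nudge one weight by ±eps, recompute the cost, and compare (cost(Theta + eps) - cost(Theta - eps)) / (2 * eps) against the backpropagation derivative for that weight. Here is a self-contained sketch of the same idea on a toy cost function; the quadratic cost and its analytic gradient are illustrative stand-ins for Ann.cost and Ann.backward, not part of the library.

import numpy as np

def cost(theta):
    # Toy cost 0.5 * ||theta||^2, whose exact gradient is theta itself
    return 0.5 * np.sum(theta ** 2)

def analytic_gradient(theta):
    return theta  # What "backpropagation" yields for this toy cost

theta = np.random.randn(3, 4)
eps = 1e-4
numeric = np.zeros_like(theta)
for i in range(theta.shape[0]):
    for j in range(theta.shape[1]):
        e = np.zeros_like(theta)
        e[i, j] = eps  # Perturb a single entry, exactly as T_e does above
        numeric[i, j] = (cost(theta + e) - cost(theta - e)) / (2 * eps)

# Central differences agree with the exact gradient to roughly eps**2 accuracy
assert np.allclose(numeric, analytic_gradient(theta), atol=1e-3)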