Example no. 1
0
    def test_1(self):
        """Smoke-test a pickled Ann model on the smiley/frowny image library.

        Loads every image under ../library/<class>/, converts each to 50x50
        grayscale pixel data, then prints the model's hypothesis for the
        first example. Requires the library folders and the pickled model
        file to exist on disk.
        """
        classes = ('smiley', 'frowny')
        arrs = []        # flattened grayscale pixel data, one entry per image
        labels = []      # class name for each entry in arrs
        file_names = []  # original file name for each entry in arrs

        for c in classes:
            folder = os.path.join('..', 'library', c)
            for el in os.listdir(folder):
                img = Image.open(os.path.join(folder, el)).convert('L')
                # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS
                # is the same resampling filter under its current name.
                img = img.resize((50, 50), Image.LANCZOS)
                arrs.append(img.getdata())
                labels.append(c)
                file_names.append(el)

        name = '../Ann-models/model_n_i_2500_n_o_34_n_h_2 2015-05-27 22:15:24.990089.annm'
        # Use a context manager so the model file is closed deterministically
        # instead of leaking the handle via pickle.load(open(...)).
        with open(name, 'rb') as fh:
            model = pickle.load(fh)[0][0]
        ann = Ann(model)
        print(ann.h(arrs[0]))
 def test_1(self):
     """Smoke-test a pickled Ann model on the smiley/frowny image library.

     Loads every image under ../library/<class>/, converts each to 50x50
     grayscale pixel data, then prints the model's hypothesis for the
     first example. Requires the library folders and the pickled model
     file to exist on disk.
     """
     classes = ('smiley', 'frowny')
     arrs = []        # flattened grayscale pixel data, one entry per image
     labels = []      # class name for each entry in arrs
     file_names = []  # original file name for each entry in arrs

     for c in classes:
         folder = os.path.join('..', 'library', c)
         for el in os.listdir(folder):
             img = Image.open(os.path.join(folder, el)).convert('L')
             # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS
             # is the same resampling filter under its current name.
             img = img.resize((50, 50), Image.LANCZOS)
             arrs.append(img.getdata())
             labels.append(c)
             file_names.append(el)

     name = '../Ann-models/model_n_i_2500_n_o_34_n_h_2 2015-05-27 22:15:24.990089.annm'
     # Use a context manager so the model file is closed deterministically
     # instead of leaking the handle via pickle.load(open(...)).
     with open(name, 'rb') as fh:
         model = pickle.load(fh)[0][0]
     ann = Ann(model)
     print(ann.h(arrs[0]))
Example no. 3
0
 def test_2(self):
     """Forward-propagation tests for the Ann network.

     Checks two architectures (no hidden layer, one hidden layer) under
     all-zero weights, all-one weights, random initialization, and
     hand-picked weight matrices.
     """
     # --- Architecture 1: logistic regression (0 hidden layers) ---
     num_in, num_hidden, num_out = 4, 0, 1
     net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)
     sample_a = [1, 2, 3, 4]          # first example
     sample_b = [-1, -1, -1, -1]      # second example

     # All-zero weights: every pre-activation is 0, so h(x) = sigmoid(0) = 0.5.
     for idx, theta in enumerate(net.Thetas):
         self.assertEqual(theta.shape, (1, 5))
         net.Thetas[idx] = np.zeros(theta.shape)
     self.assertEqual(net.h(sample_a), 0.5)
     self.assertEqual(net.h(sample_b), 0.5)

     # All-one weights.
     for idx, theta in enumerate(net.Thetas):
         self.assertEqual(theta.shape, (1, 5))
         net.Thetas[idx] = np.ones(theta.shape)
     self.assertAlmostEqual(net.h(sample_a), 0.999, delta=0.001)
     self.assertAlmostEqual(net.h(sample_b), 0.0474, delta=0.0001)

     # Random initialization: sigmoid output always lies in (0, 1).
     net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)
     self.assertAlmostEqual(net.h(sample_a), 0.5, delta=0.5)
     self.assertAlmostEqual(net.h(sample_b), 0.5, delta=0.5)

     # Hand-picked weight matrix.
     net.Thetas[0] = np.matrix([[1, -1, 0.5, -0.3, 2]])
     self.assertAlmostEqual(net.h(sample_a), 0.786, delta=0.001)
     self.assertAlmostEqual(net.h(sample_b), 0.858, delta=0.001)

     # --- Architecture 2: one hidden layer ---
     num_hidden = 1
     net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)

     # All-zero weights: output again collapses to 0.5.
     for idx, theta in enumerate(net.Thetas):
         net.Thetas[idx] = np.zeros(theta.shape)
     self.assertEqual(net.h(sample_a), 0.5)
     self.assertEqual(net.h(sample_b), 0.5)

     # All-one weights.
     for idx, theta in enumerate(net.Thetas):
         net.Thetas[idx] = np.ones(theta.shape)
     self.assertAlmostEqual(net.h(sample_a), 0.993, delta=0.001)
     self.assertAlmostEqual(net.h(sample_b), 0.767, delta=0.001)

     # Random initialization: outputs stay within (0, 1).
     net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)
     self.assertAlmostEqual(net.h(sample_a), 0.5, delta=0.5)
     self.assertAlmostEqual(net.h(sample_b), 0.5, delta=0.5)

     # Hand-picked weights for both layers (hidden layer rows identical).
     row = [1, -1, 0.5, -0.3, 2]
     net.Thetas[0] = np.matrix([row, row, row, row])
     net.Thetas[1] = np.matrix([[1, 1, -1, 0.5, -1]])
     # a^(1) should be [0.786 0.786 0.786 0.786 1]^T
     self.assertAlmostEqual(net.h(sample_a), 0.545, delta=0.001)
     # a^(1) should be [0.858 0.858 0.858 0.858 1]^T
     self.assertAlmostEqual(net.h(sample_b), 0.571, delta=0.001)
    def test_2(self):
        """Forward-propagation tests for the Ann network.

        Checks two architectures (no hidden layer, one hidden layer) under
        all-zero weights, all-one weights, random initialization, and
        hand-picked weight matrices.
        """
        # --- Architecture 1: logistic regression (0 hidden layers) ---
        num_in, num_hidden, num_out = 4, 0, 1
        net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)
        sample_a = [1, 2, 3, 4]          # first example
        sample_b = [-1, -1, -1, -1]      # second example

        # All-zero weights: every pre-activation is 0, so h(x) = sigmoid(0) = 0.5.
        for idx, theta in enumerate(net.Thetas):
            self.assertEqual(theta.shape, (1, 5))
            net.Thetas[idx] = np.zeros(theta.shape)
        self.assertEqual(net.h(sample_a), 0.5)
        self.assertEqual(net.h(sample_b), 0.5)

        # All-one weights.
        for idx, theta in enumerate(net.Thetas):
            self.assertEqual(theta.shape, (1, 5))
            net.Thetas[idx] = np.ones(theta.shape)
        self.assertAlmostEqual(net.h(sample_a), 0.999, delta=0.001)
        self.assertAlmostEqual(net.h(sample_b), 0.0474, delta=0.0001)

        # Random initialization: sigmoid output always lies in (0, 1).
        net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)
        self.assertAlmostEqual(net.h(sample_a), 0.5, delta=0.5)
        self.assertAlmostEqual(net.h(sample_b), 0.5, delta=0.5)

        # Hand-picked weight matrix.
        net.Thetas[0] = np.matrix([[1, -1, 0.5, -0.3, 2]])
        self.assertAlmostEqual(net.h(sample_a), 0.786, delta=0.001)
        self.assertAlmostEqual(net.h(sample_b), 0.858, delta=0.001)

        # --- Architecture 2: one hidden layer ---
        num_hidden = 1
        net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)

        # All-zero weights: output again collapses to 0.5.
        for idx, theta in enumerate(net.Thetas):
            net.Thetas[idx] = np.zeros(theta.shape)
        self.assertEqual(net.h(sample_a), 0.5)
        self.assertEqual(net.h(sample_b), 0.5)

        # All-one weights.
        for idx, theta in enumerate(net.Thetas):
            net.Thetas[idx] = np.ones(theta.shape)
        self.assertAlmostEqual(net.h(sample_a), 0.993, delta=0.001)
        self.assertAlmostEqual(net.h(sample_b), 0.767, delta=0.001)

        # Random initialization: outputs stay within (0, 1).
        net = Ann(n_i=num_in, n_h=num_hidden, n_o=num_out)
        self.assertAlmostEqual(net.h(sample_a), 0.5, delta=0.5)
        self.assertAlmostEqual(net.h(sample_b), 0.5, delta=0.5)

        # Hand-picked weights for both layers (hidden layer rows identical).
        row = [1, -1, 0.5, -0.3, 2]
        net.Thetas[0] = np.matrix([row, row, row, row])
        net.Thetas[1] = np.matrix([[1, 1, -1, 0.5, -1]])
        # a^(1) should be [0.786 0.786 0.786 0.786 1]^T
        self.assertAlmostEqual(net.h(sample_a), 0.545, delta=0.001)
        # a^(1) should be [0.858 0.858 0.858 0.858 1]^T
        self.assertAlmostEqual(net.h(sample_b), 0.571, delta=0.001)