Example #1
0
 def test_sigmoid(self):
     """Check activation.sigmoid against torch's built-in sigmoid on sample inputs."""
     # Example unit test; add more like it to cover other functions and classes.
     samples = torch.FloatTensor(
         [[-140, -0.2, -0.6, 0, 0.1, 0.5, 2, 50],
          [-1, -20, -0.8, 10, 1, 0.5, 2.771, 41]])
     tolerance = 0.000001
     # Every element must match the reference implementation within tolerance.
     deviation = torch.abs(activation.sigmoid(samples) - samples.sigmoid())
     self.assertTrue(torch.le(deviation, tolerance).all())
Example #2
0
    def calculate_grad(self, inputs, d1, d2, dout):
        """Compute gradients of the loss w.r.t. every weight and bias.

        Used during backpropagation; relies on the pre-activations z1/z2
        stored in self.cache by the forward pass.

        Args:
            inputs (torch.tensor): inputs to train neural network. Size (batch_size, N_in)
            d1 (torch.tensor): error at hidden layer 1. Size like a1 (or z1)
            d2 (torch.tensor): error at hidden layer 2. Size like a2 (or z2)
            dout (torch.tensor): error at output. Size like aout or a3 (or z3)

        Returns:
            dw1 (torch.tensor): Gradient of loss w.r.t. w1. Size like w1
            db1 (torch.tensor): Gradient of loss w.r.t. b1. Size like b1
            dw2 (torch.tensor): Gradient of loss w.r.t. w2. Size like w2
            db2 (torch.tensor): Gradient of loss w.r.t. b2. Size like b2
            dw3 (torch.tensor): Gradient of loss w.r.t. w3. Size like w3
            db3 (torch.tensor): Gradient of loss w.r.t. b3. Size like b3
        """
        # Recover the hidden-layer activations from the cached pre-activations.
        a1 = activation.sigmoid(self.cache['z1'])
        a2 = activation.sigmoid(self.cache['z2'])

        # Weight gradients: dW_k = d_k^T . a_(k-1), with inputs acting as a0.
        dw1 = torch.matmul(torch.t(d1), inputs)
        dw2 = torch.matmul(torch.t(d2), a1)
        dw3 = torch.matmul(torch.t(dout), a2)

        # Bias gradients: sum each layer's error over the batch dimension.
        db1 = torch.sum(d1, 0)
        db2 = torch.sum(d2, 0)
        db3 = torch.sum(dout, 0)

        return dw1, db1, dw2, db2, dw3, db3
Example #3
0
    def forward(self, inputs):
        """Run the forward pass and return a score for each class.

        Args:
            inputs (torch.tensor): inputs to train neural network. Size (batch_size, N_in)

        Returns:
            outputs (torch.tensor): predictions from neural network. Size (batch_size, N_out)
        """
        # Layer 1: weighted sum (cached for backprop) followed by sigmoid.
        z1 = self.weighted_sum(inputs, self.weights['w1'], self.biases['b1'])
        self.cache['z1'] = z1
        a1 = activation.sigmoid(z1)

        # Layer 2: same pattern, fed by layer 1's activation.
        z2 = self.weighted_sum(a1, self.weights['w2'], self.biases['b2'])
        self.cache['z2'] = z2
        a2 = activation.sigmoid(z2)

        # Output layer: softmax over the final weighted sum gives the scores.
        z3 = self.weighted_sum(a2, self.weights['w3'], self.biases['b3'])
        self.cache['z3'] = z3
        return activation.softmax(z3)