def conv_layer(x):
    """
    The derivative check in the gradient checker is taken with respect to the
    input of this function, so the input should be z: the backward pass below
    computes ∂loss/∂z.
    """
    conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2)
    sigmoid1 = nn.Sigmoid()
    conv2 = nn.Conv2d(in_channels=2, out_channels=4, kernel_size=2)
    sigmoid2 = nn.Sigmoid()
    flatten = nn.Flatten()
    linear = nn.Linear(4, 2)
    softmax = nn.Softmax()

    # forward pass
    a = sigmoid1(conv1(x))
    a = sigmoid2(conv2(a))
    a_flatten = flatten(a)
    dist = softmax(linear(a_flatten))

    # cross-entropy loss against a fixed one-hot label (class 1 for every sample)
    labels = np.zeros(dist.shape)
    labels[:, 1] = 1
    loss = -np.log(np.sum(dist * labels, axis=1))

    # backward pass, propagated layer by layer back to the input
    softmax_grad = softmax.backward(labels)
    linear_grad = linear.backward(softmax_grad)
    flatten_grad = flatten.backward(linear_grad)
    sigmoid2_grad = sigmoid2.backward(flatten_grad)
    conv2_grad = conv2.backward(sigmoid2_grad)
    sigmoid1_grad = sigmoid1.backward(conv2_grad)
    conv1_grad = conv1.backward(sigmoid1_grad)

    return loss, conv1_grad
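The docstring above refers to checking the analytic gradient against a numerical one taken with respect to the input z. Below is a minimal sketch of such a finite-difference check, written with a plain NumPy sigmoid toy loss rather than the custom nn layers (whose weight initialization and exact API are not shown here); the helper name numerical_grad is illustrative only.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def numerical_grad(loss_fn, z, eps=1e-5):
    # central-difference estimate of d(loss)/dz, one entry at a time
    grad = np.zeros_like(z)
    for idx in np.ndindex(z.shape):
        orig = z[idx]
        z[idx] = orig + eps
        plus = loss_fn(z)
        z[idx] = orig - eps
        minus = loss_fn(z)
        z[idx] = orig
        grad[idx] = (plus - minus) / (2 * eps)
    return grad

# toy loss: sum of sigmoid activations; the analytic gradient is s * (1 - s)
z = np.random.randn(2, 3)
s = sigmoid(z)
assert np.allclose(s * (1 - s), numerical_grad(lambda t: np.sum(sigmoid(t)), z), atol=1e-6)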
Example #2
    def test_forwardWrongInputListDimension(self):
        m = M.Sigmoid()

        try:
            m.forward(FloatTensor([[2]]), FloatTensor([[2]]))
        except ValueError:
            return 0

        return 1
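A hedged sketch of the kind of input check this test appears to exercise: a forward that accepts exactly one tensor and raises ValueError otherwise. The real Sigmoid module's signature is not shown here, so this class is purely illustrative.

class ForwardArityCheckSketch:
    # illustrative only: reject calls that pass more than one input tensor
    def forward(self, *inputs):
        if len(inputs) != 1:
            raise ValueError("forward expects exactly one input tensor")
        return inputs[0]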
Example #3
    def test_backwardBeforeForward(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])

        m = M.Sigmoid()

        try:
            output = m.backward(input)
        except:
            return 0

        return 1
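This test expects backward to fail when no forward pass has been run. A common way to get that behaviour is for the module to cache its forward output and refuse to compute a gradient without it; a minimal NumPy sketch of that idea (not the project's actual implementation) is below.

import numpy as np

class SigmoidSketch:
    def __init__(self):
        self._output = None          # set by forward, required by backward

    def forward(self, x):
        self._output = 1.0 / (1.0 + np.exp(-x))
        return self._output

    def backward(self, grad):
        if self._output is None:
            raise RuntimeError("backward called before forward")
        # chain rule: d(sigmoid)/dx = s * (1 - s), applied to the upstream gradient
        return self._output * (1.0 - self._output) * grad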
Example #4
    def test_forwardCorrectOutput(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])
        expected_output = FloatTensor([[0.731058578630005,0.047425873177567,0.731058578630005], \
                                        [0.500000000000000,0.268941421369995,0.880797077977882]])

        m = M.Sigmoid()
        output = m.forward(input)
        if not areEqual(output, expected_output, tol=1e-5):
            return 1

        return 0
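The expected values above are simply the element-wise sigmoid of the input and can be reproduced with plain NumPy:

import numpy as np

x = np.array([[1, -3, 1], [0, -1, 2]], dtype=np.float64)
print(1.0 / (1.0 + np.exp(-x)))
# [[0.73105858 0.04742587 0.73105858]
#  [0.5        0.26894142 0.88079708]]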
Example #5
def sigmoid_layer(x):
    sigmoid = nn.Sigmoid()
    softmax = nn.Softmax()

    a = softmax(sigmoid(x))

    # one-hot labels with class 0 as the target for every sample
    labels = np.zeros(x.shape)
    labels[:, 0] = 1

    # cross-entropy loss and gradient back through softmax and sigmoid
    loss = -np.log(np.sum(a * labels, axis=1))
    softmax_grad = softmax.backward(labels)
    sigmoid_grad = sigmoid.backward(softmax_grad)

    return loss, sigmoid_grad
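For the softmax followed by the cross-entropy loss used in these examples, the gradient with respect to the softmax input reduces to dist - labels when the labels are one-hot. A small standalone NumPy check of that identity (independent of the custom nn.Softmax.backward, whose exact contract is not shown here):

import numpy as np

def softmax(a):
    e = np.exp(a - a.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def ce_loss(a, labels):
    return np.sum(-np.log(np.sum(softmax(a) * labels, axis=1)))

a = np.random.randn(2, 3)
labels = np.zeros_like(a)
labels[:, 0] = 1                      # one-hot target: class 0

analytic = softmax(a) - labels        # closed-form gradient of ce_loss w.r.t. a

# finite-difference check of a single entry
eps = 1e-6
a_pert = a.copy()
a_pert[0, 1] += eps
numeric = (ce_loss(a_pert, labels) - ce_loss(a, labels)) / eps
assert np.isclose(analytic[0, 1], numeric, atol=1e-4)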
Example #6
    def test_backwardCorrectOutput(self):
        input = FloatTensor([[1, -3, 1], [0, -1, 2]])

        sigmoid_value = FloatTensor([[0.731058578630005,0.047425873177567,0.731058578630005], \
                                     [0.500000000000000,0.268941421369995,0.880797077977882]])

        dsigma = sigmoid_value * (1 - sigmoid_value)

        m = M.Sigmoid()
        m.forward(input)
        grad = FloatTensor([[1, 2, 3], [3, 2, 1]])

        output = m.backward(grad)
        output_expected = dsigma * grad

        if not areEqual(output, output_expected):
            return 1

        return 0