Example #1
    def test_numeric_gradient(self):
        # Vector valued function
        def vec_valued(X):
            # return np.array([(X[0]*X[1]), (X[0]+X[1])])
            return np.array([(X[0] * X[1]), (X[0] + X[1])], dtype=np.float32)

        # Gradient
        grad = evaluate_gradient(vec_valued, np.array([2, 3],
                                                      dtype=np.float32))
        sys.stderr.write(str(grad))
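Every example on this page calls evaluate_gradient, which is not shown here. For reference, below is a minimal central-difference sketch, under the assumption that the helper returns the gradient of np.sum(f(x)) with respect to x and perturbs x in place; the signature mirrors the calls in these tests, but the body is an inferred sketch, not the project's actual implementation.

    import numpy as np

    def evaluate_gradient(f, x, h=1e-5):
        # Hypothetical reimplementation: central-difference estimate of the
        # gradient of np.sum(f(x)) with respect to each entry of x.
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=["multi_index"])
        while not it.finished:
            idx = it.multi_index
            old = x[idx]
            x[idx] = old + h
            f_plus = np.sum(f(x))
            x[idx] = old - h
            f_minus = np.sum(f(x))
            x[idx] = old                              # restore the original entry
            grad[idx] = (f_plus - f_minus) / (2 * h)
            it.iternext()
        return grad

Under that assumption, Example #1 would print [4., 3.] for the input [2, 3], since the gradient of x0*x1 + (x0 + x1) is [x1 + 1, x0 + 1].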
Example #2
    def test_backward_vec(self):
        s = np.zeros(10)
        s[1] = 10
        result = self.model.forward_vec(s, 1)

        # Compare with Numeric Gradient
        up_grad = 1
        grad_a = self.model.backward_vec(up_grad)
        soft = lambda x: self.model.forward_vec(x, 1)
        grad_num = evaluate_gradient(soft, s)
        np.testing.assert_almost_equal(grad_a, grad_num)
Example #3
    def test_backward(self):
        data = np.abs(np.random.randn(10))
        # data = np.array([0.5, 0.35, 0.0015])
        forward = self.model.forward(data)
        up_grad = np.random.randn(forward.shape[0])
        result = self.model.backward(up_grad)
        # Assertion against the closed-form gradient
        np.testing.assert_almost_equal(result, (-1 / data) * up_grad)
        # Compare with numeric gradient; func folds in the upstream gradient
        func = lambda x: self.model.forward(x) * up_grad
        grad_num = evaluate_gradient(func, data, h=1e-3)
        np.testing.assert_almost_equal(result, grad_num, decimal=3)
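The assertion result == (-1 / data) * up_grad in Example #3 pins down the layer under test: an elementwise negative natural log. A minimal sketch consistent with that test (the class body is inferred from the assertion, not taken from the repository):

    import numpy as np

    class NegNatLog:
        # Elementwise f(x) = -ln(x); backward chains the local derivative -1/x
        # with the upstream gradient.
        def forward(self, x):
            self.x = x                # cache the input for the backward pass
            return -np.log(x)

        def backward(self, up_grad):
            return (-1.0 / self.x) * up_grad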
Example #4
    def test_backward(self):
        scores = np.array([[-2.85, 0.86, 0.28], [-2.85, 1.86, 0.28]]).T
        labels = np.array([2, 2])
        forward = self.model.forward(scores, labels)
        up_grad = np.random.randn(*forward.shape)
        result = self.model.backward(up_grad)

        func = lambda x: self.model.forward(x, labels)
        exp_result = evaluate_gradient(func, scores) * up_grad
        sys.stderr.write(str(result.shape))
        # Assertion
        np.testing.assert_almost_equal(result, exp_result)
Example #5
    def test_backward(self):
        s = np.zeros((10, 100))
        s[1, :] = 10
        win_idx = np.ones(100, dtype="int64")
        result = self.model.forward(s, win_idx)
        up_grad = np.random.randn(*result.shape)

        # Compare with Numeric Gradient
        soft = lambda x: self.model.forward(x, win_idx) * up_grad
        grad_a = self.model.backward(up_grad)
        grad_num = evaluate_gradient(soft, s)
        np.testing.assert_almost_equal(grad_a, grad_num)
Example #6
    def test_backward(self):
        data = np.array([[56, 231, 24, 2], [0, 210, 240, 20]]).T
        # Run forward pass to store data in FCLayer object
        self.model.forward(data)
        up_grad = np.random.randn(self.model.W_b.shape[0], data.shape[1])
        # up_grad = np.ones((self.model.W_b.shape[0], data.shape[1]))
        result = self.model.backward(up_grad)

        def func(x):
            self.model = FCLayer(x)
            return self.model.forward(data) * up_grad

        exp_result = evaluate_gradient(func, self.model.W_b)

        # Assertion
        # sys.stderr.write(str(result))
        # sys.stderr.write(str(exp_result))
        np.testing.assert_almost_equal(result, exp_result)
    def test_backward_multiclass_svm(self):
        data = np.array([[-1, 29, 14, -60], [-15, 22, -44, 56]]).T
        labels = np.array([2, 0])
        # Randomize weights
        self.W_b = np.random.randn(*self.W_b.shape) * 0.01

        multi_svm_loss = MulticlassSVMLoss()
        fc_layer = FCLayer(self.W_b)
        # Forward pass
        L_i = multi_svm_loss.forward(fc_layer.forward(data), labels)
        Loss = np.sum(L_i)
        # Backprop
        up_grad = np.random.randn(*L_i.shape)
        result = fc_layer.backward(multi_svm_loss.backward(up_grad))

        # Numeric gradient
        def func(x):
            fc_layer = FCLayer(x)
            return multi_svm_loss.forward(fc_layer.forward(data),
                                          labels) * up_grad

        exp_result = evaluate_gradient(func, self.W_b)
        # Assertion
        np.testing.assert_almost_equal(result, exp_result)
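MulticlassSVMLoss is exercised by test_backward_multiclass_svm above but is not shown on this page. A minimal sketch, assuming it implements the standard per-sample hinge loss over a (num_classes, num_samples) score matrix; the class body below is an assumption, not the repository's code.

    import numpy as np

    class MulticlassSVMLoss:
        # Hypothetical sketch: L_i = sum over j != y_i of max(0, s_j - s_{y_i} + 1),
        # with scores laid out as (num_classes, num_samples) as in the tests.
        def forward(self, scores, labels):
            n = scores.shape[1]
            self.labels = labels
            correct = scores[labels, np.arange(n)]    # s_{y_i} for each sample
            self.margins = np.maximum(0, scores - correct + 1)
            self.margins[labels, np.arange(n)] = 0    # drop the correct-class term
            return np.sum(self.margins, axis=0)       # per-sample losses L_i

        def backward(self, up_grad):
            n = self.margins.shape[1]
            mask = (self.margins > 0).astype(float)   # +1 for every active margin
            mask[self.labels, np.arange(n)] = -np.sum(mask, axis=0)
            return mask * up_grad                     # broadcast upstream grad per sample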
    def test_backward(self):
        data = np.array([[-1, 29, 14, -60], [-15, 22, -44, 56]]).T
        labels = np.array([2, 0])
        # Randomize weights
        self.W_b = np.random.randn(*self.W_b.shape) * 0.01
        softmax = Softmax()
        neg_nat_log = NegNatLog()
        fc_layer = FCLayer(self.W_b)
        for idx in range(1):
            # Analytic gradient
            # Forward pass
            scores = fc_layer.forward(data)
            smx = softmax.forward(scores, labels)
            # sys.stderr.write(str(scores.shape))
            L_i = neg_nat_log.forward(smx)
            L = np.sum(L_i) / L_i.shape[0]
            sys.stderr.write(str(L) + "\n")
            # Backprop
            up_grad = np.ones(data.shape[1]) / data.shape[1]
            ana_grad = fc_layer.backward(
                softmax.backward(neg_nat_log.backward(up_grad)))

            def func(W_b):
                # Note: the W_b argument is not used directly; the check appears
                # to rely on evaluate_gradient perturbing self.W_b in place,
                # which fc_layer shares by reference.
                # Loss per data sample
                L_i = neg_nat_log.forward(
                    softmax.forward(fc_layer.forward(data), labels))
                # Total Loss
                return np.sum(L_i) / L_i.shape[0]

            # Numeric gradient
            num_grad = evaluate_gradient(func, self.W_b)

            # Assertion
            np.testing.assert_almost_equal(ana_grad, num_grad, decimal=4)
            # Gradient descent step on the weights
            self.W_b += -ana_grad * 1e-4