Example #1
def test_affine_relu(self):
    x = np.random.randn(2, 3, 4)
    w = np.random.randn(12, 10)
    b = np.random.randn(10)
    dout = np.random.randn(2, 10)

    out, cache = affine_relu_forward(x, w, b)
    dx, dw, db = affine_relu_backward(dout, cache)

    dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
    dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
    db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)

    print('Testing affine_relu_forward and affine_relu_backward:')
    print('dx error:', self.rel_error(dx_num, dx))
    print('dw error:', self.rel_error(dw_num, dw))
    print('db error:', self.rel_error(db_num, db))
    return
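All of the tests in this listing assume two helpers from CS231n's gradient-check utilities: rel_error and eval_numerical_gradient_array. A minimal sketch of both, matching the call signatures used above (shown as free functions; the tests call rel_error as self.rel_error, presumably a thin wrapper on the test class):

import numpy as np

def rel_error(x, y):
    # Maximum relative error, with a small floor to avoid division by zero.
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))

def eval_numerical_gradient_array(f, x, df, h=1e-5):
    # Numeric gradient of sum(f(x) * df) w.r.t. x via centered differences.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index
        oldval = x[ix]
        x[ix] = oldval + h   # perturb one entry up
        pos = f(x).copy()
        x[ix] = oldval - h   # and down
        neg = f(x).copy()
        x[ix] = oldval       # restore
        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad

Note that f is applied to the array perturbed in place, which is why the lambdas in these tests can close over x, w, and b directly.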
Example #2
def check_conv_naive_backward(self):
    x = np.random.randn(4, 3, 5, 5)
    w = np.random.randn(2, 3, 3, 3)
    b = np.random.randn(2,)
    dout = np.random.randn(4, 2, 5, 5)
    conv_param = {'stride': 1, 'pad': 1}

    dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
    dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout)
    db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout)

    out, cache = conv_forward_naive(x, w, b, conv_param)
    dx, dw, db = conv_backward_naive(dout, cache)

    # Your errors should be around 1e-9
    print('Testing conv_backward_naive function')
    print('dx error:', self.rel_error(dx, dx_num))
    print('dw error:', self.rel_error(dw, dw_num))
    print('db error:', self.rel_error(db, db_num))
    return
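The conv_forward_naive under test isn't shown in this listing; for context, here is a straightforward sketch consistent with the NCHW shapes and the conv_param keys used above. This is an assumed reference implementation, not necessarily the assignment's exact code:

def conv_forward_naive(x, w, b, conv_param):
    # x: (N, C, H, W), w: (F, C, HH, WW), b: (F,)
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']
    H_out = 1 + (H + 2 * pad - HH) // stride
    W_out = 1 + (W + 2 * pad - WW) // stride
    x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    out = np.zeros((N, F, H_out, W_out))
    for n in range(N):
        for f in range(F):
            for i in range(H_out):
                for j in range(W_out):
                    window = x_pad[n, :, i * stride:i * stride + HH, j * stride:j * stride + WW]
                    out[n, f, i, j] = np.sum(window * w[f]) + b[f]
    cache = (x, w, b, conv_param)
    return out, cache

With stride 1 and pad 1 on 5x5 inputs and 3x3 filters, the output stays 5x5, which is why dout above has shape (4, 2, 5, 5).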
Example #3
def test_affine_backward(self):
    # Test the affine_backward function

    x = np.random.randn(10, 2, 3)
    w = np.random.randn(6, 5)
    b = np.random.randn(5)
    dout = np.random.randn(10, 5)

    dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
    dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
    db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)

    _, cache = affine_forward(x, w, b)
    dx, dw, db = affine_backward(dout, cache)

    # The error should be around 1e-10
    print('Testing affine_backward function:')
    print('dx error:', self.rel_error(dx_num, dx))
    print('dw error:', self.rel_error(dw_num, dw))
    print('db error:', self.rel_error(db_num, db))
    return
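For reference, the affine pair this check exercises reduces to one matrix multiply each way. A minimal sketch, assuming affine_forward flattens each input to a row before applying w and b (which is why x of shape (10, 2, 3) pairs with w of shape (6, 5)):

def affine_forward(x, w, b):
    # Flatten each example to a row vector, then apply the linear map.
    out = x.reshape(x.shape[0], -1).dot(w) + b
    cache = (x, w, b)
    return out, cache

def affine_backward(dout, cache):
    x, w, b = cache
    dx = dout.dot(w.T).reshape(x.shape)            # un-flatten back to x's shape
    dw = x.reshape(x.shape[0], -1).T.dot(dout)
    db = dout.sum(axis=0)
    return dx, dw, db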
Example #4
def check_dropout_backward(self):
    x = np.random.randn(10, 10) + 10
    dout = np.random.randn(*x.shape)

    dropout_param = {'mode': 'train', 'p': 0.8, 'seed': 123}
    out, cache = dropout_forward(x, dropout_param)
    dx = dropout_backward(dout, cache)
    dx_num = eval_numerical_gradient_array(
        lambda xx: dropout_forward(xx, dropout_param)[0], x, dout)

    print('dx relative error:', self.rel_error(dx, dx_num))
    return
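The fixed 'seed' in dropout_param is what makes this numeric check meaningful: dropout_forward is expected to reseed the RNG on every call, so the same mask is drawn for each perturbed evaluation. A minimal inverted-dropout sketch under that assumption; note that p is treated here as the keep probability, while some versions of the assignment define p as the drop probability instead:

def dropout_forward(x, dropout_param):
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])  # same mask every call -> stable numeric gradient
    if mode == 'train':
        mask = (np.random.rand(*x.shape) < p) / p  # inverted dropout: scale at train time
        out = x * mask
    else:
        mask = None
        out = x
    cache = (dropout_param, mask)
    return out, cache

def dropout_backward(dout, cache):
    dropout_param, mask = cache
    if dropout_param['mode'] == 'train':
        return dout * mask   # gradient flows only through kept units
    return dout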
Example #5
def check_conv_relu(self):
    from assignment2.cs231n.layer_utils import conv_relu_forward, conv_relu_backward

    x = np.random.randn(2, 3, 8, 8)
    w = np.random.randn(3, 3, 3, 3)
    b = np.random.randn(3,)
    dout = np.random.randn(2, 3, 8, 8)
    conv_param = {'stride': 1, 'pad': 1}

    out, cache = conv_relu_forward(x, w, b, conv_param)
    dx, dw, db = conv_relu_backward(dout, cache)

    dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout)
    dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout)
    db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout)

    print('Testing conv_relu:')
    print('dx error:', self.rel_error(dx_num, dx))
    print('dw error:', self.rel_error(dw_num, dw))
    print('db error:', self.rel_error(db_num, db))
    return
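conv_relu_forward and conv_relu_backward from layer_utils are just compositions of the conv and ReLU primitives, with both caches threaded through. A sketch of that pattern, built here on the naive conv from Example #2 and the ReLU pair sketched after Example #6 (the assignment's version typically wraps a faster conv instead):

def conv_relu_forward(x, w, b, conv_param):
    a, conv_cache = conv_forward_naive(x, w, b, conv_param)
    out, relu_cache = relu_forward(a)
    return out, (conv_cache, relu_cache)

def conv_relu_backward(dout, cache):
    # Unwind the composition in reverse order.
    conv_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)
    dx, dw, db = conv_backward_naive(da, conv_cache)
    return dx, dw, db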
Example #6
def test_relu_backward(self):
    x = np.random.randn(10, 10)
    dout = np.random.randn(*x.shape)

    dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)

    _, cache = relu_forward(x)
    dx = relu_backward(dout, cache)

    # The error should be around 1e-12
    print('Testing relu_backward function:')
    print('dx error:', self.rel_error(dx_num, dx))
    return
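For reference, a minimal relu_forward/relu_backward consistent with this check: the cache is just the input, and the gradient passes through only where the input was positive.

def relu_forward(x):
    out = np.maximum(0, x)
    cache = x
    return out, cache

def relu_backward(dout, cache):
    x = cache
    dx = dout * (x > 0)   # gradient is zeroed wherever the unit was inactive
    return dx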
Example #7
def check_spatial_batch_norm_backward(self):
    N, C, H, W = 2, 3, 4, 5
    x = 5 * np.random.randn(N, C, H, W) + 12
    gamma = np.random.randn(C)
    beta = np.random.randn(C)
    dout = np.random.randn(N, C, H, W)

    bn_param = {'mode': 'train'}
    fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
    fg = lambda g: spatial_batchnorm_forward(x, g, beta, bn_param)[0]
    fb = lambda b: spatial_batchnorm_forward(x, gamma, b, bn_param)[0]

    dx_num = eval_numerical_gradient_array(fx, x, dout)
    dgamma_num = eval_numerical_gradient_array(fg, gamma, dout)
    dbeta_num = eval_numerical_gradient_array(fb, beta, dout)

    _, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param)
    dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache)
    print('dx error:', self.rel_error(dx_num, dx))
    print('dgamma error:', self.rel_error(dgamma_num, dgamma))
    print('dbeta error:', self.rel_error(dbeta_num, dbeta))
    return
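spatial_batchnorm_forward is commonly implemented by folding the spatial dimensions into the batch, so that each of the N*H*W positions counts as one sample per channel, and then reusing the vanilla batchnorm pair from Example #9 below. A sketch under that assumption:

def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    N, C, H, W = x.shape
    # Move channels last and flatten: (N, C, H, W) -> (N*H*W, C).
    x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
    out_flat, cache = batchnorm_forward(x_flat, gamma, beta, bn_param)
    out = out_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return out, cache

def spatial_batchnorm_backward(dout, cache):
    N, C, H, W = dout.shape
    dout_flat = dout.transpose(0, 2, 3, 1).reshape(-1, C)
    dx_flat, dgamma, dbeta = batchnorm_backward(dout_flat, cache)
    dx = dx_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return dx, dgamma, dbeta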
Example #8
def check_max_pooling_naive_backward(self):
    x = np.random.randn(3, 2, 8, 8)
    dout = np.random.randn(3, 2, 4, 4)
    pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

    dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout)

    out, cache = max_pool_forward_naive(x, pool_param)
    dx = max_pool_backward_naive(dout, cache)

    # Your error should be around 1e-12
    print('Testing max_pool_backward_naive function:')
    print('dx error:', self.rel_error(dx, dx_num))
    return
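For reference, a naive max-pooling pair consistent with the shapes above. The backward pass routes each upstream gradient to the arg-max of its window; with randomly drawn inputs, ties within a window have probability zero, so the check stays clean:

def max_pool_forward_naive(x, pool_param):
    N, C, H, W = x.shape
    ph, pw = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']
    H_out = 1 + (H - ph) // stride
    W_out = 1 + (W - pw) // stride
    out = np.zeros((N, C, H_out, W_out))
    for i in range(H_out):
        for j in range(W_out):
            window = x[:, :, i * stride:i * stride + ph, j * stride:j * stride + pw]
            out[:, :, i, j] = window.max(axis=(2, 3))
    return out, (x, pool_param)

def max_pool_backward_naive(dout, cache):
    x, pool_param = cache
    ph, pw = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']
    H_out, W_out = dout.shape[2], dout.shape[3]
    dx = np.zeros_like(x)
    for i in range(H_out):
        for j in range(W_out):
            window = x[:, :, i * stride:i * stride + ph, j * stride:j * stride + pw]
            mask = window == window.max(axis=(2, 3), keepdims=True)  # arg-max of each window
            dx[:, :, i * stride:i * stride + ph, j * stride:j * stride + pw] += \
                mask * dout[:, :, i, j][:, :, None, None]
    return dx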
Example #9
def check_batchnorm_backward(self):
    # Gradient check batchnorm backward pass

    N, D = 4, 5
    x = 5 * np.random.randn(N, D) + 12
    gamma = np.random.randn(D)
    beta = np.random.randn(D)
    dout = np.random.randn(N, D)

    bn_param = {'mode': 'train'}
    fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
    fg = lambda g: batchnorm_forward(x, g, beta, bn_param)[0]
    fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]

    dx_num = eval_numerical_gradient_array(fx, x, dout)
    dgamma_num = eval_numerical_gradient_array(fg, gamma, dout)
    dbeta_num = eval_numerical_gradient_array(fb, beta, dout)

    _, cache = batchnorm_forward(x, gamma, beta, bn_param)
    dx, dgamma, dbeta = batchnorm_backward(dout, cache)
    print('dx error:', self.rel_error(dx_num, dx))
    print('dgamma error:', self.rel_error(dgamma_num, dgamma))
    print('dbeta error:', self.rel_error(dbeta_num, dbeta))
    return
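Finally, a train-mode-only sketch of the batchnorm pair being checked; the assignment's batchnorm_forward also maintains running statistics for test mode, omitted here. The backward pass uses the usual simplified closed form of the chain rule through the batch mean and variance:

def batchnorm_forward(x, gamma, beta, bn_param):
    # Train-mode only: normalize by the current batch statistics.
    eps = bn_param.get('eps', 1e-5)
    mu = x.mean(axis=0)
    var = x.var(axis=0)
    x_hat = (x - mu) / np.sqrt(var + eps)
    out = gamma * x_hat + beta
    cache = (x_hat, gamma, var, eps)
    return out, cache

def batchnorm_backward(dout, cache):
    x_hat, gamma, var, eps = cache
    N = dout.shape[0]
    dbeta = dout.sum(axis=0)
    dgamma = (dout * x_hat).sum(axis=0)
    dx_hat = dout * gamma
    inv_std = 1.0 / np.sqrt(var + eps)
    # Collapsed chain rule through mean and variance of the batch.
    dx = inv_std / N * (N * dx_hat - dx_hat.sum(axis=0) - x_hat * (dx_hat * x_hat).sum(axis=0))
    return dx, dgamma, dbeta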