Example No. 1
def check_fast_conv(self):
    """Benchmark conv_forward_fast / conv_backward_fast against the naive versions."""
    x = np.random.randn(100, 3, 31, 31)
    w = np.random.randn(25, 3, 3, 3)
    b = np.random.randn(25,)
    dout = np.random.randn(100, 25, 16, 16)
    conv_param = {'stride': 2, 'pad': 1}

    t0 = time()
    out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
    t1 = time()
    out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
    t2 = time()

    print('Testing conv_forward_fast:')
    print('Naive: %fs' % (t1 - t0))
    print('Fast: %fs' % (t2 - t1))
    print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
    print('Difference: ', self.rel_error(out_naive, out_fast))

    t0 = time()
    dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
    t1 = time()
    dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
    t2 = time()

    print('\nTesting conv_backward_fast:')
    print('Naive: %fs' % (t1 - t0))
    print('Fast: %fs' % (t2 - t1))
    print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
    print('dx difference: ', self.rel_error(dx_naive, dx_fast))
    print('dw difference: ', self.rel_error(dw_naive, dw_fast))
    print('db difference: ', self.rel_error(db_naive, db_fast))
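The method above calls a self.rel_error helper that is not shown in this excerpt. A minimal sketch of that helper, matching the standard definition used elsewhere on this page (see Example No. 12):

import numpy as np

def rel_error(x, y):
    """Maximum relative error between two arrays, guarded against division by zero."""
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))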
Example No. 2
def conv_relu_forward(x, w, b, conv_param):
    """
    A convenience layer that performs a convolution followed by a ReLU.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer

    Returns a tuple of:
    - out: Output from the ReLU
    - cache: Object to give to the backward pass
    """
    a, conv_cache = conv_forward_fast(x, w, b, conv_param)
    out, relu_cache = relu_forward(a)
    cache = (conv_cache, relu_cache)
    return out, cache
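The matching backward convenience layer simply unpacks the cache and chains the two backward passes. This is the standard companion function from the CS231n layer_utils module, assuming relu_backward and conv_backward_fast are in scope:

def conv_relu_backward(dout, cache):
    """
    Backward pass for the conv-relu convenience layer.
    """
    conv_cache, relu_cache = cache
    da = relu_backward(dout, relu_cache)
    dx, dw, db = conv_backward_fast(da, conv_cache)
    return dx, dw, db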
Example No. 4
def blur_image(X):
  """
  A very gentle image blurring operation, to be used as a regularizer for image
  generation.
  
  Inputs:
  - X: Image data of shape (N, 3, H, W)
  
  Returns:
  - X_blur: Blurred version of X, of shape (N, 3, H, W)
  """
  w_blur = np.zeros((3, 3, 3, 3))
  b_blur = np.zeros(3)
  blur_param = {'stride': 1, 'pad': 1}
  for i in range(3):
    w_blur[i, i] = np.asarray([[1, 2, 1], [2, 188, 2], [1, 2, 1]], dtype=np.float32)
  w_blur /= 200.0
  return conv_forward_fast(X, w_blur, b_blur, blur_param)[0]
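Each per-channel kernel sums to 200 before the division, so the normalized filter sums to 1 with 188/200 = 0.94 of the weight on the center pixel, which is what makes the blur "very gentle". A hypothetical usage sketch (shapes are made up for illustration):

X = np.random.randn(4, 3, 32, 32)  # a batch of 4 RGB 32x32 images
X_blur = blur_image(X)
assert X_blur.shape == X.shape     # a 3x3 kernel with stride 1 and pad 1 preserves H and W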
Example No. 5
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
    """
    Convenience layer that performs a convolution, a ReLU, and a pool.

    Inputs:
    - x: Input to the convolutional layer
    - w, b, conv_param: Weights and parameters for the convolutional layer
    - pool_param: Parameters for the pooling layer

    Returns a tuple of:
    - out: Output from the pooling layer
    - cache: Object to give to the backward pass
    """
    a, conv_cache = conv_forward_fast(x, w, b, conv_param)
    s, relu_cache = relu_forward(a)
    out, pool_cache = max_pool_forward_fast(s, pool_param)
    cache = (conv_cache, relu_cache, pool_cache)
    return out, cache
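Its companion backward layer reverses the three stages in order, again following the standard CS231n layer_utils pattern (max_pool_backward_fast, relu_backward, and conv_backward_fast are assumed to be in scope):

def conv_relu_pool_backward(dout, cache):
    """
    Backward pass for the conv-relu-pool convenience layer.
    """
    conv_cache, relu_cache, pool_cache = cache
    ds = max_pool_backward_fast(dout, pool_cache)
    da = relu_backward(ds, relu_cache)
    dx, dw, db = conv_backward_fast(da, conv_cache)
    return dx, dw, db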
Example No. 8
def blur_image(X):
    """
    模糊操作,用于图像生成
   

    Inputs:
    - X: 图像,形状是 (N, 3, H, W)

    Returns:
    - X_blur: 模糊的 X, 形状是 (N, 3, H, W)
    """
    from cs231n.fast_layers import conv_forward_fast
    w_blur = np.zeros((3, 3, 3, 3))
    b_blur = np.zeros(3)
    blur_param = {'stride': 1, 'pad': 1}
    for i in range(3):
        w_blur[i, i] = np.asarray([[1, 2, 1], [2, 188, 2], [1, 2, 1]],
                                  dtype=np.float32)
    w_blur /= 200.0
    return conv_forward_fast(X, w_blur, b_blur, blur_param)[0]
Example No. 9
# Compare the naive and fast convolution implementations by running the following.
# Rel errors should be around e-9 or less.
import numpy as np
from cs231n.layers import conv_forward_naive, conv_backward_naive
from cs231n.fast_layers import conv_forward_fast, conv_backward_fast
from time import time

def rel_error(x, y):
    """Relative error helper, as defined in Example No. 12 below."""
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))

np.random.seed(231)
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25, )
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}

t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()

print('Testing conv_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('Difference: ', rel_error(out_naive, out_fast))

t0 = time()
dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
t1 = time()
dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
t2 = time()

print('\nTesting conv_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
print('dw difference: ', rel_error(dw_naive, dw_fast))
print('db difference: ', rel_error(db_naive, db_fast))
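In the CS231n codebase, conv_forward_fast gets its speedup from an im2col transformation: every receptive field is unrolled into a column of a matrix, so the whole convolution reduces to one large matrix multiply. The sketch below illustrates the idea with plain NumPy loops; it is not the actual cs231n implementation, which uses a Cython im2col:

import numpy as np

def conv_forward_im2col_sketch(x, w, b, conv_param):
    """Simplified im2col convolution: unroll receptive fields into columns,
    then compute the convolution as a single matrix multiply."""
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']
    H_out = (H + 2 * pad - HH) // stride + 1
    W_out = (W + 2 * pad - WW) // stride + 1

    x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    cols = np.empty((C * HH * WW, N * H_out * W_out))
    idx = 0
    for n in range(N):
        for i in range(H_out):
            for j in range(W_out):
                patch = x_pad[n, :, i*stride:i*stride+HH, j*stride:j*stride+WW]
                cols[:, idx] = patch.reshape(-1)
                idx += 1

    out = w.reshape(F, -1) @ cols + b.reshape(F, 1)  # (F, N*H_out*W_out)
    return out.reshape(F, N, H_out, W_out).transpose(1, 0, 2, 3)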
Example No. 10
def _forward(self, x):
    # Look up this layer's weights and biases in a module-level params dict
    global params
    return conv_forward_fast(x, params[self.n('w')], params[self.n('b')],
                             self.conv_param)
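This excerpt assumes a module-level params dict and a self.n() helper that qualifies parameter names per layer; neither is shown in the source. A hypothetical sketch of that surrounding context (all names here are assumptions, not the source's API):

params = {}  # hypothetical module-level parameter store

class ConvLayer(object):  # hypothetical wrapper around the _forward method above
    def __init__(self, name, conv_param):
        self.name = name
        self.conv_param = conv_param

    def n(self, suffix):
        # e.g. ConvLayer('conv1', ...).n('w') -> 'conv1_w'
        return self.name + '_' + suffix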
Example No. 11
    def loss(self, X, y=None):
        """
        Evaluate loss and gradient for the three-layer convolutional network.

        Input / output: same API as TwoLayerNet in fc_net.py.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        W3, b3 = self.params['W3'], self.params['b3']

        # pass conv_param to the forward pass for the convolutional layer
        filter_size = W1.shape[2]
        conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}

        # pass pool_param to the forward pass for the max-pooling layer
        pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

        # Forward pass: conv - relu - 2x2 max pool - affine - relu - affine - softmax
        convout, self.cache['Input'] = conv_forward_fast(X, W1, b1, conv_param)
        relu1, self.cache['relu1'] = relu_forward(convout)
        maxpoolout, self.cache['maxpool'] = max_pool_forward_fast(relu1, pool_param)
        affineout1, self.cache['affine1'] = affine_forward(maxpoolout, W2, b2)
        reluout2, self.cache['relu2'] = relu_forward(affineout1)
        affineout2, self.cache['affine2'] = affine_forward(reluout2, W3, b3)
        scores = affineout2

        if y is None:
            return scores

        grads = {}
        # Data loss via softmax, plus L2 regularization on the weights
        loss, dscores = softmax_loss(affineout2, y)
        loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))

        # Backward pass: walk the layers in reverse, reusing the cached inputs
        dx3, grads['W3'], grads['b3'] = affine_backward(dscores, self.cache['affine2'])
        drelu2 = relu_backward(dx3, self.cache['relu2'])
        dx2, grads['W2'], grads['b2'] = affine_backward(drelu2, self.cache['affine1'])
        dpool = max_pool_backward_fast(dx2, self.cache['maxpool'])
        drelu1 = relu_backward(dpool, self.cache['relu1'])
        _, grads['W1'], grads['b1'] = conv_backward_fast(drelu1, self.cache['Input'])

        # L2 regularization gradients
        grads['W1'] += self.reg * W1
        grads['W2'] += self.reg * W2
        grads['W3'] += self.reg * W3

        return loss, grads
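Once loss() returns both the loss and the gradients, the usual sanity check in this codebase is a numeric gradient check. A minimal sketch, assuming the enclosing class is the assignment's ThreeLayerConvNet (with a self.reg attribute and an empty self.cache dict created in its constructor), and that the cs231n.gradient_check helper and the rel_error function from Example No. 12 are available:

from cs231n.gradient_check import eval_numerical_gradient

np.random.seed(231)
X = np.random.randn(2, 3, 16, 16)
y = np.random.randint(10, size=2)

model = ThreeLayerConvNet(input_dim=(3, 16, 16), num_filters=3,
                          filter_size=3, hidden_dim=7)
loss, grads = model.loss(X, y)
for name in sorted(grads):
    f = lambda _: model.loss(X, y)[0]
    num_grad = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-6)
    print('%s max relative error: %e' % (name, rel_error(num_grad, grads[name])))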
Example No. 12
  """ returns relative error """
  return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))

from cs231n.fast_layers import conv_forward_fast, conv_backward_fast
from time import time
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25,)
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}


t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()

print('Testing conv_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('Difference: ', rel_error(out_naive, out_fast))

t0 = time()
dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
t1 = time()
dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
t2 = time()

print('\nTesting conv_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
print('dw difference: ', rel_error(dw_naive, dw_fast))
print('db difference: ', rel_error(db_naive, db_fast))