Code example #1
File: layers.py Project: ykwon0407/cs231_2015
def max_pool_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a max pooling layer.

  Inputs:
  - dout: Upstream derivatives
  - cache: A tuple of (x, pool_param) as in the forward pass.

  Returns:
  - dx: Gradient with respect to x
  """
  dx = None
  x, pool_param = cache
  pool_height, pool_width, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
  N, C, H, W = x.shape
  #############################################################################
  # TODO: Implement the max pooling backward pass                             #
  #############################################################################
  assert (H - pool_height) % stride == 0, 'Invalid height'
  assert (W - pool_width) % stride == 0, 'Invalid width'

  x_shape = x.reshape(N*C,1,H,W)
  x_col = im2col.im2col_indices(x_shape, field_height=pool_height, field_width=pool_width, padding=0, stride=stride)
  x_arg = np.argmax(x_col, axis=0)  # index of the max within each pooling window

  dx = np.zeros((pool_height*pool_width, len(x_arg)))
  dx[x_arg, range(len(x_arg))] = dout.transpose(2,3,0,1).reshape(-1)
  dx = im2col.col2im_indices(dx, (N * C, 1, H, W), pool_height, pool_width, padding=0, stride=stride)
  dx = dx.reshape(x.shape)

  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################
  return dx
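
All of the snippets on this page lean on the cs231n-style im2col helper module (im2col_indices / col2im_indices), imported either as a module (im2col.im2col_indices) or directly. For reference, a minimal sketch of that module, adapted from the cs231n assignment code, is shown below; individual projects deviate slightly (for instance, the verbose keyword passed in examples #5, #8 and #9 is project-specific), so treat these exact signatures as an assumption rather than canon.

import numpy as np

def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # Index arrays that map each (channel, row, col) entry of every receptive
    # field to its position in the padded input.
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0, 'Invalid height'
    assert (W + 2 * padding - field_width) % stride == 0, 'Invalid width'
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1

    i0 = np.tile(np.repeat(np.arange(field_height), field_width), C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
    return (k, i, j)

def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    # Unfold (N, C, H, W) into a (C*field_height*field_width, L) column matrix.
    p = padding
    x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
    k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)
    cols = x_padded[:, k, i, j]
    C = x.shape[1]
    return cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)

def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1, stride=1):
    # Inverse of im2col_indices: scatter-add columns back into (N, C, H, W).
    N, C, H, W = x_shape
    H_padded, W_padded = H + 2 * padding, W + 2 * padding
    x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
    k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding, stride)
    cols_reshaped = cols.reshape(C * field_height * field_width, -1, N).transpose(2, 0, 1)
    np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
    if padding == 0:
        return x_padded
    return x_padded[:, :, padding:-padding, padding:-padding]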
Code example #2
    def backward(self, X, grad):
        N, W_out, H_out, D_out = grad.shape
        N, W_in, H_in, D_in = X.shape

        # This implementation is channels-last (N, W, H, D): move the depth
        # axis to where the im2col utility expects it (NCHW-style).

        grad = np.rollaxis(grad, 3, 1)
        grad = np.rollaxis(grad, 0, 4)  # D_out X W_out X H_out X N

        X = np.rollaxis(X, 3, 1)
        a_columnar = im2col_indices(X, self.field, self.field, self.padding,
                                    self.stride)  #[FFD X W_out*H_out*N]

        grad = grad.reshape(D_out, N * W_out * H_out)

        dWeight = np.dot(grad, a_columnar.T)  #[ D_out X FFD ]
        dBias = np.sum(grad, axis=1)  #D_out
        dActivation = np.dot(grad.T, self.weights).T  #[FFD X W_out*H_out*N]
        dActivation = col2im_indices(dActivation, (N, D_in, W_in, H_in),
                                     self.field, self.field, self.padding,
                                     self.stride)  # N X D_in X W_in X H_in

        #Move D axis to end
        dX = np.rollaxis(dActivation, 1, 4)  #N X W_in X H_in X D_in

        self.dw = dWeight
        self.db = dBias

        return dX
Code example #3
File: layers.py Project: Tang7/cnn231
def conv_backward_naive(dout, cache):
    """
  A naive implementation of the backward pass for a convolutional layer.

  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """
    dx, dw, db = None, None, None
    #############################################################################
    # TODO: Implement the convolutional backward pass.                          #
    #############################################################################
    x, w, b, conv_param, x_cols = cache
    stride, pad = conv_param["stride"], conv_param["pad"]

    db = np.sum(dout, axis=(0, 2, 3))

    num_filters, _, filter_height, filter_width = w.shape
    dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
    dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)

    dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
    dx = im2col.col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return dx, dw, db
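
A quick way to sanity-check any of these backward passes is a central-difference numerical gradient. The helper below is our own sketch (num_grad and its signature are not part of any of the projects above); it approximates the gradient of sum(f(x) * dout) with respect to x, which is exactly the quantity the backward passes compute.

import numpy as np

def num_grad(f, x, dout, h=1e-5):
    # Central-difference approximation of d(sum(f(x) * dout)) / dx.
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        pos = f(x).copy()
        x[ix] = old - h
        neg = f(x).copy()
        x[ix] = old
        grad[ix] = np.sum((pos - neg) * dout) / (2 * h)
        it.iternext()
    return grad

# Usage sketch (conv_forward is assumed to be the matching forward pass):
#   dx_num = num_grad(lambda x_: conv_forward(x_, w, b, conv_param)[0], x, dout)
#   print(np.max(np.abs(dx - dx_num)))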
Code example #4
 def backward(self, x, dy):
     N, c, h, w = x.shape
     xshaped = x.reshape(N * c, 1, h, w)
     xcol = im2col.im2col_indices(xshaped, self.kernel_h, self.kernel_w,
                                  self.pad, self.stride)
     dxcol = np.zeros_like(xcol)
     dy = dy.transpose(2, 3, 0, 1).ravel()
     dxcol[self.params['max_x'], range(self.params['max_x'].size)] = dy
     dx = im2col.col2im_indices(dxcol, (N * c, 1, h, w), self.kernel_h,
                                self.kernel_w, self.pad, self.stride)
     dx = dx.reshape(x.shape)
     return dx
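
Example #4 assumes self.params['max_x'] was cached by the forward pass. For context, the matching forward-pass bookkeeping would look roughly like this (our sketch, under the same im2col conventions; h_out and w_out stand for the pooled output dimensions and are assumptions here):

# Hypothetical forward-pass counterpart that caches the argmax indices.
N, c, h, w = x.shape
xshaped = x.reshape(N * c, 1, h, w)
xcol = im2col.im2col_indices(xshaped, self.kernel_h, self.kernel_w,
                             self.pad, self.stride)
self.params['max_x'] = np.argmax(xcol, axis=0)
out = xcol[self.params['max_x'], range(xcol.shape[1])]
out = out.reshape(h_out, w_out, N, c).transpose(2, 3, 0, 1)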
Code example #5
File: layers.py Project: napster8104/courses
def conv_backward_naive(dout, cache, debug=False):
    """
  卷积层的反向传播实现

  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """

    dx, dw, db = None, None, None

    x, w, b, conv_param, x_cols = cache
    stride, pad = conv_param['stride'], conv_param['pad']

    db = np.sum(dout, axis=(0, 2, 3))
    F, _, HH, WW = w.shape

    dout_reshape = np.reshape(dout.transpose(1, 2, 3, 0), (F, -1))

    dw = dout_reshape.dot(x_cols.T).reshape(w.shape)

    dx_cols = w.reshape(F, -1).T.dot(dout_reshape)

    from im2col import col2im_indices

    dx = col2im_indices(dx_cols,
                        x.shape,
                        field_height=HH,
                        field_width=WW,
                        padding=pad,
                        stride=stride,
                        verbose=True)

    if debug:
        print("dout's shape: {}".format(dout.shape))
        print("dout's reshape: {}".format(dout_reshape.shape))
        print("x's shape: {}".format(x.shape))
        print("x's cols: {}".format(x_cols.shape))
        print("w's shape: {}".format(w.shape))
        print("b's shape: {}".format(b.shape))
        print("stride: {}".format(conv_param["stride"]))
        print("padding: {}".format(conv_param["pad"]))

    return dx, dw, db
Code example #6
 def backward(self, x, dy):
     xcol = im2col.im2col_indices(x, self.kernel_h, self.kernel_w, self.pad,
                                  self.stride)
     dy = dy.transpose(1, 2, 3, 0).reshape(self.kernel_number, -1)
     db = np.sum(dy, axis=1)
     # With the cs231n-style im2col, rows are ordered (C, kh, kw), so the
     # product already matches w's layout and can be reshaped directly.
     dw = np.dot(dy, xcol.T).reshape(self.params['w'].shape)
     self.params['dw'] = dw
     self.params['db'] = db
     W_shaped = self.params['w'].reshape(self.kernel_number, -1)
     dx = np.dot(dy.T, W_shaped).T
     dx_im = im2col.col2im_indices(dx, x.shape, self.kernel_h,
                                   self.kernel_w, self.pad, self.stride)
     return dx_im
Code example #7
    def backward(self, out_grad):
        n_filters, c_kernel, h_kernel, w_kernel = self._weight.shape
        self._grad_bias = np.sum(out_grad, axis=(0, 2, 3))
        self._grad_bias = self._grad_bias.reshape(n_filters, -1)

        x_col = im2col_indices(self.x, h_kernel, w_kernel, padding=self._padding, stride=self._stride)

        out_grad_reshaped = out_grad.transpose(1, 2, 3, 0).reshape(n_filters, -1)
        self._grad_weight = out_grad_reshaped @ x_col.T
        self._grad_weight = self._grad_weight.reshape(self._weight.shape)

        weight_reshaped = self._weight.reshape(n_filters, -1)
        grad_x_col = weight_reshaped.T @ out_grad_reshaped
        grad_x = col2im_indices(grad_x_col, self.x.shape, h_kernel, w_kernel, padding=self._padding, stride=self._stride)

        return grad_x
Code example #8
File: layers.py Project: HaoranZhu/cs231n-practice
def conv_backward_naive(dout, cache, debug=False):
  """
  A naive implementation of the backward pass for a convolutional layer.

  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, w, b, conv_param, x_cols) as in the forward pass

  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """

  dx, dw, db = None, None, None
  #############################################################################
  # TODO: Implement the convolutional backward pass.                          #
  #############################################################################
  x, w, b, conv_param, x_cols = cache
  stride, pad = conv_param['stride'], conv_param['pad']

  db = np.sum(dout, axis=(0, 2, 3))
  F, _, HH, WW = w.shape

  dout_reshape = np.reshape(dout.transpose(1,2,3,0), (F, -1))

  dw = dout_reshape.dot(x_cols.T).reshape(w.shape)

  dx_cols = w.reshape(F, -1).T.dot(dout_reshape)

  from im2col import col2im_indices

  dx = col2im_indices(dx_cols, x.shape, field_height=HH, field_width=WW, padding=pad, stride=stride, verbose=True)

  if debug:
    print("dout's shape: {}".format(dout.shape))
    print("dout's reshape: {}".format(dout_reshape.shape))
    print("x's shape: {}".format(x.shape))
    print("x's cols: {}".format(x_cols.shape))
    print("w's shape: {}".format(w.shape))
    print("b's shape: {}".format(b.shape))
    print("stride: {}".format(conv_param["stride"]))
    print("padding: {}".format(conv_param["pad"]))

  return dx, dw, db
Code example #9
def cnn_backward_pass(dout, cache, debug=False):
    """
  A naive implementation of the backward pass for a convolutional layer.
  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, w, b, stride, padding, x_cols) as in the forward pass
  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """

    dx, dw, db = None, None, None

    x, w, b, stride, padding, x_cols = cache

    db = np.sum(dout, axis=(0, 2, 3))
    F, _, HH, WW = w.shape

    dout_reshape = np.reshape(dout.transpose(1, 2, 3, 0), (F, -1))

    dw = dout_reshape.dot(x_cols.T).reshape(w.shape)

    dx_cols = w.reshape(F, -1).T.dot(dout_reshape)

    dx = col2im_indices(dx_cols,
                        x.shape,
                        field_height=HH,
                        field_width=WW,
                        padding=padding,
                        stride=stride,
                        verbose=False)

    if debug:
        print("dout's shape: {}".format(dout.shape))
        print("dout's reshape: {}".format(dout_reshape.shape))
        print("x's shape: {}".format(x.shape))
        print("x's cols: {}".format(x_cols.shape))
        print("w's shape: {}".format(w.shape))
        print("b's shape: {}".format(b.shape))
        print("stride: {}".format(stride))
        print("padding: {}".format(padding))

    return dx, dw, db
Code example #10
 def backward(self, delta, alpha):
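     # Note: unlike the other examples on this page, this backward pass folds
     # in the activation derivative (conv.aprime) and applies the SGD update
     # with learning rate alpha in place, returning only the input gradient.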
     stimulusCols = im2col_indices(self.stimulus, self.filterHeight,
                                   self.filterWidth, self.padding,
                                   self.stride)
     filterCols = self.filters.reshape((self.numFilters, -1))
     activationCols = self.activation.transpose(1, 2, 3, 0).reshape(
         self.numFilters, -1)
     deltaCols = delta.transpose(1, 2, 3, 0).reshape(
         self.numFilters, -1) * conv.aprime(activationCols)
     inputPartial = col2im_indices(
         np.dot(deltaCols.transpose(1, 0),
                filterCols), self.stimulus.shape, self.filterHeight,
         self.filterWidth, self.padding, self.stride)
     self.filters -= alpha * np.dot(
         deltaCols, stimulusCols.transpose(1, 0)).reshape(
             self.filters.shape)
     self.bias -= alpha * np.sum(deltaCols, axis=1).reshape(
         self.bias.shape)
     return inputPartial
Code example #11
File: main.py Project: AlinMH/acs-ml-lab
    def backward(self, x: np.ndarray, dy: np.ndarray) -> np.ndarray:
        n_filter, d_filter, h_filter, w_filter = self.weight.shape
        db = np.sum(dy, axis=(0, 2, 3))
        self.dbias = db.reshape(n_filter, -1)

        X_col = im2col_indices(x,
                               h_filter,
                               w_filter,
                               padding=self.padding,
                               stride=self.stride)
        dout_reshaped = dy.transpose(1, 2, 3, 0).reshape(n_filter, -1)
        dW = dout_reshaped @ X_col.T
        self.dweight = dW.reshape(self.weight.shape)

        W_reshape = self.weight.reshape(n_filter, -1)
        dX_col = W_reshape.T @ dout_reshaped
        dX = col2im_indices(dX_col,
                            x.shape,
                            h_filter,
                            w_filter,
                            padding=self.padding,
                            stride=self.stride)

        return dX
Code example #12
File: main.py Project: AlinMH/acs-ml-lab
    def backward(self, x: np.ndarray, dy: np.ndarray) -> np.ndarray:
        def dmaxpool(dX_col, dout_col, pool_cache):
            dX_col[pool_cache, range(dout_col.size)] = dout_col
            return dX_col

        x = x.reshape((x.shape[0], 1, 2 * x.shape[2], -1))
        X_col = im2col_indices(x,
                               self.size,
                               self.size,
                               padding=0,
                               stride=self.stride)

        n, d, h, w = x.shape
        dX_col = np.zeros_like(X_col)
        dout_col = dy.transpose(2, 3, 0, 1).ravel()

        # Scatter the upstream gradient back to the argmax positions; without
        # this step dX_col stays all zeros.
        pool_cache = np.argmax(X_col, axis=0)
        dX_col = dmaxpool(dX_col, dout_col, pool_cache)
        dX = col2im_indices(dX_col, (n * d, 1, h, w),
                            self.size,
                            self.size,
                            padding=0,
                            stride=self.stride)
        dX = dX.reshape(x.shape)
        return dX
Code example #13
File: layers.py Project: ykwon0407/cs231_2015
def conv_backward_naive(dout, cache):
  """
  A naive implementation of the backward pass for a convolutional layer.

  Inputs:
  - dout: Upstream derivatives.
  - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

  Returns a tuple of:
  - dx: Gradient with respect to x
  - dw: Gradient with respect to w
  - db: Gradient with respect to b
  """
  dx, dw, db = None, None, None

  x, w, b, conv_param = cache
  pad, stride = conv_param['pad'], conv_param['stride']
  F, C, HH, WW = w.shape
  x_cols = im2col.im2col_indices(x, HH, WW, pad, stride)

  N, F, H_, W_ = dout.shape
  #############################################################################
  # TODO: Implement the convolutional backward pass.                          #
  #############################################################################
  db = np.sum(dout, axis=(0, 2, 3))

  dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(F, -1)
  dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)

  dx_cols = w.reshape(F, -1).T.dot(dout_reshaped)
  dx = im2col.col2im_indices(dx_cols, x.shape, HH, WW, pad, stride)
  #############################################################################
  #                             END OF YOUR CODE                              #
  #############################################################################
  return dx, dw, db
Code example #14
    def _backward(self, dout):
        # dout (N,Cout,H_,W_)
        # W (Cout, Cin, F, F)

        W = self.W['val']
        n_filter, d_filter, h_filter, w_filter = W.shape

        db = np.sum(dout, axis=(0, 2, 3))
        db = db.reshape(n_filter, -1)

        dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(n_filter, -1)
        dW = dout_reshaped @ self.X_col.T
        dW = dW.reshape(W.shape)

        W_reshape = W.reshape(n_filter, -1)
        dX_col = W_reshape.T @ dout_reshaped
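        # NOTE: padding and stride are hardcoded to 1 below; presumably they
        # should mirror the settings used in this layer's forward pass.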
        dX = im2col.col2im_indices(dX_col,
                                   self.X.shape,
                                   h_filter,
                                   w_filter,
                                   padding=1,
                                   stride=1)

        return dX  #, dW, db
Code example #15
# Same convolution
conv = nn.Conv2d(cd.inp.shape[1], out_channels, kernel_size,
                 padding=padding, bias=True)

conv.weight.data = cd.convweights
#conv.bias.data = torch.zeros(out_channels)
conv.bias.data = cd.bias

output = conv(cd.inp)

output.backward(cd.convloss if padding == 0 else cd.convlossPad1)

print(f"output {to_cpp(output)}")
print(f"dinput {to_cpp(cd.inp.grad)}")
print (f"weights {to_cpp(conv.weight)}")
print(f"dweights {to_cpp(conv.weight.grad)}")

# now with im2col

n_filter = cd.convweights.shape[0]
x_col = im2col_indices(inp.detach().numpy(), kernel_size, kernel_size, padding=padding)
W_reshape = cd.convweights.detach().numpy().reshape(n_filter, -1)

dout_reshaped = cd.convloss.detach().numpy().transpose(1, 2, 3, 0).reshape(n_filter, -1)
if padding == 1:
  dout_reshaped = cd.convlossPad1.detach().numpy().transpose(1, 2, 3, 0).reshape(n_filter, -1)

dX_col = W_reshape.T @ dout_reshaped

dX = col2im_indices(dX_col, cd.inp.shape, kernel_size, kernel_size, padding=padding)
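
As a final sanity check (our addition, not part of the original script), the im2col-based gradient should agree with the autograd gradient printed earlier:

# Hypothetical check: compare against PyTorch's autograd result from above.
assert np.allclose(dX, cd.inp.grad.detach().numpy(), atol=1e-5)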
Code example #16
for h in range(kernel_size):
    for w in range(kernel_size):
        dilated[:, :, dilation * h, dilation * w] = kernel[:, :, h, w]

# forward pass with transposed kernel
n_filters = cd.convweights.shape[0]
inp_reshaped = inp.detach().numpy().transpose(1, 2, 3,
                                              0).reshape(n_filters, -1)
W_reshape = dilated.reshape(n_filters, -1)

out_col = W_reshape.T @ inp_reshaped

# image is recovered through col2im
out_image = col2im_indices(out_col,
                           cd.inp.shape,
                           dilated_size,
                           dilated_size,
                           padding=padding)
is_eq(output, out_image)

################################################################################################
### Backward Pass
dout_col = im2col_indices(cd.convlossTrans.detach().numpy(),
                          dilated_size,
                          dilated_size,
                          padding=padding)
#dout_reshaped = cd.convlossTrans.detach().numpy().transpose(1, 2, 3, 0).reshape(n_filter, -1)
dX_col = W_reshape @ dout_col
n, _, h, w = inp.shape  # n is the batch dimension
dX = dX_col.reshape(n_filters, h, w, n).transpose(3, 0, 1, 2)
is_eq(dX, inp.grad)