Example #1
# _ksize, _stride and wrapper come from the full test script (last example)
def test_backward():
    # try to assign back: scatter columns into a 4x4x3 padded image
    col_data = np.reshape(range(27), (1, 3, 3, 3)).astype('float32')
    sz = col_data.shape  # only used to derive the output grid shape
    col_data = np.zeros((1, sz[1], sz[2], sz[3] * _ksize * _ksize), 'float32')
    padded_data = np.zeros((1, 4, 4, 3), 'float32')
    wrapper.im2col_backward(padded_data, col_data, _ksize, _stride)
    print(padded_data)
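
For reference, im2col_backward is a col2im scatter: each column is reshaped back into a (ksize, ksize, channels) window and accumulated into the image, so overlapping windows sum. Below is a pure-NumPy sketch of the operation with a hypothetical helper col2im_ref, assuming columns are flattened in (row, column, channel) order; treat it as an illustration of the technique, not the library's exact kernel:

import numpy as np

def col2im_ref(col_data, ksize, stride, im_shape):
    """Sketch of what wrapper.im2col_backward computes (assumed layout).

    col_data: (num, out_h, out_w, ksize * ksize * channels), each column
    assumed flattened in (row, col, channel) order. Overlapping windows
    accumulate, making this the adjoint of im2col.
    """
    num, out_h, out_w, _ = col_data.shape
    channels = im_shape[-1]
    img = np.zeros(im_shape, dtype=col_data.dtype)
    patches = col_data.reshape(num, out_h, out_w, ksize, ksize, channels)
    for y in range(out_h):
        for x in range(out_w):
            img[:, y * stride:y * stride + ksize,
                x * stride:x * stride + ksize, :] += patches[:, y, x]
    return img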
Example #2
 def backward(self, bottom, top, propagate_down):
     """Runs the backward pass."""
     top_diff = top[0].diff()
     padded_data = self._padded.data()
     col_data = self._col.data()
     bottom_data = bottom[0].data()
     if bottom_data.ndim != 4:
         raise ValueError('Bottom data should be a 4-dim tensor.')
     kernel_diff = self._kernels.init_diff()
     if self._has_bias:
         bias_diff = self._bias.init_diff()
         # bias diff is fairly easy to compute: just sum over all other
         # dimensions
         np.sum(top_diff.reshape(top_diff.size // top_diff.shape[-1],
                                 top_diff.shape[-1]),
                axis=0, out=bias_diff)
     if propagate_down:
         bottom_diff = bottom[0].init_diff(setzero=False)
         col_diff = self._col.init_diff()
         if self._pad_size == 0:
             padded_diff = self._padded.mirror_diff(bottom_diff)
         else:
             padded_diff = self._padded.init_diff(setzero=False)
     if self._large_mem:
         # we have the col_data all pre-stored, making things more efficient.
         blasdot.dot_firstdims(col_data, top_diff, out=kernel_diff)
         if propagate_down:
             blasdot.dot_lastdim(top_diff, self._kernels.data().T,
                                 out=col_diff)
             wrapper.im2col_backward(padded_diff, col_diff,
                                     self._ksize, self._stride)
     else:
         kernel_diff_buffer = np.zeros_like(kernel_diff)
         for i in range(bottom_data.shape[0]):
             # although it is a backward layer, we still need to compute
             # the intermediate results using forward calls.
             wrapper.im2col_forward(padded_data[i:i+1], col_data,
                                    self._ksize, self._stride)
             blasdot.dot_firstdims(col_data, top_diff[i],
                                  out=kernel_diff_buffer)
             kernel_diff += kernel_diff_buffer
             if propagate_down:
                 blasdot.dot_lastdim(top_diff[i], self._kernels.data().T,
                                     out=col_diff)
                 # im2col backward
                 wrapper.im2col_backward(padded_diff[i:i+1], col_diff,
                                         self._ksize, self._stride)
     # finally, copy results to the bottom diff.
     if propagate_down:
         if self._pad_size != 0:
             bottom_diff[:] = padded_diff[:,
                                          self._pad_size:-self._pad_size,
                                          self._pad_size:-self._pad_size]
     # finally, add the regularization term
     if self._reg is not None:
         return self._reg.reg(self._kernels, bottom_data.shape[0])
     else:
         return 0.
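
The bias gradient above flattens all but the channel axis before summing; for a 4-d diff this is the same as summing over axes (0, 1, 2). A quick self-contained check with arbitrary shapes:

import numpy as np

top_diff = np.arange(24, dtype='float32').reshape(1, 2, 3, 4)
flat_sum = np.sum(
    top_diff.reshape(top_diff.size // top_diff.shape[-1], top_diff.shape[-1]),
    axis=0)
assert np.allclose(flat_sum, top_diff.sum(axis=(0, 1, 2)))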
Example #3
 def backward(self, bottom, top, propagate_down):
     """Computes the backward pass."""
     if not propagate_down:
         return 0.
     top_diff = top[0].diff()
     bottom_diff = bottom[0].init_diff(setzero=False)
     wrapper.im2col_backward(bottom_diff, top_diff, self._psize,
                             self._stride)
     return 0.
Example #4
    def data_backward(self, bottom, top, l_conv1):
        """Runs the backward pass."""
        # bottom:(blob) data
        # top:(blob) conv1_neuron
        # l_conv1: (layer) conv1
        top_diff = top[0].diff()
        t_sz = top_diff.shape
        t_sz0 = np.prod(t_sz[:-1])
        top_diff = np.reshape(top_diff, (t_sz0, t_sz[-1]))

        padded_data = l_conv1._padded.data()
        col_data = l_conv1._col.data()  # stored in layers['conv1']
        kernel_data = l_conv1._kernels.data()
        k_sz = kernel_data.shape

        col_diff = np.zeros((t_sz0, k_sz[0]), 'float32')
        for i in range(k_sz[1]):
            col_diff += np.dot(top_diff[:, i:i+1], kernel_data[:, i:i+1].T)
        
        # accumulate back to image col2im
        padded_diff = np.empty_like(bottom[0].data())
        print(padded_diff.dtype)
        print((t_sz[0], t_sz[1], t_sz[2], k_sz[0]))
        if l_conv1._large_mem:
            wrapper.im2col_backward(padded_diff, np.reshape(col_diff,(t_sz[0],t_sz[1],t_sz[2],k_sz[0])),
                                    l_conv1._ksize, l_conv1._stride)
        else:
            raise NotImplementedError(str(type(self)) + " does not implement small memory im2col_backward.")
            """
            kernel_diff_buffer = np.empty_like(kernel_diff)
            for i in range(bottom_data.shape[0]):
                # although it is a backward layer, we still need to compute
                # the intermediate results using forward calls.
                wrapper.im2col_forward(padded_data[i:i+1], col_data,
                                       l_conv1._ksize, l_conv1._stride)
                blasdot.dot_firstdims(col_data, top_diff[i],
                                     out=kernel_diff_buffer)
                kernel_diff += kernel_diff_buffer
                if propagate_down:
                    blasdot.dot_lastdim(top_diff[i], l_conv1._kernels.data().T,
                                        out=col_diff)
                    # im2col backward
                    wrapper.im2col_backward(padded_diff[i:i+1], col_diff,
                                            l_conv1._ksize, l_conv1._stride)
            """
        return padded_diff
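
The per-column loop in data_backward accumulates rank-1 products; mathematically it collapses to a single GEMM, np.dot(top_diff, kernel_data.T). A minimal sketch of the equivalence with hypothetical shapes:

import numpy as np

rng = np.random.RandomState(0)
top_diff = rng.rand(9, 5).astype('float32')      # (t_sz0, k_sz[1])
kernel_data = rng.rand(12, 5).astype('float32')  # (k_sz[0], k_sz[1])

# column-by-column accumulation, as in data_backward above
looped = np.zeros((9, 12), 'float32')
for i in range(kernel_data.shape[1]):
    looped += np.dot(top_diff[:, i:i+1], kernel_data[:, i:i+1].T)

# single matrix product gives the same result
assert np.allclose(looped, np.dot(top_diff, kernel_data.T), atol=1e-5)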
Example #5
 def forward(self, bottom, top):
     """Runs the forward pass."""
     bottom_data = bottom[0].data()
     if bottom_data.ndim != 4:
         raise ValueError('Bottom data should be a 4-dim tensor.')
     if not self._kernels.has_data():
         # initialize the kernels
         self._kernels.init_data(
             (bottom_data.shape[-1],
              self._ksize * self._ksize * self._num_channels),
             bottom_data.dtype)
     # initialize the buffers.
     self._col.init_data((1, bottom_data.shape[1], bottom_data.shape[2],
                          self._kernels.data().shape[1]),
                         dtype = bottom_data.dtype)
     pad_height = self._ksize + (bottom_data.shape[1] - 1) \
             * self._stride
     pad_width = self._ksize + (bottom_data.shape[2] - 1) \
             * self._stride
     if self._mode != 'valid':
         padded_data = self._padded.init_data(
             (1, pad_height, pad_width, self._num_channels),
             dtype = bottom_data.dtype)
     top_data = top[0].init_data(
         (bottom_data.shape[0], pad_height - self._border * 2,
          pad_width - self._border * 2, self._num_channels),
         dtype=bottom_data.dtype)
     # process data individually
     for i in range(bottom_data.shape[0]):
         # first, compute the convolution as a gemm operation
         blasdot.dot_lastdim(bottom_data[i:i+1], self._kernels.data(),
                             out=self._col.data())
         if self._mode != 'valid':
             # do col2im
             wrapper.im2col_backward(padded_data, self._col.data(),
                                     self._ksize, self._stride)
             top_data[i] = padded_data[0, self._border:-self._border,
                                       self._border:-self._border]
         else:
             wrapper.im2col_backward(top_data[i:i+1], self._col.data(),
                                     self._ksize, self._stride)
     return
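
This forward pass runs the convolution machinery in reverse (a GEMM followed by col2im), so the spatial size grows: pad = ksize + (in - 1) * stride inverts the usual im2col output formula (pad - ksize) // stride + 1. A tiny check against the buffer sizes used in the test snippets, with a hypothetical helper padded_size:

def padded_size(in_size, ksize, stride):
    # inverse of the im2col output-size formula (pad - ksize) // stride + 1
    return ksize + (in_size - 1) * stride

assert padded_size(3, 2, 1) == 4                # a 3x3 col grid scatters back to 4x4
assert (padded_size(3, 2, 1) - 2) // 1 + 1 == 3  # round-trips the forward formula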
Example #6
import numpy as np
# im2col C wrappers; this import path is an assumption based on the layer code above
from decaf.layers.cpp import wrapper

# forward
_ksize = 2
_stride = 1
def test_forward():
    # same as matlab
    padded_data = np.reshape(range(48), (1, 4, 4, 3)).astype('float32')
    sz = padded_data.shape
    col_data = np.zeros((1, (sz[1] - _ksize) // _stride + 1,
                         (sz[2] - _ksize) // _stride + 1,
                         sz[3] * _ksize * _ksize), 'float32')
    wrapper.im2col_forward(padded_data, col_data, _ksize, _stride)
    print(col_data)

def test_backward():
    # try to assign back
    col_data = np.reshape(range(27), (1, 3, 3, 3)).astype('float32')
    sz = col_data.shape  # only used to derive the output grid shape
    col_data = np.zeros((1, sz[1], sz[2], sz[3] * _ksize * _ksize), 'float32')
    padded_data = np.zeros((1, 4, 4, 3), 'float32')
    wrapper.im2col_backward(padded_data, col_data, _ksize, _stride)
    print(padded_data)


#col_data = np.reshape(range(12), (1, 2, 2, 3)).astype('float32')
col_data = np.reshape(range(108), (1, 3, 3, 3 * 4)).astype('float32')
#col_data = np.reshape(range(27), (1, 3, 3, 3)).astype('float32')
sz = col_data.shape
padded_data = np.zeros((1, 4, 4, 3), 'float32')
wrapper.im2col_backward(padded_data, col_data, _ksize, _stride)
print(padded_data)
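
Since im2col_backward accumulates windows back into the image, it should behave as the adjoint of im2col_forward: <im2col(x), c> equals <x, col2im(c)>. A hedged round-trip check (hypothetical test_adjoint, assuming im2col_backward overwrites rather than adds into its output buffer, as the zero-initialized buffers above suggest):

def test_adjoint():
    # <im2col(x), c> should equal <x, col2im(c)> if the two ops are adjoint
    rng = np.random.RandomState(0)
    padded_data = rng.rand(1, 4, 4, 3).astype('float32')
    out = (4 - _ksize) // _stride + 1
    col_data = np.zeros((1, out, out, 3 * _ksize * _ksize), 'float32')
    wrapper.im2col_forward(padded_data, col_data, _ksize, _stride)
    col_rand = rng.rand(*col_data.shape).astype('float32')
    back = np.zeros_like(padded_data)
    wrapper.im2col_backward(back, col_rand, _ksize, _stride)
    assert np.allclose((col_data * col_rand).sum(),
                       (padded_data * back).sum(), rtol=1e-4)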