Example #1
    def deconv(self, X, subsample=(2, 2), border_mode=(2, 2), conv_mode='conv', atype='sigmoid'):
        """
        Sets up a dummy convolutional forward pass and uses its gradient as the deconv.
        Currently only tested/working with 'same' padding.
        """

        # Always return a C-contiguous output.
        # Copy the input only if it is not already C-contiguous.
        img = gpu_contiguous(X)
        kerns = gpu_contiguous(self.W)

        # Allocate the output buffer on the GPU without initializing its memory.
        if someconfigs.backend == 'gpuarray':
            gpu_alloc_img_shape = GpuAllocEmpty('float32', None)(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1]).shape
            # This Op builds a convolution descriptor for use in the other convolution operations.
            desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                                  conv_mode=conv_mode)(kerns.shape)
            out = GpuAllocEmpty('float32', None)(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1])
        elif someconfigs.backend == 'cudandarray':
            gpu_alloc_img_shape = gpu_alloc_empty(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1]).shape
            desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                                  conv_mode=conv_mode)(gpu_alloc_img_shape, kerns.shape)
            out = gpu_alloc_empty(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1])

        # The gradient of the dummy forward convolution with respect to its inputs:
        # this upsamples `img` by the subsample factor.
        d_img = GpuDnnConvGradI()(kerns, img, out, desc)
        return activation_fn_th(d_img, atype=atype)
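Note: this and the later examples reference names that are never imported in the snippets shown. A minimal sketch of the imports they appear to assume, with the paths depending on which Theano GPU backend is selected (someconfigs, activation_fn_th, and TINY are project-level helpers, not part of Theano):

import theano.tensor as T

# Newer libgpuarray backend (someconfigs.backend == 'gpuarray'):
from theano.gpuarray.basic_ops import gpu_contiguous, GpuAllocEmpty
from theano.gpuarray.dnn import GpuDnnConvDesc, GpuDnnConvGradI, dnn_conv

# Older CUDA backend (someconfigs.backend == 'cudandarray'):
# from theano.sandbox.cuda.basic_ops import gpu_contiguous, gpu_alloc_empty
# from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConvGradI, dnn_conv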
Example #2
    def conv(self,
             X,
             subsample=(2, 2),
             border_mode=(2, 2),
             atype='sigmoid',
             testF=False):

        #ConH0 = dnn_conv(X , self.W.dimshuffle(1,0,2,3), subsample=subsample, border_mode=border_mode)
        ConH0 = dnn_conv(X,
                         self.W,
                         subsample=subsample,
                         border_mode=border_mode)

        if testF:
            # Test time: normalize with the pre-computed population statistics.
            ConH1 = (ConH0 - self.stat_mean.dimshuffle('x', 0, 'x', 'x')) \
                / (self.stat_std.dimshuffle('x', 0, 'x', 'x') + TINY)
        else:
            # Training time: batch normalization over the batch and spatial axes.
            mean = ConH0.mean(axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            # Note: despite its name, `std` holds the variance; the sqrt is applied below.
            std = T.mean(T.sqr(ConH0 - mean),
                         axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            ConH1 = (ConH0 - mean) / T.sqrt(std + TINY)

        ConH2 = ConH1 * self.eta.dimshuffle('x', 0, 'x', 'x')\
                                    + self.beta.dimshuffle('x', 0, 'x', 'x')

        return activation_fn_th(ConH2, atype=atype)
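For intuition, the training branch above is plain per-channel batch normalization over the batch and spatial axes, followed by a learned scale (eta) and shift (beta). A NumPy sketch of the same arithmetic, assuming TINY is a small constant such as 1e-8:

import numpy as np

def batchnorm_nchw(x, eta, beta, tiny=1e-8):
    # x: (batch, channels, height, width); eta, beta: per-channel, shape (channels,)
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = ((x - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + tiny)
    return eta.reshape(1, -1, 1, 1) * x_hat + beta.reshape(1, -1, 1, 1)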
Example #3
    def conv(self, X, subsample=(2, 2), border_mode=(2, 2), atype='sigmoid'):

        ConH0 = dnn_conv(X,
                         self.W.dimshuffle(1, 0, 2, 3),
                         subsample=subsample,
                         border_mode=border_mode)
        #ConH0 = dnn_conv(X , self.W, subsample=subsample, border_mode=border_mode)
        #return activation_fn_th(ConH0, atype=atype)

        return activation_fn_th(ConH0 + self.c.dimshuffle('x', 0, 'x', 'x'),
                                atype=atype)
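Unlike Example #2, this conv transposes the filter tensor first: dimshuffle(1, 0, 2, 3) swaps the first two axes, so a weight tensor presumably stored for the deconv path as (input_channels, output_channels, kh, kw) matches the (output_channels, input_channels, kh, kw) layout that dnn_conv expects. In NumPy terms:

import numpy as np

W = np.zeros((3, 64, 5, 5))        # hypothetical (in_ch, out_ch, kh, kw) storage
W_conv = W.transpose(1, 0, 2, 3)   # -> (64, 3, 5, 5), i.e. (out_ch, in_ch, kh, kw)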
Example #4
    def deconv(self,
               X,
               subsample=(2, 2),
               border_mode=(2, 2),
               conv_mode='conv',
               atype='sigmoid',
               testF=False):
        """ 
        sets up dummy convolutional forward pass and uses its grad as deconv
        currently only tested/working with same padding
        """

        # Always return a C-contiguous output.
        # Copy the input only if it is not already C-contiguous.
        img = gpu_contiguous(X)
        kerns = gpu_contiguous(self.W)

        # Allocate the output buffer on the GPU without initializing its memory.
        if someconfigs.backend == 'gpuarray':
            gpu_alloc_img_shape = GpuAllocEmpty('float32', None)(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1]).shape
            # This Op builds a convolution descriptor for use in the other convolution operations.
            desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                                  conv_mode=conv_mode)(kerns.shape)
            out = GpuAllocEmpty('float32', None)(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1])
        elif someconfigs.backend == 'cudandarray':
            gpu_alloc_img_shape = gpu_alloc_empty(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1]).shape
            desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                                  conv_mode=conv_mode)(gpu_alloc_img_shape, kerns.shape)
            out = gpu_alloc_empty(
                img.shape[0], kerns.shape[1],
                img.shape[2] * subsample[0], img.shape[3] * subsample[1])

        # The gradient of the dummy forward convolution with respect to its inputs:
        # this upsamples `img` by the subsample factor.
        d_img = GpuDnnConvGradI()(kerns, img, out, desc)
        ConH0 = d_img  #+ self.b.dimshuffle('x', 0, 'x', 'x')

        if testF:
            # Test time: normalize with the pre-computed population statistics.
            ConH1 = (ConH0 - self.stat_mean.dimshuffle('x', 0, 'x', 'x')) \
                / (self.stat_std.dimshuffle('x', 0, 'x', 'x') + TINY)
        else:
            # Training time: batch normalization over the batch and spatial axes.
            mean = ConH0.mean(axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            # Note: despite its name, `std` holds the variance; the sqrt is applied below.
            std = T.mean(T.sqr(ConH0 - mean),
                         axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            ConH1 = (ConH0 - mean) / T.sqrt(std + TINY)

        ConH2 = self.eta.dimshuffle('x', 0, 'x', 'x') * ConH1 \
                        + self.beta.dimshuffle('x', 0, 'x', 'x')

        return activation_fn_th(ConH2, atype=atype)
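Shape-wise, the deconv trick works because GpuDnnConvGradI treats img as the output-side gradient of a strided forward convolution whose input was out; with a stride of subsample, that input is subsample times larger spatially, so the result upsamples X. A quick check of the shape arithmetic with hypothetical sizes:

batch, in_ch, h, w = 64, 128, 8, 8       # hypothetical shape of X entering deconv
out_ch, sub = 64, (2, 2)                 # kerns.shape[1] and subsample
# Shape of the buffer allocated for the gradient with respect to the inputs:
d_img_shape = (batch, out_ch, h * sub[0], w * sub[1])
assert d_img_shape == (64, 64, 16, 16)   # spatial resolution doubled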
Example #5
    def conv(self, X, subsample=(2, 2), border_mode=(2, 2), conv_mode='conv', atype='sigmoid'):

        ConH0 = dnn_conv(X, self.W, subsample=subsample, border_mode=border_mode)
        return activation_fn_th(ConH0, atype=atype)

    def propagate(self, X, testF=False, atype='sigmoid'):

        # Forward the testF flag so that test-time statistics are actually used.
        H = self.pre_activation(X, testF=testF)
        H = activation_fn_th(H, atype=atype)
        return H
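Assuming an instantiated layer object exposing these methods (the constructor and pre_activation are not shown in the excerpt), usage would look roughly like:

import theano.tensor as T

X = T.tensor4('X')  # symbolic (batch, channels, height, width) input
# `layer` is a hypothetical instance of the class the methods above belong to.
H = layer.propagate(X, testF=False, atype='sigmoid')  # train-time forward pass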