Example #1
    def convolve(self, input, **kwargs):

        # The dummy forward-pass trick: the gradient of a strided convolution
        # w.r.t. its inputs acts as the transposed convolution (deconv).
        img = gpu_contiguous(input)
        kerns = gpu_contiguous(self.W)
        out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                              img.shape[2] * self.stride[0],
                              img.shape[3] * self.stride[1])
        desc = GpuDnnConvDesc(border_mode=self.crop,
                              subsample=self.stride,
                              conv_mode='conv')(out.shape, kerns.shape)
        conved = GpuDnnConvGradI()(kerns, img, out, desc)
        # A device-independent alternative using AbstractConv2d_gradInputs is
        # sketched below.
        return conved
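The device-independent alternative referenced in the comment above, made live: a hedged sketch using Theano's AbstractConv2d_gradInputs, which lowers to the same cuDNN gradient-of-inputs kernel when cuDNN is available. It assumes the layer attributes used in the snippet (crop, stride, flip_filters, output_shape, get_W_shape, get_output_shape_for).

    def convolve(self, input, **kwargs):
        import theano.tensor as T
        border_mode = 'half' if self.crop == 'same' else self.crop
        op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
            imshp=self.output_shape,
            kshp=self.get_W_shape(),
            subsample=self.stride, border_mode=border_mode,
            filter_flip=not self.flip_filters)
        output_size = self.output_shape[2:]
        if any(s is None for s in output_size):
            output_size = self.get_output_shape_for(input.shape)[2:]
        return op(self.W, input, output_size)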
Example #2
    def __init__(self,
                 filters,
                 batch_size,
                 input_space,
                 output_axes=('b', 'c', 0, 1),
                 subsample=(1, 1),
                 border_mode='valid',
                 filters_shape=None,
                 message=''):

        assert batch_size is None or batch_size > 0
        self._input_space = input_space
        self._output_axes = output_axes
        self._subsample = tuple(subsample)
        self._border_mode = border_mode

        super(Cudnn2D, self).__init__(
            filters=filters,
            img_shape=(batch_size, input_space.num_channels,
                       input_space.shape[0], input_space.shape[1]),
            subsample=self._subsample,
            border_mode=border_mode,
            filters_shape=filters.get_value(borrow=True).shape,
            message=message)

        # The base class's conv op has to be swapped for the cuDNN implementation.
        self._conv_op = GpuDnnConv()
        self._desc = GpuDnnConvDesc(border_mode=border_mode,
                                    subsample=self._subsample,
                                    conv_mode='conv')
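The constructor above only builds the op and the descriptor. A hedged sketch of how they would typically be combined when the convolution is applied; the names inputs and self._filters are illustrative, and GpuDnnConv.get_out_shape is assumed to be available from the sandbox dnn module:

    # Illustrative only: combining the stored op and descriptor at apply time.
    img = gpu_contiguous(inputs)            # ('b', 'c', 0, 1) layout
    kerns = gpu_contiguous(self._filters)
    desc = self._desc(img.shape, kerns.shape)
    out = gpu_alloc_empty(*GpuDnnConv.get_out_shape(
        img.shape, kerns.shape, self._border_mode, self._subsample))
    conved = self._conv_op(img, kerns, out, desc)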
Example #3
    def build_graph(self, state_below):
        filters = self.filters
        nfilters = self.nfilters
        b = self.b
        border_mode = self.border_mode
        batch_size = state_below.shape[0]

        out_size = DeConvNet.infer_size(state_below.shape[1:3],
                                        filters.shape[2:], self.stride,
                                        self.border_mode)
        out_shape = [batch_size, nfilters, out_size[0], out_size[1]]
        state_below = state_below.dimshuffle(0, 3, 1, 2)

        filters = gpu_contiguous(filters)
        state_below = gpu_contiguous(state_below)
        out_shape = tensor.stack(out_shape)

        desc = GpuDnnConvDesc(border_mode=border_mode,
                              subsample=self.stride,
                              conv_mode='conv')(out_shape, filters.shape)
        pred = GpuDnnConvGradI()(filters, state_below,
                                 gpu_alloc_empty(*out_shape), desc)
        pred += b.dimshuffle('x', 0, 'x', 'x')
        pred = pred.dimshuffle(0, 2, 3, 1)

        # self.activ names the activation function as a string; eval resolves
        # it to a callable.
        return eval(self.activ)(pred)
Example #4
File: deconv1d.py  Project: yobajnin/nn
def _deconv2d(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    """ 
    from Alec (https://github.com/Newmu/dcgan_code/blob/master/lib/ops.py)
    sets up dummy convolutional forward pass and uses its grad as deconv
    currently only tested/working with same padding
    """
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)

    out = gpu_alloc_empty(img.shape[0],
                          kerns.shape[1],
                          img.shape[2] * subsample[0],
                          img.shape[3] * subsample[1])

    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(out.shape, kerns.shape)

    d_img = GpuDnnConvGradI()(kerns, img, out, desc)

    return d_img
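A minimal usage sketch of _deconv2d (assumes a CUDA-enabled Theano with cuDNN; the variable names and shapes are illustrative). With 'same'-style padding (2, 2) for a 5x5 kernel and stride (2, 2), a (1, 3, 16, 16) input is upsampled to (1, 8, 32, 32):

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')                       # (batch, in_channels, h, w)
# Note the kernel layout: (in_channels, out_channels, kh, kw).
W = theano.shared(np.random.randn(3, 8, 5, 5).astype(theano.config.floatX))

up = _deconv2d(X, W, subsample=(2, 2), border_mode=(2, 2))
f = theano.function([X], up)
print(f(np.zeros((1, 3, 16, 16), dtype=theano.config.floatX)).shape)
# -> (1, 8, 32, 32)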
Example #5
    def deconv(self, X, subsample=(2, 2), border_mode=(2, 2),
               conv_mode='conv', atype='sigmoid'):
        """
        Sets up a dummy convolutional forward pass and uses its gradient as
        the deconvolution. Currently only tested/working with 'same' padding.
        """
        # Always return a c-contiguous output; copy the input only if it is
        # not already c-contiguous.
        img = gpu_contiguous(X)
        kerns = gpu_contiguous(self.W)

        # Alloc on the GPU without initializing memory.
        out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                              img.shape[2] * subsample[0],
                              img.shape[3] * subsample[1])

        # Build a convolution descriptor for the dummy forward pass.
        desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                              conv_mode=conv_mode)(out.shape, kerns.shape)

        # The convolution gradient with respect to the inputs.
        d_img = GpuDnnConvGradI()(kerns, img, out, desc)
        return activation_fn_th(d_img + self.b.dimshuffle('x', 0, 'x', 'x'),
                                atype=atype)
Example #6
def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                          img.shape[2] * subsample[0],
                          img.shape[3] * subsample[1])
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(out.shape, kerns.shape)
    d_img = GpuDnnConvGradI()(kerns, img, out, desc)
    return d_img
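Why allocating (h*s, w*s) is correct for 'same'-style padding: the gradient-of-inputs op inverts a forward conv whose output size is floor((H + 2p - k)/s) + 1. A small self-check; forward_out is an illustrative helper, not part of any API:

def forward_out(h, k, p, s):
    # Output size of the dummy forward conv that deconv() inverts.
    return (h + 2 * p - k) // s + 1

# With p = (k - 1) // 2 (odd k), the forward pass maps h*s back to h,
# so the allocated (h * s) output is consistent with the descriptor:
assert forward_out(16 * 2, k=5, p=2, s=2) == 16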
Example #7
    def call(self, x, mask=None):
        x = gpu_contiguous(x)
        k = gpu_contiguous(self.W)
        new_size = (x.shape[0], k.shape[1],
                    x.shape[2] * self.subsample[0],
                    x.shape[3] * self.subsample[1])

        out = gpu_alloc_empty(*new_size)
        desc = GpuDnnConvDesc(border_mode=self.border_mode,
                              subsample=self.subsample,
                              conv_mode=self.conv_mode)(out.shape, k.shape)
        return GpuDnnConvGradI()(k, x, out, desc)
Example #8
def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    """
    sets up dummy convolutional forward pass and uses its grad as deconv
    currently only tested/working with same padding
    """
    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w.dimshuffle(1,0,2,3))
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(gpu_alloc_empty(img.shape[0], kerns.shape[1], img.shape[2]*subsample[0], img.shape[3]*subsample[1]).shape, kerns.shape)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1], img.shape[2]*subsample[0], img.shape[3]*subsample[1])
    d_img = GpuDnnConvGradI()(kerns, img, out, desc)
    return d_img
Example #9
    def convolve(self, input, **kwargs):
        img = gpu_contiguous(input)
        kerns = gpu_contiguous(self.W)
        out_shape = self.get_output_shape_for(img.shape)

        out_mem = gpu_alloc_empty(out_shape[0], out_shape[1],
                                  out_shape[2], out_shape[3])
        desc = GpuDnnConvDesc(border_mode=self.border_mode,
                              subsample=self.subsample)(out_mem.shape,
                                                        kerns.shape)
        return GpuDnnConvGradI()(kerns, img, out_mem, desc)
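Here the target shape comes from get_output_shape_for rather than a bare stride multiplication, which also covers paddings where the output is not exactly input * stride. For reference, the general transposed-convolution size; deconv_out_size is an illustrative helper:

def deconv_out_size(h, k, p, s):
    # Inverts the forward size formula floor((h' + 2p - k) / s) + 1.
    return (h - 1) * s - 2 * p + k

# Both 31 and 32 forward-map to 16 under k=5, p=2, s=2, which is why the
# layer asks get_output_shape_for instead of assuming h * s:
assert deconv_out_size(16, k=5, p=2, s=2) == 31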
Example #10
def dnn_gradweight3D(img,
                     topgrad,
                     imshp,
                     kshp,
                     subsample,
                     border_mode='valid',
                     batchsize=None,
                     filter_flip=False):
    """
    GPU convolution gradient with respect to the weights, using cuDNN from
    NVIDIA.

    The memory layout to use is 'bc012', that is 'batch', 'channel',
    'first dim', 'second dim', 'third dim' in that order.

    :warning: The cuDNN library only works with GPUs that have a compute
      capability of 3.0 or higher. This means that older GPUs will not
      work with this Op.
    """

    if filter_flip:
        conv_mode = 'conv'
    else:
        conv_mode = 'cross'

    img = gpu_contiguous(img)
    topgrad = gpu_contiguous(topgrad)
    #Many tensor Ops run their arguments through this function as pre-processing.
    #It passes through TensorVariable instances,
    #and tries to wrap other objects into TensorConstant.
    kerns_shp = theano.tensor.as_tensor_variable(kshp)
    # Rebuild the kernel shape with batchsize in the channel slot (index 1).
    kerns_shp = [
        kerns_shp[0], batchsize, kerns_shp[2], kerns_shp[3], kerns_shp[4]
    ]
    kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)
    desc = GpuDnnConvDesc(border_mode=border_mode,
                          subsample=subsample,
                          conv_mode=conv_mode)(img.shape, kerns_shp)
    out = gpu_alloc_empty(*kerns_shp)
    return GpuDnnConv3dGradW()(img, topgrad, out, desc)
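The filter_flip branch above selects between cuDNN's two modes: 'conv' flips the kernel (true convolution) while 'cross' correlates without flipping. A minimal 1-D numpy illustration of the distinction (illustrative only, unrelated to the GPU ops):

import numpy as np

x = np.array([1., 2., 3., 4.])
k = np.array([1., 0., -1.])

cross = np.correlate(x, k, mode='valid')   # 'cross': no kernel flip
conv = np.convolve(x, k, mode='valid')     # 'conv': kernel flipped
assert np.allclose(conv, np.correlate(x, k[::-1], mode='valid'))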
Example #11
    def call(self, x, mask=None):
        """
        Sets up a dummy convolutional forward pass and uses its gradient as
        the deconvolution. Currently only tested/working with 'same' padding.
        """
        img = gpu_contiguous(x)
        kerns = gpu_contiguous(self.W)
        sr, sc = self.subsample
        out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                              img.shape[2] * sr, img.shape[3] * sc)
        desc = GpuDnnConvDesc(
            border_mode=self.border_mode, subsample=self.subsample,
            conv_mode='conv')(out.shape, kerns.shape)
        d_img = GpuDnnConvGradI()(kerns, img, out, desc)
        return d_img + K.reshape(self.b, (1, self.nb_filter, 1, 1))
Example #12
def deconv(X, w, subsample=(1, 1), border_mode=(0, 0), conv_mode='conv'):
    """
    Sets up a dummy convolutional forward pass and uses its gradient as the
    deconvolution. Currently only tested/working with 'same' padding.
    https://github.com/Newmu/dcgan_code/lib/ops.py
    """
    from theano.sandbox.cuda.basic_ops import gpu_contiguous, gpu_alloc_empty
    from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConvGradI

    img = gpu_contiguous(X)
    kerns = gpu_contiguous(w)
    out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                          img.shape[2] * subsample[0],
                          img.shape[3] * subsample[1])
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(out.shape, kerns.shape)
    d_img = GpuDnnConvGradI()(kerns, img, out, desc)
    return d_img
Example #13
    def apply(self, input_):
        if self.use_bias:
            W, b = self.parameters
        else:
            W, = self.parameters
        # Swap the channel axes so W is laid out as the dummy forward conv
        # expects.
        W = W.dimshuffle(1, 0, 2, 3)
        img = gpu_contiguous(input_)
        kerns = gpu_contiguous(W)
        out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                              img.shape[2] * self.stride[0],
                              img.shape[3] * self.stride[1])
        desc = GpuDnnConvDesc(border_mode=self.pad,
                              subsample=self.stride,
                              conv_mode='conv')(out.shape, kerns.shape)
        output = GpuDnnConvGradI()(kerns, img, out, desc)
        if self.use_bias:
            output += b.dimshuffle('x', 0, 'x', 'x')
        return output
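The dimshuffle('x', 0, 'x', 'x') used here (and in Examples #3, #5, and #11) lifts the per-channel bias to shape (1, c, 1, 1) so it broadcasts over the batch and spatial axes. A numpy analogue with illustrative shapes:

import numpy as np

out = np.zeros((2, 8, 32, 32))   # (batch, channels, height, width)
b = np.ones(8)                   # one bias per output channel
out += b.reshape(1, 8, 1, 1)     # same effect as b.dimshuffle('x', 0, 'x', 'x')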
Example #14
    def convolve(self, input, **kwargs):
        # Messy to have these imports here, but it seems to allow for
        # switching DNN off.
        from theano.sandbox.cuda.basic_ops import (gpu_contiguous,
                                                   gpu_alloc_empty)
        from theano.sandbox.cuda.dnn import GpuDnnConvDesc, GpuDnnConvGradI

        # Straight outta Radford
        img = gpu_contiguous(input)
        kerns = gpu_contiguous(self.W)
        out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                              img.shape[2] * self.stride[0],
                              img.shape[3] * self.stride[1])
        desc = GpuDnnConvDesc(border_mode=self.crop,
                              subsample=self.stride,
                              conv_mode='conv')(out.shape, kerns.shape)
        conved = GpuDnnConvGradI()(kerns, img, out, desc)

        return conved