Example #1
import theano
from theano.sandbox.cuda import dnn


def dnn_upsample_nearest(img, factor):
    """Nearest-neighbour upsampling of a (batch, channel, height, width)
    tensor, implemented via the gradient of cuDNN average pooling."""
    # `UnpoolWithGrad` is assumed to be defined elsewhere in this module
    # (a pooling-gradient op used here as an unpooling forward pass).
    assert img.ndim == 4
    if isinstance(factor, int):
        factor = (factor, factor)
    assert (len(factor) == 2) and all(isinstance(f, int) for f in factor)
    img = dnn.gpu_contiguous(img)
    s = img.shape
    # Shape of the virtual pooling input: spatial dims scaled up by `factor`.
    pool_in_shape = list(s[:2]) + [f * si for f, si in zip(factor, s[2:])]
    pool_in = dnn.gpu_alloc_empty(*pool_in_shape)
    pool_out = dnn.gpu_alloc_empty(*s)
    stride = factor
    pad = (0, 0)
    ret = UnpoolWithGrad(mode="average_inc_pad")(
        pool_in, pool_out, img, factor, stride, pad)
    # Average pooling divides by the window size; multiply it back in so
    # every input value is replicated unchanged.
    window_elem = theano.tensor.prod(factor).astype(ret.dtype)
    return dnn.as_cuda_ndarray_variable(ret * window_elem)
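
A minimal usage sketch (not part of the original snippet): it assumes the legacy theano.sandbox.cuda backend with cuDNN, floatX set to 'float32', and illustrative shapes.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')               # (batch, channels, height, width)
y = dnn_upsample_nearest(x, 2)   # upsample both spatial dims by a factor of 2
f = theano.function([x], y)      # compiling requires a cuDNN-capable GPU
inp = np.random.rand(1, 3, 8, 8).astype(theano.config.floatX)
print(f(inp).shape)              # expected: (1, 3, 16, 16)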
Example #2
from theano.tensor.nnet.abstract_conv import conv2d_grad_wrt_inputs
# `dnn` (theano.sandbox.cuda.dnn) and the boolean flag `dnn_avail` are assumed
# to be set up elsewhere in this module.


def upconv(x, w, stride, x_shape=None, w_shape=None, axis_order='dnn'):
    """Transposed ("up") convolution, implemented as the gradient of a plain
    convolution with respect to its input."""
    assert stride is not None
    stride = tuple(stride)
    conv_dim = len(stride)
    border_mode = 'valid'

    # if (x_shape is None) or (None in x_shape):  # variable batch size or so
    #     x_shape = None

    if conv_dim == 1:
        # 1-D case: append a broadcastable dummy axis and use the 2-D op.
        x = x.dimshuffle(0, 1, 2, 'x')
        w = w.dimshuffle(0, 1, 2, 'x')
        if w_shape is not None:
            w_shape = list(w_shape) + [1]
        if x_shape is not None:
            x_shape = list(x_shape) + [1]
        stride = list(stride) + [1]
        y = conv2d_grad_wrt_inputs(x,
                                   w,
                                   x_shape,
                                   w_shape,
                                   border_mode,
                                   subsample=stride,
                                   filter_flip=False)
        y = y[:, :, :, 0]  # drop the dummy axis again

    elif conv_dim == 2:
        y = conv2d_grad_wrt_inputs(x,
                                   w,
                                   x_shape,
                                   w_shape,
                                   border_mode,
                                   subsample=stride,
                                   filter_flip=False)

    elif conv_dim == 3:
        # 3-D case: use cuDNN's gradient-w.r.t.-input op directly.
        if not dnn_avail or axis_order != 'dnn':
            raise ValueError("Need dnn and dnn axis order")
        kerns = dnn.gpu_contiguous(w)
        image = dnn.gpu_contiguous(x)
        k = kerns.shape[1]
        img_sh = list(image.shape)
        # Output shape: batch, k channels, spatial dims scaled by the stride.
        out_sh = (img_sh[:1] + [k] +
                  [st * sh for st, sh in zip(stride, img_sh[2:])])
        out = dnn.gpu_alloc_empty(*out_sh)
        desc = dnn.GpuDnnConvDesc(border_mode='valid',
                                  subsample=stride,
                                  conv_mode='cross')(out.shape, kerns.shape)
        y = dnn.GpuDnnConv3dGradI()(kerns, image, out, desc)

    else:
        raise ValueError("Only 1-, 2- and 3-dimensional strides are supported")

    return y
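
A hedged usage sketch for the 2-D branch (the shapes are illustrative and chosen so that a 'valid' forward convolution with a 2x2 kernel and stride (2, 2) maps 32x32 back to 16x16; the filter layout follows conv2d_grad_wrt_inputs, i.e. (channels of x, channels of y, kh, kw)):

import theano.tensor as T

x = T.tensor4('x')                      # (batch, 16 channels, 16, 16) low-resolution map
w = T.tensor4('w')                      # (16, 8, 2, 2)
y = upconv(x, w, stride=(2, 2),
           x_shape=(None, 8, 32, 32),   # shape of the upsampled result
           w_shape=(16, 8, 2, 2))
# y: (batch, 8, 32, 32) -- each spatial dim is scaled by the corresponding stride.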
Example #3
import theano
from theano.sandbox.cuda.basic_ops import gpu_alloc_empty, gpu_contiguous
from theano.sandbox.cuda.dnn import GpuDnnConv3dGradW, GpuDnnConvDesc


def dnn_gradweight3D(img,
                     topgrad,
                     imshp,
                     kshp,
                     subsample,
                     border_mode='valid',
                     batchsize=None,
                     filter_flip=False):
    """
    GPU convolution gradient with respect to the weights, using cuDNN from
    NVIDIA.

    The memory layout to use is 'bc012', that is 'batch', 'channel' and the
    three spatial dimensions, in that order.

    :warning: The cuDNN library only works with GPUs that have a compute
      capability of 3.0 or higher.  This means that older GPUs will not
      work with this Op.
    """

    if filter_flip:
        conv_mode = 'conv'
    else:
        conv_mode = 'cross'

    img = gpu_contiguous(img)
    topgrad = gpu_contiguous(topgrad)
    # as_tensor_variable: many tensor Ops run their arguments through this
    # function as pre-processing. It passes through TensorVariable instances
    # and tries to wrap other objects into a TensorConstant.
    kerns_shp = theano.tensor.as_tensor_variable(kshp)
    # Replace the second entry of the kernel shape with the batch size.
    kerns_shp = [
        kerns_shp[0], batchsize, kerns_shp[2], kerns_shp[3], kerns_shp[4]
    ]
    kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)
    desc = GpuDnnConvDesc(border_mode=border_mode,
                          subsample=subsample,
                          conv_mode=conv_mode)(img.shape, kerns_shp)
    out = gpu_alloc_empty(*kerns_shp)
    return GpuDnnConv3dGradW()(img, topgrad, out, desc)
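
A hedged call sketch (not from the original source): the 5-D tensor types, shapes and the batch size are illustrative, and the legacy CUDA backend with cuDNN is assumed.

import theano
import theano.tensor as T

tensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
img = tensor5('img')          # (batch, in_channels, d, h, w)
topgrad = tensor5('topgrad')  # (batch, out_channels, d', h', w')
kshp = (16, 1, 3, 3, 3)       # requested kernel shape; entry 1 is overwritten with `batchsize`
dW = dnn_gradweight3D(img, topgrad, imshp=None, kshp=kshp,
                      subsample=(1, 1, 1), batchsize=4)
# Note: `imshp` is accepted but not used inside the function body.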
Example #4
    def call(self, x, mask=None):
        """
        Sets up a dummy convolutional forward pass and uses its gradient
        with respect to the input as the deconvolution.
        Currently only tested/working with 'same' padding.
        """
        # Requires the legacy theano.sandbox.cuda backend with cuDNN
        # (gpu_contiguous, gpu_alloc_empty, GpuDnnConvDesc, GpuDnnConvGradI).
        img = gpu_contiguous(x)
        kerns = gpu_contiguous(self.W)
        sr, sc = self.subsample
        # Output buffer: spatial dims scaled up by the subsampling factors.
        out = gpu_alloc_empty(img.shape[0], kerns.shape[1],
                              img.shape[2] * sr, img.shape[3] * sc)
        desc = GpuDnnConvDesc(
            border_mode=self.border_mode, subsample=self.subsample,
            conv_mode='conv')(out.shape, kerns.shape)
        d_img = GpuDnnConvGradI()(kerns, img, out, desc)
        return d_img + K.reshape(self.b, (1, self.nb_filter, 1, 1))
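
A hedged companion sketch (not from the source): the output-shape method such a Keras 1 layer would typically provide, mirroring the gpu_alloc_empty call above; the method name follows the Keras 1 convention and the attribute names are assumptions carried over from call.

    def get_output_shape_for(self, input_shape):
        # Mirrors the `gpu_alloc_empty` buffer in `call`: the deconvolution
        # output has `nb_filter` channels and each spatial dimension is
        # scaled by the corresponding subsampling factor.
        sr, sc = self.subsample
        return (input_shape[0], self.nb_filter,
                input_shape[2] * sr, input_shape[3] * sc)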