def __init__(self, nchan_in, nchan_out, filter_size, stride=1, border=0, mode='cross', init=df.init.xavier(), bias=0):
    """Create a cuDNN spatial-convolution module.

    - `nchan_in`: number of channels in the input.
    - `nchan_out`: number of filters and thus channels in the output.
    - `filter_size`: tuple describing the filter size per spatial dimension.
    - `stride`: int or tuple, step of the filters along each dimension.
    - `border`: int or tuple of zero-padding, or `'same'` as a shortcut for
      "pad so that outshape == inshape" (odd filter sizes only).
    - `mode`: `'cross'` or `'conv'`. `'cross'` is the default in Lasagne[1],
      Torch[2], matConvNet[3], Caffe[4].
    - `init`: initializer for the weights/filters.
    - `bias`: initializer for the bias, or `None`/`False` for no bias.

    1: https://github.com/Lasagne/Lasagne/blob/63d44a0d/lasagne/layers/dnn.py#L299
    2: https://github.com/soumith/cudnn.torch/blob/840f0228/SpatialConvolution.lua#L83
    3: https://github.com/vlfeat/matconvnet/blob/b7dd9c96/matlab/src/bits/impl/nnconv_cudnn.cu#L133
    4: https://github.com/BVLC/caffe/blob/50ab52cb/include/caffe/util/cudnn.hpp#L104
    """
    df.Module.__init__(self)

    # Catch a probably common bug while we transition the API.
    # Raise instead of assert so the check survives `python -O`.
    if not isinstance(filter_size, (list, tuple)):
        raise TypeError("New conv API: filter_size needs to be a tuple!")

    self.nchan_in = nchan_in
    self.nchan_out = nchan_out
    self.filter_size = filter_size
    self.mode = mode
    self.stride = expand(stride, len(filter_size), 'stride')
    self.border = expand(border, len(filter_size), 'border')

    # 'same' is a (common) shortcut for "zero-padding so that outshape == inshape".
    if self.border == 'same':
        if not all(k % 2 == 1 for k in self.filter_size):
            raise ValueError("'same' convolution only supports odd filter sizes.")
        self.border = tuple((k - 1)//2 for k in self.filter_size)

    w_shape = (nchan_out, nchan_in) + self.filter_size
    # Fan-in/fan-out for the initializer: receptive-field size times channel count.
    w_fan = (np.prod(self.filter_size)*nchan_in, np.prod(self.filter_size)*nchan_out)

    w_name = ('Wconv_{},{}@{}' + 'x{}'*(len(w_shape) - 3)).format(*w_shape)
    self.W = self._addparam(w_shape, init, fan=w_fan, name=w_name)
    self.b = self._addparam_optional(nchan_out, bias, decay=False, name='bconv_{}'.format(nchan_out))
def __init__(self, nchan_in, nchan_out, filter_size, stride=1, border='valid', mode='cross', init=df.init.xavier(), bias=0, imshape=None):
    """Create a spatial-convolution module (non-cuDNN code path).

    See `SpatialConvolutionCUDNN` for the `mode` parameter; `mode='cross'`
    only works in 2D.

    - `nchan_in`: number of channels in the input.
    - `nchan_out`: number of filters and thus channels in the output.
    - `filter_size`: tuple describing the filter size per spatial dimension.
    - `stride`: int or tuple, step of the filters. Must be 1 for 3D.
    - `border`: border mode (e.g. `'valid'`), a padding int/tuple, or
      `'same'` as a shortcut for "pad so that outshape == inshape"
      (odd filter sizes only).
    - `init`: initializer for the weights/filters.
    - `bias`: initializer for the bias, or `None`/`False` for no bias.
    - `imshape`: optional input-image shape hint; 2D only.
    """
    df.Module.__init__(self)

    # Same API guard as `SpatialConvolutionCUDNN`: raise so it survives `python -O`.
    if not isinstance(filter_size, (list, tuple)):
        raise TypeError("New conv API: filter_size needs to be a tuple!")

    self.nchan_in = nchan_in
    self.nchan_out = nchan_out
    self.filter_size = filter_size
    self.mode = mode
    self.imshape = expand(imshape, len(filter_size), 'imshape', expand_nonnum=True)
    self.stride = expand(stride, len(filter_size), 'stride')
    self.border = expand(border, len(filter_size), 'border')

    # 'same' is a (common) shortcut for "zero-padding so that outshape == inshape",
    # supported here for consistency with the other convolution constructors.
    if self.border == 'same':
        if not all(k % 2 == 1 for k in self.filter_size):
            raise ValueError("'same' convolution only supports odd filter sizes.")
        self.border = tuple((k - 1)//2 for k in self.filter_size)

    # The 3D code path is only partially implemented; reject unsupported combinations.
    if len(self.filter_size) == 3 and any(s != 1 for s in self.stride):
        raise NotImplementedError('stride != 1 is not implemented for 3D convolutions')
    if len(self.filter_size) == 3 and imshape is not None:
        raise NotImplementedError('imshape is not implemented for 3D convolutions')
    if len(self.filter_size) == 3 and mode != 'conv':
        raise NotImplementedError('mode="cross" is not implemented for 3D convolutions')
    if mode not in ('conv', 'cross'):
        raise NotImplementedError('Only "conv" and "cross" modes are implemented')

    self.w_shape = (nchan_out, nchan_in) + self.filter_size
    # Fan-in/fan-out for the initializer: receptive-field size times channel count.
    w_fan = (nchan_in*np.prod(self.filter_size), nchan_out*np.prod(self.filter_size))

    w_name = ('Wconv_{},{}@{}' + 'x{}'*(len(self.w_shape) - 3)).format(*self.w_shape)
    self.W = self._addparam(self.w_shape, init, fan=w_fan, name=w_name)
    self.b = self._addparam_optional(nchan_out, bias, decay=False, name='bconv_{}'.format(nchan_out))
def __init__(self, nchan_in, nchan_out, filter_size, stride=1, border=0, mode='cross', init=df.init.xavier(), bias=0):
    """
    This is the backwards path through a convolution, sometimes also
    referred to as transposed convolution and (wrongly) deconvolution.
    This is usually used for upsampling an image.

    If you want the exact counterpart to another convolution earlier in
    your model, consider using the `backward` function with that
    convolution instead.

    - `nchan_in`: number of channels in the input.
    - `nchan_out`: number of filters and thus channels in the output.
    - `filter_size`: 2D or 3D tuple describing the filter size.
    - `stride`: the stride "dilates" the output, i.e. makes it larger.
    - `border`: The counterpart to `border` in forward convolution.
      This effectively crops the output, as opposed to padding it.
      May be `'same'`, analogous to forward convolution.
    - `mode`: `'cross'` or `'conv'`, see forward convolution documentation.
    - `init`: initializer for the weights/filters.
    - `bias`: initializer for the bias, or `None` or `False`.
    """
    df.Module.__init__(self)

    self.nchan_in = nchan_in
    self.nchan_out = nchan_out
    self.filter_size = filter_size
    self.mode = mode
    self.stride = expand(stride, len(filter_size), 'stride')
    self.border = expand(border, len(filter_size), 'border')

    # 'same' is a (common) shortcut for "zero-padding so that outshape == inshape".
    # Raise instead of assert so the check survives `python -O`.
    if self.border == 'same':
        if not all(k % 2 == 1 for k in self.filter_size):
            raise ValueError("'same' convolution only supports odd filter sizes.")
        self.border = tuple((k - 1)//2 for k in self.filter_size)

    # Note the swapped channel order relative to forward convolution: the
    # weights are those of the "forward" conv whose backward pass this is.
    w_shape = (nchan_in, nchan_out) + self.filter_size
    w_fan = (np.prod(self.filter_size)*nchan_out, np.prod(self.filter_size)*nchan_in)

    w_name = ('Wconv_{},{}@{}' + 'x{}'*(len(w_shape) - 3)).format(*w_shape)
    self.W = self._addparam(w_shape, init, fan=w_fan, name=w_name)
    self.b = self._addparam_optional(nchan_out, bias, decay=False, name='bconv_{}'.format(nchan_out))