def __init__(self, filters, batch_size, input_space,
             output_axes=('b', 'c', 0, 1), subsample=(1, 1),
             border_mode='valid', filters_shape=None, message=''):
    """
    Set up a cuDNN-backed 2D convolution.

    Parameters
    ----------
    filters : theano shared variable
        Filter bank; its value's shape is read to configure the parent.
    batch_size : int or None
        Number of examples per batch; must be positive when given.
    input_space : Space
        Describes the input's shape, channel count and axis order.
    output_axes : tuple, optional
        Axis order of the produced output (default ``('b', 'c', 0, 1)``).
    subsample : tuple, optional
        Stride of the convolution along each spatial dimension.
    border_mode : str, optional
        Border mode forwarded to the convolution descriptor.
    filters_shape : tuple, optional
        Accepted for interface compatibility; NOTE: it is ignored — the
        shape passed to the parent is always read off ``filters`` itself.
    message : str, optional
        Forwarded verbatim to the parent constructor.
    """
    assert batch_size is None or batch_size > 0

    self._input_space = input_space
    self._output_axes = output_axes
    self._subsample = tuple(subsample)
    self._border_mode = border_mode

    # Image shape in the ('b', 'c', 0, 1) layout the parent expects.
    img_shape = (batch_size,
                 input_space.num_channels,
                 input_space.shape[0],
                 input_space.shape[1])
    super(Cudnn2D, self).__init__(
        filters=filters,
        img_shape=img_shape,
        subsample=self._subsample,
        border_mode=border_mode,
        filters_shape=filters.get_value(borrow=True).shape,
        message=message)

    # Swap the parent's convolution op for the cuDNN implementation.
    self._conv_op = GpuDnnConv()
    self._desc = GpuDnnConvDesc(border_mode=border_mode,
                                subsample=self._subsample,
                                conv_mode='conv')
def get_output_shape(self):
    """
    Return this layer's output shape as computed by cuDNN.

    The kernel shape is assembled from the layer's configuration and the
    convolution is assumed to use 'valid' borders.
    """
    in_shape = self.input_layer.get_output_shape()
    kern_shape = (self.n_features, self.n_channels,
                  self.filter_size, self.filter_size)
    return GpuDnnConv.get_out_shape(in_shape, kern_shape,
                                    'valid', self.stride)
def get_dim(self, name):
    """
    Return the dimensions of the named input/output of this brick.

    For ``"output"``: when both spatial entries of ``self.input_dim`` are
    concrete (non-string) sizes, the exact output shape is computed by
    cuDNN's shape inference; when either one is a string placeholder
    (unknown symbolic size), the spatial dims are passed through
    unchanged. Any other ``name`` is delegated to the parent class.

    Parameters
    ----------
    name : str
        Name of the dimension being queried.

    Returns
    -------
    tuple
        ``(num_filters, height, width)`` for ``"output"``; otherwise
        whatever the parent's ``get_dim`` returns.
    """
    if name != "output":
        return super(Conv1D, self).get_dim(name)

    height, width = self.input_dim[1], self.input_dim[2]
    # Use isinstance rather than comparing type objects directly.
    if isinstance(height, str) or isinstance(width, str):
        # TODO manage the case where only one of the two is a str
        return (self.num_filters, height, width)

    # 'x' marks the broadcastable (singleton) channel axis.
    ishape = (self.input_dim[0], 'x', height, width)
    kshape = (self.num_filters, 'x',
              self.filter_size[0], self.filter_size[1])
    oshape = GpuDnnConv.get_out_shape(ishape, kshape,
                                      self.pad, self.stride)
    return (oshape[1], oshape[2], oshape[3])
def lmul(self, x):
    """
    .. todo::

        WRITEME properly

    dot(x, A)

    This method overrides the original Conv2D lmul to make it work
    with arbitrary axis orders

    Parameters
    ----------
    x : TODO
        4-tensor laid out according to ``self._input_space.axes``.
    """
    canonical = ('b', 'c', 0, 1)

    # cuDNN wants (batch, channel, row, col); shuffle the input if its
    # space uses a different axis order.
    assert x.ndim == 4
    in_axes = self._input_space.axes
    assert len(in_axes) == 4
    if tuple(in_axes) != canonical:
        x = x.dimshuffle(*[in_axes.index(axis) for axis in canonical])

    # Build the cuDNN call: contiguous inputs, preallocated output,
    # and a convolution descriptor for the symbolic shapes.
    image = gpu_contiguous(x)
    kernels = gpu_contiguous(self._filters)
    out_shape = GpuDnnConv.get_out_shape(
        image.shape, kernels.shape, self._border_mode, self._subsample)
    output = gpu_alloc_empty(*out_shape)
    descriptor = self._desc(image.shape, kernels.shape)
    output = self._conv_op(image, kernels, output, descriptor)

    # Shuffle the result into the requested output axis order.
    out_axes = self._output_axes
    assert len(out_axes) == 4
    if tuple(out_axes) != canonical:
        output = output.dimshuffle(
            *[canonical.index(axis) for axis in out_axes])
    return output
def lmul(self, x):
    """
    .. todo::

        WRITEME properly

    dot(x, A)

    This method overrides the original Conv2D lmul to make it work
    with arbitrary axis orders

    Parameters
    ----------
    x : TODO
        4-tensor laid out according to ``self._input_space.axes``.
    """
    # Reference layout required by the cuDNN op.
    op_axes = ('b', 'c', 0, 1)

    assert x.ndim == 4
    source_axes = self._input_space.axes
    assert len(source_axes) == 4
    if tuple(source_axes) != op_axes:
        # Permute the input into (batch, channel, row, col) order.
        x = x.dimshuffle(*[source_axes.index(a) for a in op_axes])

    img = gpu_contiguous(x)
    kerns = gpu_contiguous(self._filters)
    rval = gpu_alloc_empty(
        *GpuDnnConv.get_out_shape(img.shape, kerns.shape,
                                  self._border_mode, self._subsample))
    rval = self._conv_op(img, kerns, rval,
                         self._desc(img.shape, kerns.shape))

    # Permute the result into the output space's axis order if needed.
    target_axes = self._output_axes
    assert len(target_axes) == 4
    if tuple(target_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(a) for a in target_axes])
    return rval
def get_output_shape(self):
    """
    Return the output shape cuDNN infers from the input layer's shape,
    this layer's filter shape, its padding, and its stride.
    """
    return GpuDnnConv.get_out_shape(self.input_layer.get_output_shape(),
                                    self.filter_shape,
                                    self.pad,
                                    self.stride)