def lmul(self, x):
    """
    dot(x, A) aka, do 3D convolution with input video x.

    Parameters
    ----------
    x : theano tensor
        5-tensor of video data laid out according to ``self.input_axes``.

    Returns
    -------
    theano tensor
        5-tensor of filter responses laid out according to
        ``self.output_axes``.
    """
    check_cuda(str(type(self)) + ".lmul")

    # If the input arrived on the CPU, move it to the GPU for FilterActs
    # and move the result back before returning.
    cpu = "Cuda" not in str(type(x))
    if cpu:
        x = gpu_from_host(x)

    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5

    # FilterActs expects (channel, row, col, time, batch) ordering.
    op_axes = ("c", 0, 1, "t", "b")
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    # Collapse the time and batch axes together so the 3D signal can be
    # pushed through the 2D FilterActs op.
    _x_4d_shape = (
        self.signal_shape[0],
        self.signal_shape[1],
        self.signal_shape[2],
        self.signal_shape[3] * self.signal_shape[4],
    )
    x = x.reshape(_x_4d_shape)
    x = gpu_contiguous(x)

    rval = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])(
        x, self._filters
    )

    if cpu:
        rval = host_from_gpu(rval)

    # Unfold the collapsed time/batch axes, then sum along a diagonal
    # subtensor over the filter-time axis to realize the 3D convolution.
    rval = rval.reshape(
        (
            self.filter_shape[3],
            self.filter_shape[4],
            rval.shape[1],
            rval.shape[2],
            self.signal_shape[3],
            self.signal_shape[4],
        )
    )
    rval = diagonal_subtensor(rval, 4, 0).sum(axis=0)

    # Format the output based on the output space.
    rval_axes = self.output_axes
    assert len(rval_axes) == 5
    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis) for axis in rval_axes])

    return rval
def lmul(self, x):
    """
    dot(x, A) aka, do 3D convolution with input video x.

    Parameters
    ----------
    x : theano tensor
        5-tensor of video data laid out according to ``self.input_axes``.

    Returns
    -------
    theano tensor
        5-tensor of filter responses laid out according to
        ``self.output_axes``.
    """
    check_cuda(str(type(self)) + ".lmul")

    # Inputs built on the CPU are transferred to the GPU for FilterActs;
    # the result is transferred back before returning.
    cpu = 'Cuda' not in str(type(x))
    if cpu:
        x = gpu_from_host(x)

    assert x.ndim == 5
    x_axes = self.input_axes
    assert len(x_axes) == 5

    # FilterActs expects (channel, row, col, time, batch) ordering.
    op_axes = ('c', 0, 1, 't', 'b')
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])

    # Fold time and batch into one axis so the 3D signal can be run
    # through the 2D FilterActs op.
    _x_4d_shape = (self.signal_shape[0],
                   self.signal_shape[1],
                   self.signal_shape[2],
                   self.signal_shape[3] * self.signal_shape[4])
    x = x.reshape(_x_4d_shape)
    x = gpu_contiguous(x)

    rval = FilterActs(self.pad, self.partial_sum,
                      self.kernel_stride[0])(x, self._filters)

    if cpu:
        rval = host_from_gpu(rval)

    # Unfold the time/batch axes, then sum along a diagonal subtensor
    # over the filter-time axis to realize the 3D convolution.
    rval = rval.reshape(
        (self.filter_shape[3],
         self.filter_shape[4],
         rval.shape[1],
         rval.shape[2],
         self.signal_shape[3],
         self.signal_shape[4]))
    rval = diagonal_subtensor(rval, 4, 0).sum(axis=0)

    # Format the output based on the output space.
    rval_axes = self.output_axes
    assert len(rval_axes) == 5
    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(
            *[op_axes.index(axis) for axis in rval_axes])

    return rval
def lmul(self, x):
    """
    .. todo::

        WRITEME properly

    dot(x, A) aka, do convolution with input image x
    """
    check_cuda(str(type(self)) + ".lmul")

    on_host = 'Cuda' not in str(type(x))
    if on_host:
        x = gpu_from_host(x)

    # FilterActs wants (channel, topo dim 0, topo dim 1, batch) ordering.
    assert x.ndim == 4
    input_axes = self.input_axes
    assert len(input_axes) == 4
    op_axes = ('c', 0, 1, 'b')
    if tuple(input_axes) != op_axes:
        shuffle_pattern = [input_axes.index(ax) for ax in op_axes]
        x = x.dimshuffle(*shuffle_pattern)

    x = gpu_contiguous(x)

    # Patch old pickle files that predate the kernel_stride attribute.
    if not hasattr(self, 'kernel_stride'):
        self.kernel_stride = (1, 1)

    conv_op = FilterActs(self.pad, self.partial_sum, self.kernel_stride[0])
    rval = conv_op(x, self._filters)

    # Format the output based on the output space.
    output_axes = self.output_axes
    assert len(output_axes) == 4
    if on_host:
        rval = host_from_gpu(rval)
    if tuple(output_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(ax) for ax in output_axes])

    return rval
def lmul(self, x):
    """
    dot(x, A) aka, do convolution with input image x
    """
    is_cpu_input = 'Cuda' not in str(type(x))
    if is_cpu_input:
        x = gpu_from_host(x)

    # x must be formatted as channel, topo dim 0, topo dim 1, batch_index
    # for use with FilterActs
    assert x.ndim == 4
    axes_in = self.input_axes
    assert len(axes_in) == 4
    op_axes = ('c', 0, 1, 'b')
    if tuple(axes_in) != op_axes:
        x = x.dimshuffle(*[axes_in.index(a) for a in op_axes])

    x = gpu_contiguous(x)
    rval = FilterActs(self.pad, self.partial_sum)(x, self._filters)

    # Re-order the output to match self.output_axes.
    axes_out = self.output_axes
    assert len(axes_out) == 4
    if tuple(axes_out) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(a) for a in axes_out])
    if is_cpu_input:
        rval = host_from_gpu(rval)

    return rval
def lmul(self, x):
    """
    dot(x, A) aka, do convolution with input image x
    """
    def _perm(src, dst):
        # Index permutation that re-orders axis tuple ``src`` into ``dst``.
        return [src.index(a) for a in dst]

    came_from_cpu = 'Cuda' not in str(type(x))
    if came_from_cpu:
        x = gpu_from_host(x)

    # FilterActs needs channel, topo dim 0, topo dim 1, batch ordering.
    assert x.ndim == 4
    op_axes = ('c', 0, 1, 'b')
    x_axes = self.input_axes
    assert len(x_axes) == 4
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*_perm(x_axes, op_axes))

    rval = FilterActs(self.pad, self.partial_sum)(
        gpu_contiguous(x), self._filters)

    # Shuffle the result into the layout the output space declares.
    out_axes = self.output_axes
    assert len(out_axes) == 4
    if tuple(out_axes) != op_axes:
        rval = rval.dimshuffle(*_perm(op_axes, out_axes))
    if came_from_cpu:
        rval = host_from_gpu(rval)

    return rval