def test_make_random_local(self):
    """
    Build a random locally-connected transform and check that applying
    it to the test image produces a sensible output shape.
    """
    transform = make_random_local(
        1, 16, ('c', 0, 1, 'b'), 1,
        (3, 3), 16, ('c', 0, 1, 'b'), (2, 2))
    apply_fn = theano.function(
        [self.image_tensor],
        transform.lmul(self.image_tensor))
    assert apply_fn(self.image).shape == (16, 2, 2, 1)
def set_input_space(self, space): """ Note: this resets parameters! """ self.input_space = space if not isinstance(self.input_space, Conv2DSpace): raise TypeError( "The input to a convolutional layer should be a Conv2DSpace, " " but layer " + self.layer_name + " got " + str(type(self.input_space)) ) # note: I think the desired space thing is actually redundant, # since LinearTransform will also dimshuffle the axes if needed # It's not hurting anything to have it here but we could reduce # code complexity by removing it self.desired_space = Conv2DSpace(shape=space.shape, channels=space.num_channels, axes=("c", 0, 1, "b")) ch = self.desired_space.num_channels rem = ch % 4 if ch > 3 and rem != 0: self.dummy_channels = 4 - rem else: self.dummy_channels = 0 self.dummy_space = Conv2DSpace( shape=space.shape, channels=space.num_channels + self.dummy_channels, axes=("c", 0, 1, "b") ) rng = self.mlp.rng output_shape = [ self.input_space.shape[0] + 2 * self.pad - self.kernel_shape[0] + 1, self.input_space.shape[1] + 2 * self.pad - self.kernel_shape[1] + 1, ] def handle_kernel_shape(idx): if self.kernel_shape[idx] < 1: raise ValueError( "kernel must have strictly positive size on all axes but has shape: " + str(self.kernel_shape) ) if output_shape[idx] <= 0: if self.fix_kernel_shape: self.kernel_shape[idx] = self.input_space.shape[idx] + 2 * self.pad assert self.kernel_shape[idx] != 0 output_shape[idx] = 1 warnings.warn("Had to change the kernel shape to make network feasible") else: raise ValueError("kernel too big for input (even with zero padding)") map(handle_kernel_shape, [0, 1]) self.detector_space = Conv2DSpace( shape=output_shape, num_channels=self.detector_channels, axes=("c", 0, 1, "b") ) if self.pool_shape is not None: def handle_pool_shape(idx): if self.pool_shape[idx] < 1: raise ValueError("bad pool shape: " + str(self.pool_shape)) if self.pool_shape[idx] > output_shape[idx]: if self.fix_pool_shape: assert output_shape[idx] > 0 self.pool_shape[idx] = 
output_shape[idx] else: raise ValueError("Pool shape exceeds detector layer shape on axis %d" % idx) map(handle_pool_shape, [0, 1]) assert self.pool_shape[0] == self.pool_shape[1] assert self.pool_stride[0] == self.pool_stride[1] assert all(isinstance(elem, py_integer_types) for elem in self.pool_stride) if self.pool_stride[0] > self.pool_shape[0]: if self.fix_pool_stride: warnings.warn("Fixing the pool stride") ps = self.pool_shape[0] assert isinstance(ps, py_integer_types) self.pool_stride = [ps, ps] else: raise ValueError("Stride too big.") assert all(isinstance(elem, py_integer_types) for elem in self.pool_stride) if self.irange is not None: self.transformer = local_c01b.make_random_local( input_groups=self.input_groups, irange=self.irange, input_axes=self.desired_space.axes, image_shape=self.desired_space.shape, output_axes=self.detector_space.axes, input_channels=self.dummy_space.num_channels, output_channels=self.detector_space.num_channels, kernel_shape=self.kernel_shape, kernel_stride=self.kernel_stride, pad=self.pad, partial_sum=self.partial_sum, rng=rng, ) W, = self.transformer.get_params() W.name = "W" if self.tied_b: self.b = sharedX(np.zeros((self.detector_space.num_channels)) + self.init_bias) else: self.b = sharedX(self.detector_space.get_origin() + self.init_bias) self.b.name = "b" print "Input shape: ", self.input_space.shape print "Detector space: ", self.detector_space.shape assert self.detector_space.num_channels >= 16 if self.pool_shape is None: self.output_space = Conv2DSpace( shape=self.detector_space.shape, num_channels=self.num_channels, axes=("c", 0, 1, "b") ) else: dummy_detector = sharedX(self.detector_space.get_origin_batch(2)[0:16, :, :, :]) dummy_p = max_pool_c01b( c01b=dummy_detector, pool_shape=self.pool_shape, pool_stride=self.pool_stride, image_shape=self.detector_space.shape, ) dummy_p = dummy_p.eval() self.output_space = Conv2DSpace( shape=[dummy_p.shape[1], dummy_p.shape[2]], num_channels=self.num_channels, axes=("c", 0, 1, 
"b") ) print "Output space: ", self.output_space.shape