def initialize_transformer(self, rng):
    """
    Initialize (or re-initialize) the convolutional transformer.

    Re-running this method resets the transformer's weights.

    Parameters
    ----------
    rng : object
        Random number generator object.
    """
    # Keyword arguments shared by both initialization schemes.
    common_kwargs = dict(
        input_space=self.input_space,
        output_space=self.detector_space,
        kernel_shape=self.kernel_shape,
        subsample=self.kernel_stride,
        border_mode=self.border_mode,
        rng=rng)
    if self.irange is not None:
        # Uniform initialization; the two schemes are mutually exclusive.
        assert self.sparse_init is None
        self.transformer = conv2d.make_random_conv2D(
            irange=self.irange, **common_kwargs)
    elif self.sparse_init is not None:
        # Sparse initialization: only `sparse_init` nonzero entries per unit.
        self.transformer = conv2d.make_sparse_random_conv2D(
            num_nonzero=self.sparse_init, **common_kwargs)
def initialize_transformer(self, rng):
    """
    This function initializes the transformer of the class. Re-running
    this function will reset the transformer.

    X is how I generally call the sparse code variables. Thus, X_space
    has its dimensions.

    Parameters
    ----------
    rng : object
        random number generator object.
    """
    if self.irange is not None:
        assert self.sparse_init is None
        # BUG FIX: this branch previously read `self.x_space` (lowercase),
        # while the sparse_init branch below and the docstring use
        # `self.X_space`; Python attribute names are case-sensitive, so the
        # lowercase spelling would raise AttributeError.
        # NOTE(review): this branch targets
        # `self.input_space.components[0]` while the sparse branch targets
        # `self.detector_space` -- confirm the asymmetry is intentional.
        self.transformer = conv2d.make_random_conv2D(
            irange=self.irange,
            input_space=self.X_space,
            output_space=self.input_space.components[0],
            kernel_shape=self.kernel_shape,
            subsample=self.kernel_stride,
            border_mode=self.border_mode,
            rng=rng)
    elif self.sparse_init is not None:
        self.transformer = conv2d.make_sparse_random_conv2D(
            num_nonzero=self.sparse_init,
            input_space=self.X_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            subsample=self.kernel_stride,
            border_mode=self.border_mode,
            rng=rng)
def set_input_space(self, space): """ Note: this resets parameters! """ self.input_space = space rng = self.mlp.rng if self.border_mode == 'valid': output_shape = [ (self.input_space.shape[0] - self.kernel_shape[0]) / self.kernel_stride[0] + 1, (self.input_space.shape[1] - self.kernel_shape[1]) / self.kernel_stride[1] + 1 ] elif self.border_mode == 'full': output_shape = [ (self.input_space.shape[0] + self.kernel_shape[0]) / self.kernel_stride[0] - 1, (self.input_space.shape[1] + self.kernel_shape[1]) / self.kernel_stride_stride[1] - 1 ] self.output_space = self.detector_space = Conv2DSpace( shape=output_shape, num_channels=self.output_channels, axes=('b', 'c', 0, 1)) if self.irange is not None: assert self.sparse_init is None self.transformer = conv2d.make_random_conv2D( irange=self.irange, input_space=self.input_space, output_space=self.detector_space, kernel_shape=self.kernel_shape, batch_size=self.mlp.batch_size, subsample=self.kernel_stride, border_mode=self.border_mode, rng=rng) elif self.sparse_init is not None: self.transformer = conv2d.make_sparse_random_conv2D( num_nonzero=self.sparse_init, input_space=self.input_space, output_space=self.detector_space, kernel_shape=self.kernel_shape, batch_size=self.mlp.batch_size, subsample=self.kernel_stride, border_mode=self.border_mode, rng=rng) W, = self.transformer.get_params() W.name = 'W' self.b = sharedX(self.detector_space.get_origin() + self.init_bias) self.b.name = 'b' print 'Input shape: ', self.input_space.shape print 'Output space: ', self.output_space.shape
def set_input_space(self, space): """ Note: this resets parameters! """ self.input_space = space rng = self.mlp.rng if self.border_mode == 'valid': output_shape = [(self.input_space.shape[0] - self.kernel_shape[0]) / self.kernel_stride[0] + 1, (self.input_space.shape[1] - self.kernel_shape[1]) / self.kernel_stride[1] + 1] elif self.border_mode == 'full': output_shape = [(self.input_space.shape[0] + self.kernel_shape[0]) / self.kernel_stride[0] - 1, (self.input_space.shape[1] + self.kernel_shape[1]) / self.kernel_stride_stride[1] - 1] self.output_space = self.detector_space = Conv2DSpace(shape=output_shape, num_channels = self.output_channels, axes = ('b', 'c', 0, 1)) if self.irange is not None: assert self.sparse_init is None self.transformer = conv2d.make_random_conv2D( irange = self.irange, input_space = self.input_space, output_space = self.detector_space, kernel_shape = self.kernel_shape, batch_size = self.mlp.batch_size, subsample = self.kernel_stride, border_mode = self.border_mode, rng = rng) elif self.sparse_init is not None: self.transformer = conv2d.make_sparse_random_conv2D( num_nonzero = self.sparse_init, input_space = self.input_space, output_space = self.detector_space, kernel_shape = self.kernel_shape, batch_size = self.mlp.batch_size, subsample = self.kernel_stride, border_mode = self.border_mode, rng = rng) W, = self.transformer.get_params() W.name = 'W' self.b = sharedX(self.detector_space.get_origin() + self.init_bias) self.b.name = 'b' print 'Input shape: ', self.input_space.shape print 'Output space: ', self.output_space.shape
def set_input_space(self, space):
    """
    Tell the layer to use the specified input space.

    Resets the layer's parameters.

    Parameters
    ----------
    space : Conv2DSpace
        The space the layer's input will live in; rejected otherwise.
    """
    self.input_space = space
    if not isinstance(space, Conv2DSpace):
        raise BadInputSpaceError("ConvRectifiedLinear.set_input_space "
                                 "expected a Conv2DSpace, got " +
                                 str(space) +
                                 " of type " + str(type(space)))
    rng = self.mlp.rng
    # Output spatial extent of the convolution for each border mode.
    # NOTE(review): any other border_mode leaves `output_shape` unbound
    # and raises NameError below -- confirm only these two modes occur.
    if self.border_mode == 'valid':
        output_shape = [
            (self.input_space.shape[0] - self.kernel_shape[0]) /
            self.kernel_stride[0] + 1,
            (self.input_space.shape[1] - self.kernel_shape[1]) /
            self.kernel_stride[1] + 1
        ]
    elif self.border_mode == 'full':
        output_shape = [
            (self.input_space.shape[0] + self.kernel_shape[0]) /
            self.kernel_stride[0] - 1,
            (self.input_space.shape[1] + self.kernel_shape[1]) /
            self.kernel_stride[1] - 1
        ]
    self.detector_space = Conv2DSpace(shape=output_shape,
                                      num_channels=self.output_channels,
                                      axes=('b', 'c', 0, 1))
    # Build a convolutional transformer (uniform or sparse init).
    # NOTE(review): this transformer is discarded below when
    # `self.transformer` is reassigned to a MatrixMul -- only its W is
    # briefly named; confirm the conv construction is still needed.
    if self.irange is not None:
        assert self.sparse_init is None
        self.transformer = conv2d.make_random_conv2D(
            irange=self.irange,
            input_space=self.input_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            batch_size=self.mlp.batch_size,
            subsample=self.kernel_stride,
            border_mode=self.border_mode,
            rng=rng)
    elif self.sparse_init is not None:
        self.transformer = conv2d.make_sparse_random_conv2D(
            num_nonzero=self.sparse_init,
            input_space=self.input_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            batch_size=self.mlp.batch_size,
            subsample=self.kernel_stride,
            border_mode=self.border_mode,
            rng=rng)
    W, = self.transformer.get_params()
    W.name = 'W'
    # Bias vector sized num_pieces * output_channels (maxout-style pieces).
    self.b = sharedX(
        np.zeros(((self.num_pieces * self.output_channels), )) +
        self.init_bias)
    self.b.name = 'b'
    print 'Input shape: ', self.input_space.shape
    print 'Detector space: ', self.detector_space.shape
    assert self.pool_type in ['max', 'mean']
    # Use a placeholder batch size when the MLP does not fix one.
    dummy_batch_size = self.mlp.batch_size
    if dummy_batch_size is None:
        dummy_batch_size = 2
    # NOTE(review): `dummy_detector` is never used after this point
    # (the pooling that once consumed it is commented out below).
    dummy_detector = sharedX(
        self.detector_space.get_origin_batch(dummy_batch_size))
    #dummy_p = dummy_p.eval()
    # NOTE(review): output shape [400, 1] and the weight-matrix input
    # dimension 426 are hard-coded -- presumably tied to a specific
    # experiment's geometry; confirm before reuse.
    self.output_space = Conv2DSpace(shape=[400, 1],
                                    num_channels=self.output_channels,
                                    axes=('b', 'c', 0, 1))
    W = rng.uniform(-self.irange, self.irange,
                    (426, (self.num_pieces * self.output_channels)))
    W = sharedX(W)
    W.name = self.layer_name + "_w"
    # Replace the conv transformer with a plain matrix multiplication.
    self.transformer = MatrixMul(W)
    print 'Output space: ', self.output_space.shape
def set_input_space(self, space):
    """
    Tell the layer to use the specified input space.

    Note: this resets parameters!

    Parameters
    ----------
    space : Space
        The space the layer's input will live in.
    """
    self.input_space = space
    rng = self.mlp.rng
    # Output spatial extent of the convolution (stride is fixed at 1
    # below, so no stride division here).
    # NOTE(review): any other border_mode leaves `output_shape` unbound
    # and raises NameError below -- confirm only these two modes occur.
    if self.border_mode == "valid":
        output_shape = [
            self.input_space.shape[0] - self.kernel_shape[0] + 1,
            self.input_space.shape[1] - self.kernel_shape[1] + 1,
        ]
    elif self.border_mode == "full":
        output_shape = [
            self.input_space.shape[0] + self.kernel_shape[0] - 1,
            self.input_space.shape[1] + self.kernel_shape[1] - 1,
        ]
    self.detector_space = Conv2DSpace(shape=output_shape,
                                      num_channels=self.output_channels,
                                      axes=("b", "c", 0, 1))
    # Build the convolutional transformer (uniform or sparse init);
    # the two schemes are mutually exclusive.
    if self.irange is not None:
        assert self.sparse_init is None
        self.transformer = conv2d.make_random_conv2D(
            irange=self.irange,
            input_space=self.input_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            batch_size=self.mlp.batch_size,
            subsample=(1, 1),
            border_mode=self.border_mode,
            rng=rng,
        )
    elif self.sparse_init is not None:
        self.transformer = conv2d.make_sparse_random_conv2D(
            num_nonzero=self.sparse_init,
            input_space=self.input_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            batch_size=self.mlp.batch_size,
            subsample=(1, 1),
            border_mode=self.border_mode,
            rng=rng,
        )
    W, = self.transformer.get_params()
    W.name = "W"
    # One bias per detector unit, offset by the configured initial bias.
    self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
    self.b.name = "b"
    print "Input shape: ", self.input_space.shape
    print "Detector space: ", self.detector_space.shape
    # NOTE(review): this check runs *after* the transformer above was
    # already built with batch_size=self.mlp.batch_size -- confirm the
    # ordering is intentional.
    if self.mlp.batch_size is None:
        raise ValueError(
            "Tried to use a convolutional layer with an MLP that has "
            "no batch size specified. You must specify the batch size of the "
            "model because theano requires the batch size to be known at "
            "graph construction time for convolution."
        )
    assert self.pool_type in ["max", "mean"]
    # Run a dummy batch through the pooling op to discover the pooled
    # spatial shape (evaluated eagerly below).
    dummy_detector = sharedX(self.detector_space.get_origin_batch(self.mlp.batch_size))
    if self.pool_type == "max":
        # NOTE(review): max_pool receives an `output_channels` kwarg that
        # mean_pool does not -- verify both signatures accept what they
        # are given.
        dummy_p = max_pool(
            bc01=dummy_detector,
            pool_shape=self.pool_shape,
            pool_stride=self.pool_stride,
            image_shape=self.detector_space.shape,
            output_channels=self.output_channels,
        )
    elif self.pool_type == "mean":
        dummy_p = mean_pool(
            bc01=dummy_detector,
            pool_shape=self.pool_shape,
            pool_stride=self.pool_stride,
            image_shape=self.detector_space.shape,
        )
    dummy_p = dummy_p.eval()
    # NOTE(review): num_channels is hard-coded to 1 here; the
    # commented-out alternative was self.output_channels -- confirm
    # which is intended.
    self.tmp_output_space = Conv2DSpace(
        shape=[dummy_p.shape[2], dummy_p.shape[3]],
        num_channels=1,
        axes=("b", "c", 0, 1)  # self.output_channels,
    )
    # When cropping the border, the layer's output keeps the input's
    # geometry; otherwise it is the pooled geometry computed above.
    if self.crop_border:
        self.output_space = self.input_space
    else:
        self.output_space = self.tmp_output_space
    print "Output space: ", self.output_space.shape
def set_input_space(self, space):
    """
    Tell the layer to use the specified input space.

    Resets the layer's parameters.

    Parameters
    ----------
    space : Conv2DSpace
        The space the layer's input will live in; rejected otherwise.
    """
    self.input_space = space
    if not isinstance(space, Conv2DSpace):
        raise BadInputSpaceError("ConvRectifiedLinear.set_input_space "
                                 "expected a Conv2DSpace, got " +
                                 str(space) + " of type " + str(type(space)))
    rng = self.mlp.rng
    # Output spatial extent of the strided convolution per border mode.
    # NOTE(review): any other border_mode leaves `output_shape` unbound
    # and raises NameError below -- confirm only these two modes occur.
    if self.border_mode == 'valid':
        output_shape = [(self.input_space.shape[0]-self.kernel_shape[0]) /
                        self.kernel_stride[0] + 1,
                        (self.input_space.shape[1]-self.kernel_shape[1]) /
                        self.kernel_stride[1] + 1]
    elif self.border_mode == 'full':
        output_shape = [(self.input_space.shape[0]+self.kernel_shape[0]) /
                        self.kernel_stride[0] - 1,
                        (self.input_space.shape[1]+self.kernel_shape[1]) /
                        self.kernel_stride[1] - 1]
    self.detector_space = Conv2DSpace(shape=output_shape,
                                      num_channels=self.output_channels,
                                      axes=('b', 'c', 0, 1))
    # Build a convolutional transformer (uniform or sparse init).
    # NOTE(review): this transformer is discarded below when
    # `self.transformer` is reassigned to a MatrixMul -- only its W is
    # briefly named; confirm the conv construction is still needed.
    if self.irange is not None:
        assert self.sparse_init is None
        self.transformer = conv2d.make_random_conv2D(
            irange=self.irange,
            input_space=self.input_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            batch_size=self.mlp.batch_size,
            subsample=self.kernel_stride,
            border_mode=self.border_mode,
            rng=rng)
    elif self.sparse_init is not None:
        self.transformer = conv2d.make_sparse_random_conv2D(
            num_nonzero=self.sparse_init,
            input_space=self.input_space,
            output_space=self.detector_space,
            kernel_shape=self.kernel_shape,
            batch_size=self.mlp.batch_size,
            subsample=self.kernel_stride,
            border_mode=self.border_mode,
            rng=rng)
    W, = self.transformer.get_params()
    W.name = 'W'
    # Bias vector sized num_pieces * output_channels (maxout-style pieces).
    self.b = sharedX(np.zeros(((self.num_pieces*self.output_channels),)) +
                     self.init_bias)
    self.b.name = 'b'
    print 'Input shape: ', self.input_space.shape
    print 'Detector space: ', self.detector_space.shape
    assert self.pool_type in ['max', 'mean']
    # Use a placeholder batch size when the MLP does not fix one.
    dummy_batch_size = self.mlp.batch_size
    if dummy_batch_size is None:
        dummy_batch_size = 2
    # NOTE(review): `dummy_detector` is never used after this point
    # (the pooling that once consumed it is commented out below).
    dummy_detector = sharedX(
        self.detector_space.get_origin_batch(dummy_batch_size))
    #dummy_p = dummy_p.eval()
    # NOTE(review): output shape [1, 1] and the weight-matrix input
    # dimension 426 are hard-coded -- presumably tied to a specific
    # experiment's geometry; confirm before reuse.
    self.output_space = Conv2DSpace(shape=[1, 1],
                                    num_channels=self.output_channels,
                                    axes=('b', 'c', 0, 1))
    W = rng.uniform(-self.irange,self.irange,(426, (self.num_pieces*self.output_channels)))
    W = sharedX(W)
    W.name = self.layer_name + "_w"
    # Replace the conv transformer with a plain matrix multiplication.
    self.transformer = MatrixMul(W)
    print 'Output space: ', self.output_space.shape
def set_input_space(self, space): """ Note: this resets parameters! """ self.input_space = space rng = self.mlp.rng if self.border_mode == 'valid': output_shape = [ self.input_space.shape[0] - self.kernel_shape[0] + 1, self.input_space.shape[1] - self.kernel_shape[1] + 1 ] elif self.border_mode == 'full': output_shape = [ self.input_space.shape[0] + self.kernel_shape[0] - 1, self.input_space.shape[1] + self.kernel_shape[1] - 1 ] self.detector_space = Conv2DSpace(shape=output_shape, num_channels=self.output_channels, axes=('b', 'c', 0, 1)) if self.irange is not None: assert self.sparse_init is None self.transformer = conv2d.make_random_conv2D( irange=self.irange, input_space=self.input_space, output_space=self.detector_space, kernel_shape=self.kernel_shape, batch_size=self.mlp.batch_size, subsample=(1, 1), border_mode=self.border_mode, rng=rng) elif self.sparse_init is not None: self.transformer = conv2d.make_sparse_random_conv2D( num_nonzero=self.sparse_init, input_space=self.input_space, output_space=self.detector_space, kernel_shape=self.kernel_shape, batch_size=self.mlp.batch_size, subsample=(1, 1), border_mode=self.border_mode, rng=rng) W, = self.transformer.get_params() W.name = 'W' self.b = sharedX(self.detector_space.get_origin() + self.init_bias) self.b.name = 'b' print 'Input shape: ', self.input_space.shape print 'Detector space: ', self.detector_space.shape if self.mlp.batch_size is None: raise ValueError( "Tried to use a convolutional layer with an MLP that has " "no batch size specified. 
You must specify the batch size of the " "model because theano requires the batch size to be known at " "graph construction time for convolution.") assert self.pool_type in ['max', 'mean'] dummy_detector = sharedX( self.detector_space.get_origin_batch(self.mlp.batch_size)) if self.pool_type == 'max': dummy_p = max_pool(bc01=dummy_detector, pool_shape=self.pool_shape, pool_stride=self.pool_stride, image_shape=self.detector_space.shape) elif self.pool_type == 'mean': dummy_p = mean_pool(bc01=dummy_detector, pool_shape=self.pool_shape, pool_stride=self.pool_stride, image_shape=self.detector_space.shape) dummy_p = dummy_p.eval() self.output_space = Conv2DSpace( shape=[dummy_p.shape[2], dummy_p.shape[3]], num_channels=self.output_channels, axes=('b', 'c', 0, 1)) print 'Output space: ', self.output_space.shape