Example #1
 def _train_fprop(self, state_below):
     conv_out = conv2d_fft(state_below,
                           self.W,
                           border_mode=self.border_mode,
                           image_shape=self.image_shape,
                           pad_last_dim=self.pad_last_dim)
     return conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
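The dimshuffle('x', 0, 'x', 'x') call in the return line reshapes the 1-D bias of shape (n_filters,) into a broadcastable (1, n_filters, 1, 1) tensor so it can be added to the 4-D convolution output. A minimal numpy sketch of the same broadcast (the shapes are illustrative, not taken from the example):

 import numpy as np

 conv_out = np.zeros((8, 16, 24, 24))     # (batch, n_filters, height, width)
 b = np.ones(16)                          # one bias per output feature map
 out = conv_out + b.reshape(1, 16, 1, 1)  # numpy analogue of the dimshuffle
 print(out.shape)                         # (8, 16, 24, 24)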
Example #2
 def _train_fprop(self, state_below):
     conv_out = conv2d_fft(
         state_below,
         self.W,
         border_mode=self.border_mode,
         image_shape=self.image_shape,
         pad_last_dim=self.pad_last_dim,
     )
     return conv_out + self.b.dimshuffle("x", 0, "x", "x")
Example #3
 def output_func(self, input):
     return conv2d_fft(input, self.W, border_mode='valid',
                       filter_shape=self.filter_shape,
                       image_shape=self.input_shape)
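None of these snippets show their imports; in old Theano releases the function they call lives in the CUDA sandbox, so a plausible import line (an assumption, since the original source files are not shown here) would be:

 # FFT-based GPU convolution from the old Theano CUDA backend
 from theano.sandbox.cuda.fftconv import conv2d_fft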
Example #4
 def output_func(self, input):
     return conv2d_fft(input,
                       self.W,
                       border_mode='valid',
                       filter_shape=self.filter_shape,
                       image_shape=self.input_shape)
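With border_mode='valid', the filter is applied only where it fits entirely inside the image, so each spatial dimension shrinks by the filter size minus one. A small sketch of the shape arithmetic (the concrete shapes are assumptions for illustration):

 def valid_output_shape(image_shape, filter_shape):
     # (batch, channels, h, w) convolved 'valid' with (n_filters, channels, fh, fw)
     batch, channels, ih, iw = image_shape
     n_filters, _, fh, fw = filter_shape
     return (batch, n_filters, ih - fh + 1, iw - fw + 1)

 # e.g. 5x5 filters over 28x28 inputs leave a 24x24 feature map
 print(valid_output_shape((64, 1, 28, 28), (20, 1, 5, 5)))  # (64, 20, 24, 24)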
Example #5
    def __init__(self,
                 rng,
                 input,
                 subsample,
                 filter_shape,
                 image_shape,
                 poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type subsample: tuple or list of length 2
        :param subsample: the convolution stride factor (#rows, #cols)

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random values drawn uniformly from
        # [-W_bound, W_bound], the Glorot-style bound sqrt(6 / (fan_in + fan_out))
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.tensor._shared(
            numpy.asarray(rng.uniform(low=-W_bound, high=W_bound,
                                      size=filter_shape),
                          dtype=theano.config.floatX),
            borrow=True)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0], ), dtype=theano.config.floatX)
        self.b = theano.tensor._shared(value=b_values, borrow=True)

        fft_filters = False
        # convolve input feature maps with filters, using either the
        # FFT-based GPU convolution or the default conv2d
        if fft_filters:
            conv_out = conv2d_fft(
                input=theano.sandbox.cuda.as_cuda_ndarray_variable(input),
                filters=self.W,
                filter_shape=filter_shape,
                image_shape=image_shape)
        else:
            conv_out = conv.conv2d(input=input,
                                   filters=self.W,
                                   filter_shape=filter_shape,
                                   subsample=subsample,
                                   image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize,
                                            ignore_border=True)

        def rectify(X):
            # rectified linear unit (ReLU) nonlinearity
            return T.maximum(X, 0.)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height; the rectifier is then applied elementwise
        self.output = rectify(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]
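A minimal sketch of how this layer might be wired up (the symbolic input and all shapes are assumptions for illustration, and compiling it requires an old Theano installation):

 import numpy
 import theano
 import theano.tensor as T

 rng = numpy.random.RandomState(1234)
 x = T.tensor4('x')  # symbolic (batch, channels, height, width) input
 layer = LeNetConvPoolLayer(rng,
                            input=x,
                            subsample=(1, 1),
                            filter_shape=(20, 1, 5, 5),    # 20 filters of 5x5
                            image_shape=(64, 1, 28, 28),   # batch of 64 grayscale 28x28 images
                            poolsize=(2, 2))
 f = theano.function([x], layer.output)  # maps images to 12x12 pooled feature maps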