Example #1
def t_mk_unpooling(t_unpool_input, t_pool_shape, t_switches, t_orig_shape):
    """
    Make a Theano graph for unpooling with known switches.
    :param t_unpool_input: pooled input tensor (batch, channels, height, width)
    :param t_pool_shape: pooling shape (channels, height, width)
    :param t_switches: index of the max element within each pooling region
    :param t_orig_shape: shape of the original tensor before pooling
    :return: Unpooled result
    """
    # sizes
    t_batch_sz = t_unpool_input.shape[0]
    # input
    t_in_ch = t_unpool_input.shape[1]
    t_in_h = t_unpool_input.shape[2]
    t_in_w = t_unpool_input.shape[3]
    # pooling
    t_pool_ch = t_pool_shape[0]
    t_pool_h = t_pool_shape[1]
    t_pool_w = t_pool_shape[2]
    # padded output
    t_out_ch_padded = t_in_ch * t_pool_ch
    t_out_h_padded = t_in_h * t_pool_h
    t_out_w_padded = t_in_w * t_pool_w
    # cropped output
    t_out_ch = t_orig_shape[1]
    t_out_h = t_orig_shape[2]
    t_out_w = t_orig_shape[3]

    # reshape input to get ready for unpool
    t_lpsb = t_batch_sz * t_in_ch
    t_pooled_raw = T.reshape(t_unpool_input, T.stack([t_lpsb, t_unpool_input.size//t_lpsb])).transpose().ravel()

    # ok, now unpool with switches
    t_raw_out_init = T.zeros(T.stack([t_batch_sz*t_in_ch*t_in_h*t_in_w, t_pool_ch*t_pool_h*t_pool_w]))
    t_rows = T.arange(t_raw_out_init.shape[0])
    t_raw_out = T.inc_subtensor(t_raw_out_init[t_rows, t_switches], t_pooled_raw)

    # now we only need to reshape back
    t_out_prep_shape_1 = T.stack([1, t_in_h * t_in_w, t_batch_sz * t_out_ch_padded, t_pool_h * t_pool_w])
    t_out_prep_1 = neibs2images(t_raw_out, T.stack([t_pool_ch, t_pool_h*t_pool_w]), t_out_prep_shape_1)
    t_out_prep_2 = T.transpose(t_out_prep_1[0], [1, 0, 2])
    t_out_prep_shape_3 = T.stack([t_batch_sz*t_out_ch_padded*t_in_h*t_in_w, t_pool_h*t_pool_w])
    t_out_prep_3 = T.reshape(t_out_prep_2, t_out_prep_shape_3)
    t_unpool_out_shape = T.stack([t_batch_sz, t_out_ch_padded, t_out_h_padded, t_out_w_padded])
    t_unpool_out_padded = neibs2images(t_out_prep_3, T.stack([t_pool_h, t_pool_w]), t_unpool_out_shape)
    # cut original data
    t_unpool_out = t_unpool_out_padded[:t_batch_sz, :t_out_ch, :t_out_h, :t_out_w]

    return t_unpool_out
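
A minimal usage sketch (an addition, not part of the original example): build symbolic inputs, wire them through t_mk_unpooling, and compile the unpooling graph with theano.function. The variable names are placeholders chosen for illustration.

import theano
import theano.tensor as T

t_pooled = T.tensor4('pooled')          # pooled activations (batch, ch, h, w)
t_pool_shape = T.ivector('pool_shape')  # (pool_ch, pool_h, pool_w)
t_switches = T.lvector('switches')      # argmax index inside each pooling region
t_orig_shape = T.ivector('orig_shape')  # shape of the tensor before pooling

t_unpooled = t_mk_unpooling(t_pooled, t_pool_shape, t_switches, t_orig_shape)
f_unpool = theano.function(
    [t_pooled, t_pool_shape, t_switches, t_orig_shape],
    t_unpooled, allow_input_downcast=True)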
Example #2
    def test_can_not_infer_nb_dim(self):
        # Was reported in gh-5613. Test that we do not crash,
        # and that we raise the expected errors in a few other
        # cases found while investigating that issue.

        img = T.tensor4("img")
        patches = T.nnet.neighbours.images2neibs(img, [16, 16])
        extractPatches = theano.function([img], patches, mode=self.mode)

        patsRecovery = T.matrix("patsRecovery")
        original_size = T.ivector("original_size")

        for mode in ["valid", "ignore_borders"]:
            out = neibs2images(patsRecovery, (16, 16), original_size, mode=mode)
            f = theano.function([patsRecovery, original_size], out, mode=self.mode)

            im_val = np.ones((1, 3, 320, 320), dtype=np.float32)
            neibs = extractPatches(im_val)
            f(neibs, im_val.shape)
            # Wrong number of dimensions
            with pytest.raises(ValueError):
                f(neibs, (1, 1, 3, 320, 320))
            # End up with a step of 0
            # This can lead to division by zero in DebugMode
            with pytest.raises((ValueError, ZeroDivisionError)):
                f(neibs, (3, 320, 320, 1))
Example #3
    def test_can_not_infer_nb_dim(self):
        # Was reported in gh-5613. Test that we do not crash,
        # and that we raise the expected errors in a few other
        # cases found while investigating that issue.

        img = T.tensor4('img')
        patches = T.nnet.neighbours.images2neibs(img, [16, 16])
        extractPatches = theano.function([img], patches)

        patsRecovery = T.matrix('patsRecovery')
        original_size = T.ivector('original_size')

        for mode in ['valid', 'ignore_borders']:
            out = neibs2images(patsRecovery, (16, 16),
                               original_size,
                               mode=mode)
            f = theano.function([patsRecovery, original_size], out)

            im_val = np.ones((1, 3, 320, 320), dtype=np.float32)
            neibs = extractPatches(im_val)
            f(neibs, im_val.shape)
            # Wrong number of dimensions
            self.assertRaises(ValueError, f, neibs, (1, 1, 3, 320, 320))
            # End up with a step of 0
            self.assertRaises(ValueError, f, neibs, (3, 320, 320, 1))
Example #4
    def test_neibs_manual(self):
        shape = (2, 3, 4, 4)
        for dtype in self.dtypes:
            images = shared(
                np.arange(np.prod(shape), dtype=dtype).reshape(shape))
            neib_shape = T.as_tensor_variable((2, 2))

            for border in ['valid', 'ignore_borders']:
                f = function([],
                             images2neibs(images, neib_shape, mode=border),
                             mode=self.mode)
                assert any([
                    isinstance(node.op, self.op)
                    for node in f.maker.fgraph.toposort()
                ])

                # print images.get_value(borrow=True)
                neibs = f()
                # print neibs
                assert np.allclose(
                    neibs,
                    [[0, 1, 4, 5], [2, 3, 6, 7], [8, 9, 12, 13],
                     [10, 11, 14, 15], [16, 17, 20, 21], [18, 19, 22, 23],
                     [24, 25, 28, 29], [26, 27, 30, 31], [32, 33, 36, 37],
                     [34, 35, 38, 39], [40, 41, 44, 45], [42, 43, 46, 47],
                     [48, 49, 52, 53], [50, 51, 54, 55], [56, 57, 60, 61],
                     [58, 59, 62, 63], [64, 65, 68, 69], [66, 67, 70, 71],
                     [72, 73, 76, 77], [74, 75, 78, 79], [80, 81, 84, 85],
                     [82, 83, 86, 87], [88, 89, 92, 93], [90, 91, 94, 95]])
                g = function([],
                             neibs2images(neibs, neib_shape, images.shape),
                             mode=self.mode)

                assert np.allclose(images.get_value(borrow=True), g())
Example #5
    def test_neibs(self):
        for shape, pshape in [((10, 7, 18, 18), (2, 2)),
                              ((10, 7, 6, 18), (3, 2)),
                              ((5, 7, 66, 66), (33, 33)),
                              ((5, 7, 68, 66), (34, 33))]:
            for border in ['valid', 'ignore_borders']:
                for dtype in self.dtypes:
                    images = shared(
                        numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape))
                    neib_shape = T.as_tensor_variable(pshape)

                    f = function([],
                                 images2neibs(images, neib_shape, mode=border),
                                 mode=self.mode)

                    # print images.get_value(borrow=True)
                    neibs = f()
                    # print neibs
                    g = function([],
                                 neibs2images(neibs, neib_shape, images.shape),
                                 mode=self.mode)
                    assert any([isinstance(node.op, self.op)
                                for node in f.maker.fgraph.toposort()])

                    # print g()
                    assert numpy.allclose(images.get_value(borrow=True), g())
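Example #6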
def plot_top_16(D, sz, imname):
    '''
    Plots the top 16 components from the basis matrix D.
    Each basis vector represents an image block of shape (sz, sz)

    Parameters
    -------------
    D: np.ndarray
        N x n matrix representing the basis vectors of the PCA space
        N is the dimension of the original space (number of pixels in a block)
        n represents the maximum dimension of the PCA space (assumed to be at least 16)

    sz: Integer
        The height and width of each block

    imname: string
        name of file where image will be saved.
    '''
    #TODO: Obtain top 16 components of D and plot them
    image = T.tensor4('image')
    neibs = nbs.images2neibs(image, neib_shape = (sz, sz))
    transToImage = nbs.neibs2images(neibs, neib_shape = (sz, sz), original_shape = (1,1, sz, sz))
    trans_func = theano.function([neibs], transToImage)
    f, axarr = plt.subplots(4,4)
    for i in range(4):
        for j in range(4):
            plt.axes(axarr[i,j])
            plt.imshow(trans_func(D[:,[i*4+j]].T)[0,0], cmap = 'gray')
    os.chdir(CWD)
    f.savefig(imname)
    plt.close(f)
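
A hypothetical driver for plot_top_16 (an assumption, not from the original source): it builds a toy PCA basis D from random (sz, sz) blocks via NumPy's SVD and saves the panel of the first 16 components. It presumes the module-level imports and the CWD constant that plot_top_16 itself relies on.

import numpy as np

sz = 8
blocks = np.random.rand(1000, sz * sz).astype(np.float32)  # toy block data
blocks -= blocks.mean(axis=0)                               # center the blocks
# rows of Vt are the principal directions, so D is (sz*sz) x n with n >= 16
_, _, Vt = np.linalg.svd(blocks, full_matrices=False)
D = Vt.T
plot_top_16(D, sz, imname='top16_components.png')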
Example #7
    def test_neibs(self):
        for shape, pshape in [((10, 7, 18, 18), (2, 2)),
                              ((10, 7, 6, 18), (3, 2)),
                              ((5, 7, 66, 66), (33, 33)),
                              ((5, 7, 68, 66), (34, 33))]:
            for border in ['valid', 'ignore_borders']:
                for dtype in self.dtypes:
                    images = shared(
                        np.arange(np.prod(shape), dtype=dtype).reshape(shape))
                    neib_shape = T.as_tensor_variable(pshape)

                    f = function([],
                                 images2neibs(images, neib_shape, mode=border),
                                 mode=self.mode)

                    # print images.get_value(borrow=True)
                    neibs = f()
                    # print neibs
                    g = function([],
                                 neibs2images(neibs, neib_shape, images.shape),
                                 mode=self.mode)
                    assert any([
                        isinstance(node.op, self.op)
                        for node in f.maker.fgraph.toposort()
                    ])

                    # print g()
                    assert np.allclose(images.get_value(borrow=True), g())
Example #8
    def test_can_not_infer_nb_dim(self):
        # Was reported in gh-5613. Test that we do not crash,
        # and that we raise the expected errors in a few other
        # cases found while investigating that issue.

        img = T.tensor4('img')
        patches = T.nnet.neighbours.images2neibs(img, [16, 16])
        extractPatches = theano.function([img], patches)

        patsRecovery = T.matrix('patsRecovery')
        original_size = T.ivector('original_size')

        for mode in ['valid', 'ignore_borders']:
            out = neibs2images(patsRecovery, (16, 16),
                               original_size, mode=mode)
            f = theano.function([patsRecovery, original_size], out)

            im_val = numpy.ones((1, 3, 320, 320), dtype=numpy.float32)
            neibs = extractPatches(im_val)
            f(neibs, im_val.shape)
            # Wrong number of dimensions
            self.assertRaises(ValueError, f, neibs,
                              (1, 1, 3, 320, 320))
            # End up with a step of 0
            self.assertRaises(ValueError, f, neibs,
                              (3, 320, 320, 1))
Example #9
 def __init__(self, input1, input2):
     x1_sub = input1[:, :, 2:-2, 2:-2]
     x1_flatten = T.flatten(x1_sub)
     x1 = T.extra_ops.repeat(x1_flatten, 25)
     x1 = T.reshape(x1, [T.shape(x1_flatten)[0], 25])
     x2 = neighbours.images2neibs(input2, neib_shape=(5, 5), neib_step=(1, 1))
     diff = x1 - x2
     new_shape = T.shape(x1_sub)*[1, 1, 5, 5]
     diff_img = neighbours.neibs2images(diff, neib_shape=(5, 5), original_shape=[1, 25, 25*5, 5*5])
     self.output = T.nnet.relu(diff_img)
Example #10
 def __init__(self, input1, input2):
     x1_sub = input1[:, :, 2:-2, 2:-2]
     x1_flatten = T.flatten(x1_sub)
     x1 = T.extra_ops.repeat(x1_flatten, 25)
     x1 = T.reshape(x1, [T.shape(x1_flatten)[0], 25])
     x2 = neighbours.images2neibs(input2,
                                  neib_shape=(5, 5),
                                  neib_step=(1, 1))
     diff = x1 - x2
     new_shape = T.shape(x1_sub) * [1, 1, 5, 5]
     diff_img = neighbours.neibs2images(
         diff, neib_shape=(5, 5), original_shape=[1, 25, 25 * 5, 5 * 5])
     self.output = T.nnet.relu(diff_img)
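Example #11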
def reconstructed_image(D,c,num_coeffs,X_mean,n_blocks,im_num):
    '''
    This function reconstructs an image X_recon_img given the number of
    coefficients for each image specified by num_coeffs

        Parameters
    ---------------
    c: np.ndarray
        an n x m matrix representing the coefficients of all the image blocks.
        n represents the maximum dimension of the PCA space.
        m is (number of images x n_blocks**2)

    D: np.ndarray
        an N x n matrix representing the basis vectors of the PCA space
        N is the dimension of the original space (number of pixels in a block)

    im_num: Integer
        index of the image to visualize

    X_mean: np.ndarray
        a matrix representing the mean block.

    num_coeffs: Integer
        an integer that specifies the number of top components to be
        considered while reconstructing
        

    n_blocks: Integer
        number of blocks comprising the image in each direction.
        For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4
    '''

    #TODO: Enter code below for reconstructing the image X_recon_img    
    
    c_im = c[:num_coeffs,n_blocks*n_blocks*im_num:n_blocks*n_blocks*(im_num+1)]
    D_im = D[:,:num_coeffs]
    M_coef = np.dot(D_im.T, X_mean.T)
    tmp1 = c_im - np.repeat(M_coef.reshape(-1, 1), n_blocks**2, 1)
    X_blocks = np.dot(D_im, tmp1) + np.repeat(X_mean.reshape(-1,1), n_blocks**2, 1)
    X_blocks = X_blocks.T
    slide_window = int(X_mean.size ** 0.5)
    image = T.tensor4('image')
    neibs = nbs.images2neibs(image, neib_shape = (slide_window, slide_window))
    transToImage = nbs.neibs2images(neibs, neib_shape = (slide_window, slide_window), original_shape = (1,1,IMG_SIZE, IMG_SIZE))
    trans_func = theano.function([neibs], transToImage)
    X_recon_img = trans_func(X_blocks)
    return X_recon_img[0,0]
Example #12
def get_single_deconv_out_new(input, filter):
    output_shape = (1, 1, input.shape[0]*filter.shape[0], input.shape[1]*filter.shape[1])
    # reshape inputs
    image = input.reshape((1, K.prod(input.shape)))
    kernel = filter.reshape((1, K.prod(filter.shape)))

    # neibs = images2neibs(output, neib_shape=filter.shape, neib_step=filter.shape)

    def fn(i, k):
        return i*k
    results, updates = theano.scan(fn=fn, sequences=image[0], non_sequences=kernel[0])

    # neibs = neibs*results
    img_new = neibs2images(results, filter.shape, output_shape)

    return img_new[0][0]
Example #13
def plot(c, D, n_blocks, X_mn, ax):
    '''
    Plots a reconstruction of a particular image using D as the basis matrix and coefficient
    vectors from c

    Parameters
    ------------------------
        c: np.ndarray
            an l x m matrix representing the coefficients of all blocks in a particular image
            l represents the dimension of the PCA space used for reconstruction
            m represents the number of blocks in an image

        D: np.ndarray
            an N x l matrix representing l basis vectors of the PCA space
            N is the dimension of the original space (number of pixels in a block)

        n_blocks: Integer
            number of blocks comprising the image in each direction.
            For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4

        X_mn: basis vectors represent the divergence from the mean so this
            matrix should be added to all reconstructed blocks

        ax: the axis on which the image will be plotted
    '''
    
    # raise NotImplementedError
    # m x N
    I=np.dot(D,c).T
    
    sz,stmp = D.shape
    sz = int(np.sqrt(sz))

    I = I + np.dot(np.ones([n_blocks**2,1]),X_mn.reshape([1,sz**2]))
    
    neibs = T.matrix('neibs')
    # print(sz*n_blocks)
    images = neibs2images(neibs, neib_shape=(sz,sz),original_shape=(1,1,sz*n_blocks,sz*n_blocks))
    
    # Constructing theano function
    inv_window_function = theano.function([neibs], images)
        
    X=inv_window_function(np.float32(I)).reshape(sz*n_blocks,sz*n_blocks)
    ax.imshow(X,cmap = cm.Greys_r)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
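
A hypothetical call sketch for plot (toy arrays, for illustration only): in the original pipeline, D and c come from a block-wise PCA of a 256x256 image with sz = 256 // n_blocks pixels per block; the theano and matplotlib cm imports used inside plot are assumed to be present at module level.

import numpy as np
import matplotlib.pyplot as plt

n_blocks, l = 4, 10
sz = 256 // n_blocks                                       # 64x64 pixel blocks
D = np.random.randn(sz * sz, l).astype(np.float32)         # toy basis vectors
c = np.random.randn(l, n_blocks ** 2).astype(np.float32)   # toy block coefficients
X_mn = np.zeros((sz, sz), dtype=np.float32)                # toy mean block

fig, ax = plt.subplots()
plot(c, D, n_blocks, X_mn, ax)
fig.savefig('reconstruction.png')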
Example #14
    def test_neibs_manual(self):
        shape = (2, 3, 4, 4)
        for dtype in self.dtypes:
            images = shared(
                    numpy.arange(numpy.prod(shape), dtype=dtype
                    ).reshape(shape))
            neib_shape = T.as_tensor_variable((2, 2))

            for border in ['valid', 'ignore_borders']:
                f = function([], images2neibs(images, neib_shape, mode=border),
                             mode=self.mode)
                assert any([isinstance(node.op, self.op)
                            for node in f.maker.fgraph.toposort()])

                # print images.get_value(borrow=True)
                neibs = f()
                # print neibs
                assert numpy.allclose(neibs,
                   [[ 0,  1,  4,  5],
                   [ 2,  3,  6,  7],
                   [ 8,  9, 12, 13],
                   [10, 11, 14, 15],
                   [16, 17, 20, 21],
                   [18, 19, 22, 23],
                   [24, 25, 28, 29],
                   [26, 27, 30, 31],
                   [32, 33, 36, 37],
                   [34, 35, 38, 39],
                   [40, 41, 44, 45],
                   [42, 43, 46, 47],
                   [48, 49, 52, 53],
                   [50, 51, 54, 55],
                   [56, 57, 60, 61],
                   [58, 59, 62, 63],
                   [64, 65, 68, 69],
                   [66, 67, 70, 71],
                   [72, 73, 76, 77],
                   [74, 75, 78, 79],
                   [80, 81, 84, 85],
                   [82, 83, 86, 87],
                   [88, 89, 92, 93],
                   [90, 91, 94, 95]])
                g = function([], neibs2images(neibs, neib_shape, images.shape),
                             mode=self.mode)

                assert numpy.allclose(images.get_value(borrow=True), g())
Example #15
def get_single_deconv_out(input, filter):

    # reshape inputs
    img = input.reshape((1, 1, input.shape[0], input.shape[1]))
    kernel = filter.reshape((1, K.prod(filter.shape)))

    # construct split function
    # image = T.tensor4("image")
    neibs = images2neibs(img, neib_shape=filter.shape, neib_step=filter.shape)
    # window_function = theano.function([image], neibs)
    #
    # neibs_val = window_function(img_val)

    neibs = neibs*kernel

    # construct merge function
    img_new = neibs2images(neibs, filter.shape, img.shape)

    return img_new[0][0]
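Example #16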
def plot(c, D, n_blocks, X_mn, ax):
    '''
    Plots a reconstruction of a particular image using D as the basis matrix and coefficient
    vectors from c

    Parameters
    ------------------------
        c: np.ndarray
            an l x m matrix representing the coefficients of all blocks in a particular image
            l represents the dimension of the PCA space used for reconstruction
            m represents the number of blocks in an image

        D: np.ndarray
            an N x l matrix representing l basis vectors of the PCA space
            N is the dimension of the original space (number of pixels in a block)

        n_blocks: Integer
            number of blocks comprising the image in each direction.
            For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4

        X_mn: basis vectors represent the divergence from the mean so this
            matrix should be added to all reconstructed blocks

        ax: the axis on which the image will be plotted
    '''

    Y = np.dot(c.T, D.T)
    Y = Y + np.repeat(X_mn.reshape(1, -1), Y.shape[0], 0)

    sz = 256 // n_blocks
    images = T.tensor4('images')
    neibs = images2neibs(images, neib_shape=(sz, sz))

    im_new = neibs2images(neibs, (sz, sz), (1, 1, 256, 256))
    # Theano function definition
    inv_window = theano.function([neibs], im_new)
    # Function application
    imag = inv_window(Y)[0, 0]

    pyplot.subplot(ax)
    pyplot.imshow(imag, cmap = cm.Greys_r)
Example #17
def plot(c, D, n_blocks, X_mn, ax):
    '''
    Plots a reconstruction of a particular image using D as the basis matrix and coefficient
    vectors from c

    Parameters
    ------------------------
        c: np.ndarray
            an l x m matrix representing the coefficients of all blocks in a particular image
            l represents the dimension of the PCA space used for reconstruction
            m represents the number of blocks in an image

        D: np.ndarray
            an N x l matrix representing l basis vectors of the PCA space
            N is the dimension of the original space (number of pixels in a block)

        n_blocks: Integer
            number of blocks comprising the image in each direction.
            For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4

        X_mn: basis vectors represent the divergence from the mean so this
            matrix should be added to all reconstructed blocks

        ax: the axis on which the image will be plotted
    '''
    neibs = theano.tensor.matrix('neibs')
    x = np.dot(D, c).T
    x = x + np.repeat(X_mn.reshape(1, -1), x.shape[0], 0)

    im_new = neibs2images(neibs, (256 // n_blocks, 256 // n_blocks),
                          (1, 1, 256, 256))
    inv_window = theano.function([neibs], im_new)
    im_new_val = inv_window(x)
    im_new_val = np.reshape(im_new_val, (256, 256))

    ax.imshow(im_new_val, cmap=cm.Greys_r)
Example #18
 def fn(neibs):
     return neibs2images(neibs, (2, 2), (2, 3, 10, 10))
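
For context, a self-contained round-trip sketch (not part of the original snippet) using the same shapes that fn hard-codes: split a (2, 3, 10, 10) batch into 2x2 patches with images2neibs and stitch it back with neibs2images.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.neighbours import images2neibs, neibs2images

images = T.tensor4('images')
neibs = images2neibs(images, neib_shape=(2, 2))
to_patches = theano.function([images], neibs)

im_val = np.arange(2 * 3 * 10 * 10, dtype=theano.config.floatX).reshape(2, 3, 10, 10)
patches = to_patches(im_val)                      # one row per 2x2 patch

# invert the split; neibs2images needs the original 4D shape back
rebuild = theano.function([neibs], neibs2images(neibs, (2, 2), im_val.shape))
assert np.allclose(rebuild(patches), im_val)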
Example #19
def reconstructed_image(D, c, num_coeffs, X_mean, n_blocks, im_num):
    '''
    This function reconstructs an image X_recon_img given the number of
    coefficients for each image specified by num_coeffs

        Parameters
    ---------------
    c: np.ndarray
        an n x m matrix representing the coefficients of all the image blocks.
        n represents the maximum dimension of the PCA space.
        m is (number of images x n_blocks**2)

    D: np.ndarray
        an N x n matrix representing the basis vectors of the PCA space
        N is the dimension of the original space (number of pixels in a block)

    im_num: Integer
        index of the image to visualize

    X_mean: np.ndarray
        a matrix representing the mean block.

    num_coeffs: Integer
        an integer that specifies the number of top components to be
        considered while reconstructing

    n_blocks: Integer
        number of blocks comprising the image in each direction.
        For example, for a 256x256 image divided into 64x64 blocks, n_blocks will be 4
    '''

    # print '--- @ PCs =', num_coeffs

    # print '!!!!!'
    # print 'SHAPE(c) =', c.shape
    # print 'SHAPE(D) =', D.shape
    # print '!!!!!'
    # print sz
    # print height, width

    c_im = c[:num_coeffs,
             n_blocks * n_blocks * im_num:n_blocks * n_blocks * (im_num + 1)]
    D_im = D[:, :num_coeffs]

    # print '!!!!!'
    # print 'SHAPE(c_im) =', c_im.shape
    # print 'SHAPE(D_im) =', D_im.shape
    # print '!!!!!'

    #TODO: Enter code below for reconstructing the image X_recon_img

    X_recon = (np.dot(D_im, c_im)).T

    # Defining variables (sz, height and width are assumed to be module-level
    # constants: the block size and the full image dimensions)
    neibs = T.matrix('neibs')
    im_new = neibs2images(neibs, (sz, sz), (1, 1, height, width))
    # Theano function definition
    inv_window = theano.function([neibs], im_new)

    # Function application
    X_mean_ex = np.tile(X_mean, (n_blocks, n_blocks))
    X_recon_img = inv_window(X_recon)[0, 0] + X_mean_ex

    # print '!!!!!'
    # print X_recon_img.shape
    # print '!!!!!'

    return X_recon_img
Example #20
 def fn(neibs):
     return neibs2images(neibs, (2, 2), (2, 3, 10, 10))
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )

        print(conv_out.type)

        # get neighbourhoods for lcn
        neib_sets = neighbours.images2neibs(
            conv_out,
            neib_shape=(5,5),
            mode='ignore_borders'
        )

        #local contrast normalization
        lcn_sets, updates = theano.map(fn=self.lcn_pixel,sequences=neib_sets) 

        #transform back to images
        lcn_out = neighbours.neibs2images(
            lcn_sets,
            neib_shape=(5,5),
            original_shape=conv_out.shape,
            mode='ignore_borders'
        )

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=lcn_out,
            ds=poolsize,
            ignore_border=True
        )

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        relu = lambda x: x * (x > 0)
        self.output = relu(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input
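
The lcn_pixel helper referenced in the theano.map call above is not shown in this example. A plausible per-patch implementation (an assumption for illustration, not the original code) subtracts each 5x5 neighbourhood's mean and divides by its standard deviation:

    def lcn_pixel(self, neib_row):
        # Hypothetical normalizer: neib_row is one flattened 5x5 neighbourhood
        # produced by images2neibs above; assumes theano.tensor is imported as T.
        centered = neib_row - T.mean(neib_row)
        return centered / T.maximum(T.std(neib_row), 1e-4)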