Example #1
def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences."""
    conv_seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         BatchNormalization(broadcastable=(False, True, True)),
         AveragePooling(pooling_size=(2, 2)),
         BatchNormalization(broadcastable=(False, False, False)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq_no_bn = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         AveragePooling(pooling_size=(2, 2)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq.initialize()
    conv_seq_no_bn.initialize()
    rng = numpy.random.RandomState((2015, 12, 17))
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    ybn = conv_seq.apply(x)
    y = conv_seq_no_bn.apply(x)
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}))

    std = conv_seq.children[-2].population_stdev
    std.set_value(3 * std.get_value(borrow=True))
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}) / 3.)
Example #2
def test_batch_normalization_broadcastable_sanity():
    bn = BatchNormalization((5, 3, 2), broadcastable=(False, True, False))
    with bn:
        cg = ComputationGraph([bn.apply(tensor.tensor4('abc'))])
    vars = VariableFilter(roles=[BATCH_NORM_MINIBATCH_ESTIMATE])(cg)
    assert all(v.broadcastable[1:] == bn.population_mean.broadcastable
               for v in vars)
Example #3
def join(small_image,
         big_image,
         n_filter_small,
         n_filter_big,
         big_img_size_in,
         ordering=''):
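    """Upsample `small_image` by 2x, batch-normalize it and `big_image`,
    and concatenate the two along the channel axis."""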

    # upsample small image
    upsampled_small = tensor.repeat(small_image, 2, axis=2)
    upsampled_small = tensor.repeat(upsampled_small, 2, axis=3)

    img_size_small = (n_filter_small, big_img_size_in[0], big_img_size_in[1])
    img_size_big = (n_filter_big, big_img_size_in[0], big_img_size_in[1])

    bn_small = BatchNormalization(img_size_small,
                                  name='bn_small%s' % (ordering, ))
    bn_small.initialize()
    bn_big = BatchNormalization(img_size_big, name='bn_big%s' % (ordering, ))
    bn_big.initialize()

    depth_concat = tensor.concatenate(
        [bn_small.apply(upsampled_small),
         bn_big.apply(big_image)], axis=1)

    return depth_concat
Example #4
    def __init__(self, input_dim, output_dim, hidden_size, init_ranges,
                 **kwargs):
        linear1 = LinearMaxout(input_dim=input_dim,
                               output_dim=hidden_size,
                               num_pieces=2,
                               name='linear1')
        linear2 = LinearMaxout(input_dim=hidden_size,
                               output_dim=hidden_size,
                               num_pieces=2,
                               name='linear2')
        linear3 = Linear(input_dim=hidden_size, output_dim=output_dim)
        logistic = Logistic()
        bricks = [
            linear1,
            BatchNormalization(input_dim=hidden_size, name='bn2'), linear2,
            BatchNormalization(input_dim=hidden_size, name='bnl'), linear3,
            logistic
        ]
        for init_range, b in zip(init_ranges, (linear1, linear2, linear3)):
            b.biases_init = initialization.Constant(0)
            b.weights_init = initialization.Uniform(width=init_range)

        kwargs.setdefault('use_bias', False)
        super(ConcatenateClassifier, self).__init__([b.apply for b in bricks],
                                                    **kwargs)
Example #5
def test_batch_normalization_image_size_setter():
    """Test that setting image_size on a BatchNormalization works."""
    bn = BatchNormalization()
    bn.image_size = (5, 4)
    assert bn.input_dim == (None, 5, 4)
    bn.image_size = (4, 5)
    assert bn.input_dim == (None, 4, 5)
Example #6
    def __init__(self, batch_norm, num_channels=1, **kwargs):

        self.layers = []
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=64,
                          border_mode=(1, 1),
                          name='conv_1'))
        self.layers.append(Rectifier())
        self.layers.append(MaxPooling(pooling_size=(2, 2), name='pool_1'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=128,
                          border_mode=(1, 1),
                          name='conv_2'))
        self.layers.append(Rectifier())
        self.layers.append(MaxPooling(pooling_size=(2, 2), name='pool_2'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=256,
                          border_mode=(1, 1),
                          name='conv_3'))
        if batch_norm:
            self.layers.append(
                BatchNormalization(broadcastable=(False, True, True),
                                   name='bn_1'))
        self.layers.append(Rectifier())
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=256,
                          border_mode=(1, 1),
                          name='conv_4'))
        self.layers.append(Rectifier())
        self.layers.append(
            MaxPooling(pooling_size=(1, 2), step=(1, 2), name='pool_3'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=512,
                          border_mode=(1, 1),
                          name='conv_5'))
        if batch_norm:
            self.layers.append(
                BatchNormalization(broadcastable=(False, True, True),
                                   name='bn_2'))
        self.layers.append(Rectifier())
        self.layers.append(
            MaxPooling(pooling_size=(2, 1), step=(2, 1), name='pool_4'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=512,
                          border_mode=(1, 1),
                          name='conv_6'))
        if batch_norm:
            self.layers.append(
                BatchNormalization(broadcastable=(False, True, True),
                                   name='bn_3'))
        self.layers.append(Rectifier())
        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels)
Example #7
def apply_setup(input_dim, broadcastable, conserve_memory):
    """Common setup code."""
    bn = BatchNormalization(input_dim, broadcastable, conserve_memory,
                            epsilon=1e-4)
    bn.initialize()
    b_len = (len(input_dim) if isinstance(input_dim, collections.Sequence)
             else 1)
    x = tensor.TensorType(theano.config.floatX,
                          [False] * (b_len + 1))()
    return bn, x
Example #8
def test_batch_normalization_simple():
    x = tensor.matrix()
    eps = 1e-4
    bn = BatchNormalization(input_dim=4, epsilon=eps)
    bn.initialize()
    with batch_normalization(bn):
        y = bn.apply(x)
    rng = numpy.random.RandomState((2016, 1, 18))
    x_ = rng.uniform(size=(5, 4)).astype(theano.config.floatX)
    y_ = y.eval({x: x_})
    y_expected = (x_ - x_.mean(axis=0)) / numpy.sqrt(x_.var(axis=0) + eps)
    assert_allclose(y_, y_expected, rtol=1e-4)
Example #9
def conv_block(input_img,
               n_filter,
               filter_size,
               input_featuremap_size,
               ordering=''):

    # initialization scale as in Torch's SpatialConvolution: 2 / sqrt(fan_in)
    std0 = 2. / (filter_size[0] * filter_size[1] *
                 input_featuremap_size[0])**.5
    std1 = 2. / (input_featuremap_size[0])**.5

    layers = []
    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=n_filter,
                      border_mode='half',
                      name='conv%s_1' % (ordering, ),
                      use_bias=True,
                      weights_init=Uniform(width=std0)))
    layers.append(BatchNormalization(name='bn%s_1' % (ordering, )))
    layers.append(LeakyReLU())
    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=n_filter,
                      border_mode='half',
                      name='conv%s_2' % (ordering, ),
                      use_bias=True,
                      weights_init=Uniform(width=std0)))
    layers.append(BatchNormalization(name='bn%s_2' % (ordering, )))
    layers.append(LeakyReLU())
    layers.append(
        Convolutional(filter_size=(1, 1),
                      num_filters=n_filter,
                      border_mode='valid',
                      name='conv%s_3b' % (ordering, ),
                      use_bias=True,
                      weights_init=Uniform(width=std1)))
    layers.append(BatchNormalization(name='bn%s_3' % (ordering, )))
    layers.append(LeakyReLU())

    conv_sequence = ConvolutionalSequence(
        layers,
        num_channels=input_featuremap_size[0],
        image_size=(input_featuremap_size[1], input_featuremap_size[2]),
        biases_init=Uniform(width=.1),
        name='convsequence%s' % (ordering, ))

    conv_sequence.initialize()
    return conv_sequence.apply(input_img)
Example #10
    def __init__(self,
                 conv_activations,
                 num_channels,
                 image_shape,
                 filter_sizes,
                 feature_maps,
                 pooling_sizes,
                 top_mlp_activations,
                 top_mlp_dims,
                 conv_step=None,
                 border_mode='valid',
                 **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        # list() so the zipped parameters can be iterated more than once
        conv_parameters = list(zip(filter_sizes, feature_maps))

        # Construct convolutional, batch-norm, activation, and pooling
        # layers with corresponding parameters
        self.convolution_layer = (
            Convolutional(filter_size=filter_size,
                          num_filters=num_filter,
                          step=self.conv_step,
                          border_mode=self.border_mode,
                          name='conv_{}'.format(i))
            for i, (filter_size, num_filter) in enumerate(conv_parameters))

        self.BN_layer = (BatchNormalization(name='bn_conv_{}'.format(i))
                         for i, _ in enumerate(conv_parameters))

        self.pooling_layer = (MaxPooling(size, name='pool_{}'.format(i))
                              for i, size in enumerate(pooling_sizes))

        self.layers = list(
            interleave([
                self.convolution_layer, self.BN_layer, conv_activations,
                self.pooling_layer
            ]))

        self.conv_sequence = ConvolutionalSequence(self.layers,
                                                   num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [
            self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply
        ]
        super(LeNet, self).__init__(application_methods, **kwargs)
Example #11
    def __init__(self, visual_dim, textual_dim, output_dim, hidden_size,
                 init_ranges, **kwargs):
        (visual_init_range, textual_init_range, gbu_init_range, linear_range_1,
         linear_range_2, linear_range_3) = init_ranges
        visual_mlp = Sequence([
            BatchNormalization(input_dim=visual_dim).apply,
            Linear(visual_dim,
                   hidden_size,
                   use_bias=False,
                   weights_init=initialization.Uniform(
                       width=visual_init_range)).apply,
        ],
                              name='visual_mlp')
        textual_mlp = Sequence([
            BatchNormalization(input_dim=textual_dim).apply,
            Linear(textual_dim,
                   hidden_size,
                   use_bias=False,
                   weights_init=initialization.Uniform(
                       width=textual_init_range)).apply,
        ],
                               name='textual_mlp')

        gbu = GatedBimodal(
            hidden_size,
            weights_init=initialization.Uniform(width=gbu_init_range))

        logistic_mlp = MLPGenreClassifier(
            hidden_size, output_dim, hidden_size,
            [linear_range_1, linear_range_2, linear_range_3])
        # logistic_mlp = Sequence([
        #    BatchNormalization(input_dim=hidden_size, name='bn1').apply,
        #    Linear(hidden_size, output_dim, name='linear_output', use_bias=False,
        #           weights_init=initialization.Uniform(width=linear_range_1)).apply,
        #    Logistic().apply
        #], name='logistic_mlp')

        children = [visual_mlp, textual_mlp, gbu, logistic_mlp]
        kwargs.setdefault('use_bias', False)
        kwargs.setdefault('children', children)
        super(GatedClassifier, self).__init__(**kwargs)
Example #12
def check(input_dim, expected_shape, broadcastable=None,
          conserve_memory=True):
    bn = BatchNormalization(input_dim=input_dim,
                            broadcastable=broadcastable,
                            conserve_memory=conserve_memory)
    if broadcastable is None:
        if not isinstance(input_dim, collections.Sequence):
            b_input_dim = (input_dim,)
        else:
            b_input_dim = input_dim
        input_broadcastable = tuple(False for _ in range(len(b_input_dim)))
    else:
        input_broadcastable = broadcastable
    bn.allocate()
    assert conserve_memory == bn.conserve_memory
    assert input_dim == bn.input_dim
    assert bn.broadcastable == broadcastable
    assert bn.scale.broadcastable == input_broadcastable
    assert bn.shift.broadcastable == input_broadcastable
    assert bn.population_mean.broadcastable == input_broadcastable
    assert bn.population_stdev.broadcastable == input_broadcastable
    assert_allclose(bn.population_mean.get_value(borrow=True), 0.)
    assert_allclose(bn.population_stdev.get_value(borrow=True), 1.)
    assert_equal(bn.scale.get_value(borrow=True).shape, expected_shape)
    assert_equal(bn.shift.get_value(borrow=True).shape, expected_shape)
    assert_equal(bn.population_mean.get_value(borrow=True).shape,
                 expected_shape)
    assert_equal(bn.population_stdev.get_value(borrow=True).shape,
                 expected_shape)
    assert numpy.isnan(bn.shift.get_value(borrow=True)).all()
    assert numpy.isnan(bn.scale.get_value(borrow=True)).all()
    bn.initialize()
    assert_allclose(bn.shift.get_value(borrow=True), 0.)
    assert_allclose(bn.scale.get_value(borrow=True), 1.)
Example #13
    def __init__(self, visual_dim, textual_dim, output_dim, hidden_size,
                 init_ranges, **kwargs):
        (visual_range, textual_range, linear_range_1, linear_range_2,
         linear_range_3) = init_ranges
        visual_layer = FeedforwardSequence([
            BatchNormalization(input_dim=visual_dim).apply,
            LinearMaxout(
                input_dim=visual_dim,
                output_dim=hidden_size,
                weights_init=initialization.Uniform(width=visual_range),
                use_bias=False,
                biases_init=initialization.Constant(0),
                num_pieces=2).apply
        ],
                                           name='visual_layer')
        textual_layer = FeedforwardSequence([
            BatchNormalization(input_dim=textual_dim).apply,
            LinearMaxout(
                input_dim=textual_dim,
                output_dim=hidden_size,
                weights_init=initialization.Uniform(width=textual_range),
                biases_init=initialization.Constant(0),
                use_bias=False,
                num_pieces=2).apply
        ],
                                            name='textual_layer')
        logistic_mlp = MLPGenreClassifier(
            hidden_size, output_dim, hidden_size,
            [linear_range_1, linear_range_2, linear_range_3])
        # logistic_mlp = Sequence([
        #   BatchNormalization(input_dim=hidden_size, name='bn1').apply,
        #   Linear(hidden_size, output_dim, name='linear_output', use_bias=False,
        #          weights_init=initialization.Uniform(width=linear_range_1)).apply,
        #   Logistic().apply
        #], name='logistic_mlp')

        children = [visual_layer, textual_layer, logistic_mlp]
        kwargs.setdefault('use_bias', False)
        kwargs.setdefault('children', children)
        super(LinearSumClassifier, self).__init__(**kwargs)
Example #14
def convolutional_sequence(filter_size, num_filters, image_size, num_channels=1):
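    """Stack conv/batch-norm/ReLU blocks with 2x2 max-pooling after every
    second block, doubling the filter count after each pooling stage."""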
    
    layers = []
    # layers.append(BatchNormalization(name='batchnorm_pixels'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters, use_bias=True, tied_biases=True, name='conv_1'))
    layers.append(BatchNormalization(name='batchnorm_1'))
    layers.append(Rectifier(name='non_linear_1'))
    
    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters, use_bias=True, tied_biases=False, name='conv_2'))
    layers.append(BatchNormalization(name='batchnorm_2'))
    layers.append(Rectifier(name='non_linear_2'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_2'))
        
    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*2, use_bias=True, tied_biases=True, name='conv_3'))
    layers.append(BatchNormalization(name='batchnorm_3'))
    layers.append(Rectifier(name='non_linear_3'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*2, use_bias=True, tied_biases=True, name='conv_4'))
    layers.append(BatchNormalization(name='batchnorm_4'))
    layers.append(Rectifier(name='non_linear_4'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_4'))
    
    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*4, use_bias=True, tied_biases=False, name='conv_5'))
    layers.append(BatchNormalization(name='batchnorm_5'))
    layers.append(Rectifier(name='non_linear_5'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*4, use_bias=True, tied_biases=True, name='conv_6'))
    layers.append(BatchNormalization(name='batchnorm_6'))
    layers.append(Rectifier(name='non_linear_6'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_6'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*8, use_bias=True, tied_biases=True, name='conv_7'))
    layers.append(BatchNormalization(name='batchnorm_7'))
    layers.append(Rectifier(name='non_linear_7'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*8, use_bias=True, tied_biases=True, name='conv_8'))
    layers.append(BatchNormalization(name='batchnorm_8'))
    layers.append(Rectifier(name='non_linear_8'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_8'))
    
    return ConvolutionalSequence(layers, num_channels=num_channels, image_size=image_size, biases_init=Constant(0.),  weights_init=IsotropicGaussian(0.01))
Example #15
def test_batch_normalization_nested():
    x = tensor.tensor4()
    eps = 1e-4
    r_dims = (0, 2, 3)
    batch_dims = (5, 4, 3, 2)
    bn = BatchNormalization(input_dim=batch_dims[1:],
                            broadcastable=(False, True, True),
                            epsilon=eps)
    seq = Sequence([bn.apply, Tanh().apply])
    seq.initialize()
    with batch_normalization(seq):
        y = seq.apply(x)
    rng = numpy.random.RandomState((2016, 1, 18))
    x_ = rng.uniform(size=batch_dims).astype(theano.config.floatX)
    y_ = y.eval({x: x_})
    y_expected = numpy.tanh(
        (x_ - x_.mean(axis=r_dims, keepdims=True)) /
        numpy.sqrt(x_.var(axis=r_dims, keepdims=True) + eps))
    assert_allclose(y_, y_expected, rtol=1e-4)
Example #16
    def __init__(self, visual_dim, textual_dim, output_dim, hidden_size,
                 init_ranges, **kwargs):
        (visual_range, textual_range, linear_range_1, linear_range_2,
         linear_range_3) = init_ranges
        manager_dim = visual_dim + textual_dim
        visual_mlp = MLPGenreClassifier(
            visual_dim,
            output_dim,
            hidden_size, [linear_range_1, linear_range_2, linear_range_3],
            name='visual_mlp')
        textual_mlp = MLPGenreClassifier(
            textual_dim,
            output_dim,
            hidden_size, [linear_range_1, linear_range_2, linear_range_3],
            name='textual_mlp')
        # manager_mlp = MLPGenreClassifier(manager_dim, 2, hidden_size, [
        # linear_range_1, linear_range_2, linear_range_3], output_act=Softmax,
        # name='manager_mlp')
        bn = BatchNormalization(input_dim=manager_dim, name='bn3')
        manager_mlp = Sequence([
            Linear(manager_dim,
                   2,
                   name='linear_output',
                   use_bias=False,
                   weights_init=initialization.Uniform(
                       width=linear_range_1)).apply,
        ],
                               name='manager_mlp')
        fork = Fork(
            input_dim=manager_dim,
            output_dims=[2] * output_dim,
            prototype=manager_mlp,
            output_names=['linear_' + str(i) for i in range(output_dim)])

        children = [visual_mlp, textual_mlp, fork, bn, NDimensionalSoftmax()]
        kwargs.setdefault('use_bias', False)
        kwargs.setdefault('children', children)
        super(MoEClassifier, self).__init__(**kwargs)
Example #17
def test_apply_batch_normalization_nested():
    x = tensor.matrix()
    eps = 1e-8
    batch_dims = (3, 9)
    bn = BatchNormalization(input_dim=5, epsilon=eps)
    mlp = MLP([Sequence([bn.apply, Tanh().apply])], [9, 5],
              weights_init=Constant(0.4),
              biases_init=Constant(1))
    mlp.initialize()
    y = mlp.apply(x)
    cg = apply_batch_normalization(ComputationGraph([y]))
    y_bn = cg.outputs[0]
    rng = numpy.random.RandomState((2016, 1, 18))
    x_ = rng.uniform(size=batch_dims).astype(theano.config.floatX)
    y_ = y_bn.eval({x: x_})
    W_, b_ = map(
        lambda s:
        (getattr(mlp.linear_transformations[0], s).get_value(borrow=True)),
        ['W', 'b'])
    z_ = numpy.dot(x_, W_) + b_
    y_expected = numpy.tanh(
        (z_ - z_.mean(axis=0)) / numpy.sqrt(z_.var(axis=0) + eps))
    assert_allclose(y_, y_expected, rtol=1e-3)
Example #18
num_filter = [16, 32, 48, 64]
num_channels = 3
pooling_size = [(3,3), (2,2), (2,2)]
conv_step = (1,1)
border_mode = 'valid'
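#NOTE: i, j, filter_size and conv_activation are assumed to be defined
#earlier in the original script; this listing is only a fragment.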

conv_layers1 = []
conv_layers1.append(SpatialBatchNormalization(name='spatialBN_{}'.format(i)))
conv_layers1.append(
    Convolutional(
        filter_size=filter_size[j],
        num_filters=num_filter[j],
        step=conv_step,
        border_mode=border_mode,
        name='conv_{}'.format(i)))
conv_layers1.append(BatchNormalization(name='BNconv_{}'.format(i)))
conv_layers1.append(conv_activation[0])
conv_layers1.append(MaxPooling(pooling_size[j], name='pool_{}'.format(i)))

i = i + 1  #next layer in the sequence
conv_layers1.append(
    Convolutional(
        filter_size=filter_size[j+1],
        num_filters=num_filter[j+1],
        step=conv_step,
        border_mode=border_mode,
        name='conv_{}'.format(i)))
conv_layers1.append(BatchNormalization(name='BNconv_{}'.format(i)))
conv_layers1.append(conv_activation[0])
conv_layers1.append(MaxPooling(pooling_size[j+1], name='pool_{}'.format(i)))
Example #19
def convolutional_sequence(filter_size,
                           num_filters,
                           image_size,
                           num_channels=1):
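    # Eight conv / LeakyReLU / batch-norm blocks with 2x2 max-pooling after
    # every second block; the filter count doubles after each pooling stage.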

    layers = []
    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_1'))
    layers.append(LeakyRectifier(name='non_linear_1'))
    layers.append(
        BatchNormalization(input_dim=layers[0].get_dim('output'),
                           name='batchnorm_1'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_2'))
    layers.append(LeakyRectifier(name='non_linear_2'))
    layers.append(
        BatchNormalization(input_dim=layers[3].get_dim('output'),
                           name='batchnorm_2'))

    layers.append(
        MaxPooling(pooling_size=(2, 2),
                   padding=(1, 1),
                   weights_init=IsotropicGaussian(0.01),
                   name='maxpool_2'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters * 2,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_3'))
    layers.append(LeakyRectifier(name='non_linear_3'))
    layers.append(
        BatchNormalization(input_dim=layers[7].get_dim('output'),
                           name='batchnorm_3'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters * 2,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_4'))
    layers.append(LeakyRectifier(name='non_linear_4'))
    layers.append(
        BatchNormalization(input_dim=layers[10].get_dim('output'),
                           name='batchnorm_4'))

    layers.append(
        MaxPooling(pooling_size=(2, 2),
                   padding=(1, 1),
                   weights_init=IsotropicGaussian(0.01),
                   name='maxpool_4'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters * 4,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_5'))
    layers.append(LeakyRectifier(name='non_linear_5'))
    layers.append(
        BatchNormalization(input_dim=layers[14].get_dim('output'),
                           name='batchnorm_5'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters * 4,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_6'))
    layers.append(LeakyRectifier(name='non_linear_6'))
    layers.append(
        BatchNormalization(input_dim=layers[17].get_dim('output'),
                           name='batchnorm_6'))

    layers.append(
        MaxPooling(pooling_size=(2, 2),
                   padding=(1, 1),
                   weights_init=IsotropicGaussian(0.01),
                   name='maxpool_6'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters * 8,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_7'))
    layers.append(LeakyRectifier(name='non_linear_7'))
    layers.append(
        BatchNormalization(input_dim=layers[21].get_dim('output'),
                           name='batchnorm_7'))

    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=num_filters * 8,
                      num_channels=num_channels,
                      use_bias=True,
                      tied_biases=True,
                      weights_init=IsotropicGaussian(0.01),
                      image_size=image_size,
                      name='conv_8'))
    layers.append(LeakyRectifier(name='non_linear_8'))
    layers.append(
        BatchNormalization(input_dim=layers[24].get_dim('output'),
                           name='batchnorm_8'))

    layers.append(
        MaxPooling(pooling_size=(2, 2),
                   padding=(1, 1),
                   weights_init=IsotropicGaussian(0.01),
                   name='maxpool_8'))

    return ConvolutionalSequence(layers,
                                 num_channels=num_channels,
                                 image_size=image_size,
                                 biases_init=Uniform(width=.1))
Example #20
#Create the symbolic variables
x = tensor.tensor4('image_features')
y = tensor.lmatrix('targets')

#Get the parameters
conv_parameters = list(zip(filter_size, num_filter))

#Create the convolutional layers
conv_layers = list(
    interleave([
        (Convolutional(filter_size=filter_size,
                       num_filters=num_filter,
                       name='conv_{}'.format(i))
         for i, (filter_size, num_filter) in enumerate(conv_parameters)),
        (BatchNormalization(name='batch_{}'.format(i))
         for i, _ in enumerate(conv_parameters)),
        (Rectifier() for i, (f_size, num_f) in enumerate(conv_parameters)),
        (MaxPooling(size, name='pool_{}'.format(i))
         for i, size in enumerate(pooling_sizes))
    ]))

#Create the sequence
conv_sequence = ConvolutionalSequence(conv_layers,
                                      num_channels,
                                      image_size=image_shape,
                                      use_bias=False)
#Add the Softmax function
out = Flattener().apply(conv_sequence.apply(x))
predict = NDimensionalSoftmax().apply(out)
Example #21
def intranet(i, j, out, image_size, filter_size, num_filter, num_channels,
             pooling_size, conv_step, border_mode, conv_activation):
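    # Build two parallel two-layer convolutional branches over the same
    # input and concatenate their outputs along the channel axis.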

    conv_layersA = []  #first intra convolutional sequence

    conv_layersA.append(
        Convolutional(filter_size=filter_size[j],
                      num_filters=num_filter[j],
                      step=conv_step,
                      border_mode=border_mode,
                      name='conv_A{}({})'.format(i, j)))
    conv_layersA.append(BatchNormalization(name='BNconv_A{}({})'.format(i, j)))
    conv_layersA.append(conv_activation[0])

    j = j + 1  #next sub layer
    conv_layersA.append(
        Convolutional(filter_size=filter_size[j],
                      num_filters=num_filter[j],
                      step=conv_step,
                      border_mode=border_mode,
                      name='conv_A{}({})'.format(i, j)))
    conv_layersA.append(BatchNormalization(name='BNconv_A{}({})'.format(i, j)))
    conv_layersA.append(conv_activation[0])

    conv_sequenceA = ConvolutionalSequence(conv_layersA,
                                           num_channels=num_channels,
                                           image_size=image_size,
                                           weights_init=Uniform(width=0.2),
                                           use_bias=False,
                                           name='convSeq_A{}'.format(i))
    out1 = conv_sequenceA.apply(out)

    conv_layersB = []  #second intra convolutional sequence

    j = j + 1  #next sub layer
    conv_layersB.append(
        Convolutional(filter_size=filter_size[j],
                      num_filters=num_filter[j],
                      step=conv_step,
                      border_mode=border_mode,
                      name='conv_B{}({})'.format(i, j)))
    conv_layersB.append(BatchNormalization(name='BNconv_B{}({})'.format(i, j)))
    conv_layersB.append(conv_activation[0])

    j = j + 1  #next sub layer
    conv_layersB.append(
        Convolutional(filter_size=filter_size[j],
                      num_filters=num_filter[j],
                      step=conv_step,
                      border_mode=border_mode,
                      name='conv_B{}({})'.format(i, j)))
    conv_layersB.append(BatchNormalization(name='BNconv_B{}({})'.format(i, j)))
    conv_layersB.append(conv_activation[0])

    conv_sequenceB = ConvolutionalSequence(conv_layersB,
                                           num_channels=num_channels,
                                           image_size=image_size,
                                           weights_init=Uniform(width=0.2),
                                           use_bias=False,
                                           name='convSeq_B{}'.format(i))
    out2 = conv_sequenceB.apply(out)

    #Merge
    return tensor.concatenate([out1, out2], axis=1)
Example #22
def build_model(images, labels):
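    # VGG and SubstractBatch are assumed to be project-specific bricks
    # defined elsewhere; VGG serves here as a feature extractor truncated
    # at its conv3_4 layer.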

    vgg = VGG(layer='conv3_4')
    vgg.push_initialization_config()
    vgg.initialize()

    sb = SubstractBatch()

    # Construct a bottom convolutional sequence
    layers = [
        Convolutional(filter_size=(3, 3),
                      num_filters=100,
                      use_bias=True,
                      tied_biases=True,
                      name='final_conv0'),
        BatchNormalization(name='batchnorm_1'),
        Rectifier(name='final_conv0_act'),
        Convolutional(filter_size=(3, 3),
                      num_filters=100,
                      use_bias=True,
                      tied_biases=True,
                      name='final_conv1'),
        BatchNormalization(name='batchnorm_2'),
        Rectifier(name='final_conv1_act'),
        MaxPooling(pooling_size=(2, 2), name='maxpool_final')
    ]
    bottom_conv_sequence = ConvolutionalSequence(
        layers,
        num_channels=256,
        image_size=(40, 40),
        biases_init=Constant(0.),
        weights_init=IsotropicGaussian(0.01))
    bottom_conv_sequence._push_allocation_config()

    # Flatten layer
    flattener = Flattener()

    # Construct a top MLP
    conv_out_dim = numpy.prod(bottom_conv_sequence.get_dim('output'))
    print('dim output conv:', bottom_conv_sequence.get_dim('output'))
    # conv_out_dim = 20 * 40 * 40
    top_mlp = BatchNormalizedMLP(
        [Rectifier(name='non_linear_9'),
         Softmax(name='non_linear_11')], [conv_out_dim, 1024, 10],
        weights_init=IsotropicGaussian(),
        biases_init=Constant(0))

    # Construct feedforward sequence
    ss_seq = FeedforwardSequence([
        vgg.apply, bottom_conv_sequence.apply, flattener.apply, top_mlp.apply
    ])
    ss_seq.push_initialization_config()
    ss_seq.initialize()

    prediction = ss_seq.apply(images)
    cost_noreg = CategoricalCrossEntropy().apply(labels.flatten(), prediction)

    # add regularization
    selector = Selector([top_mlp])
    Ws = selector.get_parameters('W')
    mlp_brick_name = 'batchnormalizedmlp'
    W0 = Ws['/%s/linear_0.W' % mlp_brick_name]
    W1 = Ws['/%s/linear_1.W' % mlp_brick_name]
    cost = cost_noreg + .0001 * (W0**2).sum() + .001 * (W1**2).sum()

    # define learned parameters
    selector = Selector([ss_seq])
    Ws = selector.get_parameters('W')
    bs = selector.get_parameters('b')
    BNSCs = selector.get_parameters('batch_norm_scale')
    BNSHs = selector.get_parameters('batch_norm_shift')

    parameters_top = []
    parameters_top += [v for k, v in Ws.items()]
    parameters_top += [v for k, v in bs.items()]
    parameters_top += [v for k, v in BNSCs.items()]
    parameters_top += [v for k, v in BNSHs.items()]

    selector = Selector([vgg])
    convs = selector.get_parameters()

    parameters_all = []
    parameters_all += parameters_top
    parameters_all += [v for k, v in convs.items()]

    return cost, [parameters_top, parameters_all]
Example #23
############## CREATE THE NETWORK ###############
#Define the parameters
#Create the symbolic variables
x = tensor.tensor4('image_features')
y = tensor.lmatrix('targets')

num_epochs = 1000
layers = []

###############FIRST STAGE#######################
#Create the convolutional layers
layers.append(Convolutional(filter_size=(7,7), step=(2,2), num_filters=96, border_mode='half', name='conv_0'))
layers.append(BatchNormalization(name='batch_0'))
layers.append(Rectifier())
layers.append(MaxPooling((3,3), step=(2,2), padding=(1,1), name='pool_0'))

convSeq = ConvolutionalSequence(layers, num_channels=3, image_size=(220,220), weights_init=Orthogonal(), use_bias=False, name='ConvSeq')
convSeq.initialize()
out = convSeq.apply(x)

#FIRE MODULES
out1 = Fire((55,55), 96, 16, 16, 16, out, 10)
out2 = Fire((55,55), 128, 16, 16, 16, out1, 25)
out3 = Fire((55,55), 128, 32, 32, 32, out2, 300)
out31 = MaxPooling((3,3), step=(2,2), padding=(1,1), name='poolLow').apply(out3)
out4 = Fire((28,28), 256, 32, 32, 32, out31, 45)
out5 = Fire((28,28), 256, 48, 48, 48, out4, 500)
out6 = Fire((28,28), 384, 48, 48, 48, out5, 65)
Example #24
############## CREATE THE NETWORK ###############
#Define the parameters
#Create the symbolic variables
x = tensor.tensor4('image_features')
y = tensor.lmatrix('targets')

num_epochs = 500
layers = []
###############FIRST STAGE#######################
#Create the convolutional layers
layers.append(
    Convolutional(filter_size=(7, 7),
                  num_filters=32,
                  border_mode='half',
                  name='conv_0'))
layers.append(BatchNormalization(name='batch_0'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_0'))

layers.append(
    Convolutional(filter_size=(1, 1),
                  num_filters=64,
                  border_mode='half',
                  name='conv_1'))
layers.append(BatchNormalization(name='batch_1'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_1'))
layers.append(
    Convolutional(filter_size=(3, 3),
                  num_filters=192,
                  border_mode='half',
Example #25
def inception(image_shape, num_input, conv1, conv2, conv3, conv4, conv5, conv6,
              out, i):
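    # Four parallel branches (1x1 conv; 1x1 -> 3x3; 1x1 -> 5x5;
    # 3x3 max-pool -> 1x1 conv), concatenated along the channel axis as in
    # GoogLeNet's Inception module.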
    layers1 = []
    layers2 = []
    layers3 = []
    layers4 = []
    layers1.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv1,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers1.append(BatchNormalization(name='batch_{}'.format(i)))
    layers1.append(Rectifier())
    conv_sequence1 = ConvolutionalSequence(layers1,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence1.initialize()
    out1 = conv_sequence1.apply(out)
    i = i + 1

    layers2.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv2,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    i = i + 1
    layers2.append(
        Convolutional(filter_size=(3, 3),
                      num_channels=conv2,
                      num_filters=conv3,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    conv_sequence2 = ConvolutionalSequence(layers2,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence2.initialize()
    out2 = conv_sequence2.apply(out)
    i = i + 1

    layers3.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv4,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    i = i + 1
    layers3.append(
        Convolutional(filter_size=(5, 5),
                      num_channels=conv4,
                      num_filters=conv5,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    conv_sequence3 = ConvolutionalSequence(layers3,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence3.initialize()
    out3 = conv_sequence3.apply(out)
    i = i + 1

    layers4.append(
        MaxPooling((3, 3),
                   step=(1, 1),
                   padding=(1, 1),
                   name='pool_{}'.format(i)))
    layers4.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv6,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers4.append(BatchNormalization(name='batch_{}'.format(i)))
    layers4.append(Rectifier())
    i = i + 1
    conv_sequence4 = ConvolutionalSequence(layers4,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence4.initialize()
    out4 = conv_sequence4.apply(out)
    #Merge
    return T.concatenate([out1, out2, out3, out4], axis=1)
Example #26
def Fire(image_shape, num_input, conv1, conv2, conv3, out, i):
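    # SqueezeNet-style Fire module: a four-way 1x1 "squeeze", followed by a
    # four-way 1x1 and four-way 3x3 "expand", with all branch outputs
    # concatenated along the channel axis.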
    layers11 = []
    layers12 = []
    layers13 = []
    layers14 = []

    ############# SQUEEZE ###########
    ### 4 Conv 1x1 ###
    layers11.append(Convolutional(filter_size=(1,1), num_channels=num_input, num_filters=conv1, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers11.append(BatchNormalization(name='batch_{}'.format(i)))
    layers11.append(Rectifier())
    conv_sequence11 = ConvolutionalSequence(layers11, num_channels=num_input, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence11.initialize()
    out11 = conv_sequence11.apply(out)
    i = i + 1

    layers12.append(Convolutional(filter_size=(1,1), num_channels=num_input, num_filters=conv1, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers12.append(BatchNormalization(name='batch_{}'.format(i)))
    layers12.append(Rectifier())
    conv_sequence12 = ConvolutionalSequence(layers12, num_channels=num_input, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence12.initialize()
    out12 = conv_sequence12.apply(out)
    i = i + 1

    layers13.append(Convolutional(filter_size=(1,1), num_channels=num_input, num_filters=conv1, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers13.append(BatchNormalization(name='batch_{}'.format(i)))
    layers13.append(Rectifier())
    conv_sequence13 = ConvolutionalSequence(layers13, num_channels=num_input, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence13.initialize()
    out13 = conv_sequence13.apply(out)
    i = i + 1

    layers14.append(Convolutional(filter_size=(1,1), num_channels=num_input, num_filters=conv1, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers14.append(BatchNormalization(name='batch_{}'.format(i)))
    layers14.append(Rectifier())
    conv_sequence14 = ConvolutionalSequence(layers14, num_channels=num_input, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence14.initialize()
    out14 = conv_sequence14.apply(out)
    i = i + 1

    squeezed = T.concatenate([out11, out12, out13, out14], axis=1)

    ####### EXPAND #####
    layers21 = []
    layers22 = []
    layers23 = []
    layers24 = []
    layers31 = []
    layers32 = []
    layers33 = []
    layers34 = []
    num_input2 = conv1 * 4
    ### 4 conv 1x1 ###
    layers21.append(Convolutional(filter_size=(1,1), num_channels=num_input2, num_filters=conv2, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers21.append(BatchNormalization(name='batch_{}'.format(i)))
    layers21.append(Rectifier())
    conv_sequence21 = ConvolutionalSequence(layers21, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence21.initialize()
    out21 = conv_sequence21.apply(squeezed)
    i = i + 1

    layers22.append(Convolutional(filter_size=(1,1), num_channels=num_input2, num_filters=conv2, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers22.append(BatchNormalization(name='batch_{}'.format(i)))
    layers22.append(Rectifier())
    conv_sequence22 = ConvolutionalSequence(layers22, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence22.initialize()
    out22 = conv_sequence22.apply(squeezed)
    i = i + 1

    layers23.append(Convolutional(filter_size=(1,1), num_channels=num_input2, num_filters=conv2, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers23.append(BatchNormalization(name='batch_{}'.format(i)))
    layers23.append(Rectifier())
    conv_sequence23 = ConvolutionalSequence(layers23, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence23.initialize()
    out23 = conv_sequence23.apply(squeezed)
    i = i + 1

    layers24.append(Convolutional(filter_size=(1,1), num_channels=num_input2, num_filters=conv2, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers24.append(BatchNormalization(name='batch_{}'.format(i)))
    layers24.append(Rectifier())
    conv_sequence24 = ConvolutionalSequence(layers24, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence24.initialize()
    out24 = conv_sequence24.apply(squeezed)
    i = i + 1

    ### 4 conv 3x3 ###
    layers31.append(Convolutional(filter_size=(3,3), num_channels=num_input2, num_filters=conv3, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers31.append(BatchNormalization(name='batch_{}'.format(i)))
    layers31.append(Rectifier())
    conv_sequence31 = ConvolutionalSequence(layers31, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence31.initialize()
    out31 = conv_sequence31.apply(squeezed)
    i = i + 1

    layers32.append(Convolutional(filter_size=(3,3), num_channels=num_input2, num_filters=conv3, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers32.append(BatchNormalization(name='batch_{}'.format(i)))
    layers32.append(Rectifier())
    conv_sequence32 = ConvolutionalSequence(layers32, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence32.initialize()
    out32 = conv_sequence32.apply(squeezed)
    i = i + 1

    layers33.append(Convolutional(filter_size=(3,3), num_channels=num_input2, num_filters=conv3, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers33.append(BatchNormalization(name='batch_{}'.format(i)))
    layers33.append(Rectifier())
    conv_sequence33 = ConvolutionalSequence(layers33, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence33.initialize()
    out33 = conv_sequence33.apply(squeezed)
    i = i + 1

    layers34.append(Convolutional(filter_size=(3,3), num_channels=num_input2, num_filters=conv3, image_size=image_shape, border_mode='half', name='conv_{}'.format(i)))
    layers34.append(BatchNormalization(name='batch_{}'.format(i)))
    layers34.append(Rectifier())
    conv_sequence34 = ConvolutionalSequence(layers34, num_channels=num_input2, image_size=image_shape, weights_init=Orthogonal(), use_bias=False, name='convSeq_{}'.format(i))
    conv_sequence34.initialize()
    out34 = conv_sequence34.apply(squeezed)
    i = i + 1

    #Merge
    return T.concatenate([out21, out22, out23, out24, out31, out32, out33, out34], axis=1)