示例#1
0
def test_convolutional_sequence_with_no_input_size():
    """Sequence without ``image_size`` must infer what it can.

    With ``tied_biases=False`` the bias shape depends on the unknown
    image size, so initialization must fail; with ``tied_biases=True``
    the bias shape is known and initialization/apply should succeed.
    """
    # suppose x is outputted by some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 1)
    num_filters = 2
    num_channels = 1
    pooling_size = (1, 1)
    conv = Convolutional(filter_size, num_filters, tied_biases=False,
                         weights_init=Constant(1.), biases_init=Constant(1.))
    act = Rectifier()
    pool = MaxPooling(pooling_size)

    bad_seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                    tied_biases=False)
    # Raw string: '\S' is a regex token, not a Python string escape.
    assert_raises_regexp(ValueError, r'Cannot infer bias size \S+',
                         bad_seq.initialize)

    seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                tied_biases=True)
    try:
        seq.initialize()
        out = seq.apply(x)
    except TypeError:
        assert False, "This should have succeeded"

    assert out.ndim == 4
示例#2
0
def net_dvc(image_size=(32,32)):
    """Build an initialized CNN -> Flattener -> MLP feedforward sequence.

    Presumably for the dogs-vs-cats task (2 output classes) — confirm
    against the caller.
    """
    # Per-layer settings: square filter sizes, pooling sizes, filter counts.
    convos = [5,5,5]
    pools = [2,2,2]
    filters = [100,200,300]

    tuplify = lambda x: (x,x)
    convos = list(map(tuplify, convos))
    # Input channels of layer i are the filter count of layer i-1
    # (3 RGB channels for the first layer); range(1000) merely supplies
    # an index for unique brick names.
    conv_layers = [Convolutional(filter_size=s,num_filters=o, num_channels=i, name="Conv"+str(n))\
            for s,o,i,n in zip(convos, filters, [3] + filters, range(1000))]

    pool_layers = [MaxPooling(p) for p in map(tuplify, pools)]

    activations = [Rectifier() for i in convos]

    # Interleave as conv, activation, pool, conv, activation, pool, ...
    layers = [i for l in zip(conv_layers, activations, pool_layers) for i in l]

    cnn = ConvolutionalSequence(layers, 3,  image_size=image_size, name="cnn",
            weights_init=Uniform(width=.1),
            biases_init=Constant(0))

    # Push the allocation config so the conv output shape can be queried
    # before the bricks are initialized.
    cnn._push_allocation_config()
    cnn_output = np.prod(cnn.get_dim('output'))

    # MLP input size is the flattened conv output.
    mlp_size = [cnn_output,500,2]
    mlp = MLP([Rectifier(), Softmax()], mlp_size,  name="mlp",
            weights_init=Uniform(width=.1),
            biases_init=Constant(0))

    seq = FeedforwardSequence([net.apply for net in [cnn,Flattener(),mlp]])
    seq.push_initialization_config()

    seq.initialize()
    return seq
示例#3
0
class VGGNet(FeedforwardSequence, Initializable):
    """VGG-style convnet: three blocks of 'half'-padded (3, 3) convolutions
    with ReLU activations, each followed by (2, 2) max-pooling, then a
    flattener and a batch-normalized MLP ending in a logistic unit.
    """

    def __init__(self, image_dimension, **kwargs):

        layers = []

        #############################################
        # a first block with 2 convolutions of 32 (3, 3) filters
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 2nd block with 3 convolutions of 64 (3, 3) filters
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 3rd block with 4 convolutions of 128 (3, 3) filters
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        self.conv_sequence = ConvolutionalSequence(layers, 3, image_size=image_dimension)

        flattener = Flattener()

        # The MLP input dimension is prepended later, in
        # _push_allocation_config, once the conv output shape is known.
        self.top_mlp = BatchNormalizedMLP(activations=[Rectifier(), Logistic()], dims=[500, 1])

        application_methods = [self.conv_sequence.apply, flattener.apply, self.top_mlp.apply]

        super(VGGNet, self).__init__(application_methods, biases_init=Constant(0), weights_init=Uniform(width=.1), **kwargs)

    def _push_allocation_config(self):
        """Propagate the image size through the conv stack and size the MLP."""
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        # Fix: removed a leftover Python 2 debug statement
        # (`print conv_out_dim`), which is a syntax error on Python 3.
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp.dims
def create_model_bricks():
    """Build and initialize the convnet and MLP bricks of the model.

    Returns
    -------
    (convnet, mlp) : tuple
        An initialized bias-free ConvolutionalSequence over 64x64 RGB
        inputs (conv + spatial batch norm + ReLU, alternating stride-1
        and stride-2 convolutions) and a BatchNormalizedMLP whose input
        size is the flattened convnet output.
    """
    convnet = ConvolutionalSequence(
        layers=[
            Convolutional(
                filter_size=(4, 4),
                num_filters=32,
                name='conv1'),
            SpatialBatchNormalization(name='batch_norm1'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=32,
                name='conv2'),
            SpatialBatchNormalization(name='batch_norm2'),
            Rectifier(),
            Convolutional(
                filter_size=(4, 4),
                num_filters=64,
                name='conv3'),
            SpatialBatchNormalization(name='batch_norm3'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=64,
                name='conv4'),
            SpatialBatchNormalization(name='batch_norm4'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                num_filters=128,
                name='conv5'),
            SpatialBatchNormalization(name='batch_norm5'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=128,
                name='conv6'),
            SpatialBatchNormalization(name='batch_norm6'),
            Rectifier(),
        ],
        num_channels=3,
        image_size=(64, 64),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()

    # MLP input dimension is the flattened convnet output;
    # 40 outputs — presumably CelebA attributes, confirm against caller.
    mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Logistic()],
        dims=[numpy.prod(convnet.get_dim('output')), 1000, 40],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='mlp')
    mlp.initialize()

    return convnet, mlp
def build_model_mnist():
    """Build an initialized Classifier (CNN + linear MLP) for 28x28 MNIST."""

    # CNN
    filter_size = (5, 5)
    activation = Rectifier().apply
    pooling_size = (2, 2)
    num_filters = 50
    layer0 = ConvolutionalLayer(activation=activation, filter_size=filter_size, num_filters=num_filters,
                              pooling_size=pooling_size,
                              weights_init=Uniform(width=0.1),
                              biases_init=Uniform(width=0.01), name="layer_0")

    filter_size = (3, 3)
    activation = Rectifier().apply
    num_filters = 20
    layer1 = ConvolutionalLayer(activation=activation, filter_size=filter_size, num_filters=num_filters,
                              pooling_size=pooling_size,
                              weights_init=Uniform(width=0.1),
                              biases_init=Uniform(width=0.01), name="layer_1")

    conv_layers = [layer0, layer1]
    convnet = ConvolutionalSequence(conv_layers, num_channels= 1,
                                    image_size=(28, 28))

    convnet.initialize()
    # Flattened conv output size feeds the 10-way linear output layer.
    output_dim = np.prod(convnet.get_dim('output'))
    mlp = MLP(activations=[Identity()], dims=[output_dim, 10],
                        weights_init=Uniform(width=0.1),
                        biases_init=Uniform(width=0.01), name="layer_2")
    mlp.initialize()

    # Classifier is a project-local wrapper — see its definition elsewhere.
    classifier = Classifier(convnet, mlp)
    classifier.initialize()
    return classifier
示例#6
0
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-style convolutional network.

    The conv stack interleaves convolution, activation and max-pooling
    bricks (one triple per entry in ``filter_sizes``/``feature_maps``/
    ``pooling_sizes``), followed by a flattener and a top MLP whose input
    dimension is filled in during allocation.
    """

    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        # Default to unit stride when no step is given.
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=self.conv_step,
                           border_mode=self.border_mode,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        # Size of the last MLP layer, i.e. the network's output.
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        # Propagate the image size through the conv stack so the flattened
        # output dimension can be prepended to the MLP dims.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
示例#7
0
def test_convolutional_sequence_tied_biases_not_pushed_if_not_explicitly_set():
    """tied_biases set on children survives allocation when the sequence
    does not explicitly override it."""
    bricks = []
    for _ in range(3):
        bricks.append(Convolutional(filter_size=(1, 1), num_filters=1,
                                    tied_biases=True))
        bricks.append(Rectifier())
    cnn = ConvolutionalSequence(bricks, num_channels=1, image_size=(1, 1))
    cnn.allocate()
    tied_flags = [child.tied_biases for child in cnn.children
                  if isinstance(child, Convolutional)]
    assert tied_flags
示例#8
0
def test_convolutional_sequence_with_raw_activation():
    """A sequence containing only an activation brick acts elementwise."""
    seq = ConvolutionalSequence([Rectifier()], num_channels=4,
                                image_size=(20, 14))
    # Alternate -1/+1 values so the rectifier zeroes every other entry.
    raw = numpy.arange(2 * 4 * 20 * 14).reshape((2, 4, 20, 14))
    input_ = ((raw % 2) * 2 - 1).astype(theano.config.floatX)
    expected_ = input_ * (input_ > 0)
    x = theano.tensor.tensor4()
    assert_allclose(seq.apply(x).eval({x: input_}), expected_)
示例#9
0
def test_convolutional_transpose_original_size_inferred_conv_sequence():
    """ConvolutionalTranspose inside a sequence infers its original size
    from the sequence's image_size, so allocation must not raise."""
    brick = ConvolutionalTranspose(filter_size=(4, 5), num_filters=10,
                                   step=(3, 2))
    seq = ConvolutionalSequence([brick], num_channels=5, image_size=(6, 9))
    try:
        seq.allocate()
    except Exception as exc:
        raise AssertionError('exception raised: {}: {}'.format(
            exc.__class__.__name__, exc))
示例#10
0
def test_pooling_works_in_convolutional_sequence():
    """Average and max pooling bricks compose inside a sequence."""
    x = tensor.tensor4('x')
    avg_pool = AveragePooling((2, 2), step=(2, 2))
    max_pool = MaxPooling((4, 4), step=(2, 2), ignore_border=True)
    brick = ConvolutionalSequence([avg_pool, max_pool],
                                  image_size=(16, 32), num_channels=3)
    brick.allocate()
    y = brick.apply(x)
    data = numpy.empty((2, 3, 16, 32), dtype=theano.config.floatX)
    out = y.eval({x: data})
    # 16x32 -> 8x16 after averaging -> 3x7 after bordered max-pooling.
    assert out.shape == (2, 3, 3, 7)
示例#11
0
def test_convolutional_sequence_use_bias():
    """use_bias=False on the sequence suppresses all child biases, leaving
    only the three convolution weight parameters."""
    bricks = []
    for _ in range(3):
        bricks.append(Convolutional(filter_size=(1, 1), num_filters=1))
        bricks.append(Rectifier())
    cnn = ConvolutionalSequence(bricks, num_channels=1, image_size=(1, 1),
                                use_bias=False)
    cnn.allocate()
    x = tensor.tensor4()
    y = cnn.apply(x)
    params = ComputationGraph(y).parameters
    assert len(params) == 3 and all(param.name == 'W' for param in params)
示例#12
0
def test_convolutional_sequence_use_bias():
    """use_bias=False should leave only the three conv weight parameters."""
    cnn = ConvolutionalSequence(
        [ConvolutionalActivation(activation=Rectifier().apply, filter_size=(1, 1), num_filters=1) for _ in range(3)],
        num_channels=1,
        image_size=(1, 1),
        use_bias=False,
    )
    cnn.allocate()
    x = tensor.tensor4()
    y = cnn.apply(x)
    # No bias parameters should have been allocated anywhere in the graph.
    params = ComputationGraph(y).parameters
    assert len(params) == 3 and all(param.name == "W" for param in params)
示例#13
0
def test_convolutional_sequence_with_convolutions_raw_activation():
    """Raw activation bricks may be freely mixed with convolutions."""
    seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         Rectifier(),
         Convolutional(filter_size=(5, 5), num_filters=3, step=(2, 2)),
         Tanh()],
        num_channels=2,
        image_size=(21, 39))
    seq.allocate()
    x = theano.tensor.tensor4()
    out = seq.apply(x).eval({x: numpy.ones((10, 2, 21, 39),
                                           dtype=theano.config.floatX)})
    # (21, 39) -> (19, 37) after the valid 3x3 conv, -> (8, 17) after the
    # strided 5x5 conv; final channel count is 3.
    assert out.shape == (10, 3, 8, 17)
示例#14
0
def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences.

    Nose-style generator test: with default population statistics
    (mean 0, stdev 1) the BN sequence must match the BN-free one; scaling
    the last BN's population stdev by 3 must divide its output by 3.
    """
    conv_seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         BatchNormalization(broadcastable=(False, True, True)),
         AveragePooling(pooling_size=(2, 2)),
         BatchNormalization(broadcastable=(False, False, False)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    # Identical sequence without the BN bricks, used as the reference.
    conv_seq_no_bn = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         AveragePooling(pooling_size=(2, 2)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq.initialize()
    conv_seq_no_bn.initialize()
    rng = numpy.random.RandomState((2015, 12, 17))
    # random_unif is a project-local helper — presumably uniform noise of
    # the given shape; verify against its definition.
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    ybn = conv_seq.apply(x)
    y = conv_seq_no_bn.apply(x)
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}))

    # Tripling the last BN's population stdev divides its output by 3.
    std = conv_seq.children[-2].population_stdev
    std.set_value(3 * std.get_value(borrow=True))
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}) / 3.)
示例#15
0
    def __init__(
        self,
        conv_activations,
        num_channels,
        image_shape,
        filter_sizes,
        feature_maps,
        pooling_sizes,
        top_mlp_activations,
        top_mlp_dims,
        conv_step=None,
        border_mode="valid",
        **kwargs
    ):
        """Build a LeNet-style network with batch-normalized conv layers.

        The conv stack interleaves convolution, batch normalization,
        activation and max-pooling bricks, followed by a flattener and a
        top MLP.
        """
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        # Materialize: zip() is a one-shot iterator on Python 3, but it is
        # consumed by two generators below (conv layers and BN layers).
        conv_parameters = list(zip(filter_sizes, feature_maps))

        # Construct convolutional, activation, and pooling layers with corresponding parameters
        self.convolution_layer = (
            Convolutional(
                filter_size=filter_size,
                num_filters=num_filter,
                step=self.conv_step,
                border_mode=self.border_mode,
                name="conv_{}".format(i),
            )
            for i, (filter_size, num_filter) in enumerate(conv_parameters)
        )

        # Unpack the enumerate pair: formatting the raw (index, params)
        # tuple would produce names like "bn_conv_(0, ((5, 5), 32))".
        self.BN_layer = (BatchNormalization(name="bn_conv_{}".format(i)) for i, _ in enumerate(conv_parameters))

        self.pooling_layer = (MaxPooling(size, name="pool_{}".format(i)) for i, size in enumerate(pooling_sizes))

        self.layers = list(interleave([self.convolution_layer, self.BN_layer, conv_activations, self.pooling_layer]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels, image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
示例#16
0
    def build_conv_layers(self, image=None) :
        """Build the conv/pool/ReLU stack and apply it to *image*.

        Returns the symbolic output and the sequence's output dimensions.
        """
        if image is None :
            # Default symbolic input when none is supplied.
            # (Removed a redundant `else: image = image` no-op branch.)
            image = T.ftensor4('spectrogram')

        conv_list = []
        for layer in range(self.layers) :
            # params[layer] presumably holds (filter_size, num_filters,
            # num_channels, pooling_size) — verify against the caller.
            layer_param = self.params[layer]
            conv_layer = Convolutional(layer_param[0], layer_param[1], layer_param[2])
            pool_layer = MaxPooling(layer_param[3])

            conv_layer.name = "convolution"+str(layer)
            pool_layer.name = "maxpooling"+str(layer)

            conv_list.append(conv_layer)
            conv_list.append(pool_layer)
            conv_list.append(Rectifier())

        # First layer's channel count is the sequence's input channels.
        conv_seq = ConvolutionalSequence(
            conv_list,
            self.params[0][2],
            image_size=self.image_size,
            weights_init=IsotropicGaussian(std=0.5, mean=0),
            biases_init=Constant(0))

        conv_seq._push_allocation_config()
        conv_seq.initialize()
        out = conv_seq.apply(image)

        return out, conv_seq.get_dim('output')
    def __init__(
        self,
        conv_activations,
        num_channels,
        image_shape,
        filter_sizes,
        feature_maps,
        conv_steps,
        pooling_sizes,
        top_mlp_activations,
        top_mlp_dims,
        border_mode="valid",
        **kwargs
    ):
        """Build a LeNet variant whose input is spatially batch-normalized.

        Conv stack: SpatialBatchNormalization on the input, then
        interleaved convolution / activation / max-pooling bricks,
        followed by a flattener and a top MLP.
        """

        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        # Consumed exactly once by the generator below.
        conv_parameters = zip(filter_sizes, feature_maps, conv_steps)

        # Construct convolutional, activation, and pooling layers with corresponding parameters
        conv_layers = list(
            interleave(
                [
                    (
                        Convolutional(
                            filter_size=filter_size,
                            num_filters=num_filter,
                            step=conv_step,
                            border_mode=self.border_mode,
                            name="conv_{}".format(i),
                        )
                        for i, (filter_size, num_filter, conv_step) in enumerate(conv_parameters)
                    ),
                    conv_activations,
                    (MaxPooling(size, name="pool_{}".format(i)) for i, size in enumerate(pooling_sizes)),
                ]
            )
        )

        # Applying SpatialBatchNormalization to inputs
        self.layers = [SpatialBatchNormalization()] + conv_layers
        # self.layers = conv_layers
        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels, image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
示例#18
0
class EncoderMapping(Initializable):
    """
    Parameters
    ----------
    layers: :class:`list`
        list of bricks
    num_channels: :class: `int`
           Number of input channels
    image_size: :class:`tuple`
        Image size
    n_emb: :class:`int`
        Dimensionality of the embedding
    use_bias: :class:`bool`
        self explanatory
    """
    def __init__(self, layers, num_channels, image_size, n_emb, use_bias=False, **kwargs):
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size

        # All layers except the last form the image encoder.
        self.pre_encoder = ConvolutionalSequence(layers=layers[:-1],
                                                 num_channels=num_channels,
                                                 image_size=image_size,
                                                 use_bias=use_bias,
                                                 name='encoder_conv_mapping')
        # Allocate now so the pre-encoder's output channel count is known.
        self.pre_encoder.allocate()
        # The last layer sees the pre-encoder output concatenated with the
        # n_emb-dimensional embedding along the channel axis.
        n_channels = n_emb + self.pre_encoder.get_dim('output')[0]
        self.post_encoder = ConvolutionalSequence(layers=[layers[-1]],
                                                  num_channels=n_channels,
                                                  image_size=(1, 1),
                                                  use_bias=use_bias)
        children = [self.pre_encoder, self.post_encoder]
        kwargs.setdefault('children', []).extend(children)
        super(EncoderMapping, self).__init__(**kwargs)

    @application(inputs=['x', 'y'], outputs=['output'])
    def apply(self, x, y):
        "Returns mu and logsigma"
        # Getting emebdding
        pre_z = self.pre_encoder.apply(x)
        # Concatenating along the channel axis
        pre_z_embed_y = tensor.concatenate([pre_z, y], axis=1)
        # propagating through last layer
        return self.post_encoder.apply(pre_z_embed_y)
def main():
    """Gradient-ascent visualization: optimize an input image so one unit
    of a conv layer activates, with an L2 penalty keeping the image small,
    then display the result.
    """
    initial = numpy.random.normal(0, 0.1, (1, 1, 200, 200))
    x = theano.shared(initial)

    conv_layer = ConvolutionalLayer(
        Rectifier().apply,
        (16, 16),
        9,
        (4, 4),
        1
    )
    # NOTE(review): conv_layer2 is constructed but never used — kept for
    # parity with the original; confirm it can be removed.
    conv_layer2 = ConvolutionalLayer(
        Rectifier().apply,
        (7, 7),
        9,
        (2, 2),
        1
    )
    con_seq = ConvolutionalSequence([conv_layer], 1,
                                    image_size=(200, 200),
                                    weights_init=IsotropicGaussian(0.1),
                                    biases_init=Constant(0.)
                                    )

    con_seq.initialize()
    out = con_seq.apply(x)
    # Maximize a single output unit.
    target_out = out[0, 0, 1, 1]

    # Ascend the objective with a quadratic penalty on the image.
    grad = theano.grad(target_out - .1 * (x ** 2).sum(), x)
    updates = {x: x + 5e-1 * grad}

    make_step = theano.function([], target_out, updates=updates)

    # Fix: Python 3 range/print() replace the original xrange and the
    # Python 2 print statement.
    for i in range(400):
        out_val = make_step()
        print(i, out_val)

    # Normalize and replicate to three channels for display.
    image = x.get_value()[0][0]
    image = (image - image.mean()) / image.std()
    image = numpy.array([image, image, image]).transpose(1, 2, 0)
    plt.imshow(numpy.cast['uint8'](image * 65. + 128.), interpolation='none')
    plt.show()
示例#20
0
def test_convolutional_sequence_activation_get_dim():
    """get_dim('output') works for activation-only and mixed sequences."""
    seq = ConvolutionalSequence([Tanh()], num_channels=9, image_size=(4, 6))
    seq.allocate()
    # A bare activation preserves the input dimensions.
    assert seq.get_dim('output') == (9, 4, 6)

    conv = Convolutional(filter_size=(7, 7), num_filters=5,
                         border_mode=(1, 1))
    seq = ConvolutionalSequence([conv, Tanh()], num_channels=8,
                                image_size=(8, 11))
    seq.allocate()
    # (8+2-7+1, 11+2-7+1) = (4, 7) spatial output with 5 filters.
    assert seq.get_dim('output') == (5, 4, 7)
示例#21
0
    def __init__(self, layers, num_channels, image_size, n_emb, use_bias=False, **kwargs):
        """Split *layers* into a pre-encoder (all but the last brick) and a
        post-encoder (the last brick), which consumes the pre-encoder
        output concatenated with an n_emb-channel embedding."""
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size

        self.pre_encoder = ConvolutionalSequence(layers=layers[:-1],
                                                 num_channels=num_channels,
                                                 image_size=image_size,
                                                 use_bias=use_bias,
                                                 name='encoder_conv_mapping')
        # Allocate now so the pre-encoder's output channel count is known.
        self.pre_encoder.allocate()
        n_channels = n_emb + self.pre_encoder.get_dim('output')[0]
        self.post_encoder = ConvolutionalSequence(layers=[layers[-1]],
                                                  num_channels=n_channels,
                                                  image_size=(1, 1),
                                                  use_bias=use_bias)
        # Register both sequences as child bricks alongside any supplied.
        children = [self.pre_encoder, self.post_encoder]
        kwargs.setdefault('children', []).extend(children)
        super(EncoderMapping, self).__init__(**kwargs)
示例#22
0
    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        """Build a LeNet variant using fused ConvolutionalActivation bricks
        interleaved with max-pooling, followed by a flattener and MLP."""
        # Default to unit stride when no step is given.
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        # Consumed exactly once by the generator below.
        conv_parameters = zip(conv_activations, filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (ConvolutionalActivation(filter_size=filter_size,
                                     num_filters=num_filter,
                                     activation=activation.apply,
                                     step=self.conv_step,
                                     border_mode=self.border_mode,
                                     name='conv_{}'.format(i))
             for i, (activation, filter_size, num_filter)
             in enumerate(conv_parameters)),
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
示例#23
0
def test_convolutional_sequence():
    """Channel counts propagate through a sequence and the output of two
    constant-weight layers matches the hand-computed value."""
    x = tensor.tensor4('x')
    num_channels = 4
    pooling_size = 3
    batch_size = 5
    activation = Rectifier().apply

    conv = ConvolutionalLayer(activation, (3, 3), 5,
                              (pooling_size, pooling_size),
                              weights_init=Constant(1.),
                              biases_init=Constant(5.))
    conv2 = ConvolutionalActivation(activation, (2, 2), 4,
                                    weights_init=Constant(1.))

    seq = ConvolutionalSequence([conv, conv2], num_channels,
                                image_size=(17, 13))
    seq.push_allocation_config()
    # Input channels of each layer are inferred from the previous layer.
    assert conv.num_channels == 4
    assert conv2.num_channels == 5
    conv2.convolution.use_bias = False
    y = seq.apply(x)
    seq.initialize()
    func = function([x], y)

    x_val = numpy.ones((batch_size, 4, 17, 13), dtype=theano.config.floatX)
    # Layer 1 on all-ones input: 3x3 filter of ones over 4 channels + bias
    # = 9*4 + 5 per unit; layer 2 (no bias): 2x2 filter of ones over 5
    # channels sums 4*5 such units.
    y_val = (numpy.ones((batch_size, 4, 4, 3)) *
             (9 * 4 + 5) * 4 * 5)
    assert_allclose(func(x_val), y_val)
示例#24
0
def test_convolutional_sequence_with_no_input_size():
    """Sequence without ``image_size`` must infer what it can.

    Untied biases need the image size, so initialization fails; tied
    biases do not, so initialization and apply succeed.
    """
    # suppose x is outputted by some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 1)
    num_filters = 2
    num_channels = 1
    pooling_size = (1, 1)
    conv = Convolutional(filter_size,
                         num_filters,
                         tied_biases=False,
                         weights_init=Constant(1.),
                         biases_init=Constant(1.))
    act = Rectifier()
    pool = MaxPooling(pooling_size)

    bad_seq = ConvolutionalSequence([conv, act, pool],
                                    num_channels,
                                    tied_biases=False)
    # Raw string: '\S' is a regex token, not a Python string escape.
    assert_raises_regexp(ValueError, r'Cannot infer bias size \S+',
                         bad_seq.initialize)

    seq = ConvolutionalSequence([conv, act, pool],
                                num_channels,
                                tied_biases=True)
    try:
        seq.initialize()
        out = seq.apply(x)
    except TypeError:
        assert False, "This should have succeeded"

    assert out.ndim == 4
示例#25
0
    def __init__(self, layers, num_channels, image_size, use_bias=False, **kwargs):
        """Wrap *layers* in a single ConvolutionalSequence used as the
        decoder mapping."""
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size

        self.mapping = ConvolutionalSequence(layers=layers,
                                             num_channels=num_channels,
                                             image_size=image_size,
                                             use_bias=use_bias,
                                             name='decoder_mapping')
        # Register the sequence as a child brick alongside any supplied.
        children = [self.mapping]
        kwargs.setdefault('children', []).extend(children)
        super(Decoder, self).__init__(**kwargs)
示例#26
0
    def __init__(self, conv_activations, num_channels, image_shape,
                 noise_batch_size,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid',
                 tied_biases=True, **kwargs):
        """Build a LeNet variant using noisy convolution/linear bricks.

        NoisyConvolutional/NoisyLinear are project-local bricks that take
        a noise_batch_size — see their definitions elsewhere.
        """
        # Default to unit stride when no step is given.
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.noise_batch_size = noise_batch_size
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        # Consumed exactly once by the generator below.
        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (NoisyConvolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=self.conv_step,
                           border_mode=self.border_mode,
                           tied_biases=self.tied_biases,
                           noise_batch_size=self.noise_batch_size,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(
                self.layers, num_channels,
                image_size=image_shape)
        self.conv_sequence.name = 'cs'

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims,
                prototype=NoisyLinear(noise_batch_size=self.noise_batch_size))

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(NoisyLeNet, self).__init__(application_methods, **kwargs)
示例#27
0
def net_dvc(image_size=(32, 32)):
    """Build a three-block convnet followed by a two-layer MLP.

    Parameters
    ----------
    image_size : tuple
        Spatial size of the input images (default ``(32, 32)``).

    Returns
    -------
    FeedforwardSequence
        Initialized network mapping an image batch to two softmax outputs.
    """
    filter_widths = [5, 5, 5]
    pool_widths = [2, 2, 2]
    feature_maps = [100, 200, 300]
    input_channels = [3] + feature_maps

    # conv -> rectifier -> max-pool, repeated once per block
    layers = []
    for index, (width, pool, n_out, n_in) in enumerate(
            zip(filter_widths, pool_widths, feature_maps, input_channels)):
        layers.append(Convolutional(filter_size=(width, width),
                                    num_filters=n_out,
                                    num_channels=n_in,
                                    name="Conv" + str(index)))
        layers.append(Rectifier())
        layers.append(MaxPooling((pool, pool)))

    cnn = ConvolutionalSequence(layers, 3, image_size=image_size, name="cnn",
                                weights_init=Uniform(width=.1),
                                biases_init=Constant(0))

    # Push the allocation config so the output dimension can be queried
    # before the bricks are allocated.
    cnn._push_allocation_config()
    flat_dim = np.prod(cnn.get_dim('output'))

    mlp = MLP([Rectifier(), Softmax()], [flat_dim, 500, 2], name="mlp",
              weights_init=Uniform(width=.1),
              biases_init=Constant(0))

    network = FeedforwardSequence(
        [brick.apply for brick in (cnn, Flattener(), mlp)])
    network.push_initialization_config()

    network.initialize()
    return network
示例#28
0
def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences."""
    bn_layers = [
        Convolutional(filter_size=(3, 3), num_filters=4),
        BatchNormalization(broadcastable=(False, True, True)),
        AveragePooling(pooling_size=(2, 2)),
        BatchNormalization(broadcastable=(False, False, False)),
        MaxPooling(pooling_size=(2, 2), step=(1, 1)),
    ]
    plain_layers = [
        Convolutional(filter_size=(3, 3), num_filters=4),
        AveragePooling(pooling_size=(2, 2)),
        MaxPooling(pooling_size=(2, 2), step=(1, 1)),
    ]
    seq_bn = ConvolutionalSequence(bn_layers,
                                   weights_init=Constant(1.),
                                   biases_init=Constant(2.),
                                   image_size=(10, 8),
                                   num_channels=9)
    seq_plain = ConvolutionalSequence(plain_layers,
                                      weights_init=Constant(1.),
                                      biases_init=Constant(2.),
                                      image_size=(10, 8),
                                      num_channels=9)
    seq_bn.initialize()
    seq_plain.initialize()

    rng = numpy.random.RandomState((2015, 12, 17))
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    out_bn = seq_bn.apply(x)
    out_plain = seq_plain.apply(x)

    # Freshly initialized BN layers should leave the output unchanged.
    yield (assert_equal, out_bn.eval({x: input_}),
           out_plain.eval({x: input_}))

    # Tripling the stored population stdev of the last BN layer should
    # scale the BN output down by the same factor.
    stdev = seq_bn.children[-2].population_stdev
    stdev.set_value(3 * stdev.get_value(borrow=True))
    yield (assert_equal, out_bn.eval({x: input_}),
           out_plain.eval({x: input_}) / 3.)
示例#29
0
    def __init__(self, image_dimension, **kwargs):
        """Build a small VGG-style network: three conv blocks (2x32, 3x64
        and 4x128 filters of size (3, 3) with 'half' padding), each block
        followed by (2, 2) max-pooling, then a batch-normalized MLP head.
        """
        # (number of convolutions, number of filters) for each block.
        block_spec = [(2, 32), (3, 64), (4, 128)]

        layers = []
        for depth, n_filters in block_spec:
            for _ in range(depth):
                layers.append(Convolutional((3, 3), n_filters,
                                            border_mode='half'))
                layers.append(Rectifier())
            # Downsample between blocks.
            layers.append(MaxPooling((2, 2)))

        self.conv_sequence = ConvolutionalSequence(
            layers, 3, image_size=image_dimension)

        flattener = Flattener()

        # NOTE(review): dims=[500, 1] assumes the flattened convolutional
        # output has 500 features -- confirm against image_dimension.
        self.top_mlp = BatchNormalizedMLP(
            activations=[Rectifier(), Logistic()], dims=[500, 1])

        application_methods = [self.conv_sequence.apply, flattener.apply,
                               self.top_mlp.apply]

        super(VGGNet, self).__init__(application_methods,
                                     biases_init=Constant(0),
                                     weights_init=Uniform(width=.1),
                                     **kwargs)
示例#30
0
def test_convolutional_sequence_tied_biases_pushed_if_explicitly_set():
    """A sequence-level ``tied_biases``, when explicitly set, is pushed
    down to the Convolutional children during allocation.
    """
    cnn = ConvolutionalSequence(
        sum([[Convolutional(filter_size=(1, 1), num_filters=1,
                            tied_biases=True), Rectifier()]
             for _ in range(3)], []),
        num_channels=1, image_size=(1, 1), tied_biases=False)
    cnn.allocate()
    # `all(...)` is required: asserting the bare list comprehension would
    # always pass, since any non-empty list is truthy regardless of the
    # truth of its elements.
    assert all(not child.tied_biases for child in cnn.children
               if isinstance(child, Convolutional))

    cnn = ConvolutionalSequence(
        sum([[Convolutional(filter_size=(1, 1), num_filters=1), Rectifier()]
             for _ in range(3)], []),
        num_channels=1, image_size=(1, 1), tied_biases=True)
    cnn.allocate()
    assert all(child.tied_biases for child in cnn.children
               if isinstance(child, Convolutional))
示例#31
0
    def __init__(self, image_shape=None, output_size=None, **kwargs):
        """Build the all-convolutional network's brick graph.

        Parameters
        ----------
        image_shape : tuple, optional
            Input spatial shape; defaults to (32, 32).
        output_size : int, optional
            Number of output classes; defaults to 10.
        """
        self.num_channels = 3
        self.image_shape = image_shape or (32, 32)
        self.output_size = output_size or 10
        # (num_filters, filter_size, stride, border_mode) per conv layer.
        conv_parameters = [(96, 3, 1, 'half'), (96, 3, 1, 'half'),
                           (96, 3, 2, 'half'), (192, 3, 1, 'half'),
                           (192, 3, 1, 'half'), (192, 3, 2, 'half'),
                           (192, 3, 1, 'half'), (192, 1, 1, 'valid'),
                           (10, 1, 1, 'valid')]
        fc_layer = 10  # kept for the (currently disabled) top MLP below

        self.convolutions = [
            Convolutional(filter_size=(width, width),
                          num_filters=maps,
                          step=(stride, stride),
                          border_mode=mode,
                          tied_biases=True,
                          name='conv_{}'.format(index))
            for index, (maps, width, stride, mode)
            in enumerate(conv_parameters)
        ]

        # Add two trivial channel masks to allow by-channel dropout.
        self.convolutions.insert(6, ChannelMask(name='mask_1'))
        self.convolutions.insert(3, ChannelMask(name='mask_0'))

        # Follow every layer (masks included) with a rectifier.
        rectifiers = (Rectifier() for _ in self.convolutions)
        self.conv_sequence = ConvolutionalSequence(
            list(interleave([self.convolutions, rectifiers])),
            self.num_channels, self.image_shape)

        # The AllConvNet applies average pooling to combine top-level
        # features across the image.
        self.flattener = GlobalAverageFlattener()

        # Then it inserts one final 10-way FC layer before softmax
        # self.top_mlp = MLP([Rectifier(), Softmax()],
        #     [conv_parameters[-1][0], fc_layer, self.output_size])
        self.top_softmax = Softmax()

        application_methods = [
            self.conv_sequence.apply, self.flattener.apply,
            self.top_softmax.apply
        ]

        super(AllConvNet, self).__init__(application_methods, **kwargs)
示例#32
0
def convolutional_sequence(filter_size, num_filters, image_size, num_channels=1):
    """Build an eight-convolution stack (conv -> batch-norm -> rectifier)
    with (2, 2) max-pooling after every second convolution and the filter
    count doubling every two convolutions.

    Returns an uninitialized ``ConvolutionalSequence`` configured with
    ``Constant(0.)`` biases and ``IsotropicGaussian(0.01)`` weights.
    """
    # (filter-count multiplier, tied_biases) for conv_1 .. conv_8.
    # NOTE(review): conv_2 and conv_5 use tied_biases=False while the rest
    # use True -- this looks inconsistent; confirm it is intentional.
    conv_spec = [(1, True), (1, False), (2, True), (2, True),
                 (4, False), (4, True), (8, True), (8, True)]

    layers = []
    for index, (multiplier, tied) in enumerate(conv_spec, start=1):
        layers.append(Convolutional(filter_size=filter_size,
                                    num_filters=num_filters * multiplier,
                                    use_bias=True,
                                    tied_biases=tied,
                                    name='conv_{}'.format(index)))
        layers.append(BatchNormalization(name='batchnorm_{}'.format(index)))
        layers.append(Rectifier(name='non_linear_{}'.format(index)))
        # Pool after every pair of convolutions.
        if index % 2 == 0:
            layers.append(MaxPooling(pooling_size=(2, 2),
                                     name='maxpool_{}'.format(index)))

    return ConvolutionalSequence(layers, num_channels=num_channels,
                                 image_size=image_size,
                                 biases_init=Constant(0.),
                                 weights_init=IsotropicGaussian(0.01))
示例#33
0
def test_border_mode_not_pushed():
    """Children that set ``border_mode`` themselves keep their setting."""
    layers = [
        Convolutional(border_mode='full'),
        ConvolutionalActivation(Rectifier().apply),
        ConvolutionalActivation(Rectifier().apply, border_mode='valid'),
        ConvolutionalActivation(Rectifier().apply, border_mode='full')
    ]

    # Without a sequence-level setting, only the child that left
    # border_mode unset ends up with the 'valid' default.
    stack = ConvolutionalSequence(layers)
    stack.push_allocation_config()
    for position, expected in enumerate(['full', 'valid', 'valid', 'full']):
        assert stack.children[position].border_mode == expected

    # After pushing with an explicit sequence-level 'full', every child
    # reports 'full'.
    stack2 = ConvolutionalSequence(layers, border_mode='full')
    stack2.push_allocation_config()
    for position in range(4):
        assert stack2.children[position].border_mode == 'full'
示例#34
0
文件: main.py 项目: rizar/ift6266h16
    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes, repeat_times,
                 top_mlp_activations, top_mlp_dims,
                 stride, batch_norm, border_mode='valid', **kwargs):
        """Build a LeNet with repeated convolutions per stage.

        Stage ``i`` stacks ``repeat_times[i]`` convolutions (each optionally
        followed by batch normalization) with the stage's activation, then a
        max-pooling layer.  Only the very first convolution of the network
        uses the configured ``stride``.
        """
        self.stride = stride
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        layers = []
        for stage, activation in enumerate(conv_activations):
            for repeat in range(repeat_times[stage]):
                first = stage == 0 and repeat == 0
                layers.append(Convolutional(
                    filter_size=filter_sizes[stage],
                    num_filters=feature_maps[stage],
                    step=(self.stride, self.stride) if first else (1, 1),
                    border_mode=self.border_mode,
                    name='conv_{}_{}'.format(stage, repeat)))
                if batch_norm:
                    # 'mean-only' selects mean-only batch normalization.
                    layers.append(BatchNormalization(
                        broadcastable=(False, True, True),
                        conserve_memory=True,
                        mean_only=batch_norm == 'mean-only',
                        name='bn_{}_{}'.format(stage, repeat)))
                layers.append(activation)
            layers.append(MaxPooling(pooling_sizes[stage],
                                     name='pool_{}'.format(stage)))
        self.layers = layers

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Top MLP classifier over the flattened convolutional features.
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # The flattener turns the (batch_size, ...) tensor coming out of
        # the last convolutional layer into a (batch_size, features) matrix.
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
示例#35
0
    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        """Build a LeNet: conv/activation pairs, then pooling layers,
        then a flattener and a top MLP.

        Note that every max-pooling layer is appended *after* the whole
        conv/activation stack, preserving the original layer order.
        """
        self.conv_step = (1, 1) if conv_step is None else conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        layers = []
        for index, (activation, size, maps) in enumerate(
                zip(conv_activations, filter_sizes, feature_maps)):
            layers.append(Convolutional(filter_size=size,
                                        num_filters=maps,
                                        step=self.conv_step,
                                        name='conv_{}'.format(index)))
            layers.append(activation)
        for index, size in enumerate(pooling_sizes):
            layers.append(MaxPooling(size, name='pool_{}'.format(index)))
        self.layers = layers

        self.conv_sequence = ConvolutionalSequence(
            self.layers, num_channels,
            border_mode=self.border_mode,
            image_size=image_shape)

        # Top MLP classifier over the flattened convolutional features.
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # The flattener turns the (batch_size, ...) tensor coming out of
        # the last convolutional layer into a (batch_size, features) matrix.
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
示例#36
0
class Decoder(Initializable):
    """Decoder brick that runs a conditional input through a
    convolutional stack.
    """

    def __init__(self, layers, num_channels, image_size, use_bias=False,
                 **kwargs):
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size

        # Wrap the provided layers in a single convolutional pipeline.
        self.mapping = ConvolutionalSequence(layers=layers,
                                             num_channels=num_channels,
                                             image_size=image_size,
                                             use_bias=use_bias,
                                             name='decoder_mapping')
        kwargs.setdefault('children', []).append(self.mapping)
        super(Decoder, self).__init__(**kwargs)

    @application(inputs=['z', 'y'], outputs=['outputs'])
    def apply(self, z, y, application_call):
        """Concatenate ``z`` with the conditioning ``y`` along the channel
        axis and decode the result."""
        joined = tensor.concatenate([z, y], axis=1)
        return self.mapping.apply(joined)
示例#37
0
def test_border_mode_not_pushed():
    """Children that set ``border_mode`` themselves keep their setting."""
    layers = [Convolutional(border_mode='full'),
              ConvolutionalActivation(Rectifier().apply),
              ConvolutionalActivation(Rectifier().apply, border_mode='valid'),
              ConvolutionalLayer(Rectifier().apply, border_mode='full')]

    # Without a sequence-level setting, only the child that left
    # border_mode unset ends up with the 'valid' default.
    stack = ConvolutionalSequence(layers)
    stack.push_allocation_config()
    for position, expected in enumerate(['full', 'valid', 'valid', 'full']):
        assert stack.children[position].border_mode == expected

    # After pushing with an explicit sequence-level 'full', every child
    # reports 'full'.
    stack2 = ConvolutionalSequence(layers, border_mode='full')
    stack2.push_allocation_config()
    for position in range(4):
        assert stack2.children[position].border_mode == 'full'
示例#38
0
# NOTE(review): this chunk continues an earlier script -- `layers`, `x` and
# `inception` are defined above the visible region; confirm before reuse.
layers.append(BatchNormalization(name='batch_1'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_1'))
layers.append(
    Convolutional(filter_size=(3, 3),
                  num_filters=192,
                  border_mode='half',
                  name='conv_2'))
layers.append(BatchNormalization(name='batch_2'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_2'))

# Create the sequence (no biases; orthogonal weight initialization)
conv_sequence = ConvolutionalSequence(layers,
                                      num_channels=3,
                                      image_size=(160, 160),
                                      weights_init=Orthogonal(),
                                      use_bias=False,
                                      name='convSeq')
# Initialize the convnet
conv_sequence.initialize()
# Output of the first (plain convolutional) stage
out = conv_sequence.apply(x)

###############SECOND STAGE#####################
# Chain of inception modules; presumably the arguments are
# (spatial size, input channels, branch widths..., input, layer id)
# -- TODO confirm against the `inception` helper's signature.
out2 = inception((20, 20), 192, 64, 96, 128, 16, 32, 32, out, 10)
out3 = inception((20, 20), 256, 128, 128, 192, 32, 96, 64, out2, 20)
out31 = MaxPooling((2, 2), name='poolLow').apply(out3)

out4 = inception((10, 10), 480, 192, 96, 208, 16, 48, 64, out31, 30)
out5 = inception((10, 10), 512, 160, 112, 224, 24, 64, 64, out4, 40)
out6 = inception((10, 10), 512, 128, 128, 256, 24, 64, 64, out5, 50)
示例#39
0
# Convolutional layers


# Two conv+pool layers: 128 then 256 feature maps, 5x5 filters, 2x2 pooling.
filter_sizes = [(5, 5)] * 2
num_filters = [128, 256]
pooling_sizes = [(2, 2)] * 2
activation = Logistic().apply
conv_layers = [
    b.ConvolutionalLayer(activation, filter_size, num_filters_, pooling_size, num_channels=3)
    for filter_size, num_filters_, pooling_size
    in zip(filter_sizes, num_filters, pooling_sizes)
]

convnet = ConvolutionalSequence(conv_layers, num_channels=3,
                                image_size=(32, 32),
                                weights_init=Uniform(0, 0.2),
                                biases_init=Constant(0.))


convnet.initialize()
# NOTE(review): `X` is defined outside this chunk -- presumably the
# symbolic image batch; confirm.
conv_features = Flattener().apply(convnet.apply(X))


# MLP

mlp = MLP(activations=[Logistic(name='sigmoid_0'),
          Softmax(name='softmax_1')], dims=[ 256, 256, 256, 2],
          weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
# The next two lines look like pasted interpreter output: both are no-op
# expressions at module level and have no effect on the program.
[child.name for child in mlp.children]
['linear_0', 'sigmoid_0', 'linear_1', 'softmax_1']
Y = mlp.apply(conv_features)
示例#40
0
# NOTE(review): continues an earlier script; `conv_layers1`, `conv_activation`,
# `i`, `j`, `filter_size`, `num_filter`, `conv_step`, `border_mode`,
# `pooling_size`, `num_channels`, `image_size` and `x` are all defined above
# the visible region.
conv_layers1.append(BatchNormalization(name='BNconv_{}'.format(i)))
# NOTE(review): the same activation brick instance is appended more than
# once -- confirm that sharing it is intended.
conv_layers1.append(conv_activation[0])

i = i + 1 # sequence counter
conv_layers1.append(
    Convolutional(
        filter_size=filter_size[j+3],
        num_filters=num_filter[j+3],
        step=conv_step,
        border_mode=border_mode,
        name='conv_{}'.format(i)))
conv_layers1.append(BatchNormalization(name='BNconv_{}'.format(i)))
conv_layers1.append(conv_activation[0])
conv_layers1.append(MaxPooling(pooling_size[j+2], name='pool_{}'.format(i)))

# Assemble and initialize the first convolutional sequence.
conv_sequence1 = ConvolutionalSequence(conv_layers1, num_channels=num_channels, image_size=image_size,
weights_init=Uniform(width=0.2), biases_init=Constant(0.), name='ConvSeq1_{}'.format(i))
conv_sequence1.initialize()
out1 = conv_sequence1.apply(x)


################# Convolutional Sequence 2 #################
# conv_layers2 parameters
i = i+1 # sequence counter
j = 0 # sub-layer counter
filter_size = [(7,7), (5,5), (2,2), (5,5)]
num_filter = [16, 32, 48, 64]
num_channels = 3
pooling_size = [(3,3), (2,2), (2,2)]
conv_step = (1,1)
border_mode = 'valid'
示例#41
0
#Create the convolution layers
# NOTE(review): `conv_parameters`, `pooling_sizes`, `image_shape`,
# `num_channels`, `mlp_hiddens`, `output_size`, `mlp_activation` and `x`
# come from earlier in the script.
conv_layers = list(
    interleave([(ConvolutionalActivation(filter_size=filter_size,
                                         num_filters=num_filter,
                                         activation=activation,
                                         name='conv_{}'.format(i))
                 for i, (activation, filter_size,
                         num_filter) in enumerate(conv_parameters)),
                (MaxPooling(size, name='pool_{}'.format(i))
                 for i, size in enumerate(pooling_sizes))]))

#Create the sequence
conv_sequence = ConvolutionalSequence(conv_layers,
                                      num_channels,
                                      image_size=image_shape,
                                      weights_init=Uniform(width=0.2),
                                      biases_init=Constant(0.))
#Initialize the convnet
conv_sequence.initialize()
#Add the MLP
# The flattened convnet output dimension feeds the first MLP layer.
top_mlp_dims = [np.prod(conv_sequence.get_dim('output'))
                ] + mlp_hiddens + [output_size]
out = Flattener().apply(conv_sequence.apply(x))
mlp = MLP(mlp_activation,
          top_mlp_dims,
          weights_init=Uniform(0, 0.2),
          biases_init=Constant(0.))
#Initialize the MLP
mlp.initialize()
#Get the output
示例#42
0
def build_and_run(label, config):
    """Build the convnet described by ``config``, attach L1/L2
    regularization, monitoring, plotting and early stopping, then run the
    training main loop.

    Parameters
    ----------
    label : str
        Experiment name; used for the checkpoint filename and the Bokeh
        plot title.
    config : dict
        Hyper-parameter dictionary; the expected keys are unpacked below.
        An optional ``'test'`` key switches the data streams to test mode.
    """
    ############## CREATE THE NETWORK ###############
    #Define the parameters
    num_epochs, num_batches, num_channels, image_shape, filter_size, num_filter, pooling_sizes, mlp_hiddens, output_size, batch_size, activation, mlp_activation = config[
        'num_epochs'], config['num_batches'], config['num_channels'], config[
            'image_shape'], config['filter_size'], config[
                'num_filter'], config['pooling_sizes'], config[
                    'mlp_hiddens'], config['output_size'], config[
                        'batch_size'], config['activation'], config[
                            'mlp_activation']
    #    print(num_epochs, num_channels, image_shape, filter_size, num_filter, pooling_sizes, mlp_hiddens, output_size, batch_size, activation, mlp_activation)
    # Regularization strengths for the L1 and L2 penalties below.
    lambda_l1 = 0.000025
    lambda_l2 = 0.000025

    print("Building model")
    #Create the symbolic variables
    x = T.tensor4('image_features')
    y = T.lmatrix('targets')

    #Get the parameters
    conv_parameters = zip(filter_size, num_filter)

    #Create the convolution layers: conv -> activation -> max-pool per stage
    conv_layers = list(
        interleave([(Convolutional(filter_size=filter_size,
                                   num_filters=num_filter,
                                   name='conv_{}'.format(i))
                     for i, (filter_size,
                             num_filter) in enumerate(conv_parameters)),
                    (activation),
                    (MaxPooling(size, name='pool_{}'.format(i))
                     for i, size in enumerate(pooling_sizes))]))
    #    (AveragePooling(size, name='pool_{}'.format(i)) for i, size in enumerate(pooling_sizes))]))

    #Create the sequence
    conv_sequence = ConvolutionalSequence(conv_layers,
                                          num_channels,
                                          image_size=image_shape,
                                          weights_init=Uniform(width=0.2),
                                          biases_init=Constant(0.))
    #Initialize the convnet
    conv_sequence.initialize()
    #Add the MLP; its input size is the flattened convnet output
    top_mlp_dims = [np.prod(conv_sequence.get_dim('output'))
                    ] + mlp_hiddens + [output_size]
    out = Flattener().apply(conv_sequence.apply(x))
    mlp = MLP(mlp_activation,
              top_mlp_dims,
              weights_init=Uniform(0, 0.2),
              biases_init=Constant(0.))
    #Initialize the MLP
    mlp.initialize()
    #Get the output
    predict = mlp.apply(out)

    cost = CategoricalCrossEntropy().apply(y.flatten(),
                                           predict).copy(name='cost')
    error = MisclassificationRate().apply(y.flatten(), predict)

    #Little trick to plot the error rate in two different plots (the same
    #variable cannot be used twice in the plot, for an unknown reason)
    error_rate = error.copy(name='error_rate')
    error_rate2 = error.copy(name='error_rate2')

    ########### REGULARIZATION ##################
    cg = ComputationGraph([cost])
    weights = VariableFilter(roles=[WEIGHT])(cg.variables)
    biases = VariableFilter(roles=[BIAS])(cg.variables)
    # # l2_penalty_weights = T.sum([i*lambda_l2/len(weights) * (W ** 2).sum() for i,W in enumerate(weights)]) # Gradually increase penalty for layer
    # Flat L2 over every weight and bias (the enumerate index is unused).
    l2_penalty = T.sum([
        lambda_l2 * (W**2).sum() for i, W in enumerate(weights + biases)
    ])  # Gradually increase penalty for layer
    # # #l2_penalty_bias = T.sum([lambda_l2*(B **2).sum() for B in biases])
    # # #l2_penalty = l2_penalty_weights + l2_penalty_bias
    l2_penalty.name = 'l2_penalty'
    l1_penalty = T.sum([lambda_l1 * T.abs_(z).sum() for z in weights + biases])
    #  l1_penalty_weights = T.sum([i*lambda_l1/len(weights) * T.abs_(W).sum() for i,W in enumerate(weights)]) # Gradually increase penalty for layer
    #  l1_penalty_biases = T.sum([lambda_l1 * T.abs_(B).sum() for B in biases])
    #  l1_penalty = l1_penalty_biases + l1_penalty_weights
    l1_penalty.name = 'l1_penalty'
    costreg = cost + l2_penalty + l1_penalty
    costreg.name = 'costreg'

    ########### DEFINE THE ALGORITHM #############
    #  algorithm = GradientDescent(cost=cost, parameters=cg.parameters, step_rule=Momentum())
    algorithm = GradientDescent(cost=costreg,
                                parameters=cg.parameters,
                                step_rule=Adam())

    ########### GET THE DATA #####################
    istest = 'test' in config.keys()
    # NOTE(review): test_stream is unpacked but never used here.
    train_stream, valid_stream, test_stream = get_stream(batch_size,
                                                         image_shape,
                                                         test=istest)

    ########### INITIALIZING EXTENSIONS ##########
    checkpoint = Checkpoint('models/best_' + label + '.tar')
    checkpoint.add_condition(
        ['after_epoch'], predicate=OnLogRecord('valid_error_rate_best_so_far'))
    #Adding a live plot with the bokeh server
    plot = Plot(
        label,
        channels=[
            ['train_error_rate', 'valid_error_rate'],
            ['valid_cost', 'valid_error_rate2'],
            # ['train_costreg','train_grad_norm']], #
            [
                'train_costreg', 'train_total_gradient_norm',
                'train_l2_penalty', 'train_l1_penalty'
            ]
        ],
        server_url="http://hades.calculquebec.ca:5042")

    grad_norm = aggregation.mean(algorithm.total_gradient_norm)
    grad_norm.name = 'grad_norm'

    extensions = [
        Timing(),
        FinishAfter(after_n_epochs=num_epochs, after_n_batches=num_batches),
        DataStreamMonitoring([cost, error_rate, error_rate2],
                             valid_stream,
                             prefix="valid"),
        TrainingDataMonitoring([
            costreg, error_rate, error_rate2, grad_norm, l2_penalty, l1_penalty
        ],
                               prefix="train",
                               after_epoch=True),
        plot,
        ProgressBar(),
        Printing(),
        TrackTheBest('valid_error_rate', min),  #Keep best
        checkpoint,  #Save best
        FinishIfNoImprovementAfter('valid_error_rate_best_so_far', epochs=4)
    ]  # Early-stopping
    model = Model(cost)
    main_loop = MainLoop(algorithm,
                         data_stream=train_stream,
                         model=model,
                         extensions=extensions)
    main_loop.run()
示例#43
0
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-like convolutional network followed by a fully-connected MLP.

    The feedforward pipeline is a stack of (convolution, activation,
    max-pooling) triples, a flattener, and a top MLP.

    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activations for convolutional network.
    num_channels : int
        Number of channels in the input image.
    image_shape : tuple
        Input image shape.
    filter_sizes : list of tuples
        Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
    feature_maps : list
        Number of filters for each of convolutions.
    pooling_sizes : list of tuples
        Sizes of max pooling for each convolutional layer.
    top_mlp_activations : list of :class:`.blocks.bricks.Activation`
        List of activations for the top MLP.
    top_mlp_dims : list
        Numbers of hidden units and the output dimension of the top MLP.
    conv_step : tuple, optional
        Step (stride) of convolution, shared by all layers; defaults
        to ``(1, 1)``.
    border_mode : str
        Border mode of convolution (similar for all layers).
    """
    def __init__(self,
                 conv_activations,
                 num_channels,
                 image_shape,
                 filter_sizes,
                 feature_maps,
                 pooling_sizes,
                 top_mlp_activations,
                 top_mlp_dims,
                 conv_step=None,
                 border_mode='valid',
                 **kwargs):
        self.conv_step = (1, 1) if conv_step is None else conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps)

        # Interleave convolution, activation and pooling bricks so the
        # sequence reads conv_0, act_0, pool_0, conv_1, ...
        # Fix: pass ``step=self.conv_step`` to the Convolutional bricks;
        # previously conv_step was stored but silently ignored, so a
        # non-default stride had no effect.
        self.layers = list(
            interleave([(Convolutional(filter_size=filter_size,
                                       num_filters=num_filter,
                                       step=self.conv_step,
                                       border_mode=self.border_mode,
                                       name='conv_{}'.format(i))
                         for i, (filter_size,
                                 num_filter) in enumerate(conv_parameters)),
                        conv_activations,
                        (MaxPooling(size, name='pool_{}'.format(i))
                         for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers,
                                                   num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP.  Its input dimension is filled in later by
        # _push_allocation_config, once the conv output shape is known.
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [
            self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply
        ]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        """Final output dimension (last entry of the top-MLP dims)."""
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        # Propagate shapes through the conv stack first so its flattened
        # output size can be used as the MLP input dimension.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
示例#44
0
# Hyper-parameters.
num_filters = 4
initial_weight_std = .01
epochs = 5

# Symbolic inputs: image batches and integer targets.
x = T.tensor4('features')
y = T.lmatrix('targets')

# Two convolutional layers: 3x3 convolutions with ReLU, each followed by
# 2x2 max pooling.
conv_layers = [
    ConvolutionalLayer(Rectifier().apply, (3, 3), n_maps, (2, 2), name=tag)
    for n_maps, tag in ((16, 'l1'), (32, 'l2'))
]

convnet = ConvolutionalSequence(conv_layers,
                                num_channels=1,
                                image_size=(28, 28),
                                weights_init=IsotropicGaussian(0.1),
                                biases_init=Constant(0))

convnet.initialize()

# Flattened size of the convnet's output feature maps.
output_dim = np.prod(convnet.get_dim('output'))
print(output_dim)

# Fully connected classifier on top of the flattened conv features.
features = Flattener().apply(convnet.apply(x))

mlp = MLP(activations=[Rectifier(), None],
          dims=[output_dim, 100, 10],
          weights_init=IsotropicGaussian(0.01),
          biases_init=Constant(0))
示例#45
0
    def __init__(self,
                 image_size=None,
                 output_size=None,
                 mid_noise=False,
                 out_noise=False,
                 tied_noise=False,
                 tied_sigma=False,
                 final_noise=False,
                 noise_batch_size=None,
                 noise_rate=None,
                 prior_noise_level=None,
                 **kwargs):
        """Build a residual network over RGB images.

        Parameters
        ----------
        image_size : tuple, optional
            Spatial input size; defaults to (32, 32).
        output_size : int, optional
            Number of output classes; defaults to 10.
        mid_noise, out_noise, tied_noise, tied_sigma : bool
            Noise options forwarded to every ResidualConvolutional block.
        final_noise : bool
            If true, append a SpatialNoise brick after the final
            batch-norm/ReLU pair.
        noise_batch_size, noise_rate, prior_noise_level
            Noise hyper-parameters forwarded to the noise-capable bricks.
        """
        self.num_channels = 3
        self.image_size = image_size or (32, 32)
        self.output_size = output_size or 10
        self.noise_batch_size = noise_batch_size
        self.noise_rate = noise_rate
        # n residual blocks per filter group, over three groups.
        # NOTE(review): presumably the classic 6n+2-style CIFAR ResNet
        # layout -- confirm against ResidualConvolutional's definition.
        n = 18
        num_filters = [16, 32, 64]
        num_channels = num_filters[0]
        # Stem: a single 3x3 convolution taking RGB to the first group width.
        self.convolutions = [
            Convolutional(filter_size=(3, 3),
                          num_filters=num_channels,
                          step=(1, 1),
                          border_mode='half',
                          tied_biases=True,
                          name='conv_0'),
        ]
        # Three groups of n residual blocks.  The first block of every group
        # after the first downsamples with stride 2; num_channels tracks the
        # running channel count carried between consecutive blocks.
        for j, num in enumerate(num_filters):
            for i in range(n):
                self.convolutions.append(
                    ResidualConvolutional(filter_size=(3, 3),
                                          num_filters=num,
                                          num_channels=num_channels,
                                          mid_noise=mid_noise,
                                          out_noise=out_noise,
                                          tied_noise=tied_noise,
                                          tied_sigma=tied_sigma,
                                          noise_rate=noise_rate,
                                          noise_batch_size=noise_batch_size,
                                          prior_noise_level=prior_noise_level,
                                          step=(2, 2) if i == 0 and j > 0 else
                                          (1, 1),
                                          name='group_%d_%d' % (num, i)))
                num_channels = num
        # Final batch-norm + ReLU pair after the last residual block.
        self.convolutions.extend([
            SpatialBatchNormalization(name='bn_last'),
            Rectifier(name='relu_last')
        ])
        # Optional noise brick appended after the final activation.
        if final_noise:
            self.convolutions.append(
                SpatialNoise(name='nfin',
                             noise_rate=noise_rate,
                             noise_batch_size=noise_batch_size,
                             tied_noise=tied_noise,
                             tied_sigma=tied_sigma,
                             prior_noise_level=prior_noise_level))
        self.conv_sequence = ConvolutionalSequence(
            self.convolutions,
            image_size=self.image_size,
            num_channels=self.num_channels)

        # The AllConvNet applies average pooling to combine top-level
        # features across the image.
        self.flattener = GlobalAverageFlattener()

        # Then it inserts one final 10-way FC layer before softmax
        self.top_mlp = MLP([Softmax()], [num_filters[-1], self.output_size])
        # self.top_softmax = Softmax()

        application_methods = [
            self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply
        ]

        super(ResNet, self).__init__(application_methods, **kwargs)
示例#46
0
def create_model_bricks(image_size, depth):
    """Build the convolutional encoder and MLP bricks for a given depth.

    The original celebA64 model used ``depth=3`` (ending at batch_norm6).
    Each depth stage appends two conv/batch-norm/ReLU triples; the second
    convolution of every stage uses a (3, 3) filter with stride (2, 2) to
    halve the spatial resolution.  Depth is capped at 8 stages.

    Parameters
    ----------
    image_size : int
        Side length of the square input images.
    depth : int
        Number of stages to build (0-8; larger values behave like 8).

    Returns
    -------
    tuple
        ``(convnet, mlp, n_layers)`` -- the initialized
        ConvolutionalSequence, the initialized BatchNormalizedMLP, and the
        number of conv-stack layers built.
    """
    # Per-stage configuration: (first-conv filter size, number of filters).
    # Replaces eight copy-pasted ``if depth > k`` stanzas; the generated
    # brick names conv1..conv16 / batch_norm1..batch_norm16 are identical
    # to the originals.
    stage_configs = [
        ((4, 4), 32),
        ((4, 4), 64),
        ((3, 3), 128),
        ((3, 3), 256),
        ((3, 3), 512),
        ((3, 3), 512),
        ((3, 3), 512),
        ((3, 3), 512),
    ]

    layers = []
    for stage, (first_filter, num_filters) in enumerate(stage_configs[:depth]):
        first_idx = 2 * stage + 1
        second_idx = 2 * stage + 2
        layers += [
            Convolutional(
                filter_size=first_filter,
                num_filters=num_filters,
                name='conv{}'.format(first_idx)),
            SpatialBatchNormalization(name='batch_norm{}'.format(first_idx)),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=num_filters,
                name='conv{}'.format(second_idx)),
            SpatialBatchNormalization(name='batch_norm{}'.format(second_idx)),
            Rectifier(),
        ]

    print("creating model of depth {} with {} layers".format(depth, len(layers)))

    convnet = ConvolutionalSequence(
        layers=layers,
        num_channels=3,
        image_size=(image_size, image_size),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()

    # Two-layer MLP head; its input size is the flattened conv output.
    mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Logistic()],
        dims=[numpy.prod(convnet.get_dim('output')), 1000, 64],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='mlp')
    mlp.initialize()

    return convnet, mlp, len(layers)
示例#47
0
class VGGNet(FeedforwardSequence, Initializable):
    """VGG-style convolutional network with a batch-normalized MLP on top.

    Five blocks of 'half'-padded (3, 3) convolutions with ReLU activations,
    each block followed by (2, 2) max pooling, then a flattener and a
    four-layer BatchNormalizedMLP ending in a single logistic output.
    """

    def __init__(self, image_dimension, **kwargs):
        # (number of convolutions, number of filters) for each block.
        # Replaces the long copy-pasted layer list (whose comments also
        # miscounted the convolutions per block); the generated brick
        # names conv_1_1 ... conv_5_4 are identical to the originals.
        block_configs = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]

        layers = []
        for block, (num_convs, num_filters) in enumerate(block_configs, 1):
            for conv in range(1, num_convs + 1):
                layers.append(
                    Convolutional((3, 3), num_filters, border_mode='half',
                                  name='conv_{}_{}'.format(block, conv)))
                layers.append(Rectifier())
            # maxpool with size=(2, 2) ends every block
            layers.append(MaxPooling((2, 2)))

        self.conv_sequence = ConvolutionalSequence(layers,
                                                   3,
                                                   image_size=image_dimension)

        flattener = Flattener()

        # The conv output size is prepended to these dims later, in
        # _push_allocation_config, once the conv output shape is known.
        self.top_mlp = BatchNormalizedMLP(
            activations=[Rectifier(),
                         Rectifier(),
                         Rectifier(),
                         Logistic()],
            dims=[4096, 4096, 1000, 1])

        application_methods = [
            self.conv_sequence.apply, flattener.apply, self.top_mlp.apply
        ]

        super(VGGNet, self).__init__(application_methods,
                                     biases_init=Constant(0),
                                     weights_init=Uniform(width=.1),
                                     **kwargs)

    def _push_allocation_config(self):
        # Push shapes through the conv stack first so its flattened output
        # size can be used as the MLP input dimension.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp.dims
示例#48
0
# Symbolic variables for the input images and their labels.
x = tensor.tensor4('image_features')
y = tensor.lmatrix('targets')

num_epochs = 1000

############### FIRST STAGE #######################
# Stem: strided 7x7 convolution, batch norm, ReLU, then overlapping pooling.
layers = [
    Convolutional(filter_size=(7, 7), step=(2, 2), num_filters=96,
                  border_mode='half', name='conv_0'),
    BatchNormalization(name='batch_0'),
    Rectifier(),
    MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_0'),
]

convSeq = ConvolutionalSequence(layers, num_channels=3, image_size=(220, 220),
                                weights_init=Orthogonal(), use_bias=False,
                                name='ConvSeq')
convSeq.initialize()
out = convSeq.apply(x)

# FIRE MODULES: stacked squeeze/expand blocks with two pooling stages.
out1 = Fire((55, 55), 96, 16, 16, 16, out, 10)
out2 = Fire((55, 55), 128, 16, 16, 16, out1, 25)
out3 = Fire((55, 55), 128, 32, 32, 32, out2, 300)
out31 = MaxPooling((3, 3), step=(2, 2), padding=(1, 1),
                   name='poolLow').apply(out3)
out4 = Fire((28, 28), 256, 32, 32, 32, out31, 45)
out5 = Fire((28, 28), 256, 48, 48, 48, out4, 500)
out6 = Fire((28, 28), 384, 48, 48, 48, out5, 65)
out7 = Fire((28, 28), 384, 64, 64, 64, out6, 700)
out71 = MaxPooling((3, 3), step=(2, 2), padding=(1, 1),
                   name='poolLow2').apply(out7)
out8 = Fire((14, 14), 512, 64, 64, 64, out71, 85)
示例#49
0
def Fire(image_shape, num_input, conv1, conv2, conv3, out, i):
    """SqueezeNet-style Fire module built from blocks bricks.

    Four parallel 1x1 "squeeze" convolutions are applied to ``out`` and
    concatenated channel-wise; the result feeds four 1x1 and four 3x3
    "expand" branches whose outputs are concatenated and returned.

    Parameters
    ----------
    image_shape : tuple
        Spatial size of the input feature maps.
    num_input : int
        Number of channels of ``out``.
    conv1 : int
        Filters in each of the four 1x1 squeeze branches.
    conv2 : int
        Filters in each 1x1 expand branch.
    conv3 : int
        Filters in each 3x3 expand branch.
    out : tensor
        Input feature maps.
    i : int
        Starting index for brick names; twelve consecutive indices
        (``conv_i`` .. ``conv_{i+11}``) are consumed, matching the
        original hand-unrolled numbering.
    """
    def _branch(filter_size, num_channels, num_filters, source, idx):
        # One conv -> batch-norm -> ReLU sub-sequence, initialized in place.
        sub_layers = [
            Convolutional(filter_size=filter_size, num_channels=num_channels,
                          num_filters=num_filters, image_size=image_shape,
                          border_mode='half', name='conv_{}'.format(idx)),
            BatchNormalization(name='batch_{}'.format(idx)),
            Rectifier(),
        ]
        seq = ConvolutionalSequence(sub_layers, num_channels=num_channels,
                                    image_size=image_shape,
                                    weights_init=Orthogonal(),
                                    use_bias=False,
                                    name='convSeq_{}'.format(idx))
        seq.initialize()
        return seq.apply(source)

    ############# SQUEEZE ###########
    # Four parallel 1x1 convolutions on the input (indices i .. i+3).
    squeeze_outs = []
    for _ in range(4):
        squeeze_outs.append(_branch((1, 1), num_input, conv1, out, i))
        i = i + 1
    squeezed = T.concatenate(squeeze_outs, axis=1)

    ####### EXPAND #####
    # The squeeze stage produced 4 * conv1 channels.
    num_input2 = conv1 * 4
    expand_outs = []
    # Four 1x1 expand branches (indices i+4 .. i+7).
    for _ in range(4):
        expand_outs.append(_branch((1, 1), num_input2, conv2, squeezed, i))
        i = i + 1
    # Four 3x3 expand branches (indices i+8 .. i+11).
    for _ in range(4):
        expand_outs.append(_branch((3, 3), num_input2, conv3, squeezed, i))
        i = i + 1

    # Merge all eight expand outputs channel-wise.
    return T.concatenate(expand_outs, axis=1)
示例#50
0
File: utils.py  Project: zggl/discgen
def load_vgg_classifier():
    """Loads the VGG19 classifier into a brick.

    Relies on ``vgg19_normalized.pkl`` containing the model
    parameters.

    Returns
    -------
    convnet : :class:`blocks.bricks.conv.ConvolutionalSequence`
        VGG19 convolutional brick.

    """
    # VGG19 layout: (number of convolutions, number of filters) per block,
    # each block terminated by (2, 2) average pooling.  Generates the same
    # brick names (conv1_1 .. conv5_4, pool1 .. pool5) as the original
    # hand-written list.
    block_configs = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]
    layers = []
    for block, (num_convs, num_filters) in enumerate(block_configs, 1):
        for conv in range(1, num_convs + 1):
            layers.append(
                Convolutional(
                    filter_size=(3, 3),
                    border_mode=(1, 1),
                    num_filters=num_filters,
                    name='conv{}_{}'.format(block, conv)))
            layers.append(Rectifier())
        layers.append(
            AveragePooling(
                pooling_size=(2, 2),
                name='pool{}'.format(block)))

    convnet = ConvolutionalSequence(
        layers=layers,
        num_channels=3,
        image_size=(32, 32),
        tied_biases=True,
        weights_init=Constant(0),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()

    # NOTE(security): unpickling executes arbitrary code embedded in the
    # file; only load ``vgg19_normalized.pkl`` from a trusted source.
    with open('vgg19_normalized.pkl', 'rb') as f:
        if six.PY3:
            data = cPickle.load(f, encoding='latin1')
        else:
            data = cPickle.load(f)
        parameter_values = data['param values']
    # The pickled list alternates weight, bias, weight, bias, ...
    conv_weights = parameter_values[::2]
    conv_biases = parameter_values[1::2]
    # Select the Convolutional bricks in order (skipping Rectifier and
    # pooling bricks); they pair up one-to-one with the pickled (W, b)
    # arrays.  Replaces the brittle hard-coded index list.
    conv_layers = [layer for layer in convnet.layers
                   if isinstance(layer, Convolutional)]
    for layer, W_val, b_val in zip(conv_layers, conv_weights, conv_biases):
        W, b = layer.parameters
        W.set_value(W_val)
        b.set_value(b_val)

    return convnet
示例#51
0
def inception(image_shape, num_input, conv1, conv2, conv3, conv4, conv5, conv6,
              out, i):
    """Build one GoogLeNet-style inception module and apply it to ``out``.

    Four parallel branches are built, each as its own initialized
    :class:`ConvolutionalSequence`, and their outputs are concatenated
    along axis 1 (the channel axis in Theano's bc01 layout; assumes
    ``out`` is a 4D bc01 tensor -- TODO confirm against the caller):

    * branch 1: 1x1 conv (``conv1`` filters)
    * branch 2: 1x1 conv (``conv2``) followed by 3x3 conv (``conv3``)
    * branch 3: 1x1 conv (``conv4``) followed by 5x5 conv (``conv5``)
    * branch 4: 3x3 max pooling followed by 1x1 conv (``conv6``)

    Every convolution is followed by batch normalization and a
    rectifier, and uses 'half' border mode so the spatial size is
    preserved across the module.

    Parameters
    ----------
    image_shape : tuple
        Spatial shape of the branch inputs.
    num_input : int
        Number of input channels of ``out``.
    conv1, conv2, conv3, conv4, conv5, conv6 : int
        Filter counts for the individual convolutions (see branch list).
    out : TensorVariable
        Input to the module.
    i : int
        Running index used only to generate unique brick names; it is
        incremented locally after each named brick group.

    Returns
    -------
    TensorVariable
        Concatenation of the four branch outputs along the channel axis.
    """
    layers1 = []
    layers2 = []
    layers3 = []
    layers4 = []
    # Branch 1: single 1x1 convolution.
    layers1.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv1,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers1.append(BatchNormalization(name='batch_{}'.format(i)))
    layers1.append(Rectifier())
    conv_sequence1 = ConvolutionalSequence(layers1,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence1.initialize()
    out1 = conv_sequence1.apply(out)
    # Bump the name counter so the next brick group gets unique names.
    i = i + 1

    # Branch 2: 1x1 channel reduction followed by a 3x3 convolution.
    layers2.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv2,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    i = i + 1
    layers2.append(
        Convolutional(filter_size=(3, 3),
                      num_channels=conv2,
                      num_filters=conv3,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    conv_sequence2 = ConvolutionalSequence(layers2,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence2.initialize()
    out2 = conv_sequence2.apply(out)
    i = i + 1

    # Branch 3: 1x1 channel reduction followed by a 5x5 convolution.
    layers3.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv4,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    i = i + 1
    layers3.append(
        Convolutional(filter_size=(5, 5),
                      num_channels=conv4,
                      num_filters=conv5,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    conv_sequence3 = ConvolutionalSequence(layers3,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence3.initialize()
    out3 = conv_sequence3.apply(out)
    i = i + 1

    # Branch 4: 3x3 max pooling (stride 1, padded so the size is kept)
    # followed by a 1x1 convolution.
    layers4.append(
        MaxPooling((3, 3),
                   step=(1, 1),
                   padding=(1, 1),
                   name='pool_{}'.format(i)))
    layers4.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv6,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers4.append(BatchNormalization(name='batch_{}'.format(i)))
    layers4.append(Rectifier())
    i = i + 1
    conv_sequence4 = ConvolutionalSequence(layers4,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence4.initialize()
    out4 = conv_sequence4.apply(out)
    # Merge: concatenate the branch outputs along the channel axis.
    return T.concatenate([out1, out2, out3, out4], axis=1)
示例#52
0
def build_model(images, labels):
    """Build a classification cost on top of truncated VGG features.

    Parameters
    ----------
    images : TensorVariable
        Batch of input images for the VGG front end.
    labels : TensorVariable
        Integer class labels; flattened before the cross-entropy.

    Returns
    -------
    tuple
        ``(cost, [parameters_top, parameters_all])``: the regularized
        cross-entropy cost, the parameters of the newly added top
        layers, and the parameters of the whole network (top layers
        plus VGG).
    """
    # Pretrained VGG, truncated after conv3_4, used as the bottom
    # feature extractor.
    vgg = VGG(layer='conv3_4')
    vgg.push_initialization_config()
    vgg.initialize()

    # NOTE(review): constructed but never used below; kept so brick
    # bookkeeping is unchanged -- TODO confirm it can be removed.
    sb = SubstractBatch()

    # Construct a bottom convolutional sequence
    layers = [
        Convolutional(filter_size=(3, 3),
                      num_filters=100,
                      use_bias=True,
                      tied_biases=True,
                      name='final_conv0'),
        BatchNormalization(name='batchnorm_1'),
        Rectifier(name='final_conv0_act'),
        Convolutional(filter_size=(3, 3),
                      num_filters=100,
                      use_bias=True,
                      tied_biases=True,
                      name='final_conv1'),
        BatchNormalization(name='batchnorm_2'),
        Rectifier(name='final_conv1_act'),
        MaxPooling(pooling_size=(2, 2), name='maxpool_final')
    ]
    bottom_conv_sequence = ConvolutionalSequence(
        layers,
        num_channels=256,
        image_size=(40, 40),
        biases_init=Constant(0.),
        weights_init=IsotropicGaussian(0.01))
    bottom_conv_sequence._push_allocation_config()

    # Flatten layer
    flattener = Flattener()

    # Construct a top MLP
    conv_out_dim = numpy.prod(bottom_conv_sequence.get_dim('output'))
    # Use the print() function: the original Python 2 print statement is
    # a syntax error under Python 3, and the rest of this file already
    # uses print() (see create_model_bricks).
    print('dim output conv:', bottom_conv_sequence.get_dim('output'))
    top_mlp = BatchNormalizedMLP(
        [Rectifier(name='non_linear_9'),
         Softmax(name='non_linear_11')], [conv_out_dim, 1024, 10],
        weights_init=IsotropicGaussian(),
        biases_init=Constant(0))

    # Construct feedforward sequence
    ss_seq = FeedforwardSequence([
        vgg.apply, bottom_conv_sequence.apply, flattener.apply, top_mlp.apply
    ])
    ss_seq.push_initialization_config()
    ss_seq.initialize()

    prediction = ss_seq.apply(images)
    cost_noreg = CategoricalCrossEntropy().apply(labels.flatten(), prediction)

    # Add L2 penalties on the two top-MLP weight matrices.
    selector = Selector([top_mlp])
    Ws = selector.get_parameters('W')
    mlp_brick_name = 'batchnormalizedmlp'
    W0 = Ws['/%s/linear_0.W' % mlp_brick_name]
    W1 = Ws['/%s/linear_1.W' % mlp_brick_name]
    cost = cost_noreg + .0001 * (W0**2).sum() + .001 * (W1**2).sum()

    # Learned parameters of the newly added top layers...
    selector = Selector([ss_seq])
    Ws = selector.get_parameters('W')
    bs = selector.get_parameters('b')
    BNSCs = selector.get_parameters('batch_norm_scale')
    BNSHs = selector.get_parameters('batch_norm_shift')

    parameters_top = []
    parameters_top += [v for k, v in Ws.items()]
    parameters_top += [v for k, v in bs.items()]
    parameters_top += [v for k, v in BNSCs.items()]
    parameters_top += [v for k, v in BNSHs.items()]

    # ...plus the (pretrained) VGG parameters for full fine-tuning.
    selector = Selector([vgg])
    convs = selector.get_parameters()

    parameters_all = []
    parameters_all += parameters_top
    parameters_all += [v for k, v in convs.items()]

    return cost, [parameters_top, parameters_all]
示例#53
0
    # Discriminator

    # x branch: five convolutional layers, each followed by a ConvMaxout
    # non-linearity (maxout across NUM_PIECES feature-map groups).
    # NOTE(review): ``conv_brick`` is a helper defined elsewhere in this
    # file -- presumably (filter_size, step, num_filters); TODO confirm
    # the argument order there.
    layers = [
        conv_brick(5, 1, 32),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 64),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 128),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 256),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 512),
        ConvMaxout(num_pieces=NUM_PIECES)
    ]
    x_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            name='x_discriminator')
    # Only the allocation config is pushed here; parameter
    # initialization is expected to happen later in the calling code.
    x_discriminator.push_allocation_config()

    # z branch: two 1x1 convolutions over the (1, 1) latent "image"
    # (NLAT latent channels).
    layers = [
        conv_brick(1, 1, 512),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 512),
        ConvMaxout(num_pieces=NUM_PIECES)
    ]
    z_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='z_discriminator')
    z_discriminator.push_allocation_config()
示例#54
0
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-style network: interleaved conv/activation/pool stack + MLP.

    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activation bricks, one per convolutional layer.
    num_channels : int
        Number of channels of the input image.
    image_shape : tuple
        Spatial shape of the input image.
    filter_sizes : list of tuples
        Filter size of each convolutional layer.
    feature_maps : list
        Number of filters of each convolutional layer.
    pooling_sizes : list of tuples
        Max-pooling size after each convolutional layer.
    top_mlp_activations : list of :class:`.Brick`
        Activations of the top MLP.
    top_mlp_dims : list
        Hidden and output dimensions of the top MLP; its input dimension
        is derived from the convolutional output during allocation.
    conv_step : tuple, optional
        Convolution stride shared by all layers; defaults to (1, 1).
    border_mode : str, optional
        Border mode shared by all convolutions.
    """

    def __init__(self,
                 conv_activations,
                 num_channels,
                 image_shape,
                 filter_sizes,
                 feature_maps,
                 pooling_sizes,
                 top_mlp_activations,
                 top_mlp_dims,
                 conv_step=None,
                 border_mode='valid',
                 **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        # (interleaved as conv_0, activation_0, pool_0, conv_1, ...).
        self.layers = list(
            interleave([(Convolutional(filter_size=filter_size,
                                       num_filters=num_filter,
                                       step=self.conv_step,
                                       border_mode=self.border_mode,
                                       name='conv_{}'.format(i))
                         for i, (filter_size,
                                 num_filter) in enumerate(conv_parameters)),
                        conv_activations,
                        (MaxPooling(size, name='pool_{}'.format(i))
                         for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers,
                                                   num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [
            self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply
        ]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        """Dimension of the network output (last top-MLP dimension)."""
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        # Derive the top-MLP input dimension from the flattened
        # convolutional output shape, then prepend it to the MLP dims.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
示例#55
0
# Hyper-parameters of a small two-stage convnet: two 5x5 conv layers
# with 128 and 256 feature maps, each followed by 2x2 max pooling.
filter_sizes = [(5, 5)] * 2
num_filters = [128, 256]
pooling_sizes = [(2, 2)] * 2
activation = Logistic().apply
# NOTE(review): ``b`` is presumably the module exposing the legacy
# ``ConvolutionalLayer`` brick -- confirm against the imports at the
# top of the file.
conv_layers = [
    b.ConvolutionalLayer(activation,
                         filter_size,
                         num_filters_,
                         pooling_size,
                         num_channels=3) for filter_size, num_filters_,
    pooling_size in zip(filter_sizes, num_filters, pooling_sizes)
]

convnet = ConvolutionalSequence(conv_layers,
                                num_channels=3,
                                image_size=(32, 32),
                                weights_init=Uniform(0, 0.2),
                                biases_init=Constant(0.))

convnet.initialize()
# ``X`` is the input tensor defined elsewhere in the file; the conv
# output is flattened to a (batch, features) matrix.
conv_features = Flattener().apply(convnet.apply(X))

# MLP

mlp = MLP(activations=[Logistic(name='sigmoid_0'),
                       Softmax(name='softmax_1')],
          dims=[256, 256, 256, 2],
          weights_init=IsotropicGaussian(0.01),
          biases_init=Constant(0))
# NOTE(review): the next two lines look like pasted REPL/doctest output;
# the list literal on the second line is a no-op at runtime.
[child.name for child in mlp.children]
['linear_0', 'sigmoid_0', 'linear_1', 'softmax_1']
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-like convolutional network.

    The class implements LeNet, which is a convolutional sequence with
    an MLP on top (several fully-connected layers). For details see
    [LeCun95]_.

    .. [LeCun95] LeCun, Yann, et al.
       *Comparison of learning algorithms for handwritten digit
       recognition.*,
       International conference on artificial neural networks. Vol. 60.

    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activations for convolutional network.
    num_channels : int
        Number of channels in the input image.
    image_shape : tuple
        Input image shape.
    filter_sizes : list of tuples
        Filter sizes of the convolutional layers.
    feature_maps : list
        Number of filters for each of convolutions.
    conv_steps : list of tuples
        Convolution step (stride) for each convolutional layer.
    pooling_sizes : list of tuples
        Sizes of max pooling for each convolutional layer.
    top_mlp_activations : list of :class:`.blocks.bricks.Activation`
        List of activations for the top MLP.
    top_mlp_dims : list
        Numbers of hidden units and the output dimension of the top MLP.
    border_mode : str
        Border mode of convolution (similar for all layers).

    """

    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, conv_steps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims, border_mode='valid', **kwargs):

        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps, conv_steps)

        # Construct convolutional, activation, and pooling layers with corresponding parameters
        # (interleaved as conv_0, activation_0, pool_0, conv_1, ...).
        conv_layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=conv_step,
                           border_mode=self.border_mode,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter, conv_step)
             in enumerate(conv_parameters)),
             conv_activations,
             (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        # Applying SpatialBatchNormalization to inputs
        #self.layers = [SpatialBatchNormalization()] + conv_layers
        self.layers = conv_layers
        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        """Dimension of the network output (last top-MLP dimension)."""
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        # Derive the top-MLP input dimension from the flattened
        # convolutional output shape, then prepend it to the MLP dims.
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
示例#57
0
def convolutional_sequence(filter_size,
                           num_filters,
                           image_size,
                           num_channels=1):
    """Build a VGG-style :class:`ConvolutionalSequence` of eight blocks.

    Each block is Convolutional -> LeakyRectifier -> BatchNormalization;
    after every second block a padded 2x2 MaxPooling layer is inserted.
    The filter count doubles every two blocks (x1, x1, x2, x2, x4, x4,
    x8, x8).

    Parameters
    ----------
    filter_size : tuple
        Filter size shared by all convolutional layers.
    num_filters : int
        Filter count of the first two blocks; later blocks use
        multiples of it.
    image_size : tuple
        Spatial size of the input image.
    num_channels : int, optional
        Number of channels of the input image (default 1).

    Returns
    -------
    ConvolutionalSequence
        The assembled (not yet initialized) sequence brick.
    """
    layers = []

    def append_conv_block(index, block_filters):
        # One conv -> leaky-ReLU -> batch-norm triple.  The BN input
        # dimension is taken from the conv brick itself rather than a
        # hard-coded ``layers[k]`` index (the original's indices were
        # fragile to any reordering of the list).
        conv = Convolutional(filter_size=filter_size,
                             num_filters=block_filters,
                             # NOTE(review): every conv receives the *input*
                             # channel count, exactly as before; presumably
                             # ConvolutionalSequence overwrites per-layer
                             # channels during allocation -- TODO confirm.
                             num_channels=num_channels,
                             use_bias=True,
                             tied_biases=True,
                             weights_init=IsotropicGaussian(0.01),
                             image_size=image_size,
                             name='conv_{}'.format(index))
        layers.append(conv)
        layers.append(LeakyRectifier(name='non_linear_{}'.format(index)))
        layers.append(
            BatchNormalization(input_dim=conv.get_dim('output'),
                               name='batchnorm_{}'.format(index)))

    def append_pooling(index):
        # NOTE(review): MaxPooling is not an Initializable brick; the
        # ``weights_init`` argument is kept for behavioural parity with
        # the original code but looks spurious -- TODO confirm it is
        # accepted/ignored by the brick.
        layers.append(
            MaxPooling(pooling_size=(2, 2),
                       padding=(1, 1),
                       weights_init=IsotropicGaussian(0.01),
                       name='maxpool_{}'.format(index)))

    # Two blocks at each width; pooling after every even-indexed block
    # (maxpool_2, maxpool_4, maxpool_6, maxpool_8 -- same names as the
    # original hand-unrolled version).
    for index, multiplier in enumerate([1, 1, 2, 2, 4, 4, 8, 8], start=1):
        append_conv_block(index, num_filters * multiplier)
        if index % 2 == 0:
            append_pooling(index)

    return ConvolutionalSequence(layers,
                                 num_channels=num_channels,
                                 image_size=image_size,
                                 biases_init=Uniform(width=.1))
示例#58
0
def create_model_bricks(z_dim, image_size, depth):
    """Create encoder/decoder convnet and MLP bricks for an autoencoder.

    Parameters
    ----------
    z_dim : int
        Output dimension of the encoder MLP (size of the latent code).
    image_size : int
        Height/width of the square input image.
    depth : int
        Number of stride-2 downsampling stages (0 to 5); each stage adds
        three convolutional layers to the encoder and three mirrored
        layers to the decoder.

    Returns
    -------
    tuple
        ``(encoder_convnet, encoder_mlp, decoder_convnet, decoder_mlp)``,
        all initialized.
    """
    # Spatial size after each stride-2 stage.  Floor division (//) keeps
    # these integral: the original true division yields floats under
    # Python 3, which breaks ConvolutionalTranspose's
    # ``original_image_size`` (this function already uses // elsewhere).
    g_image_size = image_size
    g_image_size2 = g_image_size // 2
    g_image_size3 = g_image_size // 4
    g_image_size4 = g_image_size // 8
    g_image_size5 = g_image_size // 16

    # Encoder: per stage, two 3x3 same-size convs then one 2x2 stride-2
    # downsampling conv, each followed by batch norm and a rectifier.
    encoder_layers = []
    if depth > 0:
        encoder_layers = encoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=32,
                          name='conv1'),
            SpatialBatchNormalization(name='batch_norm1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=32,
                          name='conv2'),
            SpatialBatchNormalization(name='batch_norm2'),
            Rectifier(),
            Convolutional(
                filter_size=(2, 2), step=(2, 2), num_filters=32, name='conv3'),
            SpatialBatchNormalization(name='batch_norm3'),
            Rectifier()
        ]
    if depth > 1:
        encoder_layers = encoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=64,
                          name='conv4'),
            SpatialBatchNormalization(name='batch_norm4'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=64,
                          name='conv5'),
            SpatialBatchNormalization(name='batch_norm5'),
            Rectifier(),
            Convolutional(
                filter_size=(2, 2), step=(2, 2), num_filters=64, name='conv6'),
            SpatialBatchNormalization(name='batch_norm6'),
            Rectifier()
        ]
    if depth > 2:
        encoder_layers = encoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=128,
                          name='conv7'),
            SpatialBatchNormalization(name='batch_norm7'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=128,
                          name='conv8'),
            SpatialBatchNormalization(name='batch_norm8'),
            Rectifier(),
            Convolutional(
                filter_size=(2, 2), step=(2, 2), num_filters=128,
                name='conv9'),
            SpatialBatchNormalization(name='batch_norm9'),
            Rectifier()
        ]
    if depth > 3:
        encoder_layers = encoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=256,
                          name='conv10'),
            SpatialBatchNormalization(name='batch_norm10'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=256,
                          name='conv11'),
            SpatialBatchNormalization(name='batch_norm11'),
            Rectifier(),
            Convolutional(filter_size=(2, 2),
                          step=(2, 2),
                          num_filters=256,
                          name='conv12'),
            SpatialBatchNormalization(name='batch_norm12'),
            Rectifier(),
        ]
    if depth > 4:
        encoder_layers = encoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=512,
                          name='conv13'),
            SpatialBatchNormalization(name='batch_norm13'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=512,
                          name='conv14'),
            SpatialBatchNormalization(name='batch_norm14'),
            Rectifier(),
            Convolutional(filter_size=(2, 2),
                          step=(2, 2),
                          num_filters=512,
                          name='conv15'),
            SpatialBatchNormalization(name='batch_norm15'),
            Rectifier()
        ]

    # Decoder: mirrors the encoder; each stage is two 3x3 convs followed
    # by a 2x2 stride-2 transposed conv that upsamples back to the
    # corresponding encoder stage's spatial size.
    decoder_layers = []
    if depth > 4:
        decoder_layers = decoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=512,
                          name='conv_n3'),
            SpatialBatchNormalization(name='batch_norm_n3'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=512,
                          name='conv_n2'),
            SpatialBatchNormalization(name='batch_norm_n2'),
            Rectifier(),
            ConvolutionalTranspose(
                filter_size=(2, 2),
                step=(2, 2),
                original_image_size=(g_image_size5, g_image_size5),
                num_filters=512,
                name='conv_n1'),
            SpatialBatchNormalization(name='batch_norm_n1'),
            Rectifier()
        ]

    if depth > 3:
        decoder_layers = decoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=256,
                          name='conv1'),
            SpatialBatchNormalization(name='batch_norm1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=256,
                          name='conv2'),
            SpatialBatchNormalization(name='batch_norm2'),
            Rectifier(),
            ConvolutionalTranspose(
                filter_size=(2, 2),
                step=(2, 2),
                original_image_size=(g_image_size4, g_image_size4),
                num_filters=256,
                name='conv3'),
            SpatialBatchNormalization(name='batch_norm3'),
            Rectifier()
        ]

    if depth > 2:
        decoder_layers = decoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=128,
                          name='conv4'),
            SpatialBatchNormalization(name='batch_norm4'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=128,
                          name='conv5'),
            SpatialBatchNormalization(name='batch_norm5'),
            Rectifier(),
            ConvolutionalTranspose(
                filter_size=(2, 2),
                step=(2, 2),
                original_image_size=(g_image_size3, g_image_size3),
                num_filters=128,
                name='conv6'),
            SpatialBatchNormalization(name='batch_norm6'),
            Rectifier()
        ]

    if depth > 1:
        decoder_layers = decoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=64,
                          name='conv7'),
            SpatialBatchNormalization(name='batch_norm7'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=64,
                          name='conv8'),
            SpatialBatchNormalization(name='batch_norm8'),
            Rectifier(),
            ConvolutionalTranspose(
                filter_size=(2, 2),
                step=(2, 2),
                original_image_size=(g_image_size2, g_image_size2),
                num_filters=64,
                name='conv9'),
            SpatialBatchNormalization(name='batch_norm9'),
            Rectifier()
        ]

    if depth > 0:
        decoder_layers = decoder_layers + [
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=32,
                          name='conv10'),
            SpatialBatchNormalization(name='batch_norm10'),
            Rectifier(),
            Convolutional(filter_size=(3, 3),
                          border_mode=(1, 1),
                          num_filters=32,
                          name='conv11'),
            SpatialBatchNormalization(name='batch_norm11'),
            Rectifier(),
            ConvolutionalTranspose(
                filter_size=(2, 2),
                step=(2, 2),
                original_image_size=(g_image_size, g_image_size),
                num_filters=32,
                name='conv12'),
            SpatialBatchNormalization(name='batch_norm12'),
            Rectifier()
        ]

    # Final 1x1 conv maps back to 3 channels with a logistic output.
    decoder_layers = decoder_layers + [
        Convolutional(filter_size=(1, 1), num_filters=3, name='conv_out'),
        Logistic()
    ]

    print("creating model of depth {} with {} encoder and {} decoder layers".
          format(depth, len(encoder_layers), len(decoder_layers)))

    encoder_convnet = ConvolutionalSequence(
        layers=encoder_layers,
        num_channels=3,
        image_size=(g_image_size, g_image_size),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='encoder_convnet')
    encoder_convnet.initialize()

    encoder_filters = numpy.prod(encoder_convnet.get_dim('output'))

    encoder_mlp = MLP(
        dims=[encoder_filters, 1000, z_dim],
        activations=[
            Sequence([BatchNormalization(1000).apply,
                      Rectifier().apply],
                     name='activation1'),
            Identity().apply
        ],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='encoder_mlp')
    encoder_mlp.initialize()

    # NOTE(review): the decoder consumes only half of the encoder MLP's
    # output -- presumably the mean half of a (mean, log-sigma) latent
    # split; TODO confirm against the caller.
    decoder_mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Rectifier()],
        dims=[encoder_mlp.output_dim // 2, 1000, encoder_filters],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='decoder_mlp')
    decoder_mlp.initialize()

    # The decoder convnet starts from the encoder convnet's output shape.
    decoder_convnet = ConvolutionalSequence(
        layers=decoder_layers,
        num_channels=encoder_convnet.get_dim('output')[0],
        image_size=encoder_convnet.get_dim('output')[1:],
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='decoder_convnet')
    decoder_convnet.initialize()

    return encoder_convnet, encoder_mlp, decoder_convnet, decoder_mlp
示例#59
0
def create_model_brick():
    """Build, configure and initialize the complete ALI model brick.

    Assembles three components and wires them into a single ``ALI`` brick:

    * an encoder: a convolutional x -> z mapping wrapped in
      ``GaussianConditional``,
    * a decoder: a transposed-convolutional z -> x mapping wrapped in
      ``DeterministicConditional``,
    * a joint discriminator over (x, z) pairs
      (``XZJointDiscriminator``).

    Returns the fully initialized ``ali`` brick.

    NOTE(review): relies on module-level helpers/constants defined
    elsewhere in the file (``conv_brick``, ``bn_brick``,
    ``conv_transpose_brick``, ``LEAK``, ``NLAT``, ``NUM_CHANNELS``,
    ``IMAGE_SIZE``, ``GAUSSIAN_INIT``, ``ZERO_INIT``,
    ``create_celeba_data_streams``, ``get_log_odds``).
    """
    # Encoder mapping: stack of strided convolutions with batch norm and
    # leaky ReLU.  The last conv emits 2 * NLAT channels — presumably the
    # mean and log-sigma of the Gaussian posterior consumed by
    # GaussianConditional; confirm against that brick's contract.
    layers = [
        conv_brick(2, 1, 64),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)
    ]
    encoder_mapping = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            use_bias=False,
                                            name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    # Decoder mapping: mirror image of the encoder, starting from a 1x1
    # spatial map with NLAT channels and upsampling via transposed
    # convolutions; Logistic squashes the output into (0, 1).
    layers = [
        conv_transpose_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS),
        Logistic()
    ]
    decoder_mapping = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

    # x-leg of the discriminator.  Note: deliberately no batch norm after
    # the first convolution (unlike the encoder's otherwise identical
    # stack).
    layers = [
        conv_brick(2, 1, 64),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK)
    ]
    x_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            use_bias=False,
                                            name='x_discriminator')
    # Push allocation config now so get_dim('output') below can be queried
    # before the brick is initialized.
    x_discriminator.push_allocation_config()

    # z-leg of the discriminator: 1x1 convolutions over the latent map.
    layers = [
        conv_brick(1, 1, 1024),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024),
        LeakyRectifier(leak=LEAK)
    ]
    z_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='z_discriminator')
    z_discriminator.push_allocation_config()

    # Joint head: operates on the channel-wise concatenation of the two
    # legs' outputs, hence the summed channel count.
    layers = [
        conv_brick(1, 1, 2048),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)
    ]
    joint_discriminator = ConvolutionalSequence(
        layers=layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')

    discriminator = XZJointDiscriminator(x_discriminator,
                                         z_discriminator,
                                         joint_discriminator,
                                         name='discriminator')

    ali = ALI(encoder,
              decoder,
              discriminator,
              weights_init=GAUSSIAN_INIT,
              biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()
    # After the sequences' use_bias=False has been pushed to children,
    # selectively re-enable (untied) biases on the layers that need them:
    # the encoder's output conv, the decoder's output conv (layers[-2],
    # since layers[-1] is the Logistic), and the first x-discriminator
    # conv.  This must happen after push_allocation_config but before
    # initialize().
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()
    # Initialize the decoder output bias from the data marginals
    # (presumably the per-pixel log-odds of the CelebA training batch —
    # confirm against get_log_odds).
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(b_value)

    return ali
示例#60
0
def create_model_bricks():
    """Build and initialize the convnet/MLP bricks of a 64x64 autoencoder.

    The encoder downsamples a 3-channel 64x64 image through four stages of
    (3x3 conv, 3x3 conv, strided 2x2 conv) with batch norm and rectifiers,
    then an MLP maps the flattened features to a 1000-dim output
    (presumably mean and log-variance halves, given the ``// 2`` below —
    confirm against the caller).  The decoder mirrors this with
    transposed convolutions and a final 1x1 conv + Logistic producing
    3-channel images in (0, 1).

    Returns:
        tuple: ``(encoder_convnet, encoder_mlp, decoder_convnet,
        decoder_mlp)``, all initialized.
    """
    def _with_bn_relu(conv, index):
        # One (conv, batch-norm, rectifier) triple; names match the
        # original hand-written 'convN' / 'batch_normN' scheme.
        return [conv,
                SpatialBatchNormalization(name='batch_norm{}'.format(index)),
                Rectifier()]

    # Encoder: for each filter count, two padded 3x3 convs followed by a
    # strided 2x2 conv that halves the spatial resolution.
    encoder_layers = []
    layer_num = 1
    for num_filters in (32, 64, 128, 256):
        for conv_kwargs in ({'filter_size': (3, 3), 'border_mode': (1, 1)},
                            {'filter_size': (3, 3), 'border_mode': (1, 1)},
                            {'filter_size': (2, 2), 'step': (2, 2)}):
            conv = Convolutional(num_filters=num_filters,
                                 name='conv{}'.format(layer_num),
                                 **conv_kwargs)
            encoder_layers.extend(_with_bn_relu(conv, layer_num))
            layer_num += 1

    encoder_convnet = ConvolutionalSequence(
        layers=encoder_layers,
        num_channels=3,
        image_size=(64, 64),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='encoder_convnet')
    encoder_convnet.initialize()

    # Total number of scalar features the encoder emits (channels * H * W).
    encoder_filters = numpy.prod(encoder_convnet.get_dim('output'))

    encoder_mlp = MLP(
        dims=[encoder_filters, 1000, 1000],
        activations=[
            Sequence([BatchNormalization(1000).apply,
                      Rectifier().apply],
                     name='activation1'),
            Identity().apply
        ],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='encoder_mlp')
    encoder_mlp.initialize()

    decoder_mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Rectifier()],
        # Half of the encoder output feeds the decoder (the other half is
        # presumably log-variance — TODO confirm against the caller).
        dims=[encoder_mlp.output_dim // 2, 1000, encoder_filters],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='decoder_mlp')
    decoder_mlp.initialize()

    # Decoder: mirror of the encoder — per stage, two padded 3x3 convs then
    # a 2x2 transposed conv that doubles the resolution to `upsampled`.
    decoder_layers = []
    layer_num = 1
    for num_filters, upsampled in ((256, 8), (128, 16), (64, 32), (32, 64)):
        for _ in range(2):
            conv = Convolutional(filter_size=(3, 3),
                                 border_mode=(1, 1),
                                 num_filters=num_filters,
                                 name='conv{}'.format(layer_num))
            decoder_layers.extend(_with_bn_relu(conv, layer_num))
            layer_num += 1
        transpose = ConvolutionalTranspose(
            filter_size=(2, 2),
            step=(2, 2),
            original_image_size=(upsampled, upsampled),
            num_filters=num_filters,
            name='conv{}'.format(layer_num))
        decoder_layers.extend(_with_bn_relu(transpose, layer_num))
        layer_num += 1
    decoder_layers += [
        Convolutional(filter_size=(1, 1), num_filters=3, name='conv_out'),
        Logistic(),
    ]

    decoder_convnet = ConvolutionalSequence(
        layers=decoder_layers,
        num_channels=encoder_convnet.get_dim('output')[0],
        image_size=encoder_convnet.get_dim('output')[1:],
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='decoder_convnet')
    decoder_convnet.initialize()

    return encoder_convnet, encoder_mlp, decoder_convnet, decoder_mlp