Code Example #1
File: test_bn.py Project: ck624/dependency-parser
def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences."""
    conv_seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         BatchNormalization(broadcastable=(False, True, True)),
         AveragePooling(pooling_size=(2, 2)),
         BatchNormalization(broadcastable=(False, False, False)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq_no_bn = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         AveragePooling(pooling_size=(2, 2)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq.initialize()
    conv_seq_no_bn.initialize()
    rng = numpy.random.RandomState((2015, 12, 17))
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    ybn = conv_seq.apply(x)
    y = conv_seq_no_bn.apply(x)
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}))

    std = conv_seq.children[-2].population_stdev
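    # Tripling the population standard deviation of the second BatchNormalization
    # brick should scale the inference-mode output of the sequence down by 3.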
    std.set_value(3 * std.get_value(borrow=True))
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}) / 3.)
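The snippets on this page come from different projects and omit their import statements. As a rough, version-dependent sketch (assuming a Theano-era Blocks install together with Theano and NumPy; adjust module paths to the installed version), most of them rely on a preamble along these lines:

import numpy
import theano
from theano import tensor
from blocks.bricks import MLP, Rectifier, Tanh, Logistic, Softmax
from blocks.bricks.bn import (BatchNormalization, SpatialBatchNormalization,
                              BatchNormalizedMLP)
from blocks.bricks.conv import (AveragePooling, Convolutional,
                                ConvolutionalSequence, Flattener, MaxPooling)
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal, Uniform
# Bricks such as ConvolutionalTranspose, LeakyRectifier and the deprecated
# ConvolutionalLayer / ConvolutionalActivation depend on the Blocks version,
# while NoisyConvolutional, ChannelMask, ConvMaxout, GlobalAverageFlattener,
# conv_brick, bn_brick, etc. are defined in the respective projects.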
Code Example #2
File: test_conv.py Project: violet-zct/blocks
def test_convolutional_sequence_tied_biases_pushed_if_explicitly_set():
    cnn = ConvolutionalSequence(sum([[
        Convolutional(filter_size=(1, 1), num_filters=1, tied_biases=True),
        Rectifier()
    ] for _ in range(3)], []),
                                num_channels=1,
                                image_size=(1, 1),
                                tied_biases=False)
    cnn.allocate()
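    # The explicitly set sequence-level tied_biases=False should have been pushed
    # down to every Convolutional child, overriding their own tied_biases=True.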
    assert all(not child.tied_biases for child in cnn.children
               if isinstance(child, Convolutional))

    cnn = ConvolutionalSequence(sum(
        [[Convolutional(filter_size=(1, 1), num_filters=1),
          Rectifier()] for _ in range(3)], []),
                                num_channels=1,
                                image_size=(1, 1),
                                tied_biases=True)
    cnn.allocate()
    assert all(child.tied_biases for child in cnn.children
               if isinstance(child, Convolutional))
Code Example #3
    def __init__(self,
                 layers,
                 num_channels,
                 image_size,
                 n_emb,
                 use_bias=False,
                 **kwargs):
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size

        self.pre_encoder = ConvolutionalSequence(layers=layers[:-1],
                                                 num_channels=num_channels,
                                                 image_size=image_size,
                                                 use_bias=use_bias,
                                                 name='encoder_conv_mapping')
        self.pre_encoder.allocate()
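        # The post-encoder operates on the pre-encoder's output channels together
        # with an n_emb-dimensional embedding (presumably concatenated along the
        # channel axis), hence the combined channel count.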
        n_channels = n_emb + self.pre_encoder.get_dim('output')[0]
        self.post_encoder = ConvolutionalSequence(layers=[layers[-1]],
                                                  num_channels=n_channels,
                                                  image_size=(1, 1),
                                                  use_bias=use_bias)
        children = [self.pre_encoder, self.post_encoder]
        kwargs.setdefault('children', []).extend(children)
        super(EncoderMapping, self).__init__(**kwargs)
Code Example #4
File: test_conv.py Project: violet-zct/blocks
def test_convolutional_sequence_with_no_input_size():
    # suppose x is outputted by some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 1)
    num_filters = 2
    num_channels = 1
    pooling_size = (1, 1)
    conv = Convolutional(filter_size,
                         num_filters,
                         tied_biases=False,
                         weights_init=Constant(1.),
                         biases_init=Constant(1.))
    act = Rectifier()
    pool = MaxPooling(pooling_size)

    bad_seq = ConvolutionalSequence([conv, act, pool],
                                    num_channels,
                                    tied_biases=False)
    assert_raises_regexp(ValueError, r'Cannot infer bias size \S+',
                         bad_seq.initialize)

    seq = ConvolutionalSequence([conv, act, pool],
                                num_channels,
                                tied_biases=True)
    try:
        seq.initialize()
        out = seq.apply(x)
    except TypeError:
        assert False, "This should have succeeded"

    assert out.ndim == 4
Code Example #5
def test_convolutional_sequence_activation_get_dim():
    seq = ConvolutionalSequence([Tanh()], num_channels=9, image_size=(4, 6))
    seq.allocate()
    assert seq.get_dim('output') == (9, 4, 6)

    seq = ConvolutionalSequence([Convolutional(filter_size=(7, 7),
                                               num_filters=5,
                                               border_mode=(1, 1)),
                                 Tanh()], num_channels=8, image_size=(8, 11))
    seq.allocate()
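    # With a (7, 7) filter and border_mode=(1, 1) padding:
    # height 8 - 7 + 2*1 + 1 = 4, width 11 - 7 + 2*1 + 1 = 7.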
    assert seq.get_dim('output') == (5, 4, 7)
Code Example #6
    def __init__(self, **kwargs):
        conv_layers = [
            Convolutional(filter_size=(3, 3), num_filters=64,
                          border_mode=(1, 1), name='conv_1'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=64,
                          border_mode=(1, 1), name='conv_2'),
            Rectifier(),
            MaxPooling((2, 2), step=(2, 2), name='pool_2'),

            Convolutional(filter_size=(3, 3), num_filters=128,
                          border_mode=(1, 1), name='conv_3'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=128,
                          border_mode=(1, 1), name='conv_4'),
            Rectifier(),
            MaxPooling((2, 2), step=(2, 2), name='pool_4'),

            Convolutional(filter_size=(3, 3), num_filters=256,
                          border_mode=(1, 1), name='conv_5'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=256,
                          border_mode=(1, 1), name='conv_6'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=256,
                          border_mode=(1, 1), name='conv_7'),
            Rectifier(),
            MaxPooling((2, 2), step=(2, 2), name='pool_7'),

            Convolutional(filter_size=(3, 3), num_filters=512,
                          border_mode=(1, 1), name='conv_8'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=512,
                          border_mode=(1, 1), name='conv_9'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=512,
                          border_mode=(1, 1), name='conv_10'),
            Rectifier(),
            MaxPooling((2, 2), step=(2, 2), name='pool_10'),

            Convolutional(filter_size=(3, 3), num_filters=512,
                          border_mode=(1, 1), name='conv_11'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=512,
                          border_mode=(1, 1), name='conv_12'),
            Rectifier(),
            Convolutional(filter_size=(3, 3), num_filters=512,
                          border_mode=(1, 1), name='conv_13'),
            Rectifier(),
            MaxPooling((2, 2), step=(2, 2), name='pool_13'),
        ]

        mlp = MLP([Rectifier(name='fc_14'), Rectifier(name='fc_15'), Softmax()],
                  [25088, 4096, 4096, 1000],
                  )
        conv_sequence = ConvolutionalSequence(
            conv_layers, 3, image_size=(224, 224))

        super(VGGNet, self).__init__(
            [conv_sequence.apply, Flattener().apply, mlp.apply], **kwargs)
Code Example #7
    def __init__(self,
                 conv_activations,
                 num_channels,
                 image_shape,
                 filter_sizes,
                 feature_maps,
                 pooling_sizes,
                 top_mlp_activations,
                 top_mlp_dims,
                 conv_step=None,
                 border_mode='valid',
                 **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = list(zip(filter_sizes, feature_maps))

        # Construct convolutional, batch-normalization, activation, and pooling
        # layers with corresponding parameters
        self.convolution_layer = (
            Convolutional(filter_size=filter_size,
                          num_filters=num_filter,
                          step=self.conv_step,
                          border_mode=self.border_mode,
                          name='conv_{}'.format(i))
            for i, (filter_size, num_filter) in enumerate(conv_parameters))

        self.BN_layer = (BatchNormalization(name='bn_conv_{}'.format(i))
                         for i, _ in enumerate(conv_parameters))

        self.pooling_layer = (MaxPooling(size, name='pool_{}'.format(i))
                              for i, size in enumerate(pooling_sizes))

        self.layers = list(
            interleave([
                self.convolution_layer, self.BN_layer, conv_activations,
                self.pooling_layer
            ]))

        self.conv_sequence = ConvolutionalSequence(self.layers,
                                                   num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [
            self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply
        ]
        super(LeNet, self).__init__(application_methods, **kwargs)
Code Example #8
def create_model_bricks():
    convnet = ConvolutionalSequence(
        layers=[
            Convolutional(
                filter_size=(4, 4),
                num_filters=32,
                name='conv1'),
            SpatialBatchNormalization(name='batch_norm1'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=32,
                name='conv2'),
            SpatialBatchNormalization(name='batch_norm2'),
            Rectifier(),
            Convolutional(
                filter_size=(4, 4),
                num_filters=64,
                name='conv3'),
            SpatialBatchNormalization(name='batch_norm3'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=64,
                name='conv4'),
            SpatialBatchNormalization(name='batch_norm4'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                num_filters=128,
                name='conv5'),
            SpatialBatchNormalization(name='batch_norm5'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=128,
                name='conv6'),
            SpatialBatchNormalization(name='batch_norm6'),
            Rectifier(),
        ],
        num_channels=3,
        image_size=(64, 64),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()

    mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Logistic()],
        dims=[numpy.prod(convnet.get_dim('output')), 1000, 40],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='mlp')
    mlp.initialize()

    return convnet, mlp
Code Example #9
def test_convolutional_sequence():
    x = tensor.tensor4('x')
    num_channels = 4
    pooling_size = 3
    batch_size = 5
    activation = Rectifier().apply

    conv = ConvolutionalLayer(activation, (3, 3),
                              5, (pooling_size, pooling_size),
                              weights_init=Constant(1.),
                              biases_init=Constant(5.))
    conv2 = ConvolutionalActivation(activation, (2, 2),
                                    4,
                                    weights_init=Constant(1.))

    seq = ConvolutionalSequence([conv, conv2],
                                num_channels,
                                image_size=(17, 13))
    seq.push_allocation_config()
    assert conv.num_channels == 4
    assert conv2.num_channels == 5
    conv2.convolution.use_bias = False
    y = seq.apply(x)
    seq.initialize()
    func = function([x], y)

    x_val = numpy.ones((batch_size, 4, 17, 13), dtype=theano.config.floatX)
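    # First conv on all-ones input: 3x3 filter over 4 channels plus bias 5 gives
    # 9 * 4 + 5 = 41 per unit; the 2x2 second conv (5 input channels, no bias)
    # then sums that over 2 * 2 * 5 = 20 positions, i.e. 41 * 4 * 5.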
    y_val = (numpy.ones((batch_size, 4, 4, 3)) * (9 * 4 + 5) * 4 * 5)
    assert_allclose(func(x_val), y_val)
Code Example #10
    def build_conv_layers(self, image=None):

        if image is None:
            image = T.ftensor4('spectrogram')

        conv_list = []
        for layer in range(self.layers):
            layer_param = self.params[layer]
            conv_layer = Convolutional(layer_param[0], layer_param[1],
                                       layer_param[2])
            pool_layer = MaxPooling(layer_param[3])

            conv_layer.name = "convolution" + str(layer)
            pool_layer.name = "maxpooling" + str(layer)

            conv_list.append(conv_layer)
            conv_list.append(pool_layer)
            conv_list.append(Rectifier())

        conv_seq = ConvolutionalSequence(conv_list,
                                         self.params[0][2],
                                         image_size=self.image_size,
                                         weights_init=IsotropicGaussian(
                                             std=0.5, mean=0),
                                         biases_init=Constant(0))

        conv_seq._push_allocation_config()
        conv_seq.initialize()
        out = conv_seq.apply(image)

        return out, conv_seq.get_dim('output')
Code Example #11
File: allconv.py Project: chargen/net-intent
    def __init__(self,
                 image_shape=None,
                 output_size=None,
                 noise_batch_size=None,
                 noise_without_rectifier=False,
                 noise_after_rectifier=False,
                 **kwargs):
        self.num_channels = 3
        self.image_shape = image_shape or (32, 32)
        self.output_size = output_size or 10
        self.noise_batch_size = noise_batch_size
        conv_parameters = [(96, 3, 1, 'half', Convolutional),
                           (96, 3, 1, 'half', Convolutional),
                           (96, 3, 2, 'half', NoisyConvolutional),
                           (192, 3, 1, 'half', Convolutional),
                           (192, 3, 1, 'half', Convolutional),
                           (192, 3, 2, 'half', NoisyConvolutional),
                           (192, 3, 1, 'half', Convolutional),
                           (192, 1, 1, 'valid', Convolutional),
                           (10, 1, 1, 'valid', Convolutional)]
        fc_layer = 10

        self.convolutions = []
        layers = []
        for i, (num_filters, filter_size, conv_step, border_mode,
                cls) in enumerate(conv_parameters):
            if cls == NoisyConvolutional and noise_after_rectifier:
                cls = NoisyConvolutional2
            layer = cls(filter_size=(filter_size, filter_size),
                        num_filters=num_filters,
                        step=(conv_step, conv_step),
                        border_mode=border_mode,
                        tied_biases=True,
                        name='conv_{}'.format(i))
            if cls == NoisyConvolutional or cls == NoisyConvolutional2:
                layer.noise_batch_size = self.noise_batch_size
            self.convolutions.append(layer)
            layers.append(layer)
            if cls != NoisyConvolutional2 and not noise_without_rectifier:
                layers.append(Rectifier())

        self.conv_sequence = ConvolutionalSequence(layers,
                                                   self.num_channels,
                                                   image_size=self.image_shape)

        # The AllConvNet applies average pooling to combine top-level
        # features across the image.
        self.flattener = GlobalAverageFlattener()

        # Then it inserts one final 10-way FC layer before softmax
        # self.top_mlp = MLP([Rectifier(), Softmax()],
        #     [conv_parameters[-1][0], fc_layer, self.output_size])
        self.top_softmax = Softmax()

        application_methods = [
            self.conv_sequence.apply, self.flattener.apply,
            self.top_softmax.apply
        ]

        super(NoisyAllConvNet, self).__init__(application_methods, **kwargs)
Code Example #12
    def __init__(self, batch_norm, num_channels=1, **kwargs):

        self.layers = []
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=64,
                          border_mode=(1, 1),
                          name='conv_1'))
        self.layers.append(Rectifier())
        self.layers.append(MaxPooling(pooling_size=(2, 2), name='pool_1'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=128,
                          border_mode=(1, 1),
                          name='conv_2'))
        self.layers.append(Rectifier())
        self.layers.append(MaxPooling(pooling_size=(2, 2), name='pool_2'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=256,
                          border_mode=(1, 1),
                          name='conv_3'))
        if batch_norm:
            self.layers.append(
                BatchNormalization(broadcastable=(False, True, True),
                                   name='bn_1'))
        self.layers.append(Rectifier())
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=256,
                          border_mode=(1, 1),
                          name='conv_4'))
        self.layers.append(Rectifier())
        self.layers.append(
            MaxPooling(pooling_size=(1, 2), step=(1, 2), name='pool_3'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=512,
                          border_mode=(1, 1),
                          name='conv_5'))
        if batch_norm:
            self.layers.append(
                BatchNormalization(broadcastable=(False, True, True),
                                   name='bn_2'))
        self.layers.append(Rectifier())
        self.layers.append(
            MaxPooling(pooling_size=(2, 1), step=(2, 1), name='pool_4'))
        self.layers.append(
            Convolutional(filter_size=(3, 3),
                          num_filters=512,
                          border_mode=(1, 1),
                          name='conv_6'))
        if batch_norm:
            self.layers.append(
                BatchNormalization(broadcastable=(False, True, True),
                                   name='bn_3'))
        self.layers.append(Rectifier())
        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels)
Code Example #13
    def __init__(self, image_dimension, **kwargs):

        layers = []

        #############################################
        # a first block with 2 convolutions of 32 (3, 3) filters
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 2nd block with 3 convolutions of 64 (3, 3) filters
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 3rd block with 4 convolutions of 128 (3, 3) filters
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        self.conv_sequence = ConvolutionalSequence(layers,
                                                   3,
                                                   image_size=image_dimension)

        flattener = Flattener()

        self.top_mlp = MLP(activations=[Rectifier(), Logistic()],
                           dims=[500, 1])
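        # NOTE: Blocks' MLP expects len(dims) == len(activations) + 1, so the
        # flattened convolutional output size would normally appear as the first
        # entry of dims above.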

        application_methods = [
            self.conv_sequence.apply, flattener.apply, self.top_mlp.apply
        ]

        super(VGGNet, self).__init__(application_methods,
                                     biases_init=Constant(0),
                                     weights_init=Uniform(width=.1),
                                     **kwargs)
Code Example #14
File: test_conv.py Project: violet-zct/blocks
def test_convolutional_sequence_with_raw_activation():
    seq = ConvolutionalSequence([Rectifier()],
                                num_channels=4,
                                image_size=(20, 14))
    input_ = (((numpy.arange(2 * 4 * 20 * 14).reshape(
        (2, 4, 20, 14)) % 2) * 2 - 1).astype(theano.config.floatX))
    expected_ = input_ * (input_ > 0)
    x = theano.tensor.tensor4()
    assert_allclose(seq.apply(x).eval({x: input_}), expected_)
Code Example #15
File: test_conv.py Project: zuiwufenghua/blocks
def test_border_mode_not_pushed():
    layers = [
        Convolutional(border_mode='full'),
        ConvolutionalActivation(Rectifier().apply),
        ConvolutionalActivation(Rectifier().apply, border_mode='valid'),
        ConvolutionalActivation(Rectifier().apply, border_mode='full')
    ]
    stack = ConvolutionalSequence(layers)
    stack.push_allocation_config()
    assert stack.children[0].border_mode == 'full'
    assert stack.children[1].border_mode == 'valid'
    assert stack.children[2].border_mode == 'valid'
    assert stack.children[3].border_mode == 'full'
    stack2 = ConvolutionalSequence(layers, border_mode='full')
    stack2.push_allocation_config()
    assert stack2.children[0].border_mode == 'full'
    assert stack2.children[1].border_mode == 'full'
    assert stack2.children[2].border_mode == 'full'
    assert stack2.children[3].border_mode == 'full'
Code Example #16
def test_pooling_works_in_convolutional_sequence():
    x = tensor.tensor4('x')
    brick = ConvolutionalSequence([AveragePooling((2, 2), step=(2, 2)),
                                   MaxPooling((4, 4), step=(2, 2),
                                              ignore_border=True)],
                                  image_size=(16, 32), num_channels=3)
    brick.allocate()
    y = brick.apply(x)
    out = y.eval({x: numpy.empty((2, 3, 16, 32), dtype=theano.config.floatX)})
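    # AveragePooling (2, 2) with step (2, 2): 16x32 -> 8x16; MaxPooling (4, 4) with
    # step (2, 2) and ignore_border=True: (8 - 4) // 2 + 1 = 3, (16 - 4) // 2 + 1 = 7.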
    assert out.shape == (2, 3, 3, 7)
Code Example #17
def test_convolutional_transpose_original_size_inferred_conv_sequence():
    brick = ConvolutionalTranspose(filter_size=(4, 5), num_filters=10,
                                   step=(3, 2))

    seq = ConvolutionalSequence([brick], num_channels=5, image_size=(6, 9))
    try:
        seq.allocate()
    except Exception as e:
        raise AssertionError('exception raised: {}: {}'.format(
            e.__class__.__name__, e))
Code Example #18
def test_fully_layer():
	batch_size=2
	x = T.tensor4();
	y = T.ivector()
	V = 200
	layer_conv = Convolutional(filter_size=(5,5),num_filters=V,
				name="toto",
				weights_init=IsotropicGaussian(0.01),
				biases_init=Constant(0.0))
	# try with no bias
	activation = Rectifier()
	pool = MaxPooling(pooling_size=(2,2))

	convnet = ConvolutionalSequence([layer_conv, activation, pool], num_channels=15,
					image_size=(10,10),
					name="conv_section")
	convnet.push_allocation_config()
	convnet.initialize()
	output=convnet.apply(x)
	batch_size=output.shape[0]
	output_dim=np.prod(convnet.get_dim('output'))
	result_conv = output.reshape((batch_size, output_dim))
	mlp=MLP(activations=[Rectifier().apply], dims=[output_dim, 10],
				weights_init=IsotropicGaussian(0.01),
				biases_init=Constant(0.0))
	mlp.initialize()
	output=mlp.apply(result_conv)
	cost = T.mean(Softmax().categorical_cross_entropy(y.flatten(), output))
	cg = ComputationGraph(cost)
	W = VariableFilter(roles=[WEIGHT])(cg.variables)
	B = VariableFilter(roles=[BIAS])(cg.variables)
	W = W[0]; b = B[0]

	inputs_fully = VariableFilter(roles=[INPUT], bricks=[Linear])(cg)
	outputs_fully = VariableFilter(roles=[OUTPUT], bricks=[Linear])(cg)
	var_input=inputs_fully[0]
	var_output=outputs_fully[0]
	
	[d_W,d_S,d_b] = T.grad(cost, [W, var_output, b])

	d_b = d_b.dimshuffle(('x',0))
	d_p = T.concatenate([d_W, d_b], axis=0)
	x_value = 1e3*np.random.ranf((2,15, 10, 10))
	f = theano.function([x,y], [var_input, d_S, d_p], allow_input_downcast=True, on_unused_input='ignore')
	A, B, C= f(x_value, [5, 0])
	A = np.concatenate([A, np.ones((2,1))], axis=1)
	print 'A', A.shape
	print 'B', B.shape
	print 'C', C.shape

	print lin.norm(C - np.dot(np.transpose(A), B), 'fro')

	return
	
	"""
Code Example #19
def test_convolutional_sequence_use_bias():
    cnn = ConvolutionalSequence(
        sum([[Convolutional(filter_size=(1, 1), num_filters=1), Rectifier()]
             for _ in range(3)], []),
        num_channels=1, image_size=(1, 1),
        use_bias=False)
    cnn.allocate()
    x = tensor.tensor4()
    y = cnn.apply(x)
    params = ComputationGraph(y).parameters
    assert len(params) == 3 and all(param.name == 'W' for param in params)
Code Example #20
def test_convolutional_sequence_with_convolutions_raw_activation():
    seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         Rectifier(),
         Convolutional(filter_size=(5, 5), num_filters=3, step=(2, 2)),
         Tanh()],
        num_channels=2,
        image_size=(21, 39))
    seq.allocate()
    x = theano.tensor.tensor4()
    out = seq.apply(x).eval({x: numpy.ones((10, 2, 21, 39),
                                           dtype=theano.config.floatX)})
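    # 21x39 -> 19x37 after the 3x3 convolution, then (19 - 5) // 2 + 1 = 8 and
    # (37 - 5) // 2 + 1 = 17 after the strided 5x5 convolution.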
    assert out.shape == (10, 3, 8, 17)
Code Example #21
def conv_block(input_img,
               n_filter,
               filter_size,
               input_featuremap_size,
               ordering=''):

    # found in torch spatialconvolution
    std0 = 2. / (filter_size[0] * filter_size[1] *
                 input_featuremap_size[0])**.5
    std1 = 2. / (input_featuremap_size[0])**.5

    layers = []
    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=n_filter,
                      border_mode='half',
                      name='conv%s_1' % (ordering, ),
                      use_bias=True,
                      weights_init=Uniform(width=std0)))
    layers.append(BatchNormalization(name='bn%s_1' % (ordering, )))
    layers.append(LeakyReLU())
    layers.append(
        Convolutional(filter_size=filter_size,
                      num_filters=n_filter,
                      border_mode='half',
                      name='conv%s_2' % (ordering, ),
                      use_bias=True,
                      weights_init=Uniform(width=std0)))
    layers.append(BatchNormalization(name='bn%s_2' % (ordering, )))
    layers.append(LeakyReLU())
    layers.append(
        Convolutional(filter_size=(1, 1),
                      num_filters=n_filter,
                      border_mode='valid',
                      name='conv%s_3b' % (ordering, ),
                      use_bias=True,
                      weights_init=Uniform(width=std1)))
    layers.append(BatchNormalization(name='bn%s_3' % (ordering, )))
    layers.append(LeakyReLU())

    conv_sequence = ConvolutionalSequence(
        layers,
        num_channels=input_featuremap_size[0],
        image_size=(input_featuremap_size[1], input_featuremap_size[2]),
        biases_init=Uniform(width=.1),
        name='convsequence%s' % (ordering, ))

    conv_sequence.initialize()
    return conv_sequence.apply(input_img)
Code Example #22
File: allconv.py Project: chargen/net-intent
    def __init__(self, image_shape=None, output_size=None, **kwargs):
        self.num_channels = 3
        self.image_shape = image_shape or (32, 32)
        self.output_size = output_size or 10
        conv_parameters = [(96, 3, 1, 'half'), (96, 3, 1, 'half'),
                           (96, 3, 2, 'half'), (192, 3, 1, 'half'),
                           (192, 3, 1, 'half'), (192, 3, 2, 'half'),
                           (192, 3, 1, 'half'), (192, 1, 1, 'valid'),
                           (10, 1, 1, 'valid')]
        fc_layer = 10

        self.convolutions = list([
            Convolutional(filter_size=(filter_size, filter_size),
                          num_filters=num_filters,
                          step=(conv_step, conv_step),
                          border_mode=border_mode,
                          tied_biases=True,
                          name='conv_{}'.format(i))
            for i, (num_filters, filter_size, conv_step,
                    border_mode) in enumerate(conv_parameters)
        ])

        # Add two trivial channel masks to allow by-channel dropout
        self.convolutions.insert(6, ChannelMask(name='mask_1'))
        self.convolutions.insert(3, ChannelMask(name='mask_0'))

        self.conv_sequence = ConvolutionalSequence(
            list(
                interleave([
                    self.convolutions, (Rectifier() for _ in self.convolutions)
                ])), self.num_channels, self.image_shape)

        # The AllConvNet applies average pooling to combine top-level
        # features across the image.
        self.flattener = GlobalAverageFlattener()

        # Then it inserts one final 10-way FC layer before softmax
        # self.top_mlp = MLP([Rectifier(), Softmax()],
        #     [conv_parameters[-1][0], fc_layer, self.output_size])
        self.top_softmax = Softmax()

        application_methods = [
            self.conv_sequence.apply, self.flattener.apply,
            self.top_softmax.apply
        ]

        super(AllConvNet, self).__init__(application_methods, **kwargs)
Code Example #23
def convolutional_sequence(filter_size, num_filters, image_size, num_channels=1):
    
    layers = []
    # layers.append(BatchNormalization(name='batchnorm_pixels'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters, use_bias=True, tied_biases=True, name='conv_1'))
    layers.append(BatchNormalization(name='batchnorm_1'))
    layers.append(Rectifier(name='non_linear_1'))
    
    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters, use_bias=True, tied_biases=False, name='conv_2'))
    layers.append(BatchNormalization(name='batchnorm_2'))
    layers.append(Rectifier(name='non_linear_2'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_2'))
        
    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*2, use_bias=True, tied_biases=True, name='conv_3'))
    layers.append(BatchNormalization(name='batchnorm_3'))
    layers.append(Rectifier(name='non_linear_3'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*2, use_bias=True, tied_biases=True, name='conv_4'))
    layers.append(BatchNormalization(name='batchnorm_4'))
    layers.append(Rectifier(name='non_linear_4'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_4'))
    
    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*4, use_bias=True, tied_biases=False, name='conv_5'))
    layers.append(BatchNormalization(name='batchnorm_5'))
    layers.append(Rectifier(name='non_linear_5'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*4, use_bias=True, tied_biases=True, name='conv_6'))
    layers.append(BatchNormalization(name='batchnorm_6'))
    layers.append(Rectifier(name='non_linear_6'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_6'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*8, use_bias=True, tied_biases=True, name='conv_7'))
    layers.append(BatchNormalization(name='batchnorm_7'))
    layers.append(Rectifier(name='non_linear_7'))

    layers.append(Convolutional(filter_size=filter_size, num_filters=num_filters*8, use_bias=True, tied_biases=True, name='conv_8'))
    layers.append(BatchNormalization(name='batchnorm_8'))
    layers.append(Rectifier(name='non_linear_8'))
    
    layers.append(MaxPooling(pooling_size=(2,2), name='maxpool_8'))
    
    return ConvolutionalSequence(layers, num_channels=num_channels,
                                 image_size=image_size,
                                 biases_init=Constant(0.),
                                 weights_init=IsotropicGaussian(0.01))
Code Example #24
    def __init__(self,
                 layers,
                 num_channels,
                 image_size,
                 use_bias=False,
                 **kwargs):
        self.layers = layers
        self.num_channels = num_channels
        self.image_size = image_size

        self.mapping = ConvolutionalSequence(layers=layers,
                                             num_channels=num_channels,
                                             image_size=image_size,
                                             use_bias=use_bias,
                                             name='decoder_mapping')
        children = [self.mapping]
        kwargs.setdefault('children', []).extend(children)
        super(Decoder, self).__init__(**kwargs)
Code Example #25
def main():
    initial = numpy.random.normal(0, 0.1, (1, 1, 200, 200))
    x = theano.shared(initial)

    conv_layer = ConvolutionalLayer(
        Rectifier().apply,
        (16, 16),
        9,
        (4, 4),
        1
    )
    conv_layer2 = ConvolutionalLayer(
        Rectifier().apply,
        (7, 7),
        9,
        (2, 2),
        1
    )
    con_seq = ConvolutionalSequence([conv_layer], 1,
                                    image_size=(200, 200),
                                    weights_init=IsotropicGaussian(0.1),
                                    biases_init=Constant(0.)
                                    )

    con_seq.initialize()
    out = con_seq.apply(x)
    target_out = out[0, 0, 1, 1]

    grad = theano.grad(target_out - .1 * (x ** 2).sum(), x)
    updates = {x: x + 5e-1 * grad}
    #x.set_value(numpy.ones((1, 1, 200, 200)))
    #print theano.function([], out)()

    make_step = theano.function([], target_out, updates=updates)

    for i in xrange(400):
        out_val = make_step()
        print i, out_val

    image = x.get_value()[0][0]
    image = (image - image.mean()) / image.std()
    image = numpy.array([image, image, image]).transpose(1, 2, 0)
    plt.imshow(numpy.cast['uint8'](image * 65. + 128.), interpolation='none')
    plt.show()
Code Example #26
File: train.py Project: refnil/ift6266h16
def net_dvc(image_size=(32, 32)):
    convos = [5, 5, 5]
    pools = [2, 2, 2]
    filters = [100, 200, 300]

    tuplify = lambda x: (x, x)
    convos = list(map(tuplify, convos))
    conv_layers = [Convolutional(filter_size=s,num_filters=o, num_channels=i, name="Conv"+str(n))\
            for s,o,i,n in zip(convos, filters, [3] + filters, range(1000))]

    pool_layers = [MaxPooling(p) for p in map(tuplify, pools)]

    activations = [Rectifier() for i in convos]

    layers = [i for l in zip(conv_layers, activations, pool_layers) for i in l]

    cnn = ConvolutionalSequence(layers,
                                3,
                                image_size=image_size,
                                name="cnn",
                                weights_init=Uniform(width=.1),
                                biases_init=Constant(0))

    cnn._push_allocation_config()
    cnn_output = np.prod(cnn.get_dim('output'))

    mlp_size = [cnn_output, 500, 2]
    mlp = MLP([Rectifier(), Softmax()],
              mlp_size,
              name="mlp",
              weights_init=Uniform(width=.1),
              biases_init=Constant(0))

    seq = FeedforwardSequence([net.apply for net in [cnn, Flattener(), mlp]])
    seq.push_initialization_config()

    seq.initialize()
    return seq
Code Example #27
def create_model_brick():
    layers = [
        conv_brick(2, 1, 64),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)
    ]
    encoder_mapping = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            use_bias=False,
                                            name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    layers = [
        conv_transpose_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS),
        Logistic()
    ]
    decoder_mapping = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

    layers = [
        conv_brick(2, 1, 64),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK)
    ]
    x_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            use_bias=False,
                                            name='x_discriminator')
    x_discriminator.push_allocation_config()

    layers = [
        conv_brick(1, 1, 1024),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024),
        LeakyRectifier(leak=LEAK)
    ]
    z_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='z_discriminator')
    z_discriminator.push_allocation_config()

    layers = [
        conv_brick(1, 1, 2048),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)
    ]
    joint_discriminator = ConvolutionalSequence(
        layers=layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')

    discriminator = XZJointDiscriminator(x_discriminator,
                                         z_discriminator,
                                         joint_discriminator,
                                         name='discriminator')

    ali = ALI(encoder,
              decoder,
              discriminator,
              weights_init=GAUSSIAN_INIT,
              biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()
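    # After pushing the allocation config, selectively re-enable biases on the
    # output layers of the encoder and decoder mappings and on the first
    # discriminator layer; the decoder's final convolution bias is then set from
    # the data log-odds below.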
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(b_value)

    return ali
Code Example #28
    # Discriminator

    layers = [
        conv_brick(5, 1, 32),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 64),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 128),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 256),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 512),
        ConvMaxout(num_pieces=NUM_PIECES)
    ]
    x_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            name='x_discriminator')
    x_discriminator.push_allocation_config()

    layers = [
        conv_brick(1, 1, 512),
        ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 512),
        ConvMaxout(num_pieces=NUM_PIECES)
    ]
    z_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='z_discriminator')
    z_discriminator.push_allocation_config()
Code Example #29
def inception(image_shape, num_input, conv1, conv2, conv3, conv4, conv5, conv6,
              out, i):
    layers1 = []
    layers2 = []
    layers3 = []
    layers4 = []
    layers1.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv1,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers1.append(BatchNormalization(name='batch_{}'.format(i)))
    layers1.append(Rectifier())
    conv_sequence1 = ConvolutionalSequence(layers1,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence1.initialize()
    out1 = conv_sequence1.apply(out)
    i = i + 1

    layers2.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv2,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    i = i + 1
    layers2.append(
        Convolutional(filter_size=(3, 3),
                      num_channels=conv2,
                      num_filters=conv3,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers2.append(BatchNormalization(name='batch_{}'.format(i)))
    layers2.append(Rectifier())
    conv_sequence2 = ConvolutionalSequence(layers2,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence2.initialize()
    out2 = conv_sequence2.apply(out)
    i = i + 1

    layers3.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv4,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    i = i + 1
    layers3.append(
        Convolutional(filter_size=(5, 5),
                      num_channels=conv4,
                      num_filters=conv5,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers3.append(BatchNormalization(name='batch_{}'.format(i)))
    layers3.append(Rectifier())
    conv_sequence3 = ConvolutionalSequence(layers3,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence3.initialize()
    out3 = conv_sequence3.apply(out)
    i = i + 1

    layers4.append(
        MaxPooling((3, 3),
                   step=(1, 1),
                   padding=(1, 1),
                   name='pool_{}'.format(i)))
    layers4.append(
        Convolutional(filter_size=(1, 1),
                      num_channels=num_input,
                      num_filters=conv6,
                      image_size=image_shape,
                      border_mode='half',
                      name='conv_{}'.format(i)))
    layers4.append(BatchNormalization(name='batch_{}'.format(i)))
    layers4.append(Rectifier())
    i = i + 1
    conv_sequence4 = ConvolutionalSequence(layers4,
                                           num_channels=num_input,
                                           image_size=image_shape,
                                           weights_init=Orthogonal(),
                                           use_bias=False,
                                           name='convSeq_{}'.format(i))
    conv_sequence4.initialize()
    out4 = conv_sequence4.apply(out)
    #Merge
    return T.concatenate([out1, out2, out3, out4], axis=1)
Code Example #30
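# (Snippet starts mid-script: it assumes that 'layers' was initialized earlier,
# presumably with a first Convolutional brick named 'conv_1', that 'x' is the
# input tensor4, and that the inception() helper from the previous example is
# in scope.)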
layers.append(BatchNormalization(name='batch_1'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_1'))
layers.append(
    Convolutional(filter_size=(3, 3),
                  num_filters=192,
                  border_mode='half',
                  name='conv_2'))
layers.append(BatchNormalization(name='batch_2'))
layers.append(Rectifier())
layers.append(MaxPooling((3, 3), step=(2, 2), padding=(1, 1), name='pool_2'))

#Create the sequence
conv_sequence = ConvolutionalSequence(layers,
                                      num_channels=3,
                                      image_size=(160, 160),
                                      weights_init=Orthogonal(),
                                      use_bias=False,
                                      name='convSeq')
#Initialize the convnet
conv_sequence.initialize()
#Output the first result
out = conv_sequence.apply(x)

###############SECOND STAGE#####################
out2 = inception((20, 20), 192, 64, 96, 128, 16, 32, 32, out, 10)
out3 = inception((20, 20), 256, 128, 128, 192, 32, 96, 64, out2, 20)
out31 = MaxPooling((2, 2), name='poolLow').apply(out3)

out4 = inception((10, 10), 480, 192, 96, 208, 16, 48, 64, out31, 30)
out5 = inception((10, 10), 512, 160, 112, 224, 24, 64, 64, out4, 40)
out6 = inception((10, 10), 512, 128, 128, 256, 24, 64, 64, out5, 50)