Example #1
def build_cnn(patch_size, input_var=None):
    input_layer = lasagne.layers.InputLayer(shape=(None, 1, patch_size[0],
                                                   patch_size[1]),
                                            input_var=input_var)

    network = batch_norm(
        lasagne.layers.Conv2DLayer(input_layer,
                                   num_filters=32,
                                   filter_size=(9, 9)))
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network, num_filters=32,
                                   filter_size=(5, 5)))
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network, num_filters=32,
                                   filter_size=(5, 5)))
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network, num_filters=32,
                                   filter_size=(5, 5)))
    network = lasagne.layers.MaxPool2DLayer(network, pool_size=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network, num_filters=32,
                                   filter_size=(3, 3)))
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=32,
                                   filter_size=(5, 5),
                                   pad='full'))
    network = lasagne.layers.Upscale2DLayer(network, scale_factor=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=32,
                                   filter_size=(3, 3),
                                   pad='full'))
    network = lasagne.layers.Upscale2DLayer(network, scale_factor=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=32,
                                   filter_size=(3, 3),
                                   pad='full'))
    network = lasagne.layers.Upscale2DLayer(network, scale_factor=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=32,
                                   filter_size=(3, 3),
                                   pad='full'))
    network = lasagne.layers.Upscale2DLayer(network, scale_factor=2)
    network = batch_norm(
        lasagne.layers.Conv2DLayer(
            network,
            num_filters=1,
            filter_size=(5, 5),
            pad='full',
            nonlinearity=lasagne.nonlinearities.sigmoid))

    return network
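A minimal sketch of compiling this encoder-decoder for inference (the 128x128 patch size and variable names are illustrative assumptions, not from the original source; 128 is chosen so the valid-mode encoder and full-mode decoder shapes work out):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
network = build_cnn((128, 128), input_var)
# deterministic=True disables any training-only behaviour
prediction = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([input_var], prediction)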
Example #2
def build_model(batch_size=BATCH_SIZE):
    """ Compile net architecture """
    nonlin = lasagne.nonlinearities.rectify

    # --- input layers ---
    l_in = lasagne.layers.InputLayer(shape=(batch_size, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input')

    # --- conv layers ---
    net = Conv2DLayer(l_in, num_filters=64, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net, num_filters=64, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net, num_filters=128, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = Conv2DLayer(net, num_filters=128, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net, num_filters=256, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net, num_filters=256, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net, num_filters=256, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net, num_filters=256, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net, num_filters=1024, filter_size=3, pad=0, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')
    net = Conv2DLayer(net, num_filters=1024, filter_size=1, pad=0, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')

    # --- feed forward part ---
    net = Conv2DLayer(net, num_filters=10, filter_size=1, W=init_conv(),
                      nonlinearity=nonlin, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.Pool2DLayer(net, pool_size=2, ignore_border=False,
                                     mode='average_exc_pad', name='GlobalAveragePool')
    net = lasagne.layers.FlattenLayer(net, name='Flatten')

    return net, l_in
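The model above mixes batch-normalized conv blocks with DropoutLayer, so the usual Lasagne train/test distinction applies when compiling it. A minimal sketch, assuming BATCH_SIZE, INPUT_SHAPE, and init_conv are defined as in the original project:

import theano
import lasagne

net, l_in = build_model()
train_out = lasagne.layers.get_output(net)                     # dropout active
test_out = lasagne.layers.get_output(net, deterministic=True)  # dropout disabled
test_fn = theano.function([l_in.input_var], test_out)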
Example #3
def get_model(inp, patch_op):

    icnn1 = batch_norm(utils_lasagne.GCNNLayer([inp, patch_op], 16, nrings=5, nrays=16))
    ffn1 = LL.DenseLayer(icnn1, 512)
    icnn2 = batch_norm(utils_lasagne.GCNNLayer([icnn1, patch_op], 32, nrings=5, nrays=16))
    ffn2 = LL.DenseLayer(icnn2, 512)
    icnn3 = batch_norm(utils_lasagne.GCNNLayer([icnn2, patch_op], 64, nrings=5, nrays=16))
    ffn3 = LL.DenseLayer(icnn3, 512)
    ffn4 = LL.ConcatLayer([inp, ffn1, ffn2, ffn3], axis=1, cropping=None)
    ffn = LL.DenseLayer(ffn4, nclasses, nonlinearity=utils_lasagne.log_softmax)
    return ffn
Example #4
def get_model(inp, patch_op):
    icnn = LL.DenseLayer(inp, 16)
    icnn = batch_norm(
        utils_lasagne.GCNNLayer([icnn, patch_op], 16, nrings=4, nrays=8))
    icnn = batch_norm(
        utils_lasagne.GCNNLayer([icnn, patch_op], 32, nrings=4, nrays=8))
    icnn = batch_norm(
        utils_lasagne.GCNNLayer([icnn, patch_op], 64, nrings=4, nrays=8))
    ffn = batch_norm(LL.DenseLayer(icnn, 512))
    ffn = LL.DenseLayer(ffn, nclasses, nonlinearity=utils_lasagne.log_softmax)

    return ffn
Example #5
def residual_block(l, increase_dim=False, projection=False):
    input_num_filters = l.output_shape[1]
    if increase_dim:
        first_stride = (2, 2)
        out_num_filters = input_num_filters * 2
    else:
        first_stride = (1, 1)
        out_num_filters = input_num_filters

    stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters,
                                   filter_size=(3, 3),
                                   stride=first_stride,
                                   nonlinearity=rectify, pad='same',
                                   W=lasagne.init.HeNormal(gain='relu')))
    stack_2 = batch_norm(ConvLayer(stack_1,
                                   num_filters=out_num_filters,
                                   filter_size=(3, 3),
                                   stride=(1, 1),
                                   nonlinearity=None,
                                   pad='same',
                                   W=lasagne.init.HeNormal(gain='relu')))

    # add shortcut connections
    if increase_dim:
        if projection:
            # projection shortcut, as option B in the paper
            projection = batch_norm(ConvLayer(l,
                                              num_filters=out_num_filters,
                                              filter_size=(1, 1),
                                              stride=(2, 2),
                                              nonlinearity=None,
                                              pad='same', b=None))
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),
                                      nonlinearity=rectify)
        else:
            # identity shortcut, as option A in the paper
            identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2],
                                       lambda s: (s[0], s[1], s[2] // 2, s[3] // 2))
            padding = PadLayer(identity, [out_num_filters // 4, 0, 0],
                               batch_ndim=1)
            block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),
                                      nonlinearity=rectify)
    else:
        block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),
                                  nonlinearity=rectify)

    return block
Example #6
def build_cnn_u(patch_size, input_var=None, number_layers=4):
    network = lasagne.layers.InputLayer(shape=(None, 1, patch_size[0],
                                               patch_size[1]),
                                        input_var=input_var)

    layers = []

    for i in range(0, number_layers):
        network = batch_norm(
            lasagne.layers.Conv2DLayer(network,
                                       num_filters=32 * (2**i),
                                       filter_size=(3, 3),
                                       pad='same'))
        network = batch_norm(
            lasagne.layers.Conv2DLayer(network,
                                       num_filters=32 * (2**i),
                                       filter_size=(3, 3),
                                       pad='same'))
        # append layers to add shortcuts
        layers.append(network)
        network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))

    # layer without upscale
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=32 * (2**(number_layers)),
                                   filter_size=(3, 3),
                                   pad='same'))
    network = batch_norm(
        lasagne.layers.Conv2DLayer(network,
                                   num_filters=32 * (2**(number_layers)),
                                   filter_size=(3, 3),
                                   pad='same'))

    for i in range(number_layers, 0, -1):
        network = lasagne.layers.Upscale2DLayer(network, scale_factor=(2, 2))
        network = lasagne.layers.ConcatLayer([network, layers.pop()], axis=1)
        network = batch_norm(
            lasagne.layers.Conv2DLayer(network,
                                       num_filters=32 * (2**(i - 1)),
                                       filter_size=(3, 3),
                                       pad='same'))
        network = batch_norm(
            lasagne.layers.Conv2DLayer(network,
                                       num_filters=32 * (2**(i - 1)),
                                       filter_size=(3, 3),
                                       pad='same'))

    network = batch_norm(
        lasagne.layers.Conv2DLayer(
            network,
            num_filters=1,
            filter_size=(1, 1),
            nonlinearity=lasagne.nonlinearities.sigmoid))

    return network
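A sketch of a training setup for this U-net, assuming binary segmentation masks as targets (the optimizer, patch size, and variable names are illustrative, not from the original source):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
target_var = T.tensor4('targets')
network = build_cnn_u((64, 64), input_var, number_layers=4)
prediction = lasagne.layers.get_output(network)
# sigmoid output + binary crossentropy for per-pixel segmentation
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([input_var, target_var], loss, updates=updates)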
Example #7
def _my_batch_norm(in_layer):
    """
    my modified version of batchnorm: for ReLUs neither scaling nor bias is needed
    bias is already removed by lasagne.layers.batchnorm, but the scalign is still present.
    one can enforce fixed sclaing by passing 'gamma': None to the Batchnorm Layer
    see https://github.com/Lasagne/Lasagne/issues/635
    :param layer:
    :return:
    """
    if in_layer.nonlinearity.__name__ == rectify.__name__:
        kw = {'gamma': None}
    else:
        kw = {}
    return batch_norm(in_layer, **kw)
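A short illustration of the dispatch above (the layers are hypothetical, for illustration only):

from lasagne.layers import InputLayer, Conv2DLayer
from lasagne.nonlinearities import rectify, sigmoid

l_in = InputLayer((None, 3, 32, 32))
l_relu = Conv2DLayer(l_in, num_filters=16, filter_size=3, nonlinearity=rectify)
bn_relu = _my_batch_norm(l_relu)   # gamma=None: fixed scaling for ReLU
l_sigm = Conv2DLayer(l_in, num_filters=16, filter_size=3, nonlinearity=sigmoid)
bn_sigm = _my_batch_norm(l_sigm)   # default learned gamma is kept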
Example #8
def bn_conv(input_layer, **kwargs):
    """

    Parameters
    ----------
    input_layer :
    kwargs :

    Returns
    -------

    """
    l = Conv2DLayer(input_layer, **kwargs)
    l = batch_norm(l, epsilon=0.001)
    return l
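Usage sketch (parameters are illustrative):

from lasagne.layers import InputLayer
l_in = InputLayer((None, 1, 64, 64))
l = bn_conv(l_in, num_filters=32, filter_size=(3, 3), pad='same')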
Example #9
def build_model(batch_size=BATCH_SIZE):
    """
    Compile net architecture

    :param batch_size: batch size used for training the model
    :return:
        l_out: out-layer of network
        l_in: in-layer of network
    """

    # --- input layers ---
    l_in = lasagne.layers.InputLayer(shape=(batch_size, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input')

    # --- conv layers ---
    net = Conv2DLayer(l_in, num_filters=64, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net, num_filters=64, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net, num_filters=96, filter_size=3, pad=1, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net, num_filters=96, filter_size=3, pad=0, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net, num_filters=256, filter_size=3, pad=0, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')
    net = Conv2DLayer(net, num_filters=256, filter_size=1, pad=0, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')

    # --- classification layers ---
    net = Conv2DLayer(net, num_filters=10, filter_size=1, W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify, name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.Pool2DLayer(net, pool_size=5, ignore_border=False,
                                     mode='average_exc_pad', name='GlobalAveragePool')
    l_out = lasagne.layers.FlattenLayer(net, name='Flatten')

    return l_out, l_in
Example #10
def build_inception_module(name, input_layer, nfilters, batch_norm):
    # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)
    net = {}
    net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)
    net['pool_proj'] = ConvLayer(net['pool'],
                                 nfilters[0],
                                 1,
                                 flip_filters=False)
    if batch_norm:
        net['pool_proj'] = normalization.batch_norm(net['pool_proj'])

    net['1x1'] = ConvLayer(input_layer, nfilters[1], 1, flip_filters=False)
    if batch_norm:
        net['1x1'] = normalization.batch_norm(net['1x1'])

    net['3x3_reduce'] = ConvLayer(input_layer,
                                  nfilters[2],
                                  1,
                                  flip_filters=False)
    if batch_norm:
        net['3x3_reduce'] = normalization.batch_norm(net['3x3_reduce'])
    net['3x3'] = ConvLayer(net['3x3_reduce'],
                           nfilters[3],
                           3,
                           pad=1,
                           flip_filters=False)
    if batch_norm:
        net['3x3'] = normalization.batch_norm(net['3x3'])

    net['5x5_reduce'] = ConvLayer(input_layer,
                                  nfilters[4],
                                  1,
                                  flip_filters=False)
    if batch_norm:
        net['5x5_reduce'] = normalization.batch_norm(net['5x5_reduce'])
    net['5x5'] = ConvLayer(net['5x5_reduce'],
                           nfilters[5],
                           5,
                           pad=2,
                           flip_filters=False)
    if batch_norm:
        net['5x5'] = normalization.batch_norm(net['5x5'])

    net['output'] = ConcatLayer([
        net['1x1'],
        net['3x3'],
        net['5x5'],
        net['pool_proj'],
    ])

    return {'{}/{}'.format(name, k): v for k, v in net.items()}
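The nfilters ordering documented in the comment above can be seen in a single call; the input shape below mimics the stage where 'inception_3a' sits in GoogLeNet and is only illustrative:

from lasagne.layers import InputLayer
net = {'input': InputLayer((None, 192, 28, 28))}
net.update(build_inception_module('inception_3a', net['input'],
                                  [32, 64, 96, 128, 16, 32], True))
# keys: 'inception_3a/1x1', 'inception_3a/3x3', ..., 'inception_3a/output'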
Example #11
def build_stereo_cnn(input_var=None, in_shape_1=100, in_shape_2=150):

    conv_num_filters1 = 16
    conv_num_filters2 = 32
    conv_num_filters3 = 64
    conv_num_filters4 = 128
    filter_size1 = 7
    filter_size2 = 5
    filter_size3 = 3
    filter_size4 = 3
    pool_size = 2
    scale_factor = 2
    pad_in = 'valid'
    pad_out = 'full'

    # Input layer, as usual:
    network = InputLayer(shape=(None, 2, in_shape_1, in_shape_2),
                         input_var=input_var,
                         name="input_layer")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters1,
                    filter_size=(filter_size1, filter_size1),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv1"))

    network = MaxPool2DLayer(network,
                             pool_size=(pool_size, pool_size),
                             name="pool1")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters2,
                    filter_size=(filter_size2, filter_size2),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv2"))

    network = MaxPool2DLayer(network,
                             pool_size=(pool_size, pool_size),
                             name="pool2")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters3,
                    filter_size=(filter_size3, filter_size3),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv3"))

    network = MaxPool2DLayer(network,
                             pool_size=(pool_size, pool_size),
                             name="pool3")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=conv_num_filters4,
                    filter_size=(filter_size4, filter_size4),
                    pad=pad_in,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="conv4"))

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=32,
                    filter_size=(filter_size4, filter_size4),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv1"))

    network = Upscale2DLayer(network,
                             scale_factor=(pool_size, pool_size),
                             name="upscale1")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=16,
                    filter_size=(filter_size3, filter_size3),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv2"))

    network = Upscale2DLayer(network,
                             scale_factor=(pool_size, pool_size),
                             name="upscale2")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=8,
                    filter_size=(filter_size2, filter_size2),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.rectify,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv3"))

    network = Upscale2DLayer(network,
                             scale_factor=(pool_size, pool_size),
                             name="upscale3")

    network = batch_norm(
        Conv2DLayer(network,
                    num_filters=1,
                    filter_size=(filter_size1, filter_size1),
                    pad=pad_out,
                    nonlinearity=lasagne.nonlinearities.sigmoid,
                    W=lasagne.init.GlorotUniform(),
                    name="deconv4"))

    return network
Example #12
# -*- coding: UTF-8 -*-

import lasagne
from lasagne.layers import InputLayer
from lasagne.layers import Conv2DLayer
from lasagne.layers import Pool2DLayer
from lasagne.layers import DenseLayer
from lasagne.layers import GlobalPoolLayer
from lasagne.layers import ConcatLayer
from lasagne.layers.normalization import batch_norm, BatchNormLayer
from lasagne.nonlinearities import softmax

l1_in = InputLayer((64, 768), name='l1_in')
l1_dense = DenseLayer(l1_in, num_units=500, name='l1_dense')

l2_in = InputLayer((64, 768), name='l2_in')
l2_batch = batch_norm(DenseLayer(l2_in, num_units=500, name='l2_dense'),
                      name='l2_batch')

l1_layerList = lasagne.layers.get_all_layers(l1_dense)
l2_layerList = lasagne.layers.get_all_layers(l2_batch)
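
# batch_norm() removed l2_dense's bias and nonlinearity and appended a
# BatchNormLayer plus a NonlinearityLayer, so l2_layerList is
# [l2_in, l2_dense, BatchNormLayer, NonlinearityLayer].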

batch_norm_Layer = l2_layerList[2]
batch_norm_params = batch_norm_Layer.params

l1_layerParams = lasagne.layers.get_all_params(l1_dense)
l2_layerParams = lasagne.layers.get_all_params(l2_batch)
l2_trainableParams = lasagne.layers.get_all_param_values(l2_batch,
                                                         trainable=True)

print('finish!')
Example #13
def build_model(data_size, num_classes, batch_norm=True, cudnn=False):
    net = {}
    input_var = T.tensor4('input')

    Conv2DLayer = lasagne.layers.Conv2DLayer
    Pool2DLayer = lasagne.layers.MaxPool2DLayer

    if cudnn:
        print "Running with cuDNN"
        dnn_module = import_module('lasagne.layers.dnn')
        Conv2DLayer = dnn_module.Conv2DDNNLayer
        Pool2DLayer = dnn_module.MaxPool2DDNNLayer

    net['input'] = lasagne.layers.InputLayer(shape=data_size,
                                             input_var=input_var)

    net['conv1'] = Conv2DLayer(
        net['input'],
        num_filters=32,
        filter_size=4,
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(gain='relu'),
    )
    if batch_norm:
        net['conv1'] = normalization.batch_norm(net['conv1'])
    net['norm1'] = lasagne.layers.LocalResponseNormalization2DLayer(
        net['conv1'])
    net['pool1'] = Pool2DLayer(
        net['norm1'],
        pool_size=(2, 2),
        stride=2,
    )  #out 32x62x62

    net['conv2'] = Conv2DLayer(
        net['pool1'],
        num_filters=64,
        pad=1,
        filter_size=4,
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(gain='relu'),
    )
    if batch_norm:
        net['conv2'] = normalization.batch_norm(net['conv2'])
    net['norm2'] = lasagne.layers.LocalResponseNormalization2DLayer(
        net['conv2'])
    net['pool2'] = Pool2DLayer(
        net['norm2'],
        pool_size=(2, 2),
        stride=2,
    )  #out 64x30x30

    net['conv3-1'] = Conv2DLayer(
        net['pool2'],
        num_filters=128,
        filter_size=3,
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(gain='relu'),
    )  #out 128x28x28
    if batch_norm:
        net['conv3-1'] = normalization.batch_norm(net['conv3-1'])
    net['conv3-2'] = Conv2DLayer(
        net['conv3-1'],
        num_filters=128,
        filter_size=3,
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(gain='relu'),
    )  #out 128x26x26
    if batch_norm:
        net['conv3-2'] = normalization.batch_norm(net['conv3-2'])

    net['conv4'] = Conv2DLayer(
        net['conv3-2'],
        num_filters=64,
        pad=1,
        filter_size=4,
        flip_filters=False,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform(gain='relu'),
    )
    if batch_norm:
        net['conv4'] = normalization.batch_norm(net['conv4'])
    net['pool4'] = Pool2DLayer(
        net['conv4'],
        pool_size=(2, 2),
        stride=2,
    )  #out 64x12x12

    net['fc1'] = lasagne.layers.DenseLayer(
        net['pool4'],
        num_units=512,
        W=lasagne.init.GlorotUniform(gain="relu"),
    )
    if batch_norm:
        net['fc1'] = normalization.batch_norm(net['fc1'])
    net['dropout1'] = lasagne.layers.DropoutLayer(net['fc1'], p=0.5)

    net['fc2'] = lasagne.layers.DenseLayer(
        net['dropout1'],
        num_units=512,
        W=lasagne.init.GlorotUniform(gain="relu"),
    )
    if batch_norm:
        net['fc2'] = normalization.batch_norm(net['fc2'])
    net['dropout2'] = lasagne.layers.DropoutLayer(net['fc2'], p=0.5)

    # - applies the softmax after computing the final layer units
    net['output'] = lasagne.layers.DenseLayer(
        net['dropout2'],
        num_units=num_classes,
        nonlinearity=lasagne.nonlinearities.softmax,
        #W=lasagne.init.GlorotUniform(),
    )
    if batch_norm:
        net['output'] = normalization.batch_norm(net['output'])
    return net
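Usage sketch for the builder above (sizes are illustrative; a 128x128 input matches the per-layer shape comments in the code):

net = build_model(data_size=(None, 3, 128, 128), num_classes=5,
                  batch_norm=True, cudnn=False)
output_layer = net['output']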
Example #14
    def build_cnn(self, input_var=None, n=5):

        # create a residual learning building block with two stacked 3x3
        # convlayers as in paper
        def residual_block(l, increase_dim=False, projection=False):
            input_num_filters = l.output_shape[1]
            if increase_dim:
                first_stride = (2, 2)
                out_num_filters = input_num_filters * 2
            else:
                first_stride = (1, 1)
                out_num_filters = input_num_filters

            stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters,
                                       filter_size=(3, 3),
                                       stride=first_stride,
                                       nonlinearity=rectify, pad='same',
                                       W=lasagne.init.HeNormal(gain='relu')))
            stack_2 = batch_norm(ConvLayer(stack_1,
                                       num_filters=out_num_filters,
                                       filter_size=(3, 3),
                                       stride=(1, 1),
                                       nonlinearity=None,
                                       pad='same',
                                       W=lasagne.init.HeNormal(gain='relu')))

            # add shortcut connections
            if increase_dim:
                if projection:
                    # projection shortcut, as option B in paper
                    projection = batch_norm(ConvLayer(l,
                                        num_filters=out_num_filters,
                                        filter_size=(1, 1),
                                        stride=(2, 2),
                                        nonlinearity=None, pad='same', b=None))
                    block = NonlinearityLayer(ElemwiseSumLayer([stack_2, projection]),
                                              nonlinearity=rectify)
                else:
                    # identity shortcut, as option A in paper
                    identity = ExpressionLayer(l, lambda X: X[:, :, ::2, ::2],
                                               lambda s: (s[0], s[1], s[2] // 2, s[3]//2))
                    padding = PadLayer(identity, [out_num_filters // 4, 0, 0],
                                       batch_ndim=1)
                    block = NonlinearityLayer(ElemwiseSumLayer([stack_2, padding]),
                                              nonlinearity=rectify)
            else:
                block = NonlinearityLayer(ElemwiseSumLayer([stack_2, l]),
                                          nonlinearity=rectify)

            return block

        # Building the network
        l_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)

        # first layer, output is 16 x 32 x 32
        l = batch_norm(ConvLayer(l_in, num_filters=16,
                                 filter_size=(3, 3),
                                 stride=(1, 1),
                                 nonlinearity=rectify, pad='same',
                                 W=lasagne.init.HeNormal(gain='relu')))

        # first stack of residual blocks, output is 16 x 32 x 32
        for _ in range(n):
            l = residual_block(l)

        # second stack of residual blocks, output is 32 x 16 x 16
        l = residual_block(l, increase_dim=True)
        for _ in range(1, n):
            l = residual_block(l)

        # third stack of residual blocks, output is 64 x 8 x 8
        l = residual_block(l, increase_dim=True)
        for _ in range(1, n):
            l = residual_block(l)

        # average pooling
        l = GlobalPoolLayer(l)

        # fully connected layer
        network = DenseLayer(
                l, num_units=10,
                W=lasagne.init.HeNormal(),
                nonlinearity=softmax)

        return network
Example #15
def bn_conv(input_layer, **kwargs):
    l = Conv2DLayer(input_layer, **kwargs)
    l = batch_norm(l, epsilon=0.001)
    return l
Example #16
def build_model(batch_size=BATCH_SIZE):
    """ Compile net architecture """
    nonlin = lasagne.nonlinearities.rectify

    # --- input layers ---
    l_in = lasagne.layers.InputLayer(shape=(batch_size, INPUT_SHAPE[0],
                                            INPUT_SHAPE[1], INPUT_SHAPE[2]),
                                     name='Input')

    # --- conv layers ---
    net = Conv2DLayer(l_in,
                      num_filters=64,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net,
                      num_filters=64,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = Conv2DLayer(net,
                      num_filters=128,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net,
                      num_filters=256,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net,
                      num_filters=256,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net,
                      num_filters=256,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net,
                      num_filters=256,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net,
                      num_filters=1024,
                      filter_size=3,
                      pad=0,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')
    net = Conv2DLayer(net,
                      num_filters=1024,
                      filter_size=1,
                      pad=0,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')

    # --- feed forward part ---
    net = Conv2DLayer(net,
                      num_filters=10,
                      filter_size=1,
                      W=init_conv(),
                      nonlinearity=nonlin,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.Pool2DLayer(net,
                                     pool_size=2,
                                     ignore_border=False,
                                     mode='average_exc_pad',
                                     name='GlobalAveragePool')
    net = lasagne.layers.FlattenLayer(net, name='Flatten')

    return net, l_in
Example #17
def bn_conv(input_layer, **kwargs):
    l = Conv2DLayer(input_layer, **kwargs)
    l = batch_norm(l, epsilon=0.001)
    return l
Example #18
def build_model(data_size, num_classes, batch_norm=True):
    net = {}
    net['input'] = InputLayer(data_size)
    net['conv1/7x7_s2'] = ConvLayer(net['input'],
                                    64,
                                    7,
                                    stride=2,
                                    pad=3,
                                    flip_filters=False)
    if batch_norm:
        net['conv1/7x7_s2'] = normalization.batch_norm(net['conv1/7x7_s2'])
    net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'],
                                    pool_size=3,
                                    stride=2,
                                    ignore_border=False)
    net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)
    net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'],
                                        64,
                                        1,
                                        flip_filters=False)
    if batch_norm:
        net['conv2/3x3_reduce'] = normalization.batch_norm(
            net['conv2/3x3_reduce'])
    net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'],
                                 192,
                                 3,
                                 pad=1,
                                 flip_filters=False)
    if batch_norm:
        net['conv2/3x3'] = normalization.batch_norm(net['conv2/3x3'])
    net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)
    net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'],
                                    pool_size=3,
                                    stride=2,
                                    ignore_border=False)

    net.update(
        build_inception_module('inception_3a', net['pool2/3x3_s2'],
                               [32, 64, 96, 128, 16, 32], batch_norm))
    net.update(
        build_inception_module('inception_3b', net['inception_3a/output'],
                               [64, 128, 128, 192, 32, 96], batch_norm))
    net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'],
                                    pool_size=3,
                                    stride=2,
                                    ignore_border=False)

    net.update(
        build_inception_module('inception_4a', net['pool3/3x3_s2'],
                               [64, 192, 96, 208, 16, 48], batch_norm))
    net.update(
        build_inception_module('inception_4b', net['inception_4a/output'],
                               [64, 160, 112, 224, 24, 64], batch_norm))
    net.update(
        build_inception_module('inception_4c', net['inception_4b/output'],
                               [64, 128, 128, 256, 24, 64], batch_norm))
    net.update(
        build_inception_module('inception_4d', net['inception_4c/output'],
                               [64, 112, 144, 288, 32, 64], batch_norm))
    net.update(
        build_inception_module('inception_4e', net['inception_4d/output'],
                               [128, 256, 160, 320, 32, 128], batch_norm))
    net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'],
                                    pool_size=3,
                                    stride=2,
                                    ignore_border=False)

    net.update(
        build_inception_module('inception_5a', net['pool4/3x3_s2'],
                               [128, 256, 160, 320, 32, 128], batch_norm))
    net.update(
        build_inception_module('inception_5b', net['inception_5a/output'],
                               [128, 384, 192, 384, 48, 128], batch_norm))

    net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])
    net['dropout'] = DropoutLayer(net['pool5/7x7_s1'], p=0.4)
    net['loss3/classifier'] = DenseLayer(net['dropout'],
                                         num_units=1000,
                                         nonlinearity=linear)
    if batch_norm:
        net['loss3/classifier'] = normalization.batch_norm(
            net['loss3/classifier'])
    net['output'] = DenseLayer(net['loss3/classifier'],
                               num_units=num_classes,
                               nonlinearity=softmax)
    if batch_norm:
        net['output'] = normalization.batch_norm(net['output'])
    return net
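Usage sketch (standard GoogLeNet input geometry, illustrative class count):

net = build_model(data_size=(None, 3, 224, 224), num_classes=10,
                  batch_norm=True)
output_layer = net['output']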
Example #19
def build_model(batch_size=BATCH_SIZE):
    """
    Compile net architecture

    :param batch_size: batch size used for training the model
    :return:
        l_out: out-layer of network
        l_in: in-layer of network
    """

    # --- input layers ---
    l_in = lasagne.layers.InputLayer(shape=(batch_size, INPUT_SHAPE[0],
                                            INPUT_SHAPE[1], INPUT_SHAPE[2]),
                                     name='Input')

    # --- conv layers ---
    net = Conv2DLayer(l_in,
                      num_filters=64,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net,
                      num_filters=64,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net,
                      num_filters=96,
                      filter_size=3,
                      pad=1,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = Conv2DLayer(net,
                      num_filters=96,
                      filter_size=3,
                      pad=0,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.MaxPool2DLayer(net, pool_size=2, name='Pool')
    net = lasagne.layers.DropoutLayer(net, p=0.25, name='Dropout')

    net = Conv2DLayer(net,
                      num_filters=256,
                      filter_size=3,
                      pad=0,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')
    net = Conv2DLayer(net,
                      num_filters=256,
                      filter_size=1,
                      pad=0,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.DropoutLayer(net, p=0.5, name='Dropout')

    # --- classification layers ---
    net = Conv2DLayer(net,
                      num_filters=10,
                      filter_size=1,
                      W=init_conv(),
                      nonlinearity=lasagne.nonlinearities.rectify,
                      name='Conv')
    net = batch_norm(net)
    net = lasagne.layers.Pool2DLayer(net,
                                     pool_size=5,
                                     ignore_border=False,
                                     mode='average_exc_pad',
                                     name='GlobalAveragePool')
    l_out = lasagne.layers.FlattenLayer(net, name='Flatten')

    return l_out, l_in
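A quick way to sanity-check a model like this is to count its trainable parameters; a minimal sketch, assuming BATCH_SIZE, INPUT_SHAPE, and init_conv are defined as in the original project:

import lasagne

l_out, l_in = build_model()
n_params = lasagne.layers.count_params(l_out, trainable=True)
print('trainable parameters: %d' % n_params)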