Example #1
import theano.tensor as T
from lasagne.layers import (InputLayer, DimshuffleLayer, DropoutLayer,
                            DenseLayer, LSTMLayer, batch_norm, get_output,
                            get_all_params)
from lasagne.layers.dnn import Conv3DDNNLayer, MaxPool3DDNNLayer
from lasagne.nonlinearities import leaky_rectify, linear

def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer_input     = InputLayer(shape=(None, 30, 80, 80), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer_0         = DimshuffleLayer(layer_input, (0, 'x', 1, 2, 3))

    # Z-score?

    # Two blocks of convolution + batch norm + leaky ReLU, then max-pooling and dropout
    layer_1         = batch_norm(Conv3DDNNLayer(incoming=layer_0, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_2         = batch_norm(Conv3DDNNLayer(incoming=layer_1, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_3         = MaxPool3DDNNLayer(layer_2, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_4         = DropoutLayer(layer_3, p=0.25)

    # Two blocks of convolution + batch norm + leaky ReLU, then max-pooling and dropout
    layer_5         = batch_norm(Conv3DDNNLayer(incoming=layer_4, num_filters=32, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_6         = batch_norm(Conv3DDNNLayer(incoming=layer_5, num_filters=32, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_7         = MaxPool3DDNNLayer(layer_6, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_8         = DropoutLayer(layer_7, p=0.25)
    
    # Three blocks of convolution + batch norm + leaky ReLU, then max-pooling and dropout
    layer_9         = batch_norm(Conv3DDNNLayer(incoming=layer_8, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_10        = batch_norm(Conv3DDNNLayer(incoming=layer_9, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_11        = batch_norm(Conv3DDNNLayer(incoming=layer_10, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_12        = MaxPool3DDNNLayer(layer_11, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_13        = DropoutLayer(layer_12, p=0.25)

    # LSTM over the temporal axis: reorder to (batch, time, channels, height, width)
    layer         = DimshuffleLayer(layer_13, (0, 2, 1, 3, 4))
#    layer_prediction  = LSTMLayer(layer, num_units=2, only_return_final=True, learn_init=True, cell=Gate(linear))
    layer = LSTMLayer(layer, num_units=2, only_return_final=True, learn_init=True)
    layer_prediction = DenseLayer(layer, 2, nonlinearity=linear)

    # Output Layer
    # layer_hidden         = DenseLayer(layer_flatten, 500, nonlinearity=linear)
    # layer_prediction     = DenseLayer(layer_hidden, 2, nonlinearity=linear)

    # Loss
    prediction           = get_output(layer_prediction) / multiply_var**2
    loss                 = T.abs_(prediction - target_var)
    loss                 = loss.mean()

    # Trainable parameters; the caller builds the updates (e.g. SGD with Nesterov momentum)
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True) / multiply_var**2
    test_loss            = T.abs_(test_prediction - target_var)
    test_loss            = test_loss.mean()

    # crps estimate
    crps                 = T.abs_(test_prediction - target_var).mean()/600
    
    return test_prediction, crps, loss, params
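The function returns symbolic Theano expressions rather than compiled functions. A minimal sketch of how a caller might compile them, assuming 4-D float32 clips, 2-column float32 targets, and a scalar scale factor, with Adam as one possible update rule (all of these are our assumptions, not from the source):

# Sketch: compiling the returned expressions (assumed shapes and update rule)
import theano
import theano.tensor as T
from lasagne.updates import adam

input_var    = T.tensor4('inputs')   # (batch, 30, 80, 80)
target_var   = T.matrix('targets')
multiply_var = T.scalar('scale')

test_prediction, crps, loss, params = get_model(input_var, target_var, multiply_var)
updates  = adam(loss, params)
train_fn = theano.function([input_var, target_var, multiply_var], loss,
                           updates=updates, allow_input_downcast=True)
val_fn   = theano.function([input_var, target_var, multiply_var],
                           [test_prediction, crps], allow_input_downcast=True)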
Example #2
 def test_fail_on_mismatching_dimensionality(self):
     try:
         from lasagne.layers.dnn import MaxPool3DDNNLayer
     except ImportError:
         pytest.skip("cuDNN not available")
     with pytest.raises(ValueError) as exc:
         MaxPool3DDNNLayer((10, 20, 30, 40), 3, 2)
     assert "Expected 5 input dimensions" in exc.value.args[0]
     with pytest.raises(ValueError) as exc:
         MaxPool3DDNNLayer((10, 20, 30, 40, 50, 60), 3, 2)
     assert "Expected 5 input dimensions" in exc.value.args[0]
Example #3
def construct_unet_3D(channels=1, no_f_base=8, f_size=3, branches=[2,2,2,2], dropout=0.2, bs=None,
                      class_nums=2, pad="same", nonlinearity=lasagne.nonlinearities.rectify,
                      input_dim=[None,None,None], useups=False):

    net= InputLayer((bs, channels, input_dim[0], input_dim[1], input_dim[2]))

    # Moving downwards the U-shape:
    horizontal_pass=[]
    for i in range(len(branches)):
        net = conv_pool_down_3D(net, no_f_base*2**(i), f_size, conv_depth=branches[i],
                                pad=pad, nonlinearity=nonlinearity, dropout=dropout)
        print("Down conv: ", net.output_shape)
        horizontal_pass.append(net)
        net = MaxPool3DDNNLayer(net, pool_size=(2, 2, 2), stride=(2, 2, 2))
        print("Down Pool: ", net.output_shape)

    # Bottleneck
    net = Conv3DDNNLayer(net, no_f_base*2**len(branches), f_size, pad=pad, nonlinearity=nonlinearity, W=lasagne.init.HeNormal(gain='relu'))
    print("Bottleneck conv: ", net.output_shape)
    net = Conv3DDNNLayer(net, no_f_base*2**len(branches), f_size, pad=pad, nonlinearity=nonlinearity, W=lasagne.init.HeNormal(gain='relu'))
    print("Bottleneck conv: ", net.output_shape)
    #net = Conv3DDNNTransposeLayer(net, no_f_base*2**(len(branches)-1), 2, (2, 2, 2))
    if not useups:
        net = TransposedConv3DLayer(net,no_f_base*2**(len(branches)-1),2,(2,2,2))
    else:
        net = upscale_plus_conv_3D(net,no_f_base*2**(len(branches)-1),f_size,pad,nonlinearity)
    print "Bottleneck up: ",net.output_shape

    # Moving upwards the U-shape:
    for i in range(len(branches)):
        print("Pass before concat: ", horizontal_pass[-(i+1)].output_shape)
        print("net before concat: ", net.output_shape)
        # Identical in both the useups and non-useups cases, so a single call suffices
        net = ConcatLayer([net, horizontal_pass[-(i+1)]], cropping=(None, None, "center", "center", "center"))
        print("Shape after concat: ", net.output_shape)
        if i == len(branches)-1:
            net = conv_pool_up_3D(net, bs, no_f_base*2**(len(branches)-1-i), f_size,
                                  pad=pad, nonlinearity=nonlinearity, conv_depth=branches[i], halt=True, useups=False)
        else:
            net = conv_pool_up_3D(net, bs, no_f_base*2**(len(branches)-1-i), f_size,
                                  pad=pad, nonlinearity=nonlinearity, conv_depth=branches[i], halt=False, useups=False)
        print("Conv up: ", net.output_shape)
    # Class layer: Work around standard softmax bc. it doesn't work with tensor4/3.
    # Hence, we reshape and feed it to an external Nonlinearity layer.
    # net["class_ns"] is the output in image-related shape.
    imageout = net = Conv3DDNNLayer(net, class_nums, 1, nonlinearity=linear, W=lasagne.init.HeNormal(gain='relu'))
    print("imageout shape: ", net.output_shape)
    net = DimshuffleLayer(net, (1, 0, 2, 3, 4))
    print("After shuffle shape: ", net.output_shape)
    net = ReshapeLayer(net, (class_nums, -1))
    print("Reshape shape: ", net.output_shape)
    net = DimshuffleLayer(net, (1, 0))
    print("Dimshuffle shape: ", net.output_shape)
    # Flattened output to be able to feed it to lasagne.objectives.categorical_crossentropy.
    net  = NonlinearityLayer(net, nonlinearity=lasagne.nonlinearities.softmax)
    #imout = NonlinearityLayer(imageout,nonlinearity=lasagne.nonlinearities.softmax)
    return net, imageout
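conv_pool_down_3D, conv_pool_up_3D and upscale_plus_conv_3D are project helpers that the excerpt does not include. A minimal sketch of what conv_pool_down_3D might look like, under our own assumptions about its behaviour (stacked 3-D convolutions followed by optional dropout, with the same layer imports as the excerpt):

def conv_pool_down_3D(net, no_f, f_size, conv_depth=2, pad="same",
                      nonlinearity=lasagne.nonlinearities.rectify, dropout=0.0):
    # Stack `conv_depth` 3-D convolutions with He initialisation, then
    # optionally apply dropout; the caller does the pooling.
    for _ in range(conv_depth):
        net = Conv3DDNNLayer(net, no_f, f_size, pad=pad, nonlinearity=nonlinearity,
                             W=lasagne.init.HeNormal(gain='relu'))
    if dropout:
        net = DropoutLayer(net, p=dropout)
    return net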
Example #4
 def test_not_implemented(self):
     try:
         from lasagne.layers.dnn import MaxPool3DDNNLayer
     except ImportError:
         pytest.skip("cuDNN not available")
     with pytest.raises(NotImplementedError) as exc:
         layer = MaxPool3DDNNLayer((1, 2, 3, 4, 5), pool_size=2,
                                   ignore_border=False)
     assert ("Pool3DDNNLayer does not support ignore_border=False" in
             exc.value.args[0])
Example #5
def build_model():
    '''
    Builds C3D model

    Returns
    -------
    dict
        A dictionary containing the network layers, where the output layer is at key 'prob'
    '''
    net = {}
    net['input'] = InputLayer((None, 3, 16, 112, 112))

    # ----------- 1st layer group ---------------
    net['conv1a'] = Conv3DDNNLayer(net['input'], 64, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False)
    net['pool1']  = MaxPool3DDNNLayer(net['conv1a'], pool_size=(1, 2, 2), stride=(1, 2, 2))

    # ------------- 2nd layer group --------------
    net['conv2a'] = Conv3DDNNLayer(net['pool1'], 128, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    net['pool2']  = MaxPool3DDNNLayer(net['conv2a'], pool_size=(2, 2, 2), stride=(2, 2, 2))

    # ----------------- 3rd layer group --------------
    net['conv3a'] = Conv3DDNNLayer(net['pool2'], 256, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    net['conv3b'] = Conv3DDNNLayer(net['conv3a'], 256, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    net['pool3']  = MaxPool3DDNNLayer(net['conv3b'], pool_size=(2, 2, 2), stride=(2, 2, 2))

    # ----------------- 4th layer group --------------
    net['conv4a'] = Conv3DDNNLayer(net['pool3'], 512, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    net['conv4b'] = Conv3DDNNLayer(net['conv4a'], 512, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    net['pool4']  = MaxPool3DDNNLayer(net['conv4b'], pool_size=(2, 2, 2), stride=(2, 2, 2))

    # ----------------- 5th layer group --------------
    net['conv5a'] = Conv3DDNNLayer(net['pool4'], 512, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    net['conv5b'] = Conv3DDNNLayer(net['conv5a'], 512, (3, 3, 3), pad=1, nonlinearity=lasagne.nonlinearities.rectify)
    # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
    net['pad']    = PadLayer(net['conv5b'], width=[(0, 1), (0, 1)], batch_ndim=3)
    net['pool5']  = MaxPool3DDNNLayer(net['pad'], pool_size=(2, 2, 2), pad=(0, 0, 0), stride=(2, 2, 2))
    net['fc6-1']  = DenseLayer(net['pool5'], num_units=4096, nonlinearity=lasagne.nonlinearities.rectify)
    net['fc7-1']  = DenseLayer(net['fc6-1'], num_units=4096, nonlinearity=lasagne.nonlinearities.rectify)
    net['fc8-1']  = DenseLayer(net['fc7-1'], num_units=487, nonlinearity=None)
    net['prob']   = NonlinearityLayer(net['fc8-1'], softmax)

    return net
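build_model only defines the architecture; Example #12 below loads pretrained C3D weights from a pickle file, and the same pattern might be applied here (a sketch; the file path and parameter ordering are assumptions carried over from that example):

import pickle
import lasagne

net = build_model()
with open('data/c3d_model.pkl', 'rb') as f:
    model = pickle.load(f)                       # list of parameter arrays
lasagne.layers.set_all_param_values(net['prob'], model, trainable=True)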
Example #6
    def layer(self, input_layer, pool_size, stride, pad):
        try:
            from lasagne.layers.dnn import MaxPool3DDNNLayer
        except ImportError:
            pytest.skip("cuDNN not available")

        return MaxPool3DDNNLayer(
            input_layer,
            pool_size=pool_size,
            stride=stride,
            pad=pad,
        )
Example #7
def define_network(inputs):

    network = lasagne.layers.InputLayer(shape=(None, params.CHANNELS, params.INPUT_SIZE, params.INPUT_SIZE, params.INPUT_SIZE),
                                input_var=inputs)

    network = Conv3DDNNLayer(
            network, num_filters=64, filter_size=(5, 5, 5),
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu'))

    network = MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))

    if params.BATCH_NORMALIZATION:
        network = lasagne.layers.batch_norm(network)

    network = Conv3DDNNLayer(
            network, num_filters=64, filter_size=(5, 5, 5),
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu'))

    network = Conv3DDNNLayer(
            network, num_filters=96, filter_size=(5, 5, 5),
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu'))

    if params.BATCH_NORMALIZATION:
        network = lasagne.layers.batch_norm(network)

    network = lasagne.layers.DenseLayer(
            network,
            num_units=420,
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu')
    )

    network = lasagne.layers.DenseLayer(
            network, num_units=params.N_CLASSES,
            nonlinearity=lasagne.nonlinearities.softmax)

    return network
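define_network reads its hyperparameters from an external params module that the excerpt does not show. A hypothetical stand-in, with the attribute names taken from the example and purely illustrative values:

class params:
    CHANNELS = 1               # input channels per voxel grid (assumed)
    INPUT_SIZE = 32            # cubic input edge length (assumed)
    N_CLASSES = 2              # number of output classes (assumed)
    BATCH_NORMALIZATION = True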
Example #8
def build_res_V1(input_var, batch_size):

    net = {}

    net['input'] = InputLayer((batch_size, 4, None, None, None),
                              input_var=input_var)
    net['conv1a'] = batch_norm(
        Conv3DDNNLayer(net['input'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['conv1b'] = batch_norm(
        Conv3DDNNLayer(net['conv1a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    net['conv1c'] = Conv3DDNNLayer(net['conv1b'],
                                   num_filters=64,
                                   filter_size=(3, 3, 3),
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=None)
    net['pool1'] = MaxPool3DDNNLayer(net['conv1b'],
                                     pool_size=(2, 2, 2))  # 80,80,16

    # Residual 2
    net['res2'] = BatchNormLayer(net['conv1c'])
    net['res2'] = NonlinearityLayer(net['res2'], nonlinearity=rectify)
    net['res2'] = batch_norm(
        Conv3DDNNLayer(net['res2'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res2'] = Conv3DDNNLayer(net['res2'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res2'] = ElemwiseSumLayer([net['res2'], net['conv1c']])

    # Residual 3
    net['res3'] = BatchNormLayer(net['res2'])
    net['res3'] = NonlinearityLayer(net['res3'], nonlinearity=rectify)
    net['res3'] = batch_norm(
        Conv3DDNNLayer(net['res3'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res3'] = Conv3DDNNLayer(net['res3'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res3'] = ElemwiseSumLayer([net['res3'], net['res2']])

    net['bn3'] = BatchNormLayer(net['res3'])
    net['relu3'] = NonlinearityLayer(net['bn3'], nonlinearity=rectify)

    net['conv3a'] = Conv3DDNNLayer(net['relu3'],
                                   num_filters=64,
                                   filter_size=(3, 3, 3),
                                   stride=(2, 2, 1),
                                   pad='same',
                                   nonlinearity=None)
    net['pool2'] = MaxPool3DDNNLayer(net['relu3'],
                                     pool_size=(2, 2, 1))  # 40,40,16

    # Residual 4
    net['res4'] = BatchNormLayer(net['conv3a'])
    net['res4'] = NonlinearityLayer(net['res4'], nonlinearity=rectify)
    net['res4'] = batch_norm(
        Conv3DDNNLayer(net['res4'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res4'] = Conv3DDNNLayer(net['res4'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res4'] = ElemwiseSumLayer([net['res4'], net['conv3a']])

    # Residual 5
    net['res5'] = BatchNormLayer(net['res4'])
    net['res5'] = NonlinearityLayer(net['res5'], nonlinearity=rectify)
    net['res5'] = batch_norm(
        Conv3DDNNLayer(net['res5'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res5'] = Conv3DDNNLayer(net['res5'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res5'] = ElemwiseSumLayer([net['res5'], net['res4']])

    net['bn5'] = BatchNormLayer(net['res5'])
    net['relu5'] = NonlinearityLayer(net['bn5'], nonlinearity=rectify)
    net['conv5a'] = Conv3DDNNLayer(net['relu5'],
                                   num_filters=64,
                                   filter_size=(3, 3, 3),
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=None)

    # Residual 6
    net['res6'] = BatchNormLayer(net['conv5a'])
    net['res6'] = NonlinearityLayer(net['res6'], nonlinearity=rectify)
    net['res6'] = batch_norm(
        Conv3DDNNLayer(net['res6'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res6'] = Conv3DDNNLayer(net['res6'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res6'] = ElemwiseSumLayer([net['res6'], net['conv5a']])

    # Residual 7
    net['res7'] = BatchNormLayer(net['res6'])
    net['res7'] = NonlinearityLayer(net['res7'], nonlinearity=rectify)
    net['res7'] = batch_norm(
        Conv3DDNNLayer(net['res7'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res7'] = Conv3DDNNLayer(net['res7'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res7'] = ElemwiseSumLayer([net['res7'], net['res6']])

    net['bn7'] = BatchNormLayer(net['res7'])
    net['relu7'] = NonlinearityLayer(net['bn7'], nonlinearity=rectify)

    net['conv8'] = batch_norm(
        Conv3DDNNLayer(net['relu7'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    # upscale 1
    net['upscale1'] = Upscale3DLayer(net['conv8'],
                                     scale_factor=(2, 2, 2),
                                     mode='repeat')
    net['concat1'] = ConcatLayer([net['pool2'], net['upscale1']])
    net['upconv1a'] = batch_norm(
        Conv3DDNNLayer(net['concat1'],
                       64, (1, 1, 1),
                       pad='same',
                       nonlinearity=rectify))
    net['upconv1b'] = batch_norm(
        Conv3DDNNLayer(net['upconv1a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    # upscale 2
    net['upscale2'] = Upscale3DLayer(net['upconv1b'],
                                     scale_factor=(2, 2, 1),
                                     mode='repeat')
    net['concat2'] = ConcatLayer([net['pool1'], net['upscale2']])
    net['upconv2a'] = batch_norm(
        Conv3DDNNLayer(net['concat2'],
                       64, (1, 1, 1),
                       pad='same',
                       nonlinearity=rectify))
    net['upconv2b'] = batch_norm(
        Conv3DDNNLayer(net['upconv2a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    # upscale 3
    net['upscale3'] = Upscale3DLayer(net['upconv2b'],
                                     scale_factor=(2, 2, 2),
                                     mode='repeat')
    net['upconv3a'] = batch_norm(
        Conv3DDNNLayer(net['upscale3'],
                       64, (1, 1, 1),
                       pad='same',
                       nonlinearity=rectify))
    net['upconv3b'] = batch_norm(
        Conv3DDNNLayer(net['upconv3a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    net['output'] = batch_norm(
        Conv3DDNNLayer(net['upconv3b'],
                       2, (3, 3, 3),
                       pad='same',
                       nonlinearity=None))

    params = lasagne.layers.get_all_params(net['output'], trainable=True)
    l2_penalty = regularize_network_params(net['output'], l2)

    return net, params, l2_penalty
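build_res_V1 returns the raw pieces; a sketch of how they might be combined into a training objective, assuming voxel-wise integer labels and a hypothetical weight_decay factor (the softmax/cross-entropy choice is ours, since the output layer is linear):

import theano.tensor as T
import lasagne

dtensor5   = T.TensorType('float32', (False,)*5)
input_var  = dtensor5('inputs')
target_var = T.itensor4('targets')               # integer class label per voxel

net, params, l2_penalty = build_res_V1(input_var, batch_size=4)
prediction = lasagne.layers.get_output(net['output'])   # (b, 2, d, h, w), linear

# Move the class axis last, flatten to (voxels, 2), softmax, then cross-entropy
flat  = prediction.dimshuffle(0, 2, 3, 4, 1).reshape((-1, 2))
probs = T.nnet.softmax(flat)
loss  = lasagne.objectives.categorical_crossentropy(probs, target_var.flatten()).mean()
weight_decay = 1e-4                                     # hypothetical
loss = loss + weight_decay * l2_penalty
updates = lasagne.updates.adam(loss, params)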
Example #9
net = {}
net['input'] = InputLayer(
    (None, img_channels, clip_length, H_net_input, W_net_input))
net['mask'] = InputLayer((None, num_steps))

# ----------- 1st layer group ---------------
net['conv1a'] = Conv3DDNNLayer(net['input'],
                               64, (3, 3, 3),
                               pad=1,
                               nonlinearity=lasagne.nonlinearities.rectify,
                               flip_filters=if_flip_filters,
                               W=lasagne.init.Normal(std=0.01),
                               b=lasagne.init.Constant(0.))
net['pool1'] = MaxPool3DDNNLayer(net['conv1a'],
                                 pool_size=(1, 2, 2),
                                 stride=(1, 2, 2))

# ------------- 2nd layer group --------------
net['conv2a'] = Conv3DDNNLayer(net['pool1'],
                               128, (3, 3, 3),
                               pad=1,
                               nonlinearity=lasagne.nonlinearities.rectify,
                               flip_filters=if_flip_filters,
                               W=lasagne.init.Normal(std=0.01),
                               b=lasagne.init.Constant(1.))
net['pool2'] = MaxPool3DDNNLayer(net['conv2a'],
                                 pool_size=(2, 2, 2),
                                 stride=(2, 2, 2))

# ----------------- 3rd layer group --------------
Example #10
def build_model(input_var=None,
                batch_size=2,
                use_cpu_compatible=theano.config.device == 'cpu'):
    '''
    Builds Video2GIF model

    @param input_var:
    @param batch_size:
    @param use_cpu_compatible: use CPU compatible layers (i.e. no cuDNN). Default for theano device CPU; otherwise False
    @return: A dictionary containing the network layers, where the output layer is at key 'score'
    '''
    net = {}
    net['input'] = InputLayer((batch_size, 3, 16, 112, 112),
                              input_var=input_var)
    if use_cpu_compatible:
        '''
        Slow implementation running on CPU
        Test snip scores: [-0.08948517, -0.01212098]; Time: 11s
        '''
        print('Use slow network implementation (without cuDNN)')
        # ----------- 1st layer group ---------------
        # Pad first, as this layer doesn't support padding
        net['pad1'] = PadLayer(net['input'], width=1, batch_ndim=2)
        net['conv1a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad1'],
            64, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=True)
        #        net['pool1']  = lasagne.layers.pool.Pool3Layer(net['conv1a'],pool_size=(1,2,2),stride=(1,2,2))
        net['pool1'] = lasagne.layers.Pool3DLayer(net['conv1a'],
                                                  pool_size=(1, 2, 2),
                                                  stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        net['pad2'] = PadLayer(net['pool1'], width=1, batch_ndim=2)
        net['conv2a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad2'],
            128, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        #        net['pool2']  = lasagne.layers.pool.Pool3Layer(net['conv2a'],pool_size=(2,2,2),stride=(2,2,2))
        net['pool2'] = lasagne.layers.Pool3DLayer(net['conv2a'],
                                                  pool_size=(2, 2, 2),
                                                  stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        net['pad3a'] = PadLayer(net['pool2'], width=1, batch_ndim=2)
        net['conv3a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad3a'],
            256, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pad3b'] = PadLayer(net['conv3a'], width=1, batch_ndim=2)
        net['conv3b'] = lasagne.layers.conv.Conv3DLayer(
            net['pad3b'],
            256, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        #        net['pool3']  = lasagne.layers.pool.Pool3Layer(net['conv3b'],pool_size=(2,2,2),stride=(2,2,2))
        net['pool3'] = lasagne.layers.Pool3DLayer(net['conv3b'],
                                                  pool_size=(2, 2, 2),
                                                  stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        net['pad4a'] = PadLayer(net['pool3'], width=1, batch_ndim=2)
        net['conv4a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad4a'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pad4b'] = PadLayer(net['conv4a'], width=1, batch_ndim=2)
        net['conv4b'] = lasagne.layers.conv.Conv3DLayer(
            net['pad4b'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        #        net['pool4']  = lasagne.layers.pool.Pool3Layer(net['conv4b'],pool_size=(2,2,2),stride=(2,2,2))
        net['pool4'] = lasagne.layers.Pool3DLayer(net['conv4b'],
                                                  pool_size=(2, 2, 2),
                                                  stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        net['pad5a'] = PadLayer(net['pool4'], width=1, batch_ndim=2)
        net['conv5a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad5a'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pad5b'] = PadLayer(net['conv5a'], width=1, batch_ndim=2)
        net['conv5b'] = lasagne.layers.conv.Conv3DLayer(
            net['pad5b'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)

        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        net['pad'] = PadLayer(net['conv5b'],
                              width=[(0, 1), (0, 1)],
                              batch_ndim=3)
        #        net['pool5']  = lasagne.layers.pool.Pool3Layer(net['pad'],pool_size=(2,2,2),pad=(0,0,0),stride=(2,2,2))
        net['pool5'] = lasagne.layers.Pool3DLayer(net['pad'],
                                                  pool_size=(2, 2, 2),
                                                  pad=(0, 0, 0),
                                                  stride=(2, 2, 2))
        net['fc6-1'] = DenseLayer(net['pool5'],
                                  num_units=4096,
                                  nonlinearity=lasagne.nonlinearities.rectify)

    else:
        '''
        Fast implementation running on GPU
        Test snip scores:[-0.08948528,-0.01212097]; Time: 0.33s
        '''
        print('Use fast network implementation (cuDNN)')
        # ----------- 1st layer group ---------------
        net['conv1a'] = Conv3DDNNLayer(
            net['input'],
            64, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False)
        net['pool1'] = MaxPool3DDNNLayer(net['conv1a'],
                                         pool_size=(1, 2, 2),
                                         stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        net['conv2a'] = Conv3DDNNLayer(
            net['pool1'],
            128, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pool2'] = MaxPool3DDNNLayer(net['conv2a'],
                                         pool_size=(2, 2, 2),
                                         stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        net['conv3a'] = Conv3DDNNLayer(
            net['pool2'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['conv3b'] = Conv3DDNNLayer(
            net['conv3a'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pool3'] = MaxPool3DDNNLayer(net['conv3b'],
                                         pool_size=(2, 2, 2),
                                         stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        net['conv4a'] = Conv3DDNNLayer(
            net['pool3'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['conv4b'] = Conv3DDNNLayer(
            net['conv4a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pool4'] = MaxPool3DDNNLayer(net['conv4b'],
                                         pool_size=(2, 2, 2),
                                         stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        net['conv5a'] = Conv3DDNNLayer(
            net['pool4'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['conv5b'] = Conv3DDNNLayer(
            net['conv5a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        net['pad'] = PadLayer(net['conv5b'],
                              width=[(0, 1), (0, 1)],
                              batch_ndim=3)
        net['pool5'] = MaxPool3DDNNLayer(net['pad'],
                                         pool_size=(2, 2, 2),
                                         pad=(0, 0, 0),
                                         stride=(2, 2, 2))
        net['fc6-1'] = DenseLayer(net['pool5'],
                                  num_units=4096,
                                  nonlinearity=lasagne.nonlinearities.rectify)

    net['h1'] = DenseLayer(net['fc6-1'],
                           num_units=512,
                           nonlinearity=lasagne.nonlinearities.rectify)
    net['h2'] = DenseLayer(net['h1'],
                           num_units=128,
                           nonlinearity=lasagne.nonlinearities.rectify)
    net['score'] = DenseLayer(net['h2'], num_units=1, nonlinearity=None)

    return net
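A sketch of turning the returned dictionary into a callable scoring function; the variable names and deterministic pass are our choices, not from the source:

import theano
import theano.tensor as T
import lasagne

dtensor5  = T.TensorType('float32', (False,)*5)
input_var = dtensor5('clips')

net = build_model(input_var=input_var, batch_size=2)
score = lasagne.layers.get_output(net['score'], deterministic=True)
score_fn = theano.function([input_var], score, allow_input_downcast=True)
# score_fn(batch) with batch of shape (2, 3, 16, 112, 112) yields one score per snip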
Example #11
import theano
import theano.tensor as T
from lasagne.init import Orthogonal
from lasagne.layers import (InputLayer, DimshuffleLayer, DropoutLayer,
                            DenseLayer, LSTMLayer, ConcatLayer, batch_norm,
                            get_output, get_all_params)
from lasagne.layers.dnn import Conv3DDNNLayer, MaxPool3DDNNLayer
from lasagne.nonlinearities import leaky_rectify
from lasagne.objectives import squared_error
from lasagne.updates import adam

def get_model():

    dtensor4 = T.TensorType('float32', (False,)*4)
    input_var = dtensor4('inputs')
    dtensor2 = T.TensorType('float32', (False,)*2)
    target_var = dtensor2('targets')

    # input layer with unspecified batch size
    layer_input     = InputLayer(shape=(None, 30, 64, 64), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer_0         = DimshuffleLayer(layer_input, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution + batch norm + leaky ReLU, then max-pooling and dropout
    layer_1         = batch_norm(Conv3DDNNLayer(incoming=layer_0, num_filters=64, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify, W=Orthogonal()))
    layer_2         = MaxPool3DDNNLayer(layer_1, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_3         = DropoutLayer(layer_2, p=0.25)

    # Convolution + batch norm + leaky ReLU, then max-pooling and dropout
    layer_4         = batch_norm(Conv3DDNNLayer(incoming=layer_3, num_filters=128, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify, W=Orthogonal()))
    layer_5         = MaxPool3DDNNLayer(layer_4, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_6         = DropoutLayer(layer_5, p=0.25)

    # Convolution + batch norm + leaky ReLU, then max-pooling and dropout
    layer_7         = batch_norm(Conv3DDNNLayer(incoming=layer_6, num_filters=256, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify, W=Orthogonal()))
    layer_8         = MaxPool3DDNNLayer(layer_7, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_9         = DropoutLayer(layer_8, p=0.25)
    
    # Recurrent layer
    layer_10         = DimshuffleLayer(layer_9, (0,2,1,3,4))
    layer_11         = LSTMLayer(layer_10, num_units=612, hid_init=Orthogonal(), only_return_final=False)

    # Output Layer
    layer_systole    = DenseLayer(layer_11, 600, nonlinearity=leaky_rectify, W=Orthogonal())
    layer_diastole   = DenseLayer(layer_11, 600, nonlinearity=leaky_rectify, W=Orthogonal())
    layer_systole_1  = DropoutLayer(layer_systole, p=0.3)
    layer_diastole_1 = DropoutLayer(layer_diastole, p=0.3)

    layer_systole_2   = DenseLayer(layer_systole_1, 1, nonlinearity=None, W=Orthogonal())
    layer_diastole_2  = DenseLayer(layer_diastole_1, 1, nonlinearity=None, W=Orthogonal())
    layer_output      = ConcatLayer([layer_systole_2, layer_diastole_2])

    # Loss
    prediction           = get_output(layer_output) 
    loss                 = squared_error(prediction, target_var)
    loss                 = loss.mean()

    # Updates: Adam (RMSProp / Nesterov-momentum alternatives commented out below)
    params               = get_all_params(layer_output, trainable=True)
    updates              = adam(loss, params)
    #updates_0            = rmsprop(loss, params)
    #updates              = apply_nesterov_momentum(updates_0, params)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_output, deterministic=True)
    test_loss            = squared_error(test_prediction, target_var)
    test_loss            = test_loss.mean()

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn             = theano.function([input_var, target_var], loss, updates=updates, allow_input_downcast=True)

    # Compile a second function computing the validation loss and accuracy
    val_fn               = theano.function([input_var, target_var], test_loss, allow_input_downcast=True)

    # Compile a third function computing the prediction
    predict_fn           = theano.function([input_var], test_prediction, allow_input_downcast=True)

    return [layer_output, train_fn, val_fn, predict_fn]
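A sketch of driving the compiled functions; iterate_minibatches and the X/y arrays are hypothetical placeholders, not part of the source:

network, train_fn, val_fn, predict_fn = get_model()
for epoch in range(10):
    train_err, batches = 0.0, 0
    for inputs, targets in iterate_minibatches(X_train, y_train, batch_size=8):
        train_err += train_fn(inputs, targets)
        batches += 1
    print("epoch %d: training loss %.4f" % (epoch + 1, train_err / batches))
print("validation loss %.4f" % val_fn(X_val, y_val))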
Example #12
    def __init__(self, input, emb_layer='fc7-1', **kwargs):
        """Initialize the parameters

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
        architecture (one minibatch)

        :type emb_layer: str
        :param emb_layer: key of the network layer whose flattened output is
        exposed as the embedding
        """

        self.hasSupervised = False
        self.hasUnsupervised = False

        self.net = {}

        self.net['input'] = InputLayer((None, 3, 16, 112, 112),
                                       input_var=input)

        # ----------- 1st layer group ---------------
        self.net['conv1a'] = Conv3DDNNLayer(
            self.net['input'],
            64, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False)
        self.net['pool1'] = MaxPool3DDNNLayer(self.net['conv1a'],
                                              pool_size=(1, 2, 2),
                                              stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        self.net['conv2a'] = Conv3DDNNLayer(
            self.net['pool1'],
            128, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool2'] = MaxPool3DDNNLayer(self.net['conv2a'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        self.net['conv3a'] = Conv3DDNNLayer(
            self.net['pool2'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv3b'] = Conv3DDNNLayer(
            self.net['conv3a'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool3'] = MaxPool3DDNNLayer(self.net['conv3b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        self.net['conv4a'] = Conv3DDNNLayer(
            self.net['pool3'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv4b'] = Conv3DDNNLayer(
            self.net['conv4a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool4'] = MaxPool3DDNNLayer(self.net['conv4b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        self.net['conv5a'] = Conv3DDNNLayer(
            self.net['pool4'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv5b'] = Conv3DDNNLayer(
            self.net['conv5a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        self.net['pad'] = PadLayer(self.net['conv5b'],
                                   width=[(0, 1), (0, 1)],
                                   batch_ndim=3)
        self.net['pool5'] = MaxPool3DDNNLayer(self.net['pad'],
                                              pool_size=(2, 2, 2),
                                              pad=(0, 0, 0),
                                              stride=(2, 2, 2))
        self.net['fc6-1'] = DenseLayer(
            self.net['pool5'],
            num_units=4096,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['fc7-1'] = DenseLayer(
            self.net['fc6-1'],
            num_units=4096,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['fc8-1'] = DenseLayer(self.net['fc7-1'],
                                       num_units=487,
                                       nonlinearity=None)
        self.net['prob'] = NonlinearityLayer(self.net['fc8-1'], softmax)

        self.embedding = lasagne.layers.get_output(
            self.net[emb_layer]).flatten(ndim=2)

        with open('data/c3d_model.pkl', 'rb') as f:
            model = pickle.load(f)
        lasagne.layers.set_all_param_values(self.net['prob'],
                                            model,
                                            trainable=True)
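The constructor exposes self.embedding as a symbolic expression; a sketch of compiling it into a feature extractor (the class name C3DEmbedder and the input shape are assumptions, since the excerpt does not show the class declaration):

import theano
import theano.tensor as T

dtensor5  = T.TensorType('float32', (False,)*5)
input_var = dtensor5('clips')

model = C3DEmbedder(input_var, emb_layer='fc7-1')   # hypothetical class name
extract_fn = theano.function([input_var], model.embedding,
                             allow_input_downcast=True)
# extract_fn(clips) -> (n, 4096) fc7 features for clips of shape (n, 3, 16, 112, 112)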
Example #13
    def __init__(self, input_var=None, empty=False, rectified_fc_layers=False):
        '''
        Builds C3D model

        Returns
        -------
        dict
            A dictionary containing the network layers, where the output layer is at key 'prob'
        '''
        self.net = {}

        if empty:
            return

        self.net['input'] = InputLayer((None, 3, 16, 112, 112),
                                       input_var=input_var)

        # ----------- 1st layer group ---------------
        self.net['conv1a'] = Conv3DDNNLayer(
            self.net['input'],
            64, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False)
        self.net['pool1'] = MaxPool3DDNNLayer(self.net['conv1a'],
                                              pool_size=(1, 2, 2),
                                              stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        self.net['conv2a'] = Conv3DDNNLayer(
            self.net['pool1'],
            128, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool2'] = MaxPool3DDNNLayer(self.net['conv2a'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        self.net['conv3a'] = Conv3DDNNLayer(
            self.net['pool2'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv3b'] = Conv3DDNNLayer(
            self.net['conv3a'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool3'] = MaxPool3DDNNLayer(self.net['conv3b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        self.net['conv4a'] = Conv3DDNNLayer(
            self.net['pool3'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv4b'] = Conv3DDNNLayer(
            self.net['conv4a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool4'] = MaxPool3DDNNLayer(self.net['conv4b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        self.net['conv5a'] = Conv3DDNNLayer(
            self.net['pool4'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv5b'] = Conv3DDNNLayer(
            self.net['conv5a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        self.net['pad'] = PadLayer(self.net['conv5b'],
                                   width=[(0, 1), (0, 1)],
                                   batch_ndim=3)
        self.net['pool5'] = MaxPool3DDNNLayer(self.net['pad'],
                                              pool_size=(2, 2, 2),
                                              pad=(0, 0, 0),
                                              stride=(2, 2, 2))

        self.fc_activation = lasagne.nonlinearities.rectify if rectified_fc_layers else lasagne.nonlinearities.tanh

        self.net['fc6-1'] = DenseLayer(self.net['pool5'],
                                       num_units=4096,
                                       nonlinearity=self.fc_activation,
                                       W=lasagne.init.GlorotUniform(gain=0.05))
        self.net['fc7-1'] = DenseLayer(self.net['fc6-1'],
                                       num_units=4096,
                                       nonlinearity=self.fc_activation,
                                       W=lasagne.init.GlorotUniform(gain=0.05))
        print "FC6 has norm %f" % numpy.linalg.norm(
            self.net['fc6-1'].W.get_value(), 'fro')
        print "FC7 has norm %f" % numpy.linalg.norm(
            self.net['fc7-1'].W.get_value(), 'fro')
Example #14
    def replicate_model(self, input_var=None, num_layers_unshared=0):
        '''
        Builds C3D model

        num_layers_unshared = 0 means all layers are shared
        num_layers_unshared = 1 means fc7 is not shared
        num_layers_unshared = 2 means fc7 and fc6 are not shared
        ... and so on and so forth

        Returns
        -------
        dict
            A dictionary containing the network layers, where the output layer is at key 'prob'
        '''

        out = C3DModel(empty=True)

        out.net['input'] = InputLayer((None, 3, 16, 112, 112),
                                      input_var=input_var)

        # ----------- 1st layer group ---------------
        if num_layers_unshared >= 10:
            out.net['conv1a'] = Conv3DDNNLayer(
                out.net['input'],
                64, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                flip_filters=False)
        else:
            out.net['conv1a'] = Conv3DDNNLayer(
                out.net['input'],
                64, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                flip_filters=False,
                W=self.net['conv1a'].W,
                b=self.net['conv1a'].b)

        out.net['pool1'] = MaxPool3DDNNLayer(out.net['conv1a'],
                                             pool_size=(1, 2, 2),
                                             stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        if num_layers_unshared >= 9:
            out.net['conv2a'] = Conv3DDNNLayer(
                out.net['pool1'],
                128, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv2a'] = Conv3DDNNLayer(
                out.net['pool1'],
                128, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv2a'].W,
                b=self.net['conv2a'].b)

        out.net['pool2'] = MaxPool3DDNNLayer(out.net['conv2a'],
                                             pool_size=(2, 2, 2),
                                             stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        if num_layers_unshared >= 8:
            out.net['conv3a'] = Conv3DDNNLayer(
                out.net['pool2'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv3a'] = Conv3DDNNLayer(
                out.net['pool2'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv3a'].W,
                b=self.net['conv3a'].b)

        if num_layers_unshared >= 7:
            out.net['conv3b'] = Conv3DDNNLayer(
                out.net['conv3a'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv3b'] = Conv3DDNNLayer(
                out.net['conv3a'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv3b'].W,
                b=self.net['conv3b'].b)

        out.net['pool3'] = MaxPool3DDNNLayer(out.net['conv3b'],
                                             pool_size=(2, 2, 2),
                                             stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        if num_layers_unshared >= 6:
            out.net['conv4a'] = Conv3DDNNLayer(
                out.net['pool3'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv4a'] = Conv3DDNNLayer(
                out.net['pool3'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv4a'].W,
                b=self.net['conv4a'].b)

        if num_layers_unshared >= 5:
            out.net['conv4b'] = Conv3DDNNLayer(
                out.net['conv4a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv4b'] = Conv3DDNNLayer(
                out.net['conv4a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv4b'].W,
                b=self.net['conv4b'].b)

        out.net['pool4'] = MaxPool3DDNNLayer(out.net['conv4b'],
                                             pool_size=(2, 2, 2),
                                             stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        if num_layers_unshared >= 4:
            out.net['conv5a'] = Conv3DDNNLayer(
                out.net['pool4'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv5a'] = Conv3DDNNLayer(
                out.net['pool4'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv5a'].W,
                b=self.net['conv5a'].b)

        if num_layers_unshared >= 3:
            out.net['conv5b'] = Conv3DDNNLayer(
                out.net['conv5a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv5b'] = Conv3DDNNLayer(
                out.net['conv5a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv5b'].W,
                b=self.net['conv5b'].b)

        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        out.net['pad'] = PadLayer(out.net['conv5b'],
                                  width=[(0, 1), (0, 1)],
                                  batch_ndim=3)
        out.net['pool5'] = MaxPool3DDNNLayer(out.net['pad'],
                                             pool_size=(2, 2, 2),
                                             pad=(0, 0, 0),
                                             stride=(2, 2, 2))

        # ----------------- Fully Connected Layers ------------------
        if num_layers_unshared >= 2:
            out.net['fc6-1'] = DenseLayer(
                out.net['pool5'],
                num_units=4096,
                nonlinearity=self.fc_activation,
                W=lasagne.init.GlorotUniform(gain=0.05))
            print "FC6 has norm %f" % numpy.linalg.norm(
                out.net['fc6-1'].W.get_value(), 'fro')
        else:
            out.net['fc6-1'] = DenseLayer(out.net['pool5'],
                                          num_units=4096,
                                          nonlinearity=self.fc_activation,
                                          W=self.net['fc6-1'].W,
                                          b=self.net['fc6-1'].b)

        if num_layers_unshared >= 1:
            out.net['fc7-1'] = DenseLayer(
                out.net['fc6-1'],
                num_units=4096,
                nonlinearity=self.fc_activation,
                W=lasagne.init.GlorotUniform(gain=0.05))
            print "FC7 has norm %f" % numpy.linalg.norm(
                out.net['fc7-1'].W.get_value(), 'fro')
        else:
            out.net['fc7-1'] = DenseLayer(out.net['fc6-1'],
                                          num_units=4096,
                                          nonlinearity=self.fc_activation,
                                          W=self.net['fc7-1'].W,
                                          b=self.net['fc7-1'].b)

    #    if num_layers_unshared >= 1:
    #        out.net['fc8-1']  = DenseLayer(out.net['fc7-1'], num_units=487, nonlinearity=None)
    #    else:
    #        out.net['fc8-1']  = DenseLayer(out.net['fc7-1'], num_units=487, nonlinearity=None
    #                                   , W = self.net['fc8-1'].W, b = self.net['fc8-1'].b)
    #    out.net['prob']  = NonlinearityLayer(out.net['fc8-1'], softmax)

        return out
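A sketch of pairing the base model with a partially unshared replica, e.g. for a siamese setup; the variable names and tensor construction are ours, and C3DModel is the class from Example #13:

import theano.tensor as T

dtensor5 = T.TensorType('float32', (False,)*5)
left, right = dtensor5('left'), dtensor5('right')

base = C3DModel(input_var=left)
twin = base.replicate_model(input_var=right,
                            num_layers_unshared=1)  # fc7 gets fresh weights; all else shared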