Example #1
def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer     = InputLayer(shape=(None, 30, 64, 64), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer     = DimshuffleLayer(layer, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution + batch normalisation + activation blocks (no pooling or dropout in this variant)
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=1, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer_prediction  = layer

    # Loss
    prediction           = get_output(layer_prediction)
    loss                 = categorical_crossentropy(prediction.flatten(), target_var.flatten())  # NB: categorical_crossentropy expects probabilities, but the rectify output above is not normalised

    # Updates: Stochastic Gradient Descent (SGD) with Nesterov momentum (built by the caller; see the sketch below)
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True)
    test_loss            = categorical_crossentropy(test_prediction.flatten(), target_var.flatten())

    return test_prediction, prediction, loss, params
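
The comments in get_model mention Nesterov-momentum SGD and a deterministic test pass, but the snippet neither builds the updates nor compiles anything. A minimal sketch of how the returned expressions are typically wired up (variable names and hyperparameters here are illustrative, not from the original):

import theano
import theano.tensor as T
import lasagne

input_var  = T.tensor4('inputs')    # (batch, 30, 64, 64)
target_var = T.tensor4('targets')

test_prediction, prediction, loss, params = get_model(input_var, target_var, None)

cost = loss.mean()  # categorical_crossentropy returns per-element losses
updates = lasagne.updates.nesterov_momentum(cost, params,
                                            learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var], cost, updates=updates)
val_fn   = theano.function([input_var, target_var], test_prediction)
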
def construct_unet_3D(channels=1, no_f_base=8, f_size=3, branches=(2, 2, 2, 2), dropout=0.2, bs=None,
                      class_nums=2, pad="same", nonlinearity=lasagne.nonlinearities.rectify,
                      input_dim=(None, None, None), useups=False):

    net= InputLayer((bs, channels, input_dim[0], input_dim[1], input_dim[2]))

    # Moving downwards the U-shape:
    horizontal_pass=[]
    for i in xrange(len(branches)):
        net = conv_pool_down_3D(net,no_f_base*2**(i),f_size,conv_depth=branches[i],
                             pad=pad,nonlinearity=nonlinearity,dropout=dropout)
        print "Down conv: ",net.output_shape
        horizontal_pass.append(net)
        net = MaxPool3DDNNLayer(net,pool_size=(2,2,2),stride=(2,2,2))
        print "Down Pool: ",net.output_shape

    # Bottleneck
    net = Conv3DDNNLayer(net,no_f_base*2**len(branches),f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    print "Bottleneck conv: ",net.output_shape
    net = Conv3DDNNLayer(net,no_f_base*2**len(branches),f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    print "Bottleneck conv: ",net.output_shape
    #net = Conv3DDNNTransposeLayer(net, no_f_base*2**(len(branches)-1), 2, (2, 2, 2))
    if not useups:
        net = TransposedConv3DLayer(net,no_f_base*2**(len(branches)-1),2,(2,2,2))
    else:
        net = upscale_plus_conv_3D(net,no_f_base*2**(len(branches)-1),f_size,pad,nonlinearity)
    print "Bottleneck up: ",net.output_shape

    # Moving upwards the U-shape:
    for i in xrange(len(branches)):
        print "Pass before concat: ",horizontal_pass[-(i+1)].output_shape
        print "net before concat: ",net.output_shape
        # Centre-cropped skip concatenation (identical for both useups settings)
        net = ConcatLayer([net,horizontal_pass[-(i+1)]],cropping=(None,None,"center","center","center"))
        print "Shape after concat: ",net.output_shape
        # halt (skip further upsampling) on the last branch; useups is hard-coded False for the decoder
        net = conv_pool_up_3D(net,bs,no_f_base*2**(len(branches)-1-i),f_size,
                       pad=pad,nonlinearity=nonlinearity,conv_depth=branches[i],
                       halt=(i == len(branches)-1),useups=False)
        print "Conv up: ",net.output_shape
    # Class layer: Work around standard softmax bc. it doesn't work with tensor4/3.
    # Hence, we reshape and feed it to an external Nonlinearity layer.
    # net["class_ns"] is the output in image-related shape.
    imageout = net  = Conv3DDNNLayer(net, class_nums, 1, nonlinearity=linear,W=lasagne.init.HeNormal(gain='relu'))
    print "imageout shape: ",net.output_shape
    net  = DimshuffleLayer(net, (1, 0, 2, 3, 4))
    print "After shuffle shape: ",net.output_shape
    net  = ReshapeLayer(net, (class_nums, -1))
    print "Reshape shape: ",net.output_shape
    net  = DimshuffleLayer(net, (1, 0))
    print "Dimshuffle shape: ",net.output_shape
    # Flattened output to be able to feed it to lasagne.objectives.categorical_crossentropy.
    net  = NonlinearityLayer(net, nonlinearity=lasagne.nonlinearities.softmax)
    #imout = NonlinearityLayer(imageout,nonlinearity=lasagne.nonlinearities.softmax)
    return net,imageout
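
As the comment above explains, the reshape/dimshuffle sequence turns the image-shaped scores into a (voxels, class_nums) matrix so that the standard softmax and categorical_crossentropy apply row-wise. A sketch of the pairing (illustrative):

import theano.tensor as T
import lasagne

net, imageout = construct_unet_3D(input_dim=(64, 64, 64))
prediction = lasagne.layers.get_output(net)   # (N*D*H*W, class_nums), softmax per voxel
targets = T.ivector('voxel_labels')           # flattened integer labels, one per voxel
loss = lasagne.objectives.categorical_crossentropy(prediction, targets).mean()
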
def build_net():
    """Method for VGG like net Building.

    Returns
    -------
    nn : lasagne.layer
        Network.
    """
    nn = {}
    nn['input'] = InputLayer(inp_shape, input_var=input_var)

    nn['conv1a'] = Conv3DDNNLayer(nn['input'], 8, 3)
    nn['conv1b'] = Conv3DDNNLayer(nn['conv1a'], 8, 3, nonlinearity=identity)
    nn['nl1'] = NonlinearityLayer(nn['conv1b'])
    nn['pool1'] = Pool3DDNNLayer(nn['nl1'], 2)

    nn['conv2a'] = Conv3DDNNLayer(nn['pool1'], 16, 3)
    nn['conv2b'] = Conv3DDNNLayer(nn['conv2a'], 16, 3, nonlinearity=identity)
    nn['nl2'] = NonlinearityLayer(nn['conv2b'])
    nn['pool2'] = Pool3DDNNLayer(nn['nl2'], 2)

    nn['conv3a'] = Conv3DDNNLayer(nn['pool2'], 32, 3)
    nn['conv3b'] = Conv3DDNNLayer(nn['conv3a'], 32, 3)
    nn['conv3c'] = Conv3DDNNLayer(nn['conv3b'], 32, 3, nonlinearity=identity)
    nn['nl3'] = NonlinearityLayer(nn['conv3c'])
    nn['pool3'] = Pool3DDNNLayer(nn['nl3'], 2)

    nn['conv4a'] = Conv3DDNNLayer(nn['pool3'], 64, 3)
    nn['conv4b'] = Conv3DDNNLayer(nn['conv4a'], 64, 3)
    nn['conv4c'] = Conv3DDNNLayer(nn['conv4b'], 64, 3, nonlinearity=identity)
    nn['nl4'] = NonlinearityLayer(nn['conv4c'])
    nn['pool4'] = Pool3DDNNLayer(nn['nl4'], 2)

    nn['dense1'] = DenseLayer(nn['pool4'], num_units=128)
    nn['bn'] = BatchNormLayer(nn['dense1'])
    nn['dropout'] = DropoutLayer(nn['bn'], p=0.7)

    nn['dense2'] = DenseLayer(nn['dropout'], num_units=64)

    #nn['pool4'] = GlobalPoolLayer(nn['nl4'])

    nn['prob'] = DenseLayer(nn['dense2'],
                            num_units=2,
                            nonlinearity=lasagne.nonlinearities.softmax)
    return nn
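
build_net reads inp_shape and input_var from the enclosing scope; they are never defined in this snippet. A plausible setup (shapes are assumptions):

import theano.tensor as T

inp_shape = (None, 1, 64, 64, 64)                         # (batch, channels, D, H, W)
input_var = T.TensorType('float32', (False,)*5)('input')  # 5D volumetric input
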
def conv_pool_up_3D(net, bs, no_f_base,f_size,conv_depth,pad,nonlinearity,halt=False,useups=False):
    for i in xrange(conv_depth):
        net = Conv3DDNNLayer(net,no_f_base,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    if not halt:
        #net = Conv3DDNNTransposeLayer(net,no_f_base/2,2,(2,2,2))
        if useups:
            net = upscale_plus_conv_3D(net,no_f_base/2,f_size,pad,nonlinearity)
        else:
            net = TransposedConv3DLayer(net,no_f_base/2,2,(2,2,2))
    return net
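
construct_unet_3D also calls conv_pool_down_3D and upscale_plus_conv_3D, which are not included in this example. Sketches consistent with the call sites and with conv_pool_up_3D above (assumptions, not the original helpers):

def conv_pool_down_3D(net, no_f_base, f_size, conv_depth=2, pad="same",
                      nonlinearity=lasagne.nonlinearities.rectify, dropout=0.0):
    # Convolution stack for one encoder branch; the caller applies the max-pooling.
    for _ in range(conv_depth):
        net = Conv3DDNNLayer(net, no_f_base, f_size, pad=pad, nonlinearity=nonlinearity,
                             W=lasagne.init.HeNormal(gain='relu'))
    if dropout:
        net = DropoutLayer(net, p=dropout)
    return net

def upscale_plus_conv_3D(net, no_f_base, f_size, pad, nonlinearity):
    # Upsample-then-convolve alternative to the transposed convolution.
    net = Upscale3DLayer(net, scale_factor=2, mode='repeat')
    net = Conv3DDNNLayer(net, no_f_base, f_size, pad=pad, nonlinearity=nonlinearity,
                         W=lasagne.init.HeNormal(gain='relu'))
    return net
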
Example #5
def define_network(inputs):

    network = lasagne.layers.InputLayer(shape=(None, params.CHANNELS, params.INPUT_SIZE, params.INPUT_SIZE, params.INPUT_SIZE),
                                input_var=inputs)

    network = Conv3DDNNLayer(
            network, num_filters=64, filter_size=(5, 5, 5),
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu'))

    network = MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))

    if params.BATCH_NORMALIZATION:
        network = lasagne.layers.batch_norm(network)

    network = Conv3DDNNLayer(
            network, num_filters=64, filter_size=(5, 5, 5),
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu'))

    network = Conv3DDNNLayer(
            network, num_filters=96, filter_size=(5, 5, 5),
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu'))

    if params.BATCH_NORMALIZATION:
        network = lasagne.layers.batch_norm(network)

    network = lasagne.layers.DenseLayer(
            network,
            num_units=420,
            nonlinearity=lasagne.nonlinearities.leaky_rectify,
            W=HeNormal(gain='relu')
    )

    network = lasagne.layers.DenseLayer(
            network, num_units=params.N_CLASSES,
            nonlinearity=lasagne.nonlinearities.softmax)

    return network
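
define_network expects a params module/object; a minimal stand-in (attribute names taken from the references above, values illustrative):

class params:
    CHANNELS = 1
    INPUT_SIZE = 32
    BATCH_NORMALIZATION = True
    N_CLASSES = 2
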
Example #6
def get_model(input_var, target_var, multiply_var):

    # input layer with unspecified batch size
    layer_input     = InputLayer(shape=(None, 30, 80, 80), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer_0         = DimshuffleLayer(layer_input, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution + batch norm + activation, then padded max-pooling followed by dropout
    layer_1         = batch_norm(Conv3DDNNLayer(incoming=layer_0, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_2         = batch_norm(Conv3DDNNLayer(incoming=layer_1, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_3         = MaxPool3DDNNLayer(layer_2, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_4         = DropoutLayer(layer_3, p=0.25)

    # Convolution + batch norm + activation, then padded max-pooling followed by dropout
    layer_5         = batch_norm(Conv3DDNNLayer(incoming=layer_4, num_filters=32, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_6         = batch_norm(Conv3DDNNLayer(incoming=layer_5, num_filters=32, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_7         = MaxPool3DDNNLayer(layer_6, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_8         = DropoutLayer(layer_7, p=0.25)
    
    # Convolution + batch norm + activation, then padded max-pooling followed by dropout
    layer_9         = batch_norm(Conv3DDNNLayer(incoming=layer_8, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_10        = batch_norm(Conv3DDNNLayer(incoming=layer_9, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_11        = batch_norm(Conv3DDNNLayer(incoming=layer_10, num_filters=64, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=leaky_rectify))
    layer_12        = MaxPool3DDNNLayer(layer_11, pool_size=(2, 2, 2), stride=(2, 2, 2), pad=(1, 1, 1))
    layer_13        = DropoutLayer(layer_12, p=0.25)

    # LSTM over the temporal axis (moved to the sequence position by the dimshuffle)
    layer         = DimshuffleLayer(layer_13, (0,2,1,3,4))
#    layer_prediction  = LSTMLayer(layer, num_units=2, only_return_final=True, learn_init=True, cell=Gate(linear))
    layer = LSTMLayer(layer, num_units=2, only_return_final=True, learn_init=True)
    layer_prediction = DenseLayer(layer, 2, nonlinearity=linear)

    # Output Layer
    # layer_hidden         = DenseLayer(layer_flatten, 500, nonlinearity=linear)
    # layer_prediction     = DenseLayer(layer_hidden, 2, nonlinearity=linear)

    # Loss
    prediction           = get_output(layer_prediction) / multiply_var**2
    loss                 = T.abs_(prediction - target_var)
    loss                 = loss.mean()

    # Updates: Stochastic Gradient Descent (SGD) with Nesterov momentum (built by the caller; see the sketch below)
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True) / multiply_var**2
    test_loss            = T.abs_(test_prediction - target_var)
    test_loss            = test_loss.mean()

    # crps estimate
    crps                 = T.abs_(test_prediction - target_var).mean()/600
    
    return test_prediction, crps, loss, params
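
Unlike Example #1, this variant rescales predictions by multiply_var**2 and returns a CRPS-style mean absolute error, so the compiled functions need that variable as an extra input. A sketch (multiply_var is assumed scalar here; it could equally be per-sample):

import theano
import theano.tensor as T
import lasagne

input_var    = T.tensor4('inputs')    # (batch, 30, 80, 80)
target_var   = T.matrix('targets')    # (batch, 2)
multiply_var = T.scalar('scale')

test_prediction, crps, loss, params = get_model(input_var, target_var, multiply_var)
updates  = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var, multiply_var], loss, updates=updates)
eval_fn  = theano.function([input_var, target_var, multiply_var], crps)
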
Example #7
def build_model():
    '''
    Builds C3D model

    Returns
    -------
    dict
        A dictionary containing the network layers, where the output layer is at key 'prob'
    '''
    net = {}
    net['input'] = InputLayer((None, 3, 16, 112, 112))

    # ----------- 1st layer group ---------------
    net['conv1a'] = Conv3DDNNLayer(net['input'], 64, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify,flip_filters=False)
    net['pool1']  = MaxPool3DDNNLayer(net['conv1a'],pool_size=(1,2,2),stride=(1,2,2))

    # ------------- 2nd layer group --------------
    net['conv2a'] = Conv3DDNNLayer(net['pool1'], 128, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    net['pool2']  = MaxPool3DDNNLayer(net['conv2a'],pool_size=(2,2,2),stride=(2,2,2))

    # ----------------- 3rd layer group --------------
    net['conv3a'] = Conv3DDNNLayer(net['pool2'], 256, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    net['conv3b'] = Conv3DDNNLayer(net['conv3a'], 256, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    net['pool3']  = MaxPool3DDNNLayer(net['conv3b'],pool_size=(2,2,2),stride=(2,2,2))

    # ----------------- 4th layer group --------------
    net['conv4a'] = Conv3DDNNLayer(net['pool3'], 512, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    net['conv4b'] = Conv3DDNNLayer(net['conv4a'], 512, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    net['pool4']  = MaxPool3DDNNLayer(net['conv4b'],pool_size=(2,2,2),stride=(2,2,2))

    # ----------------- 5th layer group --------------
    net['conv5a'] = Conv3DDNNLayer(net['pool4'], 512, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    net['conv5b'] = Conv3DDNNLayer(net['conv5a'], 512, (3,3,3), pad=1,nonlinearity=lasagne.nonlinearities.rectify)
    # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
    net['pad']    = PadLayer(net['conv5b'],width=[(0,1),(0,1)], batch_ndim=3)
    net['pool5']  = MaxPool3DDNNLayer(net['pad'],pool_size=(2,2,2),pad=(0,0,0),stride=(2,2,2))
    net['fc6-1']  = DenseLayer(net['pool5'], num_units=4096,nonlinearity=lasagne.nonlinearities.rectify)
    net['fc7-1']  = DenseLayer(net['fc6-1'], num_units=4096,nonlinearity=lasagne.nonlinearities.rectify)
    net['fc8-1']  = DenseLayer(net['fc7-1'], num_units=487, nonlinearity=None)
    net['prob']  = NonlinearityLayer(net['fc8-1'], softmax)

    return net
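
The PadLayer before pool5 implements C3D's right-only padding: width=[(0,1),(0,1)] appends a single zero at the end of the last two spatial axes, and batch_ndim=3 leaves the batch, channel and temporal axes untouched. A quick shape check (illustrative):

net = build_model()
print(net['conv5b'].output_shape)   # (None, 512, 2, 7, 7)
print(net['pad'].output_shape)      # (None, 512, 2, 8, 8): zeros appended on the right of H and W
print(net['pool5'].output_shape)    # (None, 512, 1, 4, 4)
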
Example #8
def Bilinear_3DInterpolation(incoming,
                             upscale_factor,
                             untie_biases=False,
                             nonlinearity=None,
                             pad='same'):
    """ 3Dunpool + 3DConv with fixed filters 
    In order to support multi-channel bilinear interpolation without extra effort, we can simply reshape it into 1-channel feature maps
    before do the interpolation followed with another reshape Layer.
    """
    unpooledLayer = Upscale3DLayer(
        incoming, upscale_factor, mode='dilate'
    )  # new api from lasagne, Unpool3DLayer(incoming, upscale_factor) # old API
    k_size = upscale_factor // 2 * 2 + 1  # smallest odd kernel size covering the upscale factor (integer division)

    unpooledLayer_1channel = ReshapeLayer(unpooledLayer,
                                          shape=(-1, 1) +
                                          unpooledLayer.output_shape[-3:])
    deconvedLayer = Conv3DDNNLayer(unpooledLayer_1channel,1,(k_size,k_size,k_size),nonlinearity=nonlinearity,\
                                   untie_biases=untie_biases,pad=pad,b=None,W=__W_5D__(k_size))
    # Freeze the fixed interpolation kernel so training never updates it.
    deconvedLayer.params[deconvedLayer.W].remove('trainable')

    return ReshapeLayer(deconvedLayer,
                        shape=(-1, ) + unpooledLayer.output_shape[1:])
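
__W_5D__ is referenced above but not defined in this example; it must return a fixed kernel of shape (1, 1, k_size, k_size, k_size) that interpolates over the zeros inserted by the 'dilate' upscaling. A plausible implementation (an assumption, not the original code):

import numpy as np

def __W_5D__(k_size):
    # 1D triangular (linear-interpolation) profile, e.g. k_size=3 -> [0.5, 1.0, 0.5]
    c = (k_size - 1) / 2.0
    profile = 1.0 - np.abs(np.arange(k_size) - c) / (c + 1.0)
    # Separable outer product along the three spatial axes.
    kernel = profile[:, None, None] * profile[None, :, None] * profile[None, None, :]
    return kernel[None, None].astype('float32')   # (1, 1, k, k, k)
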
Example #9
def build_res_V1(input_var, batch_size):

    net = {}

    net['input'] = InputLayer((batch_size, 4, None, None, None),
                              input_var=input_var)
    net['conv1a'] = batch_norm(
        Conv3DDNNLayer(net['input'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['conv1b'] = batch_norm(
        Conv3DDNNLayer(net['conv1a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    net['conv1c'] = Conv3DDNNLayer(net['conv1b'],
                                   num_filters=64,
                                   filter_size=(3, 3, 3),
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=None)
    net['pool1'] = MaxPool3DDNNLayer(net['conv1b'],
                                     pool_size=(2, 2, 2))  # 80,80,16

    # Residual 2
    net['res2'] = BatchNormLayer(net['conv1c'])
    net['res2'] = NonlinearityLayer(net['res2'], nonlinearity=rectify)
    net['res2'] = batch_norm(
        Conv3DDNNLayer(net['res2'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res2'] = Conv3DDNNLayer(net['res2'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res2'] = ElemwiseSumLayer([net['res2'], net['conv1c']])

    # Residual 3
    net['res3'] = BatchNormLayer(net['res2'])
    net['res3'] = NonlinearityLayer(net['res3'], nonlinearity=rectify)
    net['res3'] = batch_norm(
        Conv3DDNNLayer(net['res3'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res3'] = Conv3DDNNLayer(net['res3'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res3'] = ElemwiseSumLayer([net['res3'], net['res2']])

    net['bn3'] = BatchNormLayer(net['res3'])
    net['relu3'] = NonlinearityLayer(net['bn3'], nonlinearity=rectify)

    net['conv3a'] = Conv3DDNNLayer(net['relu3'],
                                   num_filters=64,
                                   filter_size=(3, 3, 3),
                                   stride=(2, 2, 1),
                                   pad='same',
                                   nonlinearity=None)
    net['pool2'] = MaxPool3DDNNLayer(net['relu3'],
                                     pool_size=(2, 2, 1))  # 40,40,16

    # Residual 4
    net['res4'] = BatchNormLayer(net['conv3a'])
    net['res4'] = NonlinearityLayer(net['res4'], nonlinearity=rectify)
    net['res4'] = batch_norm(
        Conv3DDNNLayer(net['res4'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res4'] = Conv3DDNNLayer(net['res4'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res4'] = ElemwiseSumLayer([net['res4'], net['conv3a']])

    # Residual 5
    net['res5'] = BatchNormLayer(net['res4'])
    net['res5'] = NonlinearityLayer(net['res5'], nonlinearity=rectify)
    net['res5'] = batch_norm(
        Conv3DDNNLayer(net['res5'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res5'] = Conv3DDNNLayer(net['res5'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res5'] = ElemwiseSumLayer([net['res5'], net['res4']])

    net['bn5'] = BatchNormLayer(net['res5'])
    net['relu5'] = NonlinearityLayer(net['bn5'], nonlinearity=rectify)
    net['conv5a'] = Conv3DDNNLayer(net['relu5'],
                                   num_filters=64,
                                   filter_size=(3, 3, 3),
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=None)

    # Residual 6
    net['res6'] = BatchNormLayer(net['conv5a'])
    net['res6'] = NonlinearityLayer(net['res6'], nonlinearity=rectify)
    net['res6'] = batch_norm(
        Conv3DDNNLayer(net['res6'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res6'] = Conv3DDNNLayer(net['res6'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res6'] = ElemwiseSumLayer([net['res6'], net['conv5a']])

    # Residual 7
    net['res7'] = BatchNormLayer(net['res6'])
    net['res7'] = NonlinearityLayer(net['res7'], nonlinearity=rectify)
    net['res7'] = batch_norm(
        Conv3DDNNLayer(net['res7'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))
    net['res7'] = Conv3DDNNLayer(net['res7'],
                                 num_filters=64,
                                 filter_size=(3, 3, 3),
                                 pad='same',
                                 nonlinearity=None)
    net['res7'] = ElemwiseSumLayer([net['res7'], net['res6']])

    net['bn7'] = BatchNormLayer(net['res7'])
    net['relu7'] = NonlinearityLayer(net['bn7'], nonlinearity=rectify)

    net['conv8'] = batch_norm(
        Conv3DDNNLayer(net['relu7'],
                       num_filters=64,
                       filter_size=(3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    # upscale 1
    net['upscale1'] = Upscale3DLayer(net['conv8'],
                                     scale_factor=(2, 2, 2),
                                     mode='repeat')
    net['concat1'] = ConcatLayer([net['pool2'], net['upscale1']])
    net['upconv1a'] = batch_norm(
        Conv3DDNNLayer(net['concat1'],
                       64, (1, 1, 1),
                       pad='same',
                       nonlinearity=rectify))
    net['upconv1b'] = batch_norm(
        Conv3DDNNLayer(net['upconv1a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    # upscale 2
    net['upscale2'] = Upscale3DLayer(net['upconv1b'],
                                     scale_factor=(2, 2, 1),
                                     mode='repeat')
    net['concat2'] = ConcatLayer([net['pool1'], net['upscale2']])
    net['upconv2a'] = batch_norm(
        Conv3DDNNLayer(net['concat2'],
                       64, (1, 1, 1),
                       pad='same',
                       nonlinearity=rectify))
    net['upconv2b'] = batch_norm(
        Conv3DDNNLayer(net['upconv2a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    # upscale 3
    net['upscale3'] = Upscale3DLayer(net['upconv2b'],
                                     scale_factor=(2, 2, 2),
                                     mode='repeat')
    net['upconv3a'] = batch_norm(
        Conv3DDNNLayer(net['upscale3'],
                       64, (1, 1, 1),
                       pad='same',
                       nonlinearity=rectify))
    net['upconv3b'] = batch_norm(
        Conv3DDNNLayer(net['upconv3a'],
                       64, (3, 3, 3),
                       pad='same',
                       nonlinearity=rectify))

    net['output'] = batch_norm(
        Conv3DDNNLayer(net['upconv3b'],
                       2, (3, 3, 3),
                       pad='same',
                       nonlinearity=None))

    params = lasagne.layers.get_all_params(net['output'], trainable=True)
    l2_penalty = regularize_network_params(net['output'], l2)

    return net, params, l2_penalty
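
Residual blocks 2 through 7 above all repeat one pre-activation pattern (BN, ReLU, conv+BN, conv, sum with the shortcut). A helper along these lines (a sketch, not part of the original) expresses each block in one call:

def residual_block_3D(incoming, num_filters=64):
    # Pre-activation residual block: BN -> ReLU -> conv(+BN) -> conv -> add shortcut.
    res = BatchNormLayer(incoming)
    res = NonlinearityLayer(res, nonlinearity=rectify)
    res = batch_norm(Conv3DDNNLayer(res, num_filters=num_filters, filter_size=(3, 3, 3),
                                    pad='same', nonlinearity=rectify))
    res = Conv3DDNNLayer(res, num_filters=num_filters, filter_size=(3, 3, 3),
                         pad='same', nonlinearity=None)
    return ElemwiseSumLayer([res, incoming])

# e.g. net['res2'] = residual_block_3D(net['conv1c'])
#      net['res3'] = residual_block_3D(net['res2'])
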
Example #10
from lasagne.layers import InputLayer, DenseLayer, NonlinearityLayer, DropoutLayer, ReshapeLayer, LSTMLayer, GRULayer
from lasagne.layers.shape import PadLayer
from lasagne.layers.dnn import Conv3DDNNLayer, MaxPool3DDNNLayer
from lasagne.nonlinearities import softmax
from lasagne.init import Orthogonal, HeNormal, GlorotNormal

net = {}
net['input'] = InputLayer(
    (None, img_channels, clip_length, H_net_input, W_net_input))
net['mask'] = InputLayer((None, num_steps))

# ----------- 1st layer group ---------------
net['conv1a'] = Conv3DDNNLayer(net['input'],
                               64, (3, 3, 3),
                               pad=1,
                               nonlinearity=lasagne.nonlinearities.rectify,
                               flip_filters=if_flip_filters,
                               W=lasagne.init.Normal(std=0.01),
                               b=lasagne.init.Constant(0.))
net['pool1'] = MaxPool3DDNNLayer(net['conv1a'],
                                 pool_size=(1, 2, 2),
                                 stride=(1, 2, 2))

# ------------- 2nd layer group --------------
net['conv2a'] = Conv3DDNNLayer(net['pool1'],
                               128, (3, 3, 3),
                               pad=1,
                               nonlinearity=lasagne.nonlinearities.rectify,
                               flip_filters=if_flip_filters,
                               W=lasagne.init.Normal(std=0.01),
                               b=lasagne.init.Constant(1.))
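
This module-level snippet stops after conv2a and reads several globals that are not defined here; plausible stand-ins (all values are assumptions):

img_channels, clip_length = 3, 16      # RGB clips of 16 frames, as in the C3D examples above
H_net_input, W_net_input = 112, 112
num_steps = 16                         # length of the mask sequence
if_flip_filters = False                # cuDNN convolutions correlate rather than flip
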
Example #11
def __1viewPair_SurfaceNet__(input_var_5D, input_var_shape = (None,3*2)+(64,)*3,\
        N_predicts_perGroup = 6):
    """
    Maps the 5D input (N_cubePair, 2 rgb, h, w, d) of coloured cube pairs
    to a predicted occupancy probability map (N_cubePair, 1, h, w, d).
    """
    input_var = input_var_5D
    net={}
    net["input"] = lasagne.layers.InputLayer(input_var_shape, input_var)
    input_chunk_len = input_var.shape[0] // N_predicts_perGroup  # integer division; note: unused below

    conv_nonlinearity = lasagne.nonlinearities.rectify
    nonlinearity_sigmoid = lasagne.nonlinearities.sigmoid

    #---------------------------    
    net["conv1_1"] = batch_norm(Conv3DDNNLayer(net["input"],32,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv1_2"] = batch_norm(Conv3DDNNLayer(net["conv1_1"],32,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv1_3"] = batch_norm(Conv3DDNNLayer(net["conv1_2"],32,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))

    net["pool1"] = Pool3DDNNLayer(net["conv1_3"], (2,2,2), stride=2)
    net["side_op1"] = batch_norm(Conv3DDNNLayer(net["conv1_3"],16,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same'))
    net["side_op1_deconv"] = net["side_op1"]

    #---------------------------
    net["conv2_1"] = batch_norm(Conv3DDNNLayer(net["pool1"],80,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv2_2"] = batch_norm(Conv3DDNNLayer(net["conv2_1"],80,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv2_3"] = batch_norm(Conv3DDNNLayer(net["conv2_2"],80,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))

    net["pool2"] = Pool3DDNNLayer(net["conv2_3"], (2,2,2), stride=2)  
    net["side_op2"] = batch_norm(Conv3DDNNLayer(net["conv2_3"],16,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same'))
    net["side_op2_deconv"] = Bilinear_3DInterpolation(net["side_op2"], upscale_factor=2, untie_biases=False, nonlinearity=None, pad='same')
                                                    
    #---------------------------
    net["conv3_1"] = batch_norm(Conv3DDNNLayer(net["pool2"],160,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv3_2"] = batch_norm(Conv3DDNNLayer(net["conv3_1"],160,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["conv3_3"] = batch_norm(Conv3DDNNLayer(net["conv3_2"],160,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same') )

    ##pool3 = Pool3DDNNLayer(conv3_3, (2,2,2), stride=2)  
    net["side_op3"] = batch_norm(Conv3DDNNLayer(net["conv3_3"],16,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same'))
    net["side_op3_deconv"] = Bilinear_3DInterpolation(net["side_op3"], upscale_factor=4, untie_biases=False, nonlinearity=None, pad='same')
    
    #---------------------------
    # DilatedConv3DLayer does not support padding, so pad by 2 beforehand to
    # keep 'same'-sized outputs for a 3x3x3 kernel at dilation (2,2,2).
    net["conv3_3_pad"] = PadLayer(net["conv3_3"], width=2, val=0, batch_ndim=2)
    net["conv4_1"] = batch_norm(DilatedConv3DLayer(net["conv3_3_pad"],300,(3,3,3),dilation=(2,2,2),nonlinearity=conv_nonlinearity,untie_biases=False))
    net["conv4_1_pad"] = PadLayer(net["conv4_1"], width=2, val=0, batch_ndim=2)
    net["conv4_2"] = batch_norm(DilatedConv3DLayer(net["conv4_1_pad"],300,(3,3,3),dilation=(2,2,2),nonlinearity=conv_nonlinearity,untie_biases=False))
    net["conv4_2_pad"] = PadLayer(net["conv4_2"], width=2, val=0, batch_ndim=2)
    net["conv4_3"] = batch_norm(DilatedConv3DLayer(net["conv4_2_pad"],300,(3,3,3),dilation=(2,2,2),nonlinearity=conv_nonlinearity,untie_biases=False) )
    net["conv4_3_pad"] = PadLayer(net["conv4_3"], width=0, val=0, batch_ndim=2)
    net["side_op4"] = batch_norm(DilatedConv3DLayer(net["conv4_3_pad"],16,(1,1,1),dilation=(2,2,2),nonlinearity=nonlinearity_sigmoid,untie_biases=False))
    net["side_op4_deconv"] = Bilinear_3DInterpolation(net["side_op4"], upscale_factor=4, untie_biases=False, nonlinearity=None, pad='same')
                                
    #---------------------------
    net["fuse_side_outputs"] = ConcatLayer([net["side_op1_deconv"],net["side_op2_deconv"],net["side_op3_deconv"],net["side_op4_deconv"]], axis=1)
    net["merge_conv"] = batch_norm(Conv3DDNNLayer(net["fuse_side_outputs"],100,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["merge_conv"] = batch_norm(Conv3DDNNLayer(net["merge_conv"],100,(3,3,3),nonlinearity=conv_nonlinearity,untie_biases=False,pad='same'))
    net["merge_conv3"] = batch_norm(Conv3DDNNLayer(net["merge_conv"],1,(1,1,1),nonlinearity=nonlinearity_sigmoid,untie_biases=False,pad='same')) # linear output for regression
    net["output_SurfaceNet"] = net["merge_conv3"]
    return net
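
A sketch of driving the builder above (names illustrative; the input is the 5D cube-pair tensor described in the docstring, and Bilinear_3DInterpolation is the helper from Example #8):

import theano.tensor as T
import lasagne

input_var_5D = T.TensorType('float32', (False,)*5)('cube_pairs')
net = __1viewPair_SurfaceNet__(input_var_5D)
occupancy = lasagne.layers.get_output(net["output_SurfaceNet"])   # (N_cubePair, 1, h, w, d)
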
Example #12
def build_model(input_var=None,
                batch_size=2,
                use_cpu_compatible=theano.config.device == 'cpu'):
    '''
    Builds Video2GIF model

    @param input_var:
    @param batch_size:
    @param use_cpu_compatible: use CPU compatible layers (i.e. no cuDNN). Default for theano device CPU; otherwise False
    @return: A dictionary containing the network layers, where the output layer is at key 'score'
    '''
    net = {}
    net['input'] = InputLayer((batch_size, 3, 16, 112, 112),
                              input_var=input_var)
    if use_cpu_compatible:
        '''
        Slow implementation running on CPU
        Test snip scores: [-0.08948517, -0.01212098]; Time: 11s
        '''
        print('Use slow network implementation (without cuDNN)')
        # ----------- 1st layer group ---------------
        # Pad first, as this layer doesn't support padding
        net['pad1'] = PadLayer(net['input'], width=1, batch_ndim=2)
        net['conv1a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad1'],
            64, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=True)
        #        net['pool1']  = lasagne.layers.pool.Pool3Layer(net['conv1a'],pool_size=(1,2,2),stride=(1,2,2))
        net['pool1'] = lasagne.layers.Pool3DLayer(net['conv1a'],
                                                  pool_size=(1, 2, 2),
                                                  stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        net['pad2'] = PadLayer(net['pool1'], width=1, batch_ndim=2)
        net['conv2a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad2'],
            128, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        #        net['pool2']  = lasagne.layers.pool.Pool3Layer(net['conv2a'],pool_size=(2,2,2),stride=(2,2,2))
        net['pool2'] = lasagne.layers.Pool3DLayer(net['conv2a'],
                                                  pool_size=(2, 2, 2),
                                                  stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        net['pad3a'] = PadLayer(net['pool2'], width=1, batch_ndim=2)
        net['conv3a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad3a'],
            256, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pad3b'] = PadLayer(net['conv3a'], width=1, batch_ndim=2)
        net['conv3b'] = lasagne.layers.conv.Conv3DLayer(
            net['pad3b'],
            256, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        #        net['pool3']  = lasagne.layers.pool.Pool3Layer(net['conv3b'],pool_size=(2,2,2),stride=(2,2,2))
        net['pool3'] = lasagne.layers.Pool3DLayer(net['conv3b'],
                                                  pool_size=(2, 2, 2),
                                                  stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        net['pad4a'] = PadLayer(net['pool3'], width=1, batch_ndim=2)
        net['conv4a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad4a'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pad4b'] = PadLayer(net['conv4a'], width=1, batch_ndim=2)
        net['conv4b'] = lasagne.layers.conv.Conv3DLayer(
            net['pad4b'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        #        net['pool4']  = lasagne.layers.pool.Pool3Layer(net['conv4b'],pool_size=(2,2,2),stride=(2,2,2))
        net['pool4'] = lasagne.layers.Pool3DLayer(net['conv4b'],
                                                  pool_size=(2, 2, 2),
                                                  stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        net['pad5a'] = PadLayer(net['pool4'], width=1, batch_ndim=2)
        net['conv5a'] = lasagne.layers.conv.Conv3DLayer(
            net['pad5a'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pad5b'] = PadLayer(net['conv5a'], width=1, batch_ndim=2)
        net['conv5b'] = lasagne.layers.conv.Conv3DLayer(
            net['pad5b'],
            512, (3, 3, 3),
            pad=0,
            nonlinearity=lasagne.nonlinearities.rectify)

        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        net['pad'] = PadLayer(net['conv5b'],
                              width=[(0, 1), (0, 1)],
                              batch_ndim=3)
        #        net['pool5']  = lasagne.layers.pool.Pool3Layer(net['pad'],pool_size=(2,2,2),pad=(0,0,0),stride=(2,2,2))
        net['pool5'] = lasagne.layers.Pool3DLayer(net['pad'],
                                                  pool_size=(2, 2, 2),
                                                  pad=(0, 0, 0),
                                                  stride=(2, 2, 2))
        net['fc6-1'] = DenseLayer(net['pool5'],
                                  num_units=4096,
                                  nonlinearity=lasagne.nonlinearities.rectify)

    else:
        '''
        Fast implementation running on GPU
        Test snip scores:[-0.08948528,-0.01212097]; Time: 0.33s
        '''
        print('Use fast network implementation (cuDNN)')
        # ----------- 1st layer group ---------------
        net['conv1a'] = Conv3DDNNLayer(
            net['input'],
            64, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False)
        net['pool1'] = MaxPool3DDNNLayer(net['conv1a'],
                                         pool_size=(1, 2, 2),
                                         stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        net['conv2a'] = Conv3DDNNLayer(
            net['pool1'],
            128, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pool2'] = MaxPool3DDNNLayer(net['conv2a'],
                                         pool_size=(2, 2, 2),
                                         stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        net['conv3a'] = Conv3DDNNLayer(
            net['pool2'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['conv3b'] = Conv3DDNNLayer(
            net['conv3a'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pool3'] = MaxPool3DDNNLayer(net['conv3b'],
                                         pool_size=(2, 2, 2),
                                         stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        net['conv4a'] = Conv3DDNNLayer(
            net['pool3'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['conv4b'] = Conv3DDNNLayer(
            net['conv4a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['pool4'] = MaxPool3DDNNLayer(net['conv4b'],
                                         pool_size=(2, 2, 2),
                                         stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        net['conv5a'] = Conv3DDNNLayer(
            net['pool4'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        net['conv5b'] = Conv3DDNNLayer(
            net['conv5a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        net['pad'] = PadLayer(net['conv5b'],
                              width=[(0, 1), (0, 1)],
                              batch_ndim=3)
        net['pool5'] = MaxPool3DDNNLayer(net['pad'],
                                         pool_size=(2, 2, 2),
                                         pad=(0, 0, 0),
                                         stride=(2, 2, 2))
        net['fc6-1'] = DenseLayer(net['pool5'],
                                  num_units=4096,
                                  nonlinearity=lasagne.nonlinearities.rectify)

    net['h1'] = DenseLayer(net['fc6-1'],
                           num_units=512,
                           nonlinearity=lasagne.nonlinearities.rectify)
    net['h2'] = DenseLayer(net['h1'],
                           num_units=128,
                           nonlinearity=lasagne.nonlinearities.rectify)
    net['score'] = DenseLayer(net['h2'], num_units=1, nonlinearity=None)

    return net
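
A sketch of compiling the snippet scorer from the returned dictionary (illustrative):

import theano
import theano.tensor as T
import lasagne

input_var = T.TensorType('float32', (False,)*5)('snips')   # (batch, 3, 16, 112, 112)
net = build_model(input_var=input_var, batch_size=2)
score = lasagne.layers.get_output(net['score'], deterministic=True)
score_fn = theano.function([input_var], score)
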
Example #13
    def __init__(self, input, emb_layer='fc7-1', **kwargs):
        """Initialize the parameters

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
        architecture (one minibatch)

        .....................
        .
        ..
        ...
        ....
        """

        self.hasSupervised = False
        self.hasUnsupervised = False

        self.net = {}

        self.net['input'] = InputLayer((None, 3, 16, 112, 112),
                                       input_var=input)

        # ----------- 1st layer group ---------------
        self.net['conv1a'] = Conv3DDNNLayer(
            self.net['input'],
            64, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False)
        self.net['pool1'] = MaxPool3DDNNLayer(self.net['conv1a'],
                                              pool_size=(1, 2, 2),
                                              stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        self.net['conv2a'] = Conv3DDNNLayer(
            self.net['pool1'],
            128, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool2'] = MaxPool3DDNNLayer(self.net['conv2a'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        self.net['conv3a'] = Conv3DDNNLayer(
            self.net['pool2'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv3b'] = Conv3DDNNLayer(
            self.net['conv3a'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool3'] = MaxPool3DDNNLayer(self.net['conv3b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        self.net['conv4a'] = Conv3DDNNLayer(
            self.net['pool3'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv4b'] = Conv3DDNNLayer(
            self.net['conv4a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool4'] = MaxPool3DDNNLayer(self.net['conv4b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        self.net['conv5a'] = Conv3DDNNLayer(
            self.net['pool4'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv5b'] = Conv3DDNNLayer(
            self.net['conv5a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        self.net['pad'] = PadLayer(self.net['conv5b'],
                                   width=[(0, 1), (0, 1)],
                                   batch_ndim=3)
        self.net['pool5'] = MaxPool3DDNNLayer(self.net['pad'],
                                              pool_size=(2, 2, 2),
                                              pad=(0, 0, 0),
                                              stride=(2, 2, 2))
        self.net['fc6-1'] = DenseLayer(
            self.net['pool5'],
            num_units=4096,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['fc7-1'] = DenseLayer(
            self.net['fc6-1'],
            num_units=4096,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['fc8-1'] = DenseLayer(self.net['fc7-1'],
                                       num_units=487,
                                       nonlinearity=None)
        self.net['prob'] = NonlinearityLayer(self.net['fc8-1'], softmax)

        self.embedding = lasagne.layers.get_output(
            self.net[emb_layer]).flatten(ndim=2)

        with open('data/c3d_model.pkl', 'rb') as f:
            model = pickle.load(f)
        lasagne.layers.set_all_param_values(self.net['prob'],
                                            model,
                                            trainable=True)
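
The constructor ends by loading pretrained C3D weights and exposing the chosen layer as self.embedding; compiling a feature extractor might look like this (the class name is not shown in the snippet and is assumed):

import theano
import theano.tensor as T

input_var = T.TensorType('float32', (False,)*5)('clips')   # (batch, 3, 16, 112, 112)
model = C3DEmbedder(input_var)                             # hypothetical name for the class above
embed_fn = theano.function([input_var], model.embedding)   # rows of fc7-1 features
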
Example #14
def cascade_model(options):
    """
    3D cascade model using Nolearn and Lasagne
    
    Inputs:
    - options: model options dictionary (modalities, training split, epochs,
      patience, weight_paths, experiment name, patch_size, ...)

    Output:
    - nets = list of NeuralNets (CNN1, CNN2)
    """

    # model options
    channels = len(options['modalities'])
    train_split_perc = options['train_split']
    num_epochs = options['max_epochs']
    max_epochs_patience = options['patience']

    # save model to disk to re-use it. Create an experiment folder
    # organize experiment
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'])):
        os.mkdir(os.path.join(options['weight_paths'], options['experiment']))
    if not os.path.exists(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets')):
        os.mkdir(
            os.path.join(options['weight_paths'], options['experiment'],
                         'nets'))

    # --------------------------------------------------
    # first model
    # --------------------------------------------------

    layer1 = InputLayer(name='in1',
                        shape=(None, channels) + options['patch_size'])
    layer1 = batch_norm_dnn(Conv3DDNNLayer(layer1,
                                           name='conv1_1',
                                           num_filters=32,
                                           filter_size=3,
                                           pad='same'),
                            name='BN1')
    layer1 = Pool3DDNNLayer(layer1,
                            name='maxpool_1',
                            mode='max',
                            pool_size=2,
                            stride=2)
    layer1 = batch_norm_dnn(Conv3DDNNLayer(layer1,
                                           name='conv2_1',
                                           num_filters=64,
                                           filter_size=3,
                                           pad='same'),
                            name='BN2')
    layer1 = Pool3DDNNLayer(layer1,
                            name='maxpool_2',
                            mode='max',
                            pool_size=2,
                            stride=2)
    layer1 = DropoutLayer(layer1, name='l2drop', p=0.5)
    layer1 = DenseLayer(layer1, name='d_1', num_units=256)
    layer1 = DenseLayer(layer1,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_1'
    net_weights = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '.pkl')
    net_history = os.path.join(options['weight_paths'], options['experiment'],
                               'nets', net_model + '_history.pkl')

    net1 = NeuralNet(
        layers=layer1,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights, only_best=True, pickle=False),
            SaveTrainingHistory(net_history),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    # --------------------------------------------------
    # second model
    # --------------------------------------------------

    layer2 = InputLayer(name='in2',
                        shape=(None, channels) + options['patch_size'])
    layer2 = batch_norm_dnn(Conv3DDNNLayer(layer2,
                                           name='conv1_1',
                                           num_filters=32,
                                           filter_size=3,
                                           pad='same'),
                            name='BN1')
    layer2 = Pool3DDNNLayer(layer2,
                            name='maxpool_1',
                            mode='max',
                            pool_size=2,
                            stride=2)
    layer2 = batch_norm_dnn(Conv3DDNNLayer(layer2,
                                           name='conv2_1',
                                           num_filters=64,
                                           filter_size=3,
                                           pad='same'),
                            name='BN2')
    layer2 = Pool3DDNNLayer(layer2,
                            name='maxpool_2',
                            mode='max',
                            pool_size=2,
                            stride=2)
    layer2 = DropoutLayer(layer2, name='l2drop', p=0.5)
    layer2 = DenseLayer(layer2, name='d_1', num_units=256)
    layer2 = DenseLayer(layer2,
                        name='out',
                        num_units=2,
                        nonlinearity=nonlinearities.softmax)

    # save weights
    net_model = 'model_2'
    net_weights2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '.pkl')
    net_history2 = os.path.join(options['weight_paths'], options['experiment'],
                                'nets', net_model + '_history.pkl')

    net2 = NeuralNet(
        layers=layer2,
        objective_loss_function=objectives.categorical_crossentropy,
        batch_iterator_train=Rotate_batch_Iterator(batch_size=128),
        update=updates.adadelta,
        on_epoch_finished=[
            SaveWeights(net_weights2, only_best=True, pickle=False),
            SaveTrainingHistory(net_history2),
            EarlyStopping(patience=max_epochs_patience)
        ],
        verbose=options['net_verbose'],
        max_epochs=num_epochs,
        train_split=TrainSplit(eval_size=train_split_perc),
    )

    return [net1, net2]
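
A sketch of how the returned pair is typically used: nolearn's NeuralNet provides fit/predict_proba, while the cascade selection step below is an assumption, not part of this snippet:

net1, net2 = cascade_model(options)

# Train the first CNN on all candidate patches.
net1.fit(X_train, y_train)

# Assumed cascade step: re-train the second CNN only on samples the first
# network still considers positive (the 0.5 threshold is illustrative).
probs = net1.predict_proba(X_train)[:, 1]
net2.fit(X_train[probs > 0.5], y_train[probs > 0.5])
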
Example #15
    def __init__(self, input_var=None, empty=False, rectified_fc_layers=False):
        '''
        Builds C3D model

        Returns
        -------
        dict
            A dictionary containing the network layers, where the output layer is at key 'prob'
        '''
        self.net = {}

        if empty:
            return

        self.net['input'] = InputLayer((None, 3, 16, 112, 112),
                                       input_var=input_var)

        # ----------- 1st layer group ---------------
        self.net['conv1a'] = Conv3DDNNLayer(
            self.net['input'],
            64, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify,
            flip_filters=False)
        self.net['pool1'] = MaxPool3DDNNLayer(self.net['conv1a'],
                                              pool_size=(1, 2, 2),
                                              stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        self.net['conv2a'] = Conv3DDNNLayer(
            self.net['pool1'],
            128, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool2'] = MaxPool3DDNNLayer(self.net['conv2a'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        self.net['conv3a'] = Conv3DDNNLayer(
            self.net['pool2'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv3b'] = Conv3DDNNLayer(
            self.net['conv3a'],
            256, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool3'] = MaxPool3DDNNLayer(self.net['conv3b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        self.net['conv4a'] = Conv3DDNNLayer(
            self.net['pool3'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv4b'] = Conv3DDNNLayer(
            self.net['conv4a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['pool4'] = MaxPool3DDNNLayer(self.net['conv4b'],
                                              pool_size=(2, 2, 2),
                                              stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        self.net['conv5a'] = Conv3DDNNLayer(
            self.net['pool4'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        self.net['conv5b'] = Conv3DDNNLayer(
            self.net['conv5a'],
            512, (3, 3, 3),
            pad=1,
            nonlinearity=lasagne.nonlinearities.rectify)
        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        self.net['pad'] = PadLayer(self.net['conv5b'],
                                   width=[(0, 1), (0, 1)],
                                   batch_ndim=3)
        self.net['pool5'] = MaxPool3DDNNLayer(self.net['pad'],
                                              pool_size=(2, 2, 2),
                                              pad=(0, 0, 0),
                                              stride=(2, 2, 2))

        self.fc_activation = lasagne.nonlinearities.rectify if rectified_fc_layers else lasagne.nonlinearities.tanh

        self.net['fc6-1'] = DenseLayer(self.net['pool5'],
                                       num_units=4096,
                                       nonlinearity=self.fc_activation,
                                       W=lasagne.init.GlorotUniform(gain=0.05))
        self.net['fc7-1'] = DenseLayer(self.net['fc6-1'],
                                       num_units=4096,
                                       nonlinearity=self.fc_activation,
                                       W=lasagne.init.GlorotUniform(gain=0.05))
        print "FC6 has norm %f" % numpy.linalg.norm(
            self.net['fc6-1'].W.get_value(), 'fro')
        print "FC7 has norm %f" % numpy.linalg.norm(
            self.net['fc7-1'].W.get_value(), 'fro')
Example #16
    def replicate_model(self, input_var=None, num_layers_unshared=0):
        '''
        Builds C3D model

        num_layers_unshared = 0 means all layers are shared
        num_layers_unshared = 1 means fc7 is not shared
        num_layers_unshared = 2 means fc7 and fc6 are not shared
        ... and so on and so forth

        Returns
        -------
        dict
            A dictionary containing the network layers, where the output layer is at key 'prob'
        '''
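        # Sharing thresholds implied by the branches below, read together with
        # the docstring: conv1a is unshared at num_layers_unshared >= 10,
        # conv2a >= 9, conv3a >= 8, conv3b >= 7, conv4a >= 6, conv4b >= 5,
        # conv5a >= 4, conv5b >= 3, and (per the docstring) fc6 >= 2, fc7 >= 1.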

        out = C3DModel(empty=True)

        out.net['input'] = InputLayer((None, 3, 16, 112, 112),
                                      input_var=input_var)

        # ----------- 1st layer group ---------------
        if num_layers_unshared >= 10:
            out.net['conv1a'] = Conv3DDNNLayer(
                out.net['input'],
                64, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                flip_filters=False)
        else:
            out.net['conv1a'] = Conv3DDNNLayer(
                out.net['input'],
                64, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                flip_filters=False,
                W=self.net['conv1a'].W,
                b=self.net['conv1a'].b)

        out.net['pool1'] = MaxPool3DDNNLayer(out.net['conv1a'],
                                             pool_size=(1, 2, 2),
                                             stride=(1, 2, 2))

        # ------------- 2nd layer group --------------
        if num_layers_unshared >= 9:
            out.net['conv2a'] = Conv3DDNNLayer(
                out.net['pool1'],
                128, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv2a'] = Conv3DDNNLayer(
                out.net['pool1'],
                128, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv2a'].W,
                b=self.net['conv2a'].b)

        out.net['pool2'] = MaxPool3DDNNLayer(out.net['conv2a'],
                                             pool_size=(2, 2, 2),
                                             stride=(2, 2, 2))

        # ----------------- 3rd layer group --------------
        if num_layers_unshared >= 8:
            out.net['conv3a'] = Conv3DDNNLayer(
                out.net['pool2'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv3a'] = Conv3DDNNLayer(
                out.net['pool2'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv3a'].W,
                b=self.net['conv3a'].b)

        if num_layers_unshared >= 7:
            out.net['conv3b'] = Conv3DDNNLayer(
                out.net['conv3a'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv3b'] = Conv3DDNNLayer(
                out.net['conv3a'],
                256, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv3b'].W,
                b=self.net['conv3b'].b)

        out.net['pool3'] = MaxPool3DDNNLayer(out.net['conv3b'],
                                             pool_size=(2, 2, 2),
                                             stride=(2, 2, 2))

        # ----------------- 4th layer group --------------
        if num_layers_unshared >= 6:
            out.net['conv4a'] = Conv3DDNNLayer(
                out.net['pool3'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv4a'] = Conv3DDNNLayer(
                out.net['pool3'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv4a'].W,
                b=self.net['conv4a'].b)

        if num_layers_unshared >= 5:
            out.net['conv4b'] = Conv3DDNNLayer(
                out.net['conv4a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv4b'] = Conv3DDNNLayer(
                out.net['conv4a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv4b'].W,
                b=self.net['conv4b'].b)

        out.net['pool4'] = MaxPool3DDNNLayer(out.net['conv4b'],
                                             pool_size=(2, 2, 2),
                                             stride=(2, 2, 2))

        # ----------------- 5th layer group --------------
        if num_layers_unshared >= 4:
            out.net['conv5a'] = Conv3DDNNLayer(
                out.net['pool4'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv5a'] = Conv3DDNNLayer(
                out.net['pool4'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv5a'].W,
                b=self.net['conv5a'].b)

        if num_layers_unshared >= 3:
            out.net['conv5b'] = Conv3DDNNLayer(
                out.net['conv5a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify)
        else:
            out.net['conv5b'] = Conv3DDNNLayer(
                out.net['conv5a'],
                512, (3, 3, 3),
                pad=1,
                nonlinearity=lasagne.nonlinearities.rectify,
                W=self.net['conv5b'].W,
                b=self.net['conv5b'].b)

        # We need a padding layer, as C3D only pads on the right, which cannot be done with a theano pooling layer
        out.net['pad'] = PadLayer(out.net['conv5b'],
                                  width=[(0, 1), (0, 1)],
                                  batch_ndim=3)
        out.net['pool5'] = MaxPool3DDNNLayer(out.net['pad'],
                                             pool_size=(2, 2, 2),
                                             pad=(0, 0, 0),
                                             stride=(2, 2, 2))

        # ----------------- Fully Connected Layers ------------------
        if num_layers_unshared >= 2:
            out.net['fc6-1'] = DenseLayer(
                out.net['pool5'],
                num_units=4096,
                nonlinearity=self.fc_activation,
                W=lasagne.init.GlorotUniform(gain=0.05))
            print "FC6 has norm %f" % numpy.linalg.norm(
                out.net['fc6-1'].W.get_value(), 'fro')
        else:
            out.net['fc6-1'] = DenseLayer(out.net['pool5'],
                                          num_units=4096,
                                          nonlinearity=self.fc_activation,
                                          W=self.net['fc6-1'].W,
                                          b=self.net['fc6-1'].b)

        if num_layers_unshared >= 1:
            out.net['fc7-1'] = DenseLayer(
                out.net['fc6-1'],
                num_units=4096,
                nonlinearity=self.fc_activation,
                W=lasagne.init.GlorotUniform(gain=0.05))
            print "FC7 has norm %f" % numpy.linalg.norm(
                out.net['fc7-1'].W.get_value(), 'fro')
        else:
            out.net['fc7-1'] = DenseLayer(out.net['fc6-1'],
                                          num_units=4096,
                                          nonlinearity=self.fc_activation,
                                          W=self.net['fc7-1'].W,
                                          b=self.net['fc7-1'].b)

    #    if num_layers_unshared >= 1:
    #        out.net['fc8-1']  = DenseLayer(out.net['fc7-1'], num_units=487, nonlinearity=None)
    #    else:
    #        out.net['fc8-1']  = DenseLayer(out.net['fc7-1'], num_units=487, nonlinearity=None
    #                                   , W = self.net['fc8-1'].W, b = self.net['fc8-1'].b)
    #    out.net['prob']  = NonlinearityLayer(out.net['fc8-1'], softmax)

        return out
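A sketch of how the replication might be used; base and other_input_var are hypothetical names for an already-constructed C3DModel and a second input variable:

# Hypothetical usage: share all convolutional layers, re-learn fc6/fc7.
sibling = base.replicate_model(input_var=other_input_var, num_layers_unshared=2)
assert sibling.net['conv1a'].W is base.net['conv1a'].W      # same shared variable
assert sibling.net['fc6-1'].W is not base.net['fc6-1'].W    # freshly initialised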
Example #17
def get_model(input_images, input_position, input_mult, target_var):

    # number of SAX and distance between SAX slices
    #indexes = []
    #for i in range(input_position.shape[0]):
    #    indexes.append(numpy.where(input_position[i][:,0] == 0.)[0][0])
    
    # input layer with unspecified batch size
    layer     = InputLayer(shape=(None, 22, 30, 64, 64), input_var=input_images) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    
    # Z-score?

    # Convolution, then batch normalisation, then activation, stacked as six
    # residual blocks of two convolutions each with an identity shortcut
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    for _ in xrange(6):
        shortcut  = layer
        layer     = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
        layer     = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
        layer     = ElemwiseSumLayer([layer, shortcut])
    layer         = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=16, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer         = Conv3DDNNLayer(incoming=layer, num_filters=22, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=sigmoid)

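    # Channel-wise extrema over the 22 predicted slice maps; computed here but
    # not used in the loss or in the values returned below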
    layer_max     = ExpressionLayer(layer, lambda X: X.max(1), output_shape='auto')
    layer_min     = ExpressionLayer(layer, lambda X: X.min(1), output_shape='auto')
    
    layer_prediction = layer
    # image prediction
    prediction           = get_output(layer_prediction)
        
    loss                 = binary_crossentropy(prediction, target_var).mean()

    # Trainable parameters, to be fed to an update rule by the caller
    # (e.g. Stochastic Gradient Descent (SGD) with Nesterov momentum)
    params               = get_all_params(layer_prediction, trainable=True)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_prediction, deterministic=True)
    test_loss            = binary_crossentropy(test_prediction, target_var).mean()

    return test_prediction, prediction, loss, params
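The loop body above is the only residual pattern this example uses, so it could equally be factored into a reusable helper; a minimal sketch (the name residual_block_3D is hypothetical, not part of the original code):

def residual_block_3D(layer, num_filters=16):
    # Two batch-normalised 3x3x3 convolutions plus an identity shortcut, as in
    # get_model above; pad='same' and stride 1 keep the two summands the same shape.
    shortcut = layer
    layer = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=num_filters, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    layer = batch_norm(Conv3DDNNLayer(incoming=layer, num_filters=num_filters, filter_size=(3,3,3), stride=(1,1,1), pad='same', nonlinearity=rectify))
    return ElemwiseSumLayer([layer, shortcut])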
def upscale_plus_conv_3D(net,no_f_base,f_size,pad,nonlinearity):
    net = lasagne.layers.Upscale3DLayer(net,2)
    net = Conv3DDNNLayer(net,no_f_base,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain="relu"))
    net = lasagne.layers.PadLayer(net,1)
    return net
def conv_pool_down_3D(net, no_f_base,f_size,conv_depth,pad,nonlinearity,dropout):
    for i in xrange(conv_depth):
        net = Conv3DDNNLayer(net,no_f_base,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    if dropout:
        net = DropoutLayer(net,p=dropout)
    return net
Example #20
def get_model():

    dtensor4 = T.TensorType('float32', (False,)*4)
    input_var = dtensor4('inputs')
    dtensor2 = T.TensorType('float32', (False,)*2)
    target_var = dtensor2('targets')

    # input layer with unspecified batch size
    layer_input     = InputLayer(shape=(None, 30, 64, 64), input_var=input_var) #InputLayer(shape=(None, 1, 30, 64, 64), input_var=input_var)
    layer_0         = DimshuffleLayer(layer_input, (0, 'x', 1, 2, 3))

    # Z-score?

    # Convolution with batch normalisation, then max pooling, then dropout
    layer_1         = batch_norm(Conv3DDNNLayer(incoming=layer_0, num_filters=64, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify, W=Orthogonal()))
    layer_2         = MaxPool3DDNNLayer(layer_1, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_3         = DropoutLayer(layer_2, p=0.25)

    # Convolution with batch normalisation, then max pooling, then dropout
    layer_4         = batch_norm(Conv3DDNNLayer(incoming=layer_3, num_filters=128, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify, W=Orthogonal()))
    layer_5         = MaxPool3DDNNLayer(layer_4, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_6         = DropoutLayer(layer_5, p=0.25)

    # Convolution with batch normalisation, then max pooling, then dropout
    layer_7         = batch_norm(Conv3DDNNLayer(incoming=layer_6, num_filters=256, filter_size=(3,3,3), stride=(1,3,3), pad='same', nonlinearity=leaky_rectify, W=Orthogonal()))
    layer_8         = MaxPool3DDNNLayer(layer_7, pool_size=(1, 2, 2), stride=(1, 2, 2), pad=(0, 1, 1))
    layer_9         = DropoutLayer(layer_8, p=0.25)
    
    # Recurrent layer
    layer_10         = DimshuffleLayer(layer_9, (0,2,1,3,4))
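    # Lasagne's recurrent layers treat axis 1 as the sequence axis and flatten
    # all trailing axes into features, so after this dimshuffle the LSTM sees
    # one flattened (channels * height * width) vector per time step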
    layer_11         = LSTMLayer(layer_10, num_units=612, hid_init=Orthogonal(), only_return_final=False)

    # Output Layer
    layer_systole    = DenseLayer(layer_11, 600, nonlinearity=leaky_rectify, W=Orthogonal())
    layer_diastole   = DenseLayer(layer_11, 600, nonlinearity=leaky_rectify, W=Orthogonal())
    layer_systole_1  = DropoutLayer(layer_systole, p=0.3)
    layer_diastole_1 = DropoutLayer(layer_diastole, p=0.3)

    layer_systole_2   = DenseLayer(layer_systole_1, 1, nonlinearity=None, W=Orthogonal())
    layer_diastole_2  = DenseLayer(layer_diastole_1, 1, nonlinearity=None, W=Orthogonal())
    layer_output      = ConcatLayer([layer_systole_2, layer_diastole_2])

    # Loss
    prediction           = get_output(layer_output) 
    loss                 = squared_error(prediction, target_var)
    loss                 = loss.mean()

    # Updates: Adam (RMSProp and Nesterov-momentum alternatives left commented out)
    params               = get_all_params(layer_output, trainable=True)
    updates              = adam(loss, params)
    #updates_0            = rmsprop(loss, params)
    #updates              = apply_nesterov_momentum(updates_0, params)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network, disabling dropout layers.
    test_prediction      = get_output(layer_output, deterministic=True)
    test_loss            = squared_error(test_prediction, target_var)
    test_loss            = test_loss.mean()

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn             = theano.function([input_var, target_var], loss, updates=updates, allow_input_downcast=True)

    # Compile a second function computing the validation loss:
    val_fn               = theano.function([input_var, target_var], test_loss, allow_input_downcast=True)

    # Compile a third function computing the prediction:
    predict_fn           = theano.function([input_var], test_prediction, allow_input_downcast=True)

    return [layer_output, train_fn, val_fn, predict_fn]
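A sketch of how the four returned objects might be exercised; the data is random and purely illustrative, and running it assumes a cuDNN-capable GPU (as the dnn layers require):

# Illustrative driver for get_model() with random data (not a training script).
import numpy

layer_output, train_fn, val_fn, predict_fn = get_model()
videos  = numpy.random.rand(4, 30, 64, 64).astype('float32')  # (batch, frames, H, W)
targets = numpy.random.rand(4, 2).astype('float32')           # two regression targets per sample
print train_fn(videos, targets)    # one Adam step; returns the training loss
print val_fn(videos, targets)      # deterministic loss, dropout disabled
print predict_fn(videos).shape     # -> (4, 2)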
def build_net(input_var=None):
    """Method for VoxResNet-like network building.

    Parameters
    ----------
    input_var : theano symbolic variable, optional
        Input to bind to the network's input layer.

    Returns
    -------
    dict
        Dictionary of network layers; the classifier output is at key 'prob'.
    """
    net = {}
    net['input'] = InputLayer((None, 1, 110, 110, 110), input_var=input_var)
    net['conv1a'] = Conv3DDNNLayer(net['input'],
                                   32,
                                   3,
                                   pad='same',
                                   nonlinearity=identity)
    net['bn1a'] = BatchNormLayer(net['conv1a'])
    net['relu1a'] = NonlinearityLayer(net['bn1a'])
    net['conv1b'] = Conv3DDNNLayer(net['relu1a'],
                                   32,
                                   3,
                                   pad='same',
                                   nonlinearity=identity)
    net['bn1b'] = BatchNormLayer(net['conv1b'])
    net['relu1b'] = NonlinearityLayer(net['bn1b'])
    net['conv1c'] = Conv3DDNNLayer(net['relu1b'],
                                   64,
                                   3,
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=identity)
    # VoxRes block 2
    net['voxres2_bn1'] = BatchNormLayer(net['conv1c'])
    net['voxres2_relu1'] = NonlinearityLayer(net['voxres2_bn1'])
    net['voxres2_conv1'] = Conv3DDNNLayer(net['voxres2_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres2_bn2'] = BatchNormLayer(net['voxres2_conv1'])
    net['voxres2_relu2'] = NonlinearityLayer(net['voxres2_bn2'])
    net['voxres2_conv2'] = Conv3DDNNLayer(net['voxres2_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres2_out'] = ElemwiseSumLayer(
        [net['conv1c'], net['voxres2_conv2']])
    # VoxRes block 3
    net['voxres3_bn1'] = BatchNormLayer(net['voxres2_out'])
    net['voxres3_relu1'] = NonlinearityLayer(net['voxres3_bn1'])
    net['voxres3_conv1'] = Conv3DDNNLayer(net['voxres3_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres3_bn2'] = BatchNormLayer(net['voxres3_conv1'])
    net['voxres3_relu2'] = NonlinearityLayer(net['voxres3_bn2'])
    net['voxres3_conv2'] = Conv3DDNNLayer(net['voxres3_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres3_out'] = ElemwiseSumLayer(
        [net['voxres2_out'], net['voxres3_conv2']])

    net['bn4'] = BatchNormLayer(net['voxres3_out'])
    net['relu4'] = NonlinearityLayer(net['bn4'])
    net['conv4'] = Conv3DDNNLayer(net['relu4'],
                                  64,
                                  3,
                                  stride=(2, 2, 2),
                                  pad='same',
                                  nonlinearity=identity)
    # VoxRes block 5
    net['voxres5_bn1'] = BatchNormLayer(net['conv4'])
    net['voxres5_relu1'] = NonlinearityLayer(net['voxres5_bn1'])
    net['voxres5_conv1'] = Conv3DDNNLayer(net['voxres5_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres5_bn2'] = BatchNormLayer(net['voxres5_conv1'])
    net['voxres5_relu2'] = NonlinearityLayer(net['voxres5_bn2'])
    net['voxres5_conv2'] = Conv3DDNNLayer(net['voxres5_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres5_out'] = ElemwiseSumLayer([net['conv4'], net['voxres5_conv2']])
    # VoxRes block 6
    net['voxres6_bn1'] = BatchNormLayer(net['voxres5_out'])
    net['voxres6_relu1'] = NonlinearityLayer(net['voxres6_bn1'])
    net['voxres6_conv1'] = Conv3DDNNLayer(net['voxres6_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres6_bn2'] = BatchNormLayer(net['voxres6_conv1'])
    net['voxres6_relu2'] = NonlinearityLayer(net['voxres6_bn2'])
    net['voxres6_conv2'] = Conv3DDNNLayer(net['voxres6_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres6_out'] = ElemwiseSumLayer(
        [net['voxres5_out'], net['voxres6_conv2']])

    net['bn7'] = BatchNormLayer(net['voxres6_out'])
    net['relu7'] = NonlinearityLayer(net['bn7'])
    net['conv7'] = Conv3DDNNLayer(net['relu7'],
                                  128,
                                  3,
                                  stride=(2, 2, 2),
                                  pad='same',
                                  nonlinearity=identity)

    # VoxRes block 8
    net['voxres8_bn1'] = BatchNormLayer(net['conv7'])
    net['voxres8_relu1'] = NonlinearityLayer(net['voxres8_bn1'])
    net['voxres8_conv1'] = Conv3DDNNLayer(net['voxres8_relu1'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres8_bn2'] = BatchNormLayer(net['voxres8_conv1'])
    net['voxres8_relu2'] = NonlinearityLayer(net['voxres8_bn2'])
    net['voxres8_conv2'] = Conv3DDNNLayer(net['voxres8_relu2'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres8_out'] = ElemwiseSumLayer([net['conv7'], net['voxres8_conv2']])
    # VoxRes block 9
    net['voxres9_bn1'] = BatchNormLayer(net['voxres8_out'])
    net['voxres9_relu1'] = NonlinearityLayer(net['voxres9_bn1'])
    net['voxres9_conv1'] = Conv3DDNNLayer(net['voxres9_relu1'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres9_bn2'] = BatchNormLayer(net['voxres9_conv1'])
    net['voxres9_relu2'] = NonlinearityLayer(net['voxres9_bn2'])
    net['voxres9_conv2'] = Conv3DDNNLayer(net['voxres9_relu2'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres9_out'] = ElemwiseSumLayer(
        [net['voxres8_out'], net['voxres9_conv2']])

    net['pool10'] = Pool3DDNNLayer(net['voxres9_out'], 7)
    net['fc11'] = DenseLayer(net['pool10'], 128)
    net['prob'] = DenseLayer(net['fc11'], 2, nonlinearity=softmax)

    return net
Example #22
File: res_net.py  Project: thesby/dsb3
def build_model():
    """Method for VoxResNet Building.

    Returns
    -------
    dict
        Interface dictionary with the named input layers under "inputs" and
        the named output layers under "outputs".
    """
    net = {}
    net['input'] = InputLayer(shape=(None, ) + nn_input_shape)
    net['dimshuffle1'] = DimshuffleLayer(net['input'],
                                         pattern=(0, 'x', 1, 2, 3))

    net['conv1a'] = Conv3DDNNLayer(net['dimshuffle1'],
                                   32,
                                   5,
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=identity)
    net['bn1a'] = BatchNormLayer(net['conv1a'])
    net['relu1a'] = NonlinearityLayer(net['bn1a'])
    net['conv1b'] = Conv3DDNNLayer(net['relu1a'],
                                   32,
                                   3,
                                   pad='same',
                                   nonlinearity=identity)
    net['bn1b'] = BatchNormLayer(net['conv1b'])
    net['relu1b'] = NonlinearityLayer(net['bn1b'])
    net['conv1c'] = Conv3DDNNLayer(net['relu1b'],
                                   64,
                                   3,
                                   stride=(2, 2, 2),
                                   pad='same',
                                   nonlinearity=identity)
    # VoxRes block 2
    net['voxres2_bn1'] = BatchNormLayer(net['conv1c'])
    net['voxres2_relu1'] = NonlinearityLayer(net['voxres2_bn1'])
    net['voxres2_conv1'] = Conv3DDNNLayer(net['voxres2_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres2_bn2'] = BatchNormLayer(net['voxres2_conv1'])
    net['voxres2_relu2'] = NonlinearityLayer(net['voxres2_bn2'])
    net['voxres2_conv2'] = Conv3DDNNLayer(net['voxres2_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres2_out'] = ElemwiseSumLayer(
        [net['conv1c'], net['voxres2_conv2']])
    # VoxRes block 3
    net['voxres3_bn1'] = BatchNormLayer(net['voxres2_out'])
    net['voxres3_relu1'] = NonlinearityLayer(net['voxres3_bn1'])
    net['voxres3_conv1'] = Conv3DDNNLayer(net['voxres3_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres3_bn2'] = BatchNormLayer(net['voxres3_conv1'])
    net['voxres3_relu2'] = NonlinearityLayer(net['voxres3_bn2'])
    net['voxres3_conv2'] = Conv3DDNNLayer(net['voxres3_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres3_out'] = ElemwiseSumLayer(
        [net['voxres2_out'], net['voxres3_conv2']])

    net['bn4'] = BatchNormLayer(net['voxres3_out'])
    net['relu4'] = NonlinearityLayer(net['bn4'])
    net['conv4'] = Conv3DDNNLayer(net['relu4'],
                                  64,
                                  3,
                                  stride=(2, 2, 2),
                                  pad='same',
                                  nonlinearity=identity)
    # VoxRes block 5
    net['voxres5_bn1'] = BatchNormLayer(net['conv4'])
    net['voxres5_relu1'] = NonlinearityLayer(net['voxres5_bn1'])
    net['voxres5_conv1'] = Conv3DDNNLayer(net['voxres5_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres5_bn2'] = BatchNormLayer(net['voxres5_conv1'])
    net['voxres5_relu2'] = NonlinearityLayer(net['voxres5_bn2'])
    net['voxres5_conv2'] = Conv3DDNNLayer(net['voxres5_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres5_out'] = ElemwiseSumLayer([net['conv4'], net['voxres5_conv2']])
    # VoxRes block 6
    net['voxres6_bn1'] = BatchNormLayer(net['voxres5_out'])
    net['voxres6_relu1'] = NonlinearityLayer(net['voxres6_bn1'])
    net['voxres6_conv1'] = Conv3DDNNLayer(net['voxres6_relu1'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres6_bn2'] = BatchNormLayer(net['voxres6_conv1'])
    net['voxres6_relu2'] = NonlinearityLayer(net['voxres6_bn2'])
    net['voxres6_conv2'] = Conv3DDNNLayer(net['voxres6_relu2'],
                                          64,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres6_out'] = ElemwiseSumLayer(
        [net['voxres5_out'], net['voxres6_conv2']])

    net['bn7'] = BatchNormLayer(net['voxres6_out'])
    net['relu7'] = NonlinearityLayer(net['bn7'])
    net['conv7'] = Conv3DDNNLayer(net['relu7'],
                                  128,
                                  3,
                                  stride=(2, 2, 2),
                                  pad='same',
                                  nonlinearity=identity)

    # VoxRes block 8
    net['voxres8_bn1'] = BatchNormLayer(net['conv7'])
    net['voxres8_relu1'] = NonlinearityLayer(net['voxres8_bn1'])
    net['voxres8_conv1'] = Conv3DDNNLayer(net['voxres8_relu1'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres8_bn2'] = BatchNormLayer(net['voxres8_conv1'])
    net['voxres8_relu2'] = NonlinearityLayer(net['voxres8_bn2'])
    net['voxres8_conv2'] = Conv3DDNNLayer(net['voxres8_relu2'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres8_out'] = ElemwiseSumLayer([net['conv7'], net['voxres8_conv2']])
    # VoxRes block 9
    net['voxres9_bn1'] = BatchNormLayer(net['voxres8_out'])
    net['voxres9_relu1'] = NonlinearityLayer(net['voxres9_bn1'])
    net['voxres9_conv1'] = Conv3DDNNLayer(net['voxres9_relu1'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres9_bn2'] = BatchNormLayer(net['voxres9_conv1'])
    net['voxres9_relu2'] = NonlinearityLayer(net['voxres9_bn2'])
    net['voxres9_conv2'] = Conv3DDNNLayer(net['voxres9_relu2'],
                                          128,
                                          3,
                                          pad='same',
                                          nonlinearity=identity)
    net['voxres9_out'] = ElemwiseSumLayer(
        [net['voxres8_out'], net['voxres9_conv2']])

    net['gpool'] = GlobalPoolLayer(net['voxres9_out'])
    net['prob'] = DenseLayer(net['gpool'],
                             num_units=1,
                             W=lasagne.init.Constant(0.0),
                             b=None,
                             nonlinearity=lasagne.nonlinearities.sigmoid)
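    # With W fixed at zero and no bias term, the initial output is
    # sigmoid(0) = 0.5 for every sample, a neutral starting probability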

    net['output'] = reshape(net['prob'], shape=(-1, ))

    return {
        "inputs": {
            "bcolzall:3d": net['input'],
        },
        "outputs": {
            "predicted_probability": net['output']
        },
    }
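The VoxRes blocks written out in the two networks above are all the same pre-activation unit (batch norm, ReLU, convolution, twice, plus an identity sum). A minimal sketch of a helper that would remove the repetition; the name voxres_block is hypothetical and not part of either project:

def voxres_block(net, incoming_key, prefix, num_filters):
    # Pre-activation residual unit: BN -> ReLU -> conv -> BN -> ReLU -> conv,
    # summed with the incoming layer, mirroring the blocks written out above.
    net[prefix + '_bn1'] = BatchNormLayer(net[incoming_key])
    net[prefix + '_relu1'] = NonlinearityLayer(net[prefix + '_bn1'])
    net[prefix + '_conv1'] = Conv3DDNNLayer(net[prefix + '_relu1'], num_filters, 3,
                                            pad='same', nonlinearity=identity)
    net[prefix + '_bn2'] = BatchNormLayer(net[prefix + '_conv1'])
    net[prefix + '_relu2'] = NonlinearityLayer(net[prefix + '_bn2'])
    net[prefix + '_conv2'] = Conv3DDNNLayer(net[prefix + '_relu2'], num_filters, 3,
                                            pad='same', nonlinearity=identity)
    net[prefix + '_out'] = ElemwiseSumLayer([net[incoming_key], net[prefix + '_conv2']])
    return net[prefix + '_out']

# e.g. voxres_block(net, 'conv1c', 'voxres2', 64) reproduces VoxRes block 2.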