Example 1
def Deconv(incoming, num_filters, filter_size=3,
           stride=(1, 1), crop='valid', untie_biases=False,
           W=lasagne.init.HeNormal(), b=None, nonlinearity=None,
           flip_filters=False, **kwargs):
    """
    Overrides the default parameters for DeconvLayer
    """
    ensure_set_name('conv', kwargs)

    return DeconvLayer(incoming, num_filters, filter_size, stride, crop,
                       untie_biases, W, b, nonlinearity, flip_filters,
                       **kwargs)
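A minimal usage sketch for the wrapper above. It assumes DeconvLayer is Lasagne's TransposedConv2DLayer and that ensure_set_name merely injects a default name into kwargs; both live elsewhere in the original project, so the stand-in helper below is hypothetical.

import lasagne
from lasagne.layers import InputLayer
from lasagne.layers import TransposedConv2DLayer as DeconvLayer

def ensure_set_name(default_name, kwargs):
    # Hypothetical stand-in for the project's helper: set a default name.
    kwargs.setdefault('name', default_name)

l_in = InputLayer((None, 16, 32, 32))
# Upsample 32x32 -> 66x66: out = (in - 1) * stride + filter_size.
l_up = Deconv(l_in, num_filters=8, filter_size=4, stride=(2, 2))
print(lasagne.layers.get_output_shape(l_up))  # (None, 8, 66, 66)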
Example 2
def Deconv_Tiny_VGG(params, input_var=None):
    net = {}
    net['input'] = InputLayer(shape=(None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(
        net['input'], 32, 3, pad=1, W=params[0], b=params[1], flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 32, 3, pad=1, W=params[2], b=params[3], flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 32, 3, pad=1, W=params[4], b=params[5], flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 32, 3, pad=1, W=params[6], b=params[7], flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 32, 3, pad=1, W=params[8], b=params[9], flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 32, 3, pad=1, W=params[10], b=params[11], flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_2'], 2)
    # deconvolution starts here
    net['unpool3'] = InverseLayer(net['pool3'], net['pool3'])

    net['deconv3_2'] = DeconvLayer(net['unpool3'], num_filters=32,
                                   filter_size=net['conv3_2'].filter_size,
                                   stride=net['conv3_2'].stride,
                                   crop=net['conv3_2'].pad,
                                   W=params[10], b=params[9],
                                   flip_filters=True)

    net['deconv3_1'] = DeconvLayer(net['deconv3_2'], num_filters=32,
                                   filter_size=net['conv3_1'].filter_size,
                                   stride=net['conv3_1'].stride,
                                   crop=net['conv3_1'].pad,
                                   W=params[8], b=params[7],
                                   flip_filters=True)

    net['unpool2'] = InverseLayer(net['deconv3_1'], net['pool2'])

    net['deconv2_2'] = DeconvLayer(net['unpool2'], num_filters=32,
                                   filter_size=net['conv2_2'].filter_size,
                                   stride=net['conv2_2'].stride,
                                   crop=net['conv2_2'].pad,
                                   W=params[6], b=params[5],
                                   flip_filters=True)

    net['deconv2_1'] = DeconvLayer(net['deconv2_2'], num_filters=32,
                                   filter_size=net['conv2_1'].filter_size,
                                   stride=net['conv2_1'].stride,
                                   crop=net['conv2_1'].pad,
                                   W=params[4], b=params[3],
                                   flip_filters=True)

    net['unpool1'] = InverseLayer(net['deconv2_1'], net['pool1'])

    net['deconv1_2'] = DeconvLayer(net['unpool1'], num_filters=32,
                                   filter_size=net['conv1_2'].filter_size,
                                   stride=net['conv1_2'].stride,
                                   crop=net['conv1_2'].pad,
                                   W=params[2], b=params[1],
                                   flip_filters=True)

    net['deconv1_1'] = DeconvLayer(net['deconv1_2'], num_filters=3,
                                   filter_size=net['conv1_1'].filter_size,
                                   stride=net['conv1_1'].stride,
                                   crop=net['conv1_1'].pad,
                                   W=params[0], flip_filters=True)

    return net
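A sketch of driving Deconv_Tiny_VGG end to end, assuming the Lasagne/Theano imports the function relies on (InputLayer, ConvLayer, PoolLayer, DeconvLayer, InverseLayer). The randomly initialized params list below is only a stand-in for the six trained W/b pairs:

import numpy as np
import theano
import theano.tensor as T
import lasagne

# Stand-in parameters: (n_out, n_in) channel pairs of the six forward convs.
shapes = [(32, 3), (32, 32), (32, 32), (32, 32), (32, 32), (32, 32)]
params = []
for n_out, n_in in shapes:
    params += [np.random.randn(n_out, n_in, 3, 3).astype('float32'),
               np.zeros(n_out, dtype='float32')]

input_var = T.tensor4('inputs')
net = Deconv_Tiny_VGG(params, input_var)
recon = lasagne.layers.get_output(net['deconv1_1'], deterministic=True)
reconstruct_fn = theano.function([input_var], recon)
# reconstruct_fn maps a (batch, 3, 224, 224) array back to image space.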
Example 3
def buildUnet(nb_in_channels,
              dropout,
              input_var=None,
              path_unet="/data/lisatmp3/anirudhg/results/Unet/" +
              "polyp_unet_drop_penal1e-05_dataAugm_nbEpochs100/" +
              "u_net_model.npz",
              nclasses=2,
              trainable=False,
              padding=92):
    """
    Build u-net model
    """
    net = {}
    net['input'] = InputLayer((None, nb_in_channels, None, None), input_var)
    net['conv1_1'] = ConvLayer(net['input'], 32, 3)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 32, 3)

    net['pool1'] = PoolLayer(net['conv1_2'], 2, ignore_border=False)

    net['conv2_1'] = ConvLayer(net['pool1'], 64, 3)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 64, 3)

    net['pool2'] = PoolLayer(net['conv2_2'], 2, ignore_border=False)

    net['conv3_1'] = ConvLayer(net['pool2'], 128, 3)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 128, 3)

    net['pool3'] = PoolLayer(net['conv3_2'], 2, ignore_border=False)

    net['conv4_1'] = ConvLayer(net['pool3'], 256, 3)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 256, 3)

    if dropout:
        net['drop1'] = DropoutLayer(net['conv4_2'])
        prev_layer1 = 'drop1'
    else:
        prev_layer1 = 'conv4_2'

    net['pool4'] = PoolLayer(net[prev_layer1], 2, ignore_border=False)

    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3)

    if dropout:
        net['drop2'] = DropoutLayer(net['conv5_2'])
        prev_layer2 = 'drop2'
    else:
        prev_layer2 = 'conv5_2'

    net['upconv4'] = DeconvLayer(net[prev_layer2], 256, 2, stride=2)
    net['Concat_4'] = ConcatLayer((net['conv4_2'], net['upconv4']),
                                  axis=1,
                                  cropping=[None, None, 'center', 'center'])

    net['conv6_1'] = ConvLayer(net['Concat_4'], 256, 3)
    net['conv6_2'] = ConvLayer(net['conv6_1'], 256, 3)

    net['upconv3'] = DeconvLayer(net['conv6_2'], 128, 2, stride=2)
    net['Concat_3'] = ConcatLayer((net['conv3_2'], net['upconv3']),
                                  axis=1,
                                  cropping=[None, None, 'center', 'center'])

    net['conv7_1'] = ConvLayer(net['Concat_3'], 128, 3)
    net['conv7_2'] = ConvLayer(net['conv7_1'], 128, 3)

    net['upconv2'] = DeconvLayer(net['conv7_2'], 64, 2, stride=2)
    net['Concat_2'] = ConcatLayer((net['conv2_2'], net['upconv2']),
                                  axis=1,
                                  cropping=[None, None, 'center', 'center'])

    net['conv8_1'] = ConvLayer(net['Concat_2'], 64, 3)
    net['conv8_2'] = ConvLayer(net['conv8_1'], 64, 3)

    net['upconv1'] = DeconvLayer(net['conv8_2'], 32, 2, stride=2)
    net['Concat_1'] = ConcatLayer((net['conv1_2'], net['upconv1']),
                                  axis=1,
                                  cropping=[None, None, 'center', 'center'])

    net['conv9_1'] = ConvLayer(net['Concat_1'], 32, 3)
    net['conv9_2'] = ConvLayer(net['conv9_1'], 32, 3)

    net['conv10'] = ConvLayer(net['conv9_2'],
                              nclasses,
                              1,
                              nonlinearity=lasagne.nonlinearities.identity)
    '''
    net['input_tmp'] = InputLayer((None, nclasses, None, None),
                                  input_var[:, :-1, :-2*padding, :-2*padding])

    net['final_crop'] = ElemwiseMergeLayer((net['input_tmp'], net['conv10']),
                                           merge_function=lambda input, deconv:
                                           deconv,
                                           cropping=[None, None,
                                                     'center', 'center'])

    net_final = lasagne.layers.DimshuffleLayer(net['final_crop'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net_final).shape
    net_final = lasagne.layers.ReshapeLayer(net_final,
                                            (T.prod(laySize[0:3]),
                                             laySize[3]))
    net_final = lasagne.layers.NonlinearityLayer(net_final,
                                                 nonlinearity=None)

    '''
    # NOTE: as written, this returns only the first convolutional layer;
    # the full layer dictionary is available in `net`.
    return net['conv1_1']
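The cropping=[None, None, 'center', 'center'] argument on every ConcatLayer above is what makes this unpadded U-Net fit together: each pair of unpadded 3x3 convolutions trims 4 pixels from each spatial dimension, so the contracting-path features are larger than the upsampled ones, and both must be center-cropped to the common size before concatenation. A minimal sketch of that behavior, using only Lasagne:

import lasagne
from lasagne.layers import InputLayer, ConcatLayer

skip = InputLayer((None, 16, 64, 64))  # contracting-path features
up = InputLayer((None, 16, 56, 56))    # upsampled features, slightly smaller
merged = ConcatLayer((skip, up), axis=1,
                     cropping=[None, None, 'center', 'center'])
# Both inputs are center-cropped to 56x56 before concatenating channels.
print(lasagne.layers.get_output_shape(merged))  # (None, 32, 56, 56)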
Example 4
def buildFCN8(nb_in_channels,
              input_var,
              path_weights='/Tmp/romerosa/itinf/models/' +
              'camvid/fcn8_model.npz',
              n_classes=21,
              load_weights=False,
              void_labels=[],
              trainable=True,
              layer=['probs_dimshuffle'],
              pascal=False,
              temperature=1.0):
    '''
    Build fcn8 model
    '''

    net = {}

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None), input_var)

    # pool 1
    net['conv1_1'] = ConvLayer(net['input'],
                               64,
                               3,
                               pad=100,
                               flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'],
                               64,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    # pool 2
    net['conv2_1'] = ConvLayer(net['pool1'],
                               128,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'],
                               128,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    # pool 3
    net['conv3_1'] = ConvLayer(net['pool2'],
                               256,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'],
                               256,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'],
                               256,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    # pool 4
    net['conv4_1'] = ConvLayer(net['pool3'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    # pool 5
    net['conv5_1'] = ConvLayer(net['pool4'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'],
                               512,
                               3,
                               pad='same',
                               flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    # fc6
    net['fc6'] = ConvLayer(net['pool5'],
                           4096,
                           7,
                           pad='valid',
                           flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'])

    # fc7
    net['fc7'] = ConvLayer(net['fc6_dropout'],
                           4096,
                           1,
                           pad='valid',
                           flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)

    net['score_fr'] = ConvLayer(net['fc7_dropout'],
                                n_classes,
                                1,
                                pad='valid',
                                flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'],
                                n_classes,
                                4,
                                stride=2,
                                crop='valid',
                                nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1, pad='same')
    net['score_fused'] = ElemwiseSumLayer(
        (net['score2'], net['score_pool4']),
        cropping=[None, None, 'center', 'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'],
                                n_classes,
                                4,
                                stride=2,
                                crop='valid',
                                nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1, pad='valid')
    net['score_final'] = ElemwiseSumLayer(
        (net['score4'], net['score_pool3']),
        cropping=[None, None, 'center', 'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'],
                                  n_classes,
                                  16,
                                  stride=8,
                                  crop='valid',
                                  nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None, None),
                                  input_var)

    net['score'] = ElemwiseMergeLayer(
        (net['input_tmp'], net['upsample']),
        merge_function=lambda input, deconv: deconv,
        cropping=[None, None, 'center', 'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = \
        lasagne.layers.DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = \
        lasagne.layers.ReshapeLayer(net['final_dimshuffle'],
                                    (T.prod(laySize[0:3]),
                                     laySize[3]))
    net['probs'] = lasagne.layers.NonlinearityLayer(net['final_reshape'],
                                                    nonlinearity=softmax)

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs'])

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(
        net['probs'], (laySize[0], laySize[1], laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    # Apply temperature
    if load_weights:
        soft_value = net['upsample'].W.get_value() / temperature
        net['upsample'].W.set_value(soft_value)
        soft_value = net['upsample'].b.get_value() / temperature
        net['upsample'].b.set_value(soft_value)

    return [net[el] for el in layer]
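A minimal inference sketch for the buildFCN8 above, assuming the Theano and Lasagne imports used throughout this file; load_weights=False avoids depending on the hard-coded weight path:

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
probs_layer = buildFCN8(3, input_var, n_classes=21,
                        load_weights=False, trainable=False,
                        layer=['probs_dimshuffle'])[0]
pred = lasagne.layers.get_output(probs_layer, deterministic=True)
pred_fn = theano.function([input_var], pred)  # per-pixel class probabilities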
Example 5
def buildFCN8_DAE(input_concat_h_vars, input_mask_var, n_classes,
                  nb_in_channels=3,
                  path_weights='/Tmp/romerosa/itinf/models/',
                  model_name='fcn8_model.npz', trainable=False,
                  load_weights=False, pretrained=False, freeze=False,
                  pretrained_path='/data/lisatmp4/romerosa/itinf/models/camvid/',
                  pascal=False, return_layer='probs_dimshuffle',
                  concat_h=['input'], noise=0.1, dropout=0.5):

    '''
    Build fcn8 model as DAE
    '''

    net = {}
    pos = 0

    assert all([el in ['pool1', 'pool2', 'pool3', 'pool4', 'input']
                for el in concat_h])

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None),
                              input_mask_var)
    # Add noise to the input
    if noise > 0:
        # net['noisy_input'] = GaussianNoiseLayerSoftmax(net['input'],
        #                                                sigma=noise)
        net['noisy_input'] = GaussianNoiseLayer(net['input'], sigma=noise)
        in_layer = 'noisy_input'
    else:
        in_layer = 'input'

    pos, out = model_helpers.concatenate(net, in_layer, concat_h,
                                         input_concat_h_vars, pos,
                                         net['input'].output_shape[1])

    # pool 1
    net['conv1_1'] = ConvLayer(
        net[out], 64, 3, pad=100, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad='same', flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    pos, out = model_helpers.concatenate(net, 'pool1', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool1'].output_shape[1])

    # pool 2
    net['conv2_1'] = ConvLayer(
        net[out], 128, 3, pad='same', flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad='same', flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    pos, out = model_helpers.concatenate(net, 'pool2', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool2'].output_shape[1])

    # pool 3
    net['conv3_1'] = ConvLayer(
        net[out], 256, 3, pad='same', flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad='same', flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad='same', flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    pos, out = model_helpers.concatenate(net, 'pool3', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool3'].output_shape[1])

    # pool 4
    net['conv4_1'] = ConvLayer(
        net[out], 512, 3, pad='same', flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad='same', flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad='same', flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    pos, out = model_helpers.concatenate(net, 'pool4', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool4'].output_shape[1])

    # pool 5
    net['conv5_1'] = ConvLayer(
        net[out], 512, 3, pad='same', flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad='same', flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad='same', flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    pos, out = model_helpers.concatenate(net, 'pool5', concat_h,
                                         input_concat_h_vars, pos,
                                         net['pool5'].output_shape[1])

    # fc6
    net['fc6'] = ConvLayer(
        net[out], 4096, 7, pad='valid', flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=dropout)

    # fc7
    net['fc7'] = ConvLayer(
        net['fc6_dropout'], 4096, 1, pad='valid', flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=dropout)

    net['score_fr'] = ConvLayer(
        net['fc7_dropout'], n_classes, 1, pad='valid', flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'], n_classes, 4, stride=2,
                                crop='valid', nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1,
                                   pad='same')
    net['score_fused'] = ElemwiseSumLayer((net['score2'],
                                           net['score_pool4']),
                                          cropping=[None, None, 'center',
                                                    'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'], n_classes, 4,
                                stride=2, crop='valid', nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1,
                                   pad='valid')
    net['score_final'] = ElemwiseSumLayer((net['score4'],
                                           net['score_pool3']),
                                          cropping=[None, None, 'center',
                                                    'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'], n_classes, 16,
                                  stride=8, crop='valid', nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None,
                                   None), input_mask_var)

    net['score'] = ElemwiseMergeLayer((net['input_tmp'], net['upsample']),
                                      merge_function=lambda input, deconv:
                                      deconv,
                                      cropping=[None, None, 'center',
                                                'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = \
        lasagne.layers.DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = \
        lasagne.layers.ReshapeLayer(net['final_dimshuffle'],
                                    (T.prod(laySize[0:3]),
                                     laySize[3]))
    net['probs'] = lasagne.layers.NonlinearityLayer(net['final_reshape'],
                                                    nonlinearity=softmax)

    # Load weights
    if load_weights:
        pretrained = False
        with np.load(os.path.join(path_weights, model_name)) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(net['probs'], param_values)

    # In case we want to re-use the weights of an FCN8 model pretrained
    # from images (not GT)
    if pretrained:
        print('Loading pretrained weights')
        if pascal:
            path_weights = '/data/lisatmp4/romerosa/itinf/models/camvid/pascal-fcn8s-tvg-dag.mat'
            if 'tvg' in path_weights:
                str_filter = 'f'
                str_bias = 'b'
            else:
                str_filter = '_filter'
                str_bias = '_bias'

            W = sio.loadmat(path_weights)

            # Load the parameter values into the net
            num_params = W.get('params').shape[1]
            str_ind = [''.join(x for x in concat if x.isdigit())
                       for concat in concat_h]
            list_of_lays = ['conv' + str(int(x) + 1) + '_1'
                            for x in str_ind if x]
            if nb_in_channels != 3 or 'input' in concat_h:
                list_of_lays += ['conv1_1']
            print(list_of_lays)

            for i in range(num_params):
                # Get layer name from the saved model
                name = str(W.get('params')[0][i][0])[3:-2]
                # Get parameter value
                param_value = W.get('params')[0][i][1]

                # Load weights
                if name.endswith(str_filter):
                    raw_name = name[:-len(str_filter)]

                    if raw_name not in list_of_lays:
                        print('Copying weights for ' + raw_name)
                        if 'score' not in raw_name and \
                           'upsample' not in raw_name and \
                           'final' not in raw_name and \
                           'probs' not in raw_name:

                            # print 'Initializing layer ' + raw_name
                            param_value = param_value.T
                            param_value = np.swapaxes(param_value, 2, 3)
                            net[raw_name].W.set_value(param_value)
                    else:
                        print('Ignoring ' + raw_name)

                # Load bias terms
                if name.endswith(str_bias):
                    raw_name = name[:-len(str_bias)]
                    if 'score' not in raw_name and \
                       'upsample' not in raw_name and \
                       'final' not in raw_name and \
                       'probs' not in raw_name:

                        param_value = np.squeeze(param_value)
                        net[raw_name].b.set_value(param_value)

        else:
            with np.load(os.path.join(pretrained_path, model_name)) as f:
                start = 0 if nb_in_channels == f['arr_%d' % 0].shape[1] \
                    else 2
                param_values = [f['arr_%d' % i] for i in range(start,
                                                               len(f.files))]
            all_layers = lasagne.layers.get_all_layers(net['probs'])
            all_layers = [l for l in all_layers
                          if not isinstance(l, (InputLayer,
                                                GaussianNoiseLayerSoftmax,
                                                GaussianNoiseLayer))]
            all_layers = all_layers[1:] if start > 0 else all_layers
            # Freeze parameters after last concatenation layer
            last_concat = [idx for idx, l in enumerate(all_layers)
                           if isinstance(l, ConcatLayer)][-1]
            count = 0
            for idx, layer in enumerate(all_layers):
                layer_params = layer.get_params()
                for p in layer_params:
                    has_input = hasattr(layer, 'input_layer')
                    if has_input and not isinstance(layer.input_layer,
                                                    ConcatLayer):
                        p.set_value(param_values[count])
                        if freeze:
                            model_helpers.freezeParameters(layer, single=True)
                    if has_input and isinstance(layer.input_layer,
                                                ConcatLayer) and \
                            idx == last_concat:
                        print('freezing')
                        freeze = True
                    count += 1

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs'])

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(net['probs'], (laySize[0], laySize[1],
                                                       laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    return net[return_layer]
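A hypothetical call sketch for buildFCN8_DAE. The model_helpers.concatenate helper is project-local, so this only mirrors the signature: the noisy softmax mask is the variable to denoise and the image is passed as the single 'input' skip connection. n_classes=11 is just an example value (e.g. CamVid):

import theano.tensor as T

input_mask_var = T.tensor4('noisy_mask')  # softmax output to be denoised
input_image_var = T.tensor4('image')      # concatenated at the 'input' level
dae_out = buildFCN8_DAE([input_image_var], input_mask_var, n_classes=11,
                        concat_h=['input'], noise=0.1,
                        trainable=True, load_weights=False)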
Example 6
def UnpoolNet(incoming_net,
              net,
              p,
              unpool,
              n_classes,
              incoming_layer,
              skip,
              dropout=0.,
              unpool_type='standard',
              layer_name=None,
              bn=0):
    '''
    Add upsampling layer

    Parameters
    ----------
    incoming_net: contracting path network
    net: upsampling path network being built here
    p: int, index of the current unpooling
    unpool: int, total number of unpoolings
    n_classes: int, number of classes
    incoming_layer: string, name of the last layer of the contracting path
    skip: bool, whether to add skip connections
    dropout: float, dropout probability applied after the up-convolution
    unpool_type: string, unpooling type ('standard', 'trackind' or 'inverse')
    layer_name: string, optional name for the merged layer
    bn: int, whether to batch-normalize the up-convolution output
    '''

    # last unpooling must have n_filters = number of classes
    if p == 1:
        n_cl = n_classes
    else:
        laySize = incoming_net['pool' + str(p - 1)].input_shape
        n_cl = laySize[1]
    print('Number of feature maps (out):', n_cl)

    if unpool_type == 'standard':  # that would be standard deconv, with zeros
        # Unpool: the unpooling will use the last layer of the contracting path
        # if it is the first unpooling, otherwise it will use the last merged
        # layer (resulting from the previous unpooling)
        net['up'+str(p)] = \
                DeconvLayer(incoming_net[incoming_layer] if p == unpool else
                            net['fused_up' + str(p+1)],
                            n_cl, 4, stride=2,
                            crop='valid', nonlinearity=linear)
        # Add skip connection if required (sum)
        if skip and p > 1:
            # Merge
            net['fused_up'+str(p)] = \
                ElemwiseSumLayer((net['up'+str(p)],
                                  incoming_net['pool'+str(p-1)]),
                                 cropping=[None, None, 'center', 'center'],
                                 name=layer_name)
        else:
            # Crop to ensure the right output size
            net['fused_up'+str(p)] = \
                CroppingLayer((incoming_net['pool'+str(p-1) if p >
                                            1 else 'input'],
                               net['up'+str(p)]),
                              merge_function=lambda input, deconv:
                              deconv, cropping=[None, None,
                                                'center', 'center'],
                              name=layer_name)

    elif unpool_type in ('trackind', 'inverse'):
        # index tracking as in SegNet
        # Depool: the unpooling will use the last layer of the contracting
        # path if it is the first unpooling, otherwise it will use the last
        # merged layer (resulting from the previous unpooling)
        if unpool_type == 'trackind':
            net['up'+str(p)] = \
                DePool2D(incoming_net[incoming_layer] if p == unpool else
                         net['fused_up'+str(p+1)],
                         2, incoming_net['pool'+str(p)],
                         incoming_net['pool'+str(p)].input_layer)
        else:
            net['up'+str(p)] = \
                InverseLayer(incoming_net[incoming_layer] if p == unpool else
                             net['fused_up'+str(p+1)],
                             incoming_net['pool'+str(p)])

        # Convolve
        out = 'up_conv' + str(p)
        net[out] = \
            ConvLayer(net['up'+str(p)], n_cl, 3,
                      pad='same', flip_filters=False, nonlinearity=linear)

        if dropout > 0:
            net[out + '_drop'] = DropoutLayer(net[out], p=dropout)
            out += '_drop'
        if bn:
            net[out + '_bn'] = BatchNormLayer(net[out])
            out += '_bn'

        # Add skip connection if required (sum)
        if skip and p > 1:
            # Merge
            net['fused_up'+str(p)] = \
                ElemwiseSumLayer((net[out],
                                  incoming_net['pool'+str(p-1)]),
                                 cropping=[None, None, 'center', 'center'],
                                 name=layer_name)

        else:
            # Crop to ensure the right output size
            net['fused_up'+str(p)] = \
                    CroppingLayer((incoming_net['pool'+str(p-1) if p >
                                                1 else 'input'],
                                   net[out]),
                                  merge_function=lambda input, deconv:
                                  deconv, cropping=[None, None,
                                                    'center', 'center'],
                                  name=layer_name)
    else:
        raise ValueError('Unknown unpool type')
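UnpoolNet only adds layers to net; a sketch of how it is presumably driven, looping from the deepest unpooling (p == unpool, fed by incoming_layer) down to p == 1, where the filter count drops to n_classes. Here contracting_net stands in for a layer dictionary built by one of the contracting-path constructors above:

net_up = {}
unpool = 3  # number of pooling layers in the contracting path
for p in range(unpool, 0, -1):
    UnpoolNet(contracting_net, net_up, p, unpool, n_classes=2,
              incoming_layer='pool3', skip=True, unpool_type='standard')
output_layer = net_up['fused_up1']  # final, class-sized feature map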
Example 7
def buildFCN8(nb_in_channels, input_var,
              path_weights='/Tmp/romerosa/itinf/models/' +
              'camvid/fcn8_model.npz',
              n_classes=21, load_weights=True,
              void_labels=[], trainable=False,
              layer=['probs_dimshuffle']):

    '''
    Build fcn8 model (generator)
    '''

    net = {}

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None),
                              input_var)

    # pool 1
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=100, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad='same', flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    # pool 2
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad='same', flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad='same', flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    # pool 3
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad='same', flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad='same', flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad='same', flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    # pool 4
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad='same', flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad='same', flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad='same', flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    # pool 5
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad='same', flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad='same', flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad='same', flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    # fc6
    net['fc6'] = ConvLayer(
        net['pool5'], 4096, 7, pad='valid', flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'])

    # fc7
    net['fc7'] = ConvLayer(
        net['fc6_dropout'], 4096, 1, pad='valid', flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)

    net['score_fr'] = ConvLayer(
        net['fc7_dropout'], n_classes, 1, pad='valid', flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'], n_classes, 4, stride=2,
                                crop='valid', nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1,
                                   pad='same')
    net['score_fused'] = ElemwiseSumLayer((net['score2'],
                                           net['score_pool4']),
                                          cropping=[None, None, 'center',
                                                    'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'], n_classes, 4,
                                stride=2, crop='valid', nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1,
                                   pad='valid')
    net['score_final'] = ElemwiseSumLayer((net['score4'],
                                           net['score_pool3']),
                                          cropping=[None, None, 'center',
                                                    'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'], n_classes, 16,
                                  stride=8, crop='valid', nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None,
                                   None), input_var)

    net['score'] = ElemwiseMergeLayer((net['input_tmp'], net['upsample']),
                                      merge_function=lambda input, deconv:
                                      deconv,
                                      cropping=[None, None, 'center',
                                                'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = ReshapeLayer(net['final_dimshuffle'],
                                        (T.prod(laySize[0:3]),
                                         laySize[3]))
    net['probs'] = NonlinearityLayer(net['final_reshape'],
                                     nonlinearity=softmax)

    # Load weights
    if load_weights:
        with np.load(path_weights) as f:
            param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(net['probs'], param_values)

    if not trainable:
        freezeParameters(net['probs'], single=False)

    if any(void_labels):
        layVoid = lasagne.layers.get_output(net['probs']).shape
        input_discrim_var = T.zeros((layVoid[0], 1))
        net['input_void'] = InputLayer((None, 1), input_discrim_var)
        net['concat'] = ConcatLayer([net['probs'], net['input_void']],
                                    axis=1, cropping=None)
        n_classes = n_classes + 1
    else:
        net['concat'] = net['probs']

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(net['concat'], (laySize[0], laySize[1],
                                                        laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    return [net[el] for el in layer]
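The void-label branch above appends a constant zero column to the (pixels, n_classes) softmax output, so an extra void class exists that the generator can never predict. A plain NumPy illustration of the same operation:

import numpy as np

probs = np.array([[0.7, 0.3],
                  [0.2, 0.8]])  # (pixels, n_classes)
void = np.zeros((probs.shape[0], 1))
probs_with_void = np.concatenate([probs, void], axis=1)
print(probs_with_void)  # rows still sum to 1; the void class stays at 0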
Example 8
def buildFCN8(nb_in_channels, input_var,
              path_weights='/Tmp/romerosa/itinf/models/' +
              'camvid/new_fcn8_model_best.npz',
              n_classes=21, load_weights=True,
              void_labels=[], trainable=False,
              layer=['probs_dimshuffle'], pascal=False,
              temperature=1.0, dropout=0.5):
    '''
    Build fcn8 model
    '''

    net = {}

    # Contracting path
    net['input'] = InputLayer((None, nb_in_channels, None, None),
                              input_var)

    # pool 1
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=100, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad='same', flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)

    # pool 2
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad='same', flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad='same', flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)

    # pool 3
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad='same', flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad='same', flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad='same', flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)

    # pool 4
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad='same', flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad='same', flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad='same', flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)

    # pool 5
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad='same', flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad='same', flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad='same', flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)

    # fc6
    net['fc6'] = ConvLayer(
        net['pool5'], 4096, 7, pad='valid', flip_filters=False)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=dropout)

    # fc7
    net['fc7'] = ConvLayer(
        net['fc6_dropout'], 4096, 1, pad='valid', flip_filters=False)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=dropout)

    net['score_fr'] = ConvLayer(
        net['fc7_dropout'], n_classes, 1, pad='valid', flip_filters=False)

    # Upsampling path

    # Unpool
    net['score2'] = DeconvLayer(net['score_fr'], n_classes, 4, stride=2,
                                crop='valid', nonlinearity=linear)
    net['score_pool4'] = ConvLayer(net['pool4'], n_classes, 1,
                                   pad='same')
    net['score_fused'] = ElemwiseSumLayer((net['score2'],
                                           net['score_pool4']),
                                          cropping=[None, None, 'center',
                                                    'center'])

    # Unpool
    net['score4'] = DeconvLayer(net['score_fused'], n_classes, 4,
                                stride=2, crop='valid', nonlinearity=linear)
    net['score_pool3'] = ConvLayer(net['pool3'], n_classes, 1,
                                   pad='valid')
    net['score_final'] = ElemwiseSumLayer((net['score4'],
                                           net['score_pool3']),
                                          cropping=[None, None, 'center',
                                                    'center'])
    # Unpool
    net['upsample'] = DeconvLayer(net['score_final'], n_classes, 16,
                                  stride=8, crop='valid', nonlinearity=linear)
    upsample_shape = lasagne.layers.get_output_shape(net['upsample'])[1]
    net['input_tmp'] = InputLayer((None, upsample_shape, None,
                                   None), input_var)

    net['score'] = ElemwiseMergeLayer((net['input_tmp'], net['upsample']),
                                      merge_function=lambda input, deconv:
                                      deconv,
                                      cropping=[None, None, 'center',
                                                'center'])

    # Final dimshuffle, reshape and softmax
    net['final_dimshuffle'] = \
        lasagne.layers.DimshuffleLayer(net['score'], (0, 2, 3, 1))
    laySize = lasagne.layers.get_output(net['final_dimshuffle']).shape
    net['final_reshape'] = \
        lasagne.layers.ReshapeLayer(net['final_dimshuffle'],
                                    (T.prod(laySize[0:3]),
                                     laySize[3]))
    net['probs'] = lasagne.layers.NonlinearityLayer(net['final_reshape'],
                                                    nonlinearity=softmax)

    # Load weights
    if load_weights:
        if pascal:
            path_weights = '/data/lisatmp4/erraqabi/data/att-segm/' + \
                          'pre_trained_weights/pascal-fcn8s-tvg-dag.mat'
            if 'tvg' in path_weights:
                str_filter = 'f'
                str_bias = 'b'
            else:
                str_filter = '_filter'
                str_bias = '_bias'

            W = sio.loadmat(path_weights)

            # Load the parameter values into the net
            num_params = W.get('params').shape[1]
            for i in range(num_params):
                # Get layer name from the saved model
                name = str(W.get('params')[0][i][0])[3:-2]
                # Get parameter value
                param_value = W.get('params')[0][i][1]

                # Load weights
                if name.endswith(str_filter):
                    raw_name = name[:-len(str_filter)]
                    if 'score' not in raw_name and \
                       'upsample' not in raw_name and \
                       'final' not in raw_name and \
                       'probs' not in raw_name:

                        # print 'Initializing layer ' + raw_name
                        param_value = param_value.T
                        param_value = np.swapaxes(param_value, 2, 3)
                        net[raw_name].W.set_value(param_value)

                # Load bias terms
                if name.endswith(str_bias):
                    raw_name = name[:-len(str_bias)]
                    if 'score' not in raw_name and \
                       'upsample' not in raw_name and \
                       'final' not in raw_name and \
                       'probs' not in raw_name:

                        param_value = np.squeeze(param_value)
                        net[raw_name].b.set_value(param_value)
        else:
            with np.load(path_weights) as f:
                param_values = [f['arr_%d' % i] for i in range(len(f.files))]
            lasagne.layers.set_all_param_values(net['probs'], param_values)

    # Do not train
    if not trainable:
        model_helpers.freezeParameters(net['probs'], single=False)

    # Go back to 4D
    net['probs_reshape'] = ReshapeLayer(net['probs'], (laySize[0], laySize[1],
                                                       laySize[2], n_classes))

    net['probs_dimshuffle'] = DimshuffleLayer(net['probs_reshape'],
                                              (0, 3, 1, 2))

    # Apply temperature
    if load_weights:
        soft_value = net['upsample'].W.get_value() / temperature
        net['upsample'].W.set_value(soft_value)
        soft_value = net['upsample'].b.get_value() / temperature
        net['upsample'].b.set_value(soft_value)

    return [net[el] for el in layer]
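The closing temperature block divides the last linear layer's W and b by temperature, which is equivalent to computing softmax(logits / T): the distribution flattens as T grows while the argmax is preserved. A NumPy illustration:

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

logits = np.array([4.0, 1.0, 0.0])
print(softmax(logits))        # peaked distribution
print(softmax(logits / 2.0))  # softer distribution, same argmax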