Example #1
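The snippets below use common Lasagne/Theano aliases (ll, l, layers, T, batch_norm, leaky_rectify, ...) without showing their import lines. A minimal import block consistent with those identifiers is sketched here as an assumption about the original files; project-specific names such as PS, aug_fea_n, filename_model_ae, Customfftlayer, SoftThresPerc, ChInnerProdMerge, myUtils and binary_net_ex are not standard Lasagne and remain external.

import pickle
import numpy as np
import theano
import theano.tensor as T
import lasagne
import lasagne.layers as ll
from lasagne import layers, init
from lasagne.layers import batch_norm, ReshapeLayer
from lasagne.nonlinearities import leaky_rectify, rectify, sigmoid, identity

l = lasagne  # a few snippets refer to lasagne through the short alias `l`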
def build_fft_scale(x, y, size):
    W = []
    pnet = ll.InputLayer((None, 3, 101, 101), input_var=None)
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(ll.BatchNormLayer(pnet))
    pnet = ll.Pool2DLayer(pnet, (3, 3), (2, 2))
    pnet = ll.Conv2DLayer(pnet, 64, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.NonlinearityLayer(
        ll.BatchNormLayer(pnet),
        nonlinearity=l.nonlinearities.LeakyRectify(0.1))
    pnet = ll.Conv2DLayer(pnet, 32, (3, 3), pad='same', nonlinearity=None)
    pnet = ll.BatchNormLayer(pnet)
    x_p, y_p = ll.get_output(pnet, x), ll.get_output(pnet, y)
    z_p = Customfftlayer(x_p, y_p)
    net = ll.InputLayer((None, 64, 50, 50), input_var=z_p)
    net = ll.BatchNormLayer(net)
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.Pool2DLayer(net, (2, 2), mode='average_inc_pad')
    net = ll.NonlinearityLayer(
        ll.BatchNormLayer(
            ll.Conv2DLayer(net, 64, (5, 5), pad='same', nonlinearity=None)))
    net = ll.BatchNormLayer(ll.Conv2DLayer(net, 10, (1, 1), nonlinearity=None))

    # return scale difference: x_new / x_old - 1
    p_scale = ll.get_output(net)
    #p_scale = theano.gradient.disconnected_grad(p_scale)
    net_scale = ll.InputLayer((None, 10, 25, 25), p_scale)
    net_scale = ll.DenseLayer(net_scale,
                              100,
                              b=None,
                              nonlinearity=l.nonlinearities.tanh)
    W.append(net_scale.get_params(regularizable=True)[0])
    net_scale = ll.DenseLayer(net_scale, 2, b=None, nonlinearity=None)
    # return heatmap upsampled 2x relative to `size` (size x size -> 2*size x 2*size)
    net_heat = ll.DenseLayer(net,
                             500,
                             b=None,
                             nonlinearity=l.nonlinearities.tanh)
    W.append(net_heat.get_params(regularizable=True)[0])
    net_heat = ll.DenseLayer(net_heat, size**2, b=None, nonlinearity=None)
    W.append(net_heat.get_params(regularizable=True)[0])
    net_heat = ll.BatchNormLayer(net_heat)
    net_heat = ll.ReshapeLayer(net_heat, ([0], 1, size, size))
    net_heat = ll.Deconv2DLayer(net_heat,
                                64, (5, 5), (2, 2),
                                b=None,
                                crop='same',
                                nonlinearity=None)
    net_heat = ll.BatchNormLayer(net_heat)
    net_heat = ll.Conv2DLayer(net_heat,
                              1, (3, 3),
                              b=None,
                              pad='same',
                              nonlinearity=None)
    W.append(net_heat.get_params(regularizable=True)[0])
    return pnet, net_scale, net_heat, W
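A hedged usage sketch for the function above, using the imports sketched under Example #1. Customfftlayer and the training setup are project-specific, so this only shows how the returned layers might be compiled; the size value is illustrative.

x = T.tensor4('x')
y = T.tensor4('y')
pnet, net_scale, net_heat, W = build_fft_scale(x, y, size=25)
# Compile scale and heatmap predictions into one Theano function.
scale_out = ll.get_output(net_scale)
heat_out = ll.get_output(net_heat)
predict_fn = theano.function([x, y], [scale_out, heat_out])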
Example #2
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    prely = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))

    featm = batch_norm(
        layers.Conv2DLayer(prely,
                           180,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    feat_map = batch_norm(
        layers.Conv2DLayer(featm,
                           120,
                           filter_size=(1, 1),
                           nonlinearity=rectify,
                           name="feat_map"))
    maskm = batch_norm(
        layers.Conv2DLayer(prely,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm,
                                             1,
                                             filter_size=(1, 1),
                                             nonlinearity=None),
                          beta=None,
                          gamma=None)
    mask_map = SoftThresPerc(mask_rep,
                             perc=90.0,
                             alpha=0.5,
                             beta=init.Constant(0.1),
                             tight=100.0,
                             name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    glblf = batch_norm(
        layers.Conv2DLayer(prely,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf,
                               pool_size=(20, 20),
                               stride=20,
                               mode='average_inc_pad')
    glblf = batch_norm(
        layers.Conv2DLayer(glblf,
                           64,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf,
                                          3,
                                          filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")

    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = layers.Upscale2DLayer(glblf, scale_factor=20)
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             48,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             48,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             48,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
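A minimal usage sketch for the autoencoder above, assuming PS and the custom layers it references are defined: the reconstruction and mask outputs are compiled into a single callable Theano function and run on a dummy batch (illustrative only).

network, input_var, mask_var, output_var = build_autoencoder_network()
reconstruct_fn = theano.function([input_var], [output_var, mask_var])
# Two random PS x PS RGB patches, just to exercise the graph.
dummy = np.random.rand(2, 3, PS, PS).astype(np.float32)
recon_flat, mask = reconstruct_fn(dummy)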
Example #3
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));

    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm    = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1,   filter_size=(1,1), nonlinearity=None),   beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=0.0, alpha=0.1, beta=init.Constant(0.5), tight=100.0, bias=-10, name="mask_map");
    enlyr    = ChInnerProdMerge(feat_map, mask_map, name="encoder");

    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.Deconv2DLayer(layer, 3,    filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature");

    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf =            layers.Deconv2DLayer(glblf, 3,   filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    layer = layers.ElemwiseSumLayer([layer, glblf]);

    network = ReshapeLayer(layer, ([0], -1));
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')));
    mask_map.beta.set_value(np.float32(-10.0*mask_map.beta.get_value()));

    # Adding more layers
    aug_var = T.matrix('aug_var');
    target_var = T.imatrix('targets');
    add_a = layers.Conv2DLayer(enlyr, 320, filter_size=(1,1), nonlinearity=leaky_rectify);
    add_b = layers.Conv2DLayer(add_a, 320, filter_size=(1,1), nonlinearity=leaky_rectify);
    add_c = layers.Conv2DLayer(add_b, 320, filter_size=(1,1), nonlinearity=leaky_rectify);
    add_d = layers.Conv2DLayer(add_c, 320, filter_size=(1,1), nonlinearity=leaky_rectify);
    add_0 = layers.Pool2DLayer(add_d, pool_size=(15,15), stride=15, mode='average_inc_pad');
    add_1 = layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify);

    add_2 = layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify);
    add_3 = layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify);
    add_4 = layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify);

    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var);

    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1);

    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify);
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid);

    new_params = [add_a.W, add_a.b, add_b.W, add_b.b, add_c.W, add_c.b, add_d.W, add_d.b, add_1.W, add_1.b, add_2.W, add_2.b, add_3.W, add_3.b, add_4.W, add_4.b, hidden_layer.W, hidden_layer.b, network.W, network.b];

    return network, new_params, input_var, aug_var, target_var;
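A hedged training sketch for the classifier above: only the newly added parameters are updated, which appears to be the point of returning new_params. The loss choice, optimizer and learning rate are assumptions, and classn must be defined.

network, new_params, input_var, aug_var, target_var = build_network_from_ae(classn)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
updates = lasagne.updates.nesterov_momentum(loss, new_params, learning_rate=1e-3)
train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)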
Example #4
def build_autoencoder_network():
    input_var = T.tensor4('input_var');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));

    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    layer    = feat_map;

    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.Deconv2DLayer(layer, 3,    filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature");

    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf =            layers.Deconv2DLayer(glblf, 3,   filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    layer = layers.ElemwiseSumLayer([layer, glblf]);

    network = ReshapeLayer(layer, ([0], -1));
    output_var = lasagne.layers.get_output(network);

    return network, input_var, output_var;
Example #5
def build_autoencoder_network():
    input_var = T.tensor4('input_var');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 32,   filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 32,   filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='max');
    layer = batch_norm(layers.Conv2DLayer(layer, 48,   filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 48,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='max');
    layer = batch_norm(layers.Conv2DLayer(layer, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='max');
    layer = batch_norm(layers.Conv2DLayer(layer, 96,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 96,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));

    featm    = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 64,  filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm    = batch_norm(layers.Conv2DLayer(prely, 64,  filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1,   filter_size=(1,1), nonlinearity=None),   beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    layer    = ChInnerProdMerge(feat_map, mask_map, name="encoder");

    layer = batch_norm(layers.Deconv2DLayer(layer, 96, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 96, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 96, filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 64, filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 48, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 48, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 48, filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 32, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 32, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.Deconv2DLayer(layer, 3,  filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature");

    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(9,9), stride=5, crop=(2,2),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 16, filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 16, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 16, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 8,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf =            layers.Deconv2DLayer(glblf, 3,  filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    layer = layers.ElemwiseSumLayer([layer, glblf]);

    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);

    return network, input_var, mask_var, output_var;
Example #6
def modelinv(y, fnet):
    net = {}
    net['input'] = layers.InputLayer(shape=(None, 10), input_var=y)
    net['input'] = layers.ReshapeLayer(net['input'], (-1, 10, 1, 1))
    net['ipool3'] = layers.Upscale2DLayer(net['input'], 8)
    biasremove = myUtils.layers.RemoveBiasLayer(net['ipool3'], fnet['cccp6'].b)
    net['icccp6'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['cccp6'].input_shape[1],
        filter_size=fnet['cccp6'].filter_size,
        stride=fnet['cccp6'].stride,
        crop=fnet['cccp6'].pad,
        W=fnet['cccp6'].W,
        b=None,
        flip_filters=not fnet['cccp6'].flip_filters)
    biasremove = myUtils.layers.RemoveBiasLayer(net['icccp6'], fnet['cccp5'].b)
    net['icccp5'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['cccp5'].input_shape[1],
        filter_size=fnet['cccp5'].filter_size,
        stride=fnet['cccp5'].stride,
        crop=fnet['cccp5'].pad,
        W=fnet['cccp5'].W,
        b=None,
        flip_filters=not fnet['cccp5'].flip_filters)
    biasremove = myUtils.layers.RemoveBiasLayer(net['icccp5'], fnet['conv3'].b)
    net['iconv3'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['conv3'].input_shape[1],
        filter_size=fnet['conv3'].filter_size,
        stride=fnet['conv3'].stride,
        crop=fnet['conv3'].pad,
        W=fnet['conv3'].W,
        b=None,
        flip_filters=not fnet['conv3'].flip_filters)
    net['ipool2'] = layers.Upscale2DLayer(net['iconv3'], 2)
    biasremove = myUtils.layers.RemoveBiasLayer(net['ipool2'], fnet['cccp4'].b)
    net['icccp4'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['cccp4'].input_shape[1],
        filter_size=fnet['cccp4'].filter_size,
        stride=fnet['cccp4'].stride,
        crop=fnet['cccp4'].pad,
        W=fnet['cccp4'].W,
        b=None,
        flip_filters=not fnet['cccp4'].flip_filters)
    biasremove = myUtils.layers.RemoveBiasLayer(net['icccp4'], fnet['cccp3'].b)
    net['icccp3'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['cccp3'].input_shape[1],
        filter_size=fnet['cccp3'].filter_size,
        stride=fnet['cccp3'].stride,
        crop=fnet['cccp3'].pad,
        W=fnet['cccp3'].W,
        b=None,
        flip_filters=not fnet['cccp3'].flip_filters)
    biasremove = myUtils.layers.RemoveBiasLayer(net['icccp3'], fnet['conv2'].b)
    net['iconv2'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['conv2'].input_shape[1],
        filter_size=fnet['conv2'].filter_size,
        stride=fnet['conv2'].stride,
        crop=fnet['conv2'].pad,
        W=fnet['conv2'].W,
        b=None,
        flip_filters=not fnet['conv2'].flip_filters)
    net['ipool1'] = layers.Upscale2DLayer(net['iconv2'], 2)
    biasremove = myUtils.layers.RemoveBiasLayer(net['ipool1'], fnet['cccp2'].b)
    net['icccp2'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['cccp2'].input_shape[1],
        filter_size=fnet['cccp2'].filter_size,
        stride=fnet['cccp2'].stride,
        crop=fnet['cccp2'].pad,
        W=fnet['cccp2'].W,
        b=None,
        flip_filters=not fnet['cccp2'].flip_filters)
    biasremove = myUtils.layers.RemoveBiasLayer(net['icccp2'], fnet['cccp1'].b)
    net['icccp1'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['cccp1'].input_shape[1],
        filter_size=fnet['cccp1'].filter_size,
        stride=fnet['cccp1'].stride,
        crop=fnet['cccp1'].pad,
        W=fnet['cccp1'].W,
        b=None,
        flip_filters=not fnet['cccp1'].flip_filters)
    biasremove = myUtils.layers.RemoveBiasLayer(net['icccp1'], fnet['conv1'].b)
    net['iconv1'] = layers.Deconv2DLayer(
        biasremove,
        num_filters=fnet['conv1'].input_shape[1],
        filter_size=fnet['conv1'].filter_size,
        stride=fnet['conv1'].stride,
        crop=fnet['conv1'].pad,
        W=fnet['conv1'].W,
        b=None,
        flip_filters=not fnet['conv1'].flip_filters)
    net['out'] = net['iconv1']
    return net
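Sketch of how the inverse network above might be compiled, assuming fnet is the dict of forward NIN-style layers ('conv1' ... 'cccp6') that it indexes and that myUtils.layers.RemoveBiasLayer is available.

y = T.matrix('y')  # class scores to invert, shape (batch, 10)
inv_net = modelinv(y, fnet)
reconstruction = layers.get_output(inv_net['out'])
invert_fn = theano.function([y], reconstruction)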
Example #7
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           120,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           240,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           320,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           640,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    prely = batch_norm(
        layers.Conv2DLayer(layer,
                           1024,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))

    featm = batch_norm(
        layers.Conv2DLayer(prely,
                           640,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    feat_map = batch_norm(
        layers.Conv2DLayer(featm,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=rectify,
                           name="feat_map"))
    mask_map = feat_map
    enlyr = feat_map

    layer = batch_norm(
        layers.Deconv2DLayer(enlyr,
                             1024,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             240,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             120,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    glblf = batch_norm(
        layers.Conv2DLayer(prely,
                           128,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf,
                               pool_size=(5, 5),
                               stride=5,
                               mode='average_inc_pad')
    glblf = batch_norm(
        layers.Conv2DLayer(glblf,
                           64,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    gllyr = batch_norm(layers.Conv2DLayer(glblf,
                                          5,
                                          filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")

    glblf = batch_norm(
        layers.Deconv2DLayer(gllyr,
                             256,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(9, 9),
                             stride=5,
                             crop=(2, 2),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    layers.set_all_param_values(network,
                                pickle.load(open(filename_model_ae, 'rb')))
    old_params = layers.get_all_params(network, trainable=True)

    # Adding more layers
    aug_var = T.matrix('aug_var')
    target_var = T.imatrix('targets')
    add_a = batch_norm(
        layers.Conv2DLayer(enlyr,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_b = batch_norm(
        layers.Conv2DLayer(add_a,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_c = batch_norm(
        layers.Conv2DLayer(add_b,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_d = batch_norm(
        layers.Conv2DLayer(add_c,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_0 = layers.Pool2DLayer(add_d,
                               pool_size=(15, 15),
                               stride=15,
                               mode='average_inc_pad')
    add_1 = batch_norm(
        layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify))

    add_2 = batch_norm(
        layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify))
    add_3 = batch_norm(
        layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify))
    add_4 = batch_norm(
        layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify))

    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var)

    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1)

    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify)
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid)

    all_params = layers.get_all_params(network, trainable=True)
    new_params = [x for x in all_params if x not in old_params]

    return network, new_params, input_var, aug_var, target_var
Example #8
def build_deconv_network():
    input_var = theano.tensor.tensor4('input_var')

    net = {}
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS),
                                     input_var=input_var)

    # Encoding part
    net['conv1_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['input'],
                               64,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv1_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv1_1'],
                               64,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv2_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool1'],
                               128,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv2_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv2_1'],
                               128,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv3_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool2'],
                               256,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv3_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv3_1'],
                               256,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv3_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv3_2'],
                               256,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv4_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool3'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv4_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv4_1'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv4_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv4_2'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['conv5_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool4'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv5_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv5_1'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['conv5_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['conv5_2'],
                               512,
                               filter_size=(3, 3),
                               stride=1,
                               pad=1)))
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'],
                                      pool_size=(2, 2),
                                      stride=2,
                                      mode='max')

    net['fc6'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['pool5'],
                               4096,
                               filter_size=(7, 7),
                               stride=1,
                               pad='same')))

    # fc7 is the encoding layer
    net['fc7'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Conv2DLayer(net['fc6'],
                               4096,
                               filter_size=(1, 1),
                               stride=1,
                               pad='same')))

    # Decoding part
    net['fc6_deconv'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['fc7'],
                                 512,
                                 filter_size=(7, 7),
                                 stride=1,
                                 crop='same')))
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5'])

    net['deconv5_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool5'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv5_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv5_1'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv5_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv5_2'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4'])

    net['deconv4_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool4'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv4_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv4_1'],
                                 512,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv4_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv4_2'],
                                 256,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3'])

    net['deconv3_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool3'],
                                 256,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv3_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv3_1'],
                                 256,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv3_3'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv3_2'],
                                 128,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2'])

    net['deconv2_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool2'],
                                 128,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv2_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv2_1'],
                                 64,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1'])

    net['deconv1_1'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['unpool1'],
                                 64,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))
    net['deconv1_2'] = layers.NonlinearityLayer(
        batch_norm(
            layers.Deconv2DLayer(net['deconv1_1'],
                                 64,
                                 filter_size=(3, 3),
                                 stride=1,
                                 crop='same')))

    # Segmentation layer
    net['seg_score'] = layers.Deconv2DLayer(
        net['deconv1_2'],
        1,
        filter_size=(1, 1),
        stride=1,
        crop='same',
        nonlinearity=lasagne.nonlinearities.sigmoid)

    network = ReshapeLayer(net['seg_score'], ([0], -1))
    output_var = lasagne.layers.get_output(network)
    all_param = lasagne.layers.get_all_params(network, trainable=True)

    return network, input_var, output_var, all_param
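A hedged training sketch for the segmentation network above, wiring the returned output and parameter list into a standard Lasagne loss/update setup; the target variable, loss and optimizer settings are assumptions.

network, input_var, output_var, all_param = build_deconv_network()
target_var = T.matrix('targets')  # flattened binary masks, matching the reshape
loss = lasagne.objectives.binary_crossentropy(output_var, target_var).mean()
updates = lasagne.updates.adam(loss, all_param, learning_rate=1e-4)
train_fn = theano.function([input_var, target_var], loss, updates=updates)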
Example #9
    def makeGenerator_decoder(self, gen, yb, aYSize):
        #(G7)
        gen_dec = self.conv_cond_concat(gen, yb, aYSize)
        print 'G7:', gen_dec.output_shape  #(128, 74, 7, 7)

        #(G8)
        # deconvolution
        if self.IS_USE_B_DECONV_1:
            gen_dec = binary_net_ex.Deconv2DLayer(
                gen_dec,
                num_filters=self.NUM_GEN_FILTERS,
                filter_size=(5, 5),
                stride=(2, 2),
                output_size=(14, 14),
                crop=2,
                nonlinearity=None,
                binary=True,
                stochastic=IS_STOCHASTIC,
                H=H,
                W_LR_scale=W_LR_scale,
                b=None)
        else:
            gen_dec = ll.Deconv2DLayer(gen_dec,
                                       num_filters=self.NUM_GEN_FILTERS,
                                       filter_size=(5, 5),
                                       stride=(2, 2),
                                       output_size=(14, 14),
                                       b=None,
                                       crop=2,
                                       nonlinearity=None)
        print 'G8:gen_dec.shape', gen_dec.input_shape, gen_dec.output_shape  #(128, 64, 14, 14)

        #(G9)
        if self.IS_USE_B_BNA_2:
            gen_dec = binary_net_ex.BatchNormLayer(gen_dec,
                                                   verbose=True,
                                                   epsilon=EPSILON,
                                                   alpha=ALPHA,
                                                   H=1.0)
        else:
            gen_dec = ll.BatchNormLayer(gen_dec, epsilon=EPSILON, alpha=ALPHA)
            gen_dec = ll.NonlinearityLayer(
                gen_dec, nonlinearity=binary_net.binary_tanh_unit)
        print 'G9:gen_dec.shape', gen_dec.input_shape, gen_dec.output_shape

        #(G10)
        gen_dec = self.conv_cond_concat(gen_dec, yb, aYSize)
        print 'G10:gen_dec.shape', gen_dec.output_shape
        out_G10 = ll.get_output(gen_dec)

        #(G11)
        if self.IS_USE_B_DECONV_2:
            gen_dec = binary_net_ex.Deconv2DLayer(gen_dec,
                                                  num_filters=NUM_IMG_CHANNELS,
                                                  filter_size=(5, 5),
                                                  stride=(2, 2),
                                                  output_size=(28, 28),
                                                  crop=2,
                                                  nonlinearity=None,
                                                  binary=True,
                                                  stochastic=IS_STOCHASTIC,
                                                  H=H,
                                                  W_LR_scale=W_LR_scale,
                                                  b=None)
        else:
            gen_dec = ll.Deconv2DLayer(gen_dec,
                                       num_filters=NUM_IMG_CHANNELS,
                                       filter_size=(5, 5),
                                       stride=(2, 2),
                                       output_size=(28, 28),
                                       b=None,
                                       crop=2,
                                       nonlinearity=None)

        print 'G11:gen_dec.shape', gen_dec.input_shape, gen_dec.output_shape
        out_G11 = ll.get_output(gen_dec)

        #(G12)
        if self.IS_USE_B_BNA_2:
            #gen_dec = ll.ExpressionLayer(gen_dec, function = lambda X : X/self.A)
            pass
        #
        gen_dec = ll.NonlinearityLayer(
            gen_dec,
            nonlinearity=lasagne.nonlinearities.sigmoid)  #TODO binary ?

        print 'G12:gen_dec.shape', gen_dec.input_shape, gen_dec.output_shape
        out_G12 = ll.get_output(gen_dec)

        return gen_dec, out_G10, out_G11, out_G12
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var');

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120,  filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));

    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    maskm    = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1,   filter_size=(1,1), nonlinearity=None),   beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    enlyr    = ChInnerProdMerge(feat_map, mask_map, name="encoder");

    layer = batch_norm(layers.Deconv2DLayer(enlyr, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100,  filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer =            layers.Deconv2DLayer(layer, 3,    filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    glblf = batch_norm(layers.Conv2DLayer(prely, 128,  filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64,   filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    gllyr = batch_norm(layers.Conv2DLayer(glblf, 5,    filter_size=(1,1), nonlinearity=rectify), name="global_feature");

    glblf = batch_norm(layers.Deconv2DLayer(gllyr, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(4,4), stride=2, crop=(1,1),  nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32,  filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf =            layers.Deconv2DLayer(glblf, 3,   filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    layer = layers.ElemwiseSumLayer([layer, glblf]);

    network = ReshapeLayer(layer, ([0], -1));
    layers.set_all_param_values(network, pickle.load(open(filename_model_ae, 'rb')));
    old_params = layers.get_all_params(network, trainable=True);

    # Adding more layers
    aug_var = T.matrix('aug_var');
    target_var = T.imatrix('targets');
    add_a = batch_norm(layers.Conv2DLayer(enlyr, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_b = batch_norm(layers.Conv2DLayer(add_a, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_c = batch_norm(layers.Conv2DLayer(add_b, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_d = batch_norm(layers.Conv2DLayer(add_c, 320, filter_size=(1,1), nonlinearity=leaky_rectify));
    add_0 = layers.Pool2DLayer(add_d, pool_size=(25,25), stride=25, mode='average_inc_pad');
    add_1 = batch_norm(layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify));

    add_2 = batch_norm(layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify));
    add_3 = batch_norm(layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify));
    add_4 = batch_norm(layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify));

    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var);

    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1);

    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify);
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid);

    layers.set_all_param_values(network, pickle.load(open('model_vals/deep_conv_classification_alt48_luad10_skcm10_lr0.py_e32_cv0.pkl', 'rb')));

    all_params = layers.get_all_params(network, trainable=True);
    new_params = [x for x in all_params if x not in old_params];

    return network, new_params, input_var, aug_var, target_var;
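
# Added sketch (illustrative; names are assumptions): the builder above returns
# `new_params` separately from the pretrained autoencoder parameters, which
# suggests fine-tuning only the newly added classification head.  A multi-label
# binary cross-entropy is one plausible objective for the sigmoid output.
import theano
import lasagne
from lasagne import layers


def compile_finetune_fn(network, new_params, input_var, aug_var, target_var, lr=5e-5):
    pred = layers.get_output(network)
    loss = lasagne.objectives.binary_crossentropy(pred, target_var).mean()
    updates = lasagne.updates.adam(loss, new_params, learning_rate=lr)
    return theano.function([input_var, aug_var, target_var], loss, updates=updates)
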
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           120,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           240,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           240,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           320,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           320,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(3, 3),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           480,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           480,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           640,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           640,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')

    # Note: unlike the other examples, no SoftThresPerc thresholding is applied
    # here; "mask_map" is simply an alias for the last pooled encoder activation.
    mask_map = layer
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           300,
                           filter_size=(1, 1),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           1000,
                           filter_size=(3, 3),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             300,
                             filter_size=(3, 3),
                             stride=1,
                             nonlinearity=leaky_rectify))

    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             480,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             480,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             480,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=2,
                             crop=(0, 0),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             240,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             240,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             240,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             120,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             120,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
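
# Added sketch (illustrative): a plain reconstruction objective for the
# autoencoder above, comparing the flattened output against the flattened
# input.  This assumes the decoder restores the input resolution (which
# depends on PS); `compile_ae_trainer` and the learning rate are assumptions.
import theano
import lasagne
from lasagne import layers


def compile_ae_trainer(network, input_var, output_var, lr=1e-4):
    target = input_var.flatten(2)  # flatten (batch, 3, PS, PS) to (batch, 3*PS*PS)
    loss = lasagne.objectives.squared_error(output_var, target).mean()
    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss, params, learning_rate=lr)
    return theano.function([input_var], loss, updates=updates)
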
Example No. 12
def build_network_from_ae_old(classn):
    input_var = T.tensor4('input_var');

    net = {}

    net['input'] =  layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);

    net['conv1_1'] = batch_norm(layers.Conv2DLayer(net['input'], 64,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv1_2'] = batch_norm(layers.Conv2DLayer(net['conv1_1'], 64,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], pool_size=(2,2), stride=2, mode='max');

    net['conv2_1'] = batch_norm(layers.Conv2DLayer(net['pool1'], 128,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv2_2'] = batch_norm(layers.Conv2DLayer(net['conv2_1'], 128,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], pool_size=(2,2), stride=2, mode='max');

    net['conv3_1'] = batch_norm(layers.Conv2DLayer(net['pool2'], 256,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv3_2'] = batch_norm(layers.Conv2DLayer(net['conv3_1'], 256,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv3_3'] = batch_norm(layers.Conv2DLayer(net['conv3_2'], 256,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'], pool_size=(2,2), stride=2, mode='max');

    net['conv4_1'] = batch_norm(layers.Conv2DLayer(net['pool3'], 512,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv4_2'] = batch_norm(layers.Conv2DLayer(net['conv4_1'], 512,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv4_3'] = batch_norm(layers.Conv2DLayer(net['conv4_2'], 512,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'], pool_size=(2,2), stride=2, mode='max');

    net['conv5_1'] = batch_norm(layers.Conv2DLayer(net['pool4'], 512,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv5_2'] = batch_norm(layers.Conv2DLayer(net['conv5_1'], 512,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['conv5_3'] = batch_norm(layers.Conv2DLayer(net['conv5_2'], 512,  filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'], pool_size=(2,2), stride=2, mode='max');

    net['fc6'] = batch_norm(layers.Conv2DLayer(net['pool5'], 4096,  filter_size=(7,7), stride=1, pad='valid', nonlinearity=leaky_rectify));
    net['fc7'] = batch_norm(layers.Conv2DLayer(net['fc6'], 4096,  filter_size=(1,1), stride=1, pad='valid', nonlinearity=leaky_rectify));
    #net['fc6'] = batch_norm(layers.DenseLayer(net['pool5'], 4096, nonlinearity=leaky_rectify));
    #net['fc7'] = batch_norm(layers.DenseLayer(net['fc6'], 4096, nonlinearity=leaky_rectify));

    net['fc6_deconv'] = batch_norm(layers.Deconv2DLayer(net['fc7'], 512, filter_size=(7,7), stride=1, crop='valid', nonlinearity=leaky_rectify));
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5']);

    net['deconv5_1'] = batch_norm(layers.Deconv2DLayer(net['unpool5'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv5_2'] = batch_norm(layers.Deconv2DLayer(net['deconv5_1'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv5_3'] = batch_norm(layers.Deconv2DLayer(net['deconv5_2'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4']);

    net['deconv4_1'] = batch_norm(layers.Deconv2DLayer(net['unpool4'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv4_2'] = batch_norm(layers.Deconv2DLayer(net['deconv4_1'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv4_3'] = batch_norm(layers.Deconv2DLayer(net['deconv4_2'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3']);

    net['deconv3_1'] = batch_norm(layers.Deconv2DLayer(net['unpool3'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv3_2'] = batch_norm(layers.Deconv2DLayer(net['deconv3_1'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv3_3'] = batch_norm(layers.Deconv2DLayer(net['deconv3_2'], 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2']);

    net['deconv2_1'] = batch_norm(layers.Deconv2DLayer(net['unpool2'], 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv2_2'] = batch_norm(layers.Deconv2DLayer(net['deconv2_1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1']);

    net['deconv1_1'] = batch_norm(layers.Deconv2DLayer(net['unpool1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv1_2'] = batch_norm(layers.Deconv2DLayer(net['deconv1_1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));

    net['score'] = layers.Deconv2DLayer(net['deconv1_2'], 1,  filter_size=(1,1), stride=1, crop='same', nonlinearity=sigmoid);
    net['score_flat'] = ReshapeLayer(net['score'], ([0], -1));

    all_params = layers.get_all_params(net['score_flat'], trainable=True);
    target_var = T.fmatrix('targets');

    return net, all_params, input_var, target_var;
Example No. 13
def build_network_from_ae(classn):
    input_var = theano.tensor.tensor4('input_var');

    net = {};
    net['input'] = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);


    # Encoding part
    net['conv1_1'] = batch_norm(layers.Conv2DLayer(net['input'], 64,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv1_2'] = batch_norm(layers.Conv2DLayer(net['conv1_1'], 64,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool1'] = layers.Pool2DLayer(net['conv1_2'], pool_size=(2,2), stride=2, mode='max');

    net['conv2_1'] = batch_norm(layers.Conv2DLayer(net['pool1'], 128,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv2_2'] = batch_norm(layers.Conv2DLayer(net['conv2_1'], 128,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool2'] = layers.Pool2DLayer(net['conv2_2'], pool_size=(2,2), stride=2, mode='max');

    net['conv3_1'] = batch_norm(layers.Conv2DLayer(net['pool2'], 256,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv3_2'] = batch_norm(layers.Conv2DLayer(net['conv3_1'], 256,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv3_3'] = batch_norm(layers.Conv2DLayer(net['conv3_2'], 256,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool3'] = layers.Pool2DLayer(net['conv3_3'], pool_size=(2,2), stride=2, mode='max');

    net['conv4_1'] = batch_norm(layers.Conv2DLayer(net['pool3'], 512,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv4_2'] = batch_norm(layers.Conv2DLayer(net['conv4_1'], 512,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv4_3'] = batch_norm(layers.Conv2DLayer(net['conv4_2'], 512,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool4'] = layers.Pool2DLayer(net['conv4_3'], pool_size=(2,2), stride=2, mode='max');

    net['conv5_1'] = batch_norm(layers.Conv2DLayer(net['pool4'], 512,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv5_2'] = batch_norm(layers.Conv2DLayer(net['conv5_1'], 512,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['conv5_3'] = batch_norm(layers.Conv2DLayer(net['conv5_2'], 512,   filter_size=(3,3), stride=1, pad=1, nonlinearity=leaky_rectify));
    net['pool5'] = layers.Pool2DLayer(net['conv5_3'], pool_size=(2,2), stride=2, mode='max');

    net['fc6'] = batch_norm(layers.Conv2DLayer(net['pool5'], 4096,   filter_size=(7,7), stride=1, pad='same', nonlinearity=leaky_rectify));

    # fc7 is the encoding layer
    net['fc7'] = batch_norm(layers.Conv2DLayer(net['fc6'], 4096,   filter_size=(1,1), stride=1, pad='same', nonlinearity=leaky_rectify));

    # Decoding part
    net['fc6_deconv'] = batch_norm(layers.Deconv2DLayer(net['fc7'], 512, filter_size=(7,7), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool5'] = layers.InverseLayer(net['fc6_deconv'], net['pool5']);

    net['deconv5_1'] = batch_norm(layers.Deconv2DLayer(net['unpool5'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv5_2'] = batch_norm(layers.Deconv2DLayer(net['deconv5_1'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv5_3'] = batch_norm(layers.Deconv2DLayer(net['deconv5_2'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool4'] = layers.InverseLayer(net['deconv5_3'], net['pool4']);

    net['deconv4_1'] = batch_norm(layers.Deconv2DLayer(net['unpool4'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv4_2'] = batch_norm(layers.Deconv2DLayer(net['deconv4_1'], 512, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv4_3'] = batch_norm(layers.Deconv2DLayer(net['deconv4_2'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool3'] = layers.InverseLayer(net['deconv4_3'], net['pool3']);

    net['deconv3_1'] = batch_norm(layers.Deconv2DLayer(net['unpool3'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv3_2'] = batch_norm(layers.Deconv2DLayer(net['deconv3_1'], 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv3_3'] = batch_norm(layers.Deconv2DLayer(net['deconv3_2'], 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool2'] = layers.InverseLayer(net['deconv3_3'], net['pool2']);

    net['deconv2_1'] = batch_norm(layers.Deconv2DLayer(net['unpool2'], 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv2_2'] = batch_norm(layers.Deconv2DLayer(net['deconv2_1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['unpool1'] = layers.InverseLayer(net['deconv2_2'], net['pool1']);

    net['deconv1_1'] = batch_norm(layers.Deconv2DLayer(net['unpool1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    net['deconv1_2'] = batch_norm(layers.Deconv2DLayer(net['deconv1_1'], 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));


    # Segmentation layer
    net['seg_score'] = layers.Deconv2DLayer(net['deconv1_2'], 1, filter_size=(1,1), stride=1, crop='same', nonlinearity=lasagne.nonlinearities.sigmoid);

    net['score_flat'] = ReshapeLayer(net['seg_score'], ([0], -1));
    output_var = lasagne.layers.get_output(net['score_flat']);
    all_param = lasagne.layers.get_all_params(net['score_flat'], trainable=True);

    target_var = T.fmatrix('targets');
    #return network, input_var, output_var, all_param;
    return net, all_param, input_var, target_var;
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           120,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           240,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           320,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           640,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    prely = batch_norm(
        layers.Conv2DLayer(layer,
                           1024,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))

    featm = batch_norm(
        layers.Conv2DLayer(prely,
                           640,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    feat_map = batch_norm(
        layers.Conv2DLayer(featm,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=rectify,
                           name="feat_map"))
    mask_map = SoftThresPerc(feat_map,
                             perc=98.4,
                             alpha=0.1,
                             beta=init.Constant(0.5),
                             tight=20.0,
                             name="mask_map")
    layer = mask_map

    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             1024,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             240,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             120,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    glblf = batch_norm(
        layers.Conv2DLayer(prely,
                           128,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf,
                               pool_size=(5, 5),
                               stride=5,
                               mode='average_inc_pad')
    glblf = batch_norm(
        layers.Conv2DLayer(glblf,
                           64,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf,
                                          5,
                                          filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")

    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             256,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(9, 9),
                             stride=5,
                             crop=(2, 2),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
Example No. 15
def deconv(_in, *args, **kwargs):
    return L.Deconv2DLayer(_in, *args, untie_biases=False, flip_filters=False, **kwargs)  
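
# Added usage sketch: the wrapper above just forwards to lasagne's
# Deconv2DLayer with tied biases and no filter flipping.  It assumes `L` is
# bound to `lasagne.layers`; the shapes below are arbitrary illustration.
import lasagne.layers as L

inp = L.InputLayer((None, 128, 16, 16))
up = deconv(inp, num_filters=64, filter_size=(4, 4), stride=2, crop=1)
# (16 - 1) * 2 + 4 - 2 * 1 = 32, so stride 2 doubles the spatial size here
assert up.output_shape == (None, 64, 32, 32)
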
Example No. 16
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    prely = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))

    featm = batch_norm(
        layers.Conv2DLayer(prely,
                           180,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    feat_map = batch_norm(
        layers.Conv2DLayer(featm,
                           120,
                           filter_size=(1, 1),
                           nonlinearity=rectify,
                           name="feat_map"))
    maskm = batch_norm(
        layers.Conv2DLayer(prely,
                           120,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm,
                                             1,
                                             filter_size=(1, 1),
                                             nonlinearity=None),
                          beta=None,
                          gamma=None)
    mask_map = SoftThresPerc(mask_rep,
                             perc=99.9,
                             alpha=0.5,
                             beta=init.Constant(0.5),
                             tight=100.0,
                             name="mask_map")
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder")

    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    glblf = batch_norm(
        layers.Conv2DLayer(prely,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf,
                               pool_size=(5, 5),
                               stride=5,
                               mode='average_inc_pad')
    glblf = batch_norm(
        layers.Conv2DLayer(glblf,
                           64,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    glblf = batch_norm(layers.Conv2DLayer(glblf,
                                          3,
                                          filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")

    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(9, 9),
                             stride=5,
                             crop=(2, 2),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             48,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             48,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             48,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = ReshapeLayer(layer, ([0], -1))
    layers.set_all_param_values(network,
                                pickle.load(open(filename_model_ae, 'rb')))
    feat_var = lasagne.layers.get_output(feat_map, deterministic=True)
    mask_var = lasagne.layers.get_output(mask_map, deterministic=True)
    outp_var = lasagne.layers.get_output(network, deterministic=True)

    return network, input_var, feat_var, mask_var, outp_var
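
# Added sketch: feat_var, mask_var and outp_var above are all built with
# deterministic=True, so they can be compiled into a single feed-forward
# extraction function.  `compile_encoder_fn` is a hypothetical helper name.
import theano


def compile_encoder_fn(input_var, feat_var, mask_var, outp_var):
    return theano.function([input_var], [feat_var, mask_var, outp_var])
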
    def init_generator(self, first_layer, input_var=None, embedding_var=None):
        """
        Initialize the DCGAN generator network using lasagne
        Additional units: Number of units to be added at the dense layer to compensate for embedding
        Returns the network
        """

        layers = []

        l_noise = lyr.InputLayer((None, 100), input_var)
        layers.append(l_noise)

        l_embedding = lyr.InputLayer((None, 300), embedding_var)
        layers.append(l_embedding)

        l_in = lyr.ConcatLayer([l_noise, l_embedding], axis=1)
        layers.append(l_in)

        l_1 = lyr.batch_norm(
            lyr.DenseLayer(incoming=l_in,
                           num_units=4 * 4 * first_layer * 8,
                           nonlinearity=nonlinearities.rectify))

        l_1 = lyr.ReshapeLayer(incoming=l_1, shape=(-1, first_layer * 8, 4, 4))
        layers.append(l_1)

        l_2 = lyr.batch_norm(
            lyr.Deconv2DLayer(incoming=l_1,
                              num_filters=first_layer * 4,
                              filter_size=5,
                              stride=2,
                              crop=2,
                              output_size=8,
                              nonlinearity=nonlinearities.rectify))
        layers.append(l_2)

        l_3 = lyr.batch_norm(
            lyr.Deconv2DLayer(incoming=l_2,
                              num_filters=first_layer * 2,
                              filter_size=5,
                              stride=2,
                              crop=2,
                              output_size=16,
                              nonlinearity=nonlinearities.rectify))
        layers.append(l_3)

        l_4 = lyr.batch_norm(
            lyr.Deconv2DLayer(incoming=l_3,
                              num_filters=first_layer,
                              filter_size=5,
                              stride=2,
                              crop=2,
                              output_size=32,
                              nonlinearity=nonlinearities.rectify))
        layers.append(l_4)

        l_out = lyr.Deconv2DLayer(incoming=l_4,
                                  num_filters=3,
                                  filter_size=5,
                                  stride=2,
                                  crop=2,
                                  output_size=64,
                                  nonlinearity=nonlinearities.sigmoid)
        layers.append(l_out)

        if self.verbose:
            for i, layer in enumerate(layers):
                print 'generator layer %s output shape:' % i, layer.output_shape

        return l_out, l_noise, l_embedding
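
# Added sketch (not part of the original class): the three layers returned by
# init_generator can be compiled into a sampling function by feeding noise and
# caption embeddings through the generator graph.  Names here are assumptions.
import theano
import lasagne.layers as lyr


def compile_sampler(l_out, l_noise, l_embedding):
    images = lyr.get_output(l_out, deterministic=True)  # batch norm in inference mode
    return theano.function([l_noise.input_var, l_embedding.input_var], images)
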
def build_network_from_ae(classn):
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           120,
                           filter_size=(5, 5),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           240,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           320,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    layer = layers.Pool2DLayer(layer,
                               pool_size=(2, 2),
                               stride=2,
                               mode='average_inc_pad')
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           640,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    prely = batch_norm(
        layers.Conv2DLayer(layer,
                           1024,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))

    featm = batch_norm(
        layers.Conv2DLayer(prely,
                           640,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    feat_map = batch_norm(
        layers.Conv2DLayer(featm,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=rectify,
                           name="feat_map"))
    maskm = batch_norm(
        layers.Conv2DLayer(prely,
                           100,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    mask_rep = batch_norm(layers.Conv2DLayer(maskm,
                                             1,
                                             filter_size=(1, 1),
                                             nonlinearity=None),
                          beta=None,
                          gamma=None)
    mask_map = SoftThresPerc(mask_rep,
                             perc=97.0,
                             alpha=0.1,
                             beta=init.Constant(0.5),
                             tight=100.0,
                             name="mask_map")
    enlyr = ChInnerProdMerge(feat_map, mask_map, name="encoder")

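    # Local decoder: transposed convolutions mirror the encoder, with two
    # stride-2 upsampling steps undoing the two average-pooling steps and a
    # final 1x1 layer producing a 3-channel reconstruction at input resolution.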
    layer = batch_norm(
        layers.Deconv2DLayer(enlyr,
                             1024,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             640,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             320,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             240,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             120,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(5, 5),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

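    # Global branch: compress `prely` into a small 5-channel "global_feature"
    # map, then decode it back to the input resolution so it can be summed
    # element-wise with the local reconstruction below.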
    glblf = batch_norm(
        layers.Conv2DLayer(prely,
                           128,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    glblf = layers.Pool2DLayer(glblf,
                               pool_size=(5, 5),
                               stride=5,
                               mode='average_inc_pad')
    glblf = batch_norm(
        layers.Conv2DLayer(glblf,
                           64,
                           filter_size=(3, 3),
                           stride=1,
                           pad='same',
                           nonlinearity=leaky_rectify))
    gllyr = batch_norm(layers.Conv2DLayer(glblf,
                                          5,
                                          filter_size=(1, 1),
                                          nonlinearity=rectify),
                       name="global_feature")

    glblf = batch_norm(
        layers.Deconv2DLayer(gllyr,
                             256,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(9, 9),
                             stride=5,
                             crop=(2, 2),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             128,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             64,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(4, 4),
                             stride=2,
                             crop=(1, 1),
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = batch_norm(
        layers.Deconv2DLayer(glblf,
                             32,
                             filter_size=(3, 3),
                             stride=1,
                             crop='same',
                             nonlinearity=leaky_rectify))
    glblf = layers.Deconv2DLayer(glblf,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 crop='same',
                                 nonlinearity=identity)

    layer = layers.ElemwiseSumLayer([layer, glblf])

    network = layers.ReshapeLayer(layer, ([0], -1))

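    # Scale down the mask threshold parameter and record the autoencoder's
    # parameters; `new_params` below will contain only the layers added next.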
    mask_map.beta.set_value(np.float32(0.9 * mask_map.beta.get_value()))
    old_params = layers.get_all_params(network, trainable=True)

    # Adding more layers
    aug_var = T.matrix('aug_var')
    target_var = T.imatrix('targets')
    add_a = batch_norm(
        layers.Conv2DLayer(enlyr,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_b = batch_norm(
        layers.Conv2DLayer(add_a,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_c = batch_norm(
        layers.Conv2DLayer(add_b,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_d = batch_norm(
        layers.Conv2DLayer(add_c,
                           320,
                           filter_size=(1, 1),
                           nonlinearity=leaky_rectify))
    add_0 = layers.Pool2DLayer(add_d,
                               pool_size=(25, 25),
                               stride=25,
                               mode='average_inc_pad')
    add_1 = batch_norm(
        layers.DenseLayer(add_0, 100, nonlinearity=leaky_rectify))

    add_2 = batch_norm(
        layers.DenseLayer(gllyr, 320, nonlinearity=leaky_rectify))
    add_3 = batch_norm(
        layers.DenseLayer(add_2, 320, nonlinearity=leaky_rectify))
    add_4 = batch_norm(
        layers.DenseLayer(add_3, 100, nonlinearity=leaky_rectify))

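    # Concatenate the encoder summary (add_1), the global-feature summary
    # (add_4) and the external augmentation features, then classify each of
    # the `classn` labels with an independent sigmoid output.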
    aug_layer = layers.InputLayer(shape=(None, aug_fea_n), input_var=aug_var)

    cat_layer = lasagne.layers.ConcatLayer([add_1, add_4, aug_layer], axis=1)

    hidden_layer = layers.DenseLayer(cat_layer, 80, nonlinearity=leaky_rectify)
    network = layers.DenseLayer(hidden_layer, classn, nonlinearity=sigmoid)

    all_params = layers.get_all_params(network, trainable=True)
    new_params = [x for x in all_params if x not in old_params]

    return network, new_params, input_var, aug_var, target_var
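
# Training sketch (an assumption, not taken from the original code): one way
# to fine-tune only the newly added classification head returned by
# build_network_from_ae is a multi-label binary cross-entropy loss over the
# sigmoid outputs, updating `new_params` so the pretrained autoencoder weights
# stay fixed. `classn`, the learning rate and the momentum are placeholders;
# PS and aug_fea_n are module-level constants from the original code.
import theano
import lasagne
from lasagne import layers

classn = 4  # placeholder: number of output labels
network, new_params, input_var, aug_var, target_var = build_network_from_ae(classn)

prediction = layers.get_output(network)  # sigmoid scores, shape (batch, classn)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var).mean()
updates = lasagne.updates.nesterov_momentum(
    loss, new_params, learning_rate=1e-3, momentum=0.9)

train_fn = theano.function([input_var, aug_var, target_var],
                           loss, updates=updates)
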
def build_autoencoder_network():
    input_var = T.tensor4('input_var')

    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var)
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           80,
                           filter_size=(5, 5),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           100,
                           filter_size=(3, 3),
                           stride=1,
                           nonlinearity=leaky_rectify))

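    # Bottleneck: the 100-channel feature map is kept as `mask_map`, reduced
    # to 10 channels, then a 76x76 convolution collapses the remaining spatial
    # extent into a 1000-unit code (a dense bottleneck when the preceding
    # feature map is 76x76, i.e. PS = 100); the matching 76x76 deconvolution
    # expands it again.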
    mask_map = layer
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           10,
                           filter_size=(1, 1),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Conv2DLayer(layer,
                           1000,
                           filter_size=(76, 76),
                           stride=1,
                           nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             10,
                             filter_size=(76, 76),
                             stride=1,
                             nonlinearity=leaky_rectify))

    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             100,
                             filter_size=(3, 3),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = batch_norm(
        layers.Deconv2DLayer(layer,
                             80,
                             filter_size=(5, 5),
                             stride=1,
                             nonlinearity=leaky_rectify))
    layer = layers.Deconv2DLayer(layer,
                                 3,
                                 filter_size=(1, 1),
                                 stride=1,
                                 nonlinearity=identity)

    network = layers.ReshapeLayer(layer, ([0], -1))
    mask_var = lasagne.layers.get_output(mask_map)
    output_var = lasagne.layers.get_output(network)

    return network, input_var, mask_var, output_var
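
# Training sketch (an assumption, not part of the example): a minimal
# reconstruction objective for the autoencoder above, i.e. mean squared error
# between the flattened network output and the flattened input, with Adam
# updates over all trainable parameters. The learning rate is a placeholder;
# PS is a module-level constant from the original code.
import theano
import lasagne
from lasagne import layers

network, input_var, mask_var, output_var = build_autoencoder_network()

target_flat = input_var.flatten(2)  # (batch, 3 * PS * PS)
recon_loss = lasagne.objectives.squared_error(output_var, target_flat).mean()
params = layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(recon_loss, params, learning_rate=1e-4)

train_fn = theano.function([input_var], recon_loss, updates=updates)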