Example n. 1
def build_generator_32(noise=None, ngf=128):
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    #FC Layer
    gnet0 = DenseLayer(InputNoise,
                       ngf * 4 * 4 * 4,
                       W=Normal(0.02),
                       nonlinearity=relu)
    print("Gen fc1:", gnet0.output_shape)
    #Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf * 4, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1,
                          ngf * 2, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=relu)
    print("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2,
                          ngf, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=relu)
    print("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3,
                          3, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=tanh)
    print("Gen output:", gnet4.output_shape)
    return gnet4
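A usage sketch (not part of the original snippet; the variable names are illustrative): the returned layer compiles into a sampling function with standard Lasagne/Theano calls.

import numpy as np
import theano
import theano.tensor as T
import lasagne

noise_var = T.matrix('noise')                    # (batch, 100) latent input
generator = build_generator_32(noise=noise_var)
images = lasagne.layers.get_output(generator)    # symbolic (batch, 3, 32, 32)
sample_fn = theano.function([noise_var], images)

z = np.random.uniform(-1., 1., (16, 100)).astype(theano.config.floatX)
fake = sample_fn(z)                              # (16, 3, 32, 32), tanh range [-1, 1]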
Example n. 2
def build_decoder_28(self, in_layer):
    lrelu = LeakyRectify(0.2)
    # fully-connected layer
    layer = batch_norm(DenseLayer(
        in_layer, 1024, nonlinearity=lrelu))  # original with relu
    # project and reshape
    layer = batch_norm(DenseLayer(
        layer, 256 * 7 * 7, nonlinearity=lrelu))  # original with relu
    layer = ReshapeLayer(layer, ([0], 256, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(
        Deconv2DLayer(layer,
                      128,
                      5,
                      stride=2,
                      crop='same',
                      output_size=14,
                      nonlinearity=lrelu))  # original with relu
    return Deconv2DLayer(layer,
                         self.channels,
                         5,
                         stride=2,
                         crop='same',
                         output_size=28,
                         nonlinearity=None)
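The explicit output_size arguments matter here: a stride-2 'same' convolution maps both 13x13 and 14x14 inputs down to 7x7, so the transposed layer cannot infer which size to restore. A quick sanity check of that ambiguity (a sketch, not original code):

# forward conv with pad='same', stride=2: output = ceil(input / stride)
assert -(-13 // 2) == 7 and -(-14 // 2) == 7  # both 13 and 14 collapse to 7
# hence Deconv2DLayer(..., stride=2, crop='same', output_size=14) pins the answer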
Example n. 3
def build_generator(input_var=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid
    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024))
    # project and reshape
    layer = batch_norm(DenseLayer(layer, 128*7*7))
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, crop='same',
                                     output_size=14))
    layer = Deconv2DLayer(layer, 1, 5, stride=2, crop='same', output_size=28,
                          nonlinearity=sigmoid)
    print("Generator output:", layer.output_shape)
    return layer
Example n. 4
def build_generator(input_var=None, do_batch_norm=False, activation='sigmoid'):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer

    # define non-linearity at last layer
    if activation == 'sigmoid':
        from lasagne.nonlinearities import sigmoid as activation_fn
    elif activation == 'scaled_tanh':
        from lasagne.nonlinearities import ScaledTanH
        activation_fn = ScaledTanH(2 / 3., 1.7519)
    elif activation == 'linear':
        from lasagne.nonlinearities import linear as activation_fn
    else:
        raise Exception("{} non-linearity not supported".format(activation))

    # input: 100dim
    layer = InputLayer(shape=(None, 100), input_var=input_var)
    # fully-connected layer
    layer = BatchNorm(DenseLayer(layer, 1024), do_batch_norm)
    # project and reshape
    layer = BatchNorm(DenseLayer(layer, 128 * 7 * 7), do_batch_norm)
    layer = ReshapeLayer(layer, ([0], 128, 7, 7))
    # two fractional-stride convolutions
    layer = BatchNorm(
        Deconv2DLayer(layer, 64, 5, stride=2, crop='same', output_size=14),
        do_batch_norm)
    layer = Deconv2DLayer(layer,
                          1,
                          5,
                          stride=2,
                          crop='same',
                          output_size=28,
                          nonlinearity=activation_fn)
    print("Generator output:", layer.output_shape)
    return layer
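Note that BatchNorm is not defined or imported in this snippet. A minimal sketch of what such a conditional wrapper presumably does (an assumption, not taken from the original source):

def BatchNorm(layer, do_batch_norm=True):
    # hypothetical helper: apply lasagne's batch_norm only when requested
    from lasagne.layers import batch_norm
    return batch_norm(layer) if do_batch_norm else layer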
Example n. 5
def UNet_decoder_2(LR_conv1, LR_conv2, LR_conv3, LR_conv4, warp_conv1, warp_conv2, warp_conv3, warp_conv4, activation=SELU_activation, W_init=W_init_SELU):
    # 80
    warp_deconv4 = Deconv2DLayer(ConcatLayer([LR_conv4, warp_conv4]), num_filters=64, filter_size=4, stride=2, crop=1, W=W_init, b=Constant(0.), nonlinearity=activation)
    # 160
    warp_deconv3 = Deconv2DLayer(ConcatLayer([warp_deconv4, LR_conv3, warp_conv3]), num_filters=64, filter_size=4, stride=2, crop=1, W=W_init, b=Constant(0.), nonlinearity=activation)
    # 320
    warp_deconv2 = Deconv2DLayer(ConcatLayer([warp_deconv3, LR_conv2, warp_conv2]), num_filters=64, filter_size=4, stride=2, crop=1, W=W_init, b=Constant(0.), nonlinearity=activation)
    # final fusion: two 5x5 convs, then a linear 3-channel output
    post_fusion1 = Conv2DLayer(ConcatLayer([warp_deconv2, LR_conv1, warp_conv1]), 64, 5, pad=2, W=W_init, b=Constant(0.), nonlinearity=activation)
    post_fusion2 = Conv2DLayer(post_fusion1, 64, 5, pad=2, W=W_init, b=Constant(0.), nonlinearity=activation)
    final = Conv2DLayer(post_fusion2, 3, 5, pad=2, W=W_init_linear, b=Constant(0.), nonlinearity=linear)
    return final
Example n. 6
def build_net(nz=10):
    # nz = size of latent code
    #N.B. using batch_norm applies bn before non-linearity!
    F = 32
    enc = InputLayer(shape=(None, 1, 28, 28))
    enc = Conv2DLayer(incoming=enc,
                      num_filters=F * 2,
                      filter_size=5,
                      stride=2,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = Conv2DLayer(incoming=enc,
                      num_filters=F * 4,
                      filter_size=5,
                      stride=2,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = Conv2DLayer(incoming=enc,
                      num_filters=F * 4,
                      filter_size=5,
                      stride=1,
                      nonlinearity=lrelu(0.2),
                      pad=2)
    enc = reshape(incoming=enc, shape=(-1, F * 4 * 7 * 7))
    enc = DenseLayer(incoming=enc, num_units=nz, nonlinearity=sigmoid)
    #Generator networks
    dec = InputLayer(shape=(None, nz))
    dec = DenseLayer(incoming=dec, num_units=F * 4 * 7 * 7)
    dec = reshape(incoming=dec, shape=(-1, F * 4, 7, 7))
    dec = Deconv2DLayer(incoming=dec,
                        num_filters=F * 4,
                        filter_size=4,
                        stride=2,
                        nonlinearity=relu,
                        crop=1)
    dec = Deconv2DLayer(incoming=dec,
                        num_filters=F * 4,
                        filter_size=4,
                        stride=2,
                        nonlinearity=relu,
                        crop=1)
    dec = Deconv2DLayer(incoming=dec,
                        num_filters=1,
                        filter_size=3,
                        stride=1,
                        nonlinearity=sigmoid,
                        crop=1)

    return enc, dec
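A sketch of how the two towers might be trained as an autoencoder (illustrative only; the snippet returns them unconnected, and the variable names are assumptions):

import theano.tensor as T
import lasagne

x_var = T.tensor4('x')                            # (batch, 1, 28, 28)
enc, dec = build_net(nz=10)
z = lasagne.layers.get_output(enc, inputs=x_var)  # latent code, (batch, 10)
x_rec = lasagne.layers.get_output(dec, inputs=z)  # reconstruction, (batch, 1, 28, 28)
loss = lasagne.objectives.squared_error(x_rec, x_var).mean()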
Example n. 7
def leaky_deconv(input_layer, **kwargs):
    return Deconv2DLayer(input_layer,
                         nonlinearity=leaky_rectify,
                         filter_size=4,
                         stride=2,
                         crop=1,
                         b=None,
                         flip_filters=True,
                         **kwargs)
Example n. 8
def upsample(input_layer, **kwargs):
    return Deconv2DLayer(input_layer,
                         num_filters=2,
                         filter_size=4,
                         stride=2,
                         crop=1,
                         b=None,
                         nonlinearity=linear,
                         flip_filters=True,
                         **kwargs)
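Both helpers use filter_size=4, stride=2, crop=1, which exactly doubles the spatial resolution. A quick check against Lasagne's transposed-convolution shape rule (output = (input - 1) * stride - 2 * crop + filter_size when no output_size is given):

def deconv_output_size(input_size, filter_size=4, stride=2, crop=1):
    # TransposedConv2DLayer output size for an integer crop and no output_size
    return (input_size - 1) * stride - 2 * crop + filter_size

assert deconv_output_size(7) == 14   # 7x7 -> 14x14
assert deconv_output_size(14) == 28  # 14x14 -> 28x28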
Example n. 9
def create_model(incoming, options):
    input_p = 0.2
    hidden_p = 0.5
    conv_num_filters1 = int(100 / (1.0 - input_p))
    conv_num_filters2 = int(150 / (1.0 - hidden_p))
    conv_num_filters3 = int(200 / (1.0 - hidden_p))
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = int(options['BOTTLENECK'] / 0.5)
    dense_mid_size = int(options['DENSE'] / 0.5)
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()
    dropout0 = DropoutLayer(incoming, p=0.2, name='dropout0')
    conv2d1 = Conv2DLayer(dropout0, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d2 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d2')
    dropout1 = DropoutLayer(maxpool2d2, name='dropout1')
    conv2d3 = Conv2DLayer(dropout1, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
    maxpool2d4 = MaxPool2DLayer(conv2d3, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
    dropout2 = DropoutLayer(maxpool2d4, name='dropout2')
    conv2d5 = Conv2DLayer(dropout2, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # 3000
    reshape6_output = reshape6.output_shape[1]
    dropout3 = DropoutLayer(reshape6, name='dropout3')
    dense7 = DenseLayer(dropout3, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
    dropout4 = DropoutLayer(dense7, name='dropout4')
    bottleneck = DenseLayer(dropout4, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
    dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # conv_num_filters3 x 3 x 5
    deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
                               W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
                               W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    return reshape16, bottleneck
Example n. 10
def create_model(incoming, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()

    conv2d1 = Conv2DLayer(incoming, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d3 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d3')
    bn2 = BatchNormLayer(maxpool2d3, name='batchnorm2')
    conv2d4 = Conv2DLayer(bn2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d4', nonlinearity=scaled_tanh)
    maxpool2d6 = MaxPool2DLayer(conv2d4, pool_size=pool_size, name='maxpool2d6', pad=(1,0))
    bn3 = BatchNormLayer(maxpool2d6, name='batchnorm3')
    conv2d7 = Conv2DLayer(bn3, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d7', nonlinearity=scaled_tanh)
    reshape9 = ReshapeLayer(conv2d7, shape=([0], -1), name='reshape9')  # 3000
    reshape9_output = reshape9.output_shape[1]
    bn8 = BatchNormLayer(reshape9, name='batchnorm8')
    dense10 = DenseLayer(bn8, num_units=dense_mid_size, name='dense10', nonlinearity=scaled_tanh)
    bn11 = BatchNormLayer(dense10, name='batchnorm11')
    bottleneck = DenseLayer(bn11, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense12 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense12', nonlinearity=linear)
    dense13 = DenseLayer(dense12, num_units=reshape9_output, W=dense10.W.T, nonlinearity=scaled_tanh, name='dense13')
    reshape14 = ReshapeLayer(dense13, shape=([0], conv_num_filters3, 3, 5), name='reshape14')  # conv_num_filters3 x 3 x 5
    deconv2d15 = Deconv2DLayer(reshape14, conv2d7.input_shape[1], conv2d7.filter_size, stride=conv2d7.stride,
                               W=conv2d7.W, flip_filters=not conv2d7.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    upscale2d16 = Upscale2DLayer(deconv2d15, scale_factor=pool_size, name='upscale2d16')
    deconv2d17 = Deconv2DLayer(upscale2d16, conv2d4.input_shape[1], conv2d4.filter_size, stride=conv2d4.stride,
                               W=conv2d4.W, flip_filters=not conv2d4.flip_filters, name='deconv2d17', nonlinearity=scaled_tanh)
    upscale2d18 = Upscale2DLayer(deconv2d17, scale_factor=pool_size, name='upscale2d18')
    deconv2d19 = Deconv2DLayer(upscale2d18, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d19', nonlinearity=scaled_tanh)
    reshape20 = ReshapeLayer(deconv2d19, ([0], -1), name='reshape20')
    return reshape20, bottleneck
Example n. 11
def upsample(input_layer, deconv='default', **kwargs):
    stride = 2
    if deconv == 'default':
        return Deconv2DLayer(
            input_layer, num_filters=2, filter_size=4, stride=2,
            crop=1, W=W_init(gain=1.0), b=Constant(0.), nonlinearity=linear,
            flip_filters=False, **kwargs)  # flip_filters=True (original), False (mine)
    elif deconv == 'subpixel':
        deconv_layer = Conv2DLayer(input_layer,
                                   num_filters=2 * stride * stride, filter_size=3,
                                   pad=1, nonlinearity=linear,
                                   W=W_init(gain=1.0), b=Constant(0.), name='flow_subpixel_conv')
        return SubpixelReshuffleLayer(deconv_layer, 2, stride, name='flow_subpixel_shuffle')
Example n. 12
def leaky_deconv(input_layer, num_filters=64, activation=None, init=W_init_linear, deconv='default', **kwargs):
    stride = 2
    if deconv == 'default':
        return Deconv2DLayer(
            input_layer, num_filters=num_filters, nonlinearity=activation,
            filter_size=4, stride=2, crop=1, W=init, b=Constant(0.),
            flip_filters=False, **kwargs)  # flip_filters=True (original), False (mine)
    elif deconv == 'subpixel':
        deconv_layer = Conv2DLayer(input_layer,
                                   num_filters=num_filters * stride * stride, filter_size=3,
                                   pad=1, nonlinearity=activation,
                                   W=init, b=Constant(0.), name='subpixel_conv')
        return SubpixelReshuffleLayer(deconv_layer, num_filters, stride, name='subpixel_shuffle')
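SubpixelReshuffleLayer is not part of Lasagne; these snippets assume a custom layer that rearranges a (batch, channels*r*r, H, W) tensor into (batch, channels, H*r, W*r). A NumPy sketch of that rearrangement (an assumption about the custom layer's behavior):

import numpy as np

def subpixel_reshuffle(x, channels, r):
    # pixel shuffle: (B, channels*r*r, H, W) -> (B, channels, H*r, W*r)
    b, _, h, w = x.shape
    x = x.reshape(b, channels, r, r, h, w)
    return x.transpose(0, 1, 4, 2, 5, 3).reshape(b, channels, h * r, w * r)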
Example n. 13
def build_generator(inp):
    net = InputLayer((None, 100), input_var=inp)
    net = batch_norm(DenseLayer(net, 1024))
    net = ReshapeLayer(DenseLayer(net, 4096), ([0], 1024, 2, 2))
    print("Generator output:", net.output_shape)
    # 2 * 2
    net = batch_norm(Deconv2DLayer(net, 512, 4, stride=4))
    print("Generator output:", net.output_shape)
    # 8 * 8
    net = batch_norm(Deconv2DLayer(net, 256, 4, stride=4))
    print("Generator output:", net.output_shape)
    # 32 * 32
    net = batch_norm(Deconv2DLayer(net, 128, 4, stride=2, crop=1))
    print("Generator output:", net.output_shape)
    # 64 * 64
    net = batch_norm(Deconv2DLayer(net, 64, 4, stride=2, crop=1))
    print("Generator output:", net.output_shape)
    # 128 * 128
    net = Deconv2DLayer(net, 3, 4, stride=2, crop=1, nonlinearity=tanh)
    print("Generator output:", net.output_shape)
    # 3 * 256 * 256
    return net
Example n. 14
def build_decoder(hid_layer, nkwargs):

    # the last shape before the fully connected layers
    last_conv_shape = get_last_conv_shape(hid_layer)

    #set kwargs
    dc_kwargs = {k: nkwargs[k] for k in ['num_filters', 'W', 'nonlinearity']}
    edc_kwargs = dict(num_filters=nkwargs['input_shape'][1],
                      W=nkwargs['W'],
                      nonlinearity=linear)
    fc_kwargs = {k: nkwargs[k] for k in ['W', 'nonlinearity']}
    fc_kwargs.update({'num_units': np.prod(last_conv_shape[1:])})

    #make layers
    net = DropoutLayer(hid_layer, p=nkwargs['p'])
    net = DenseLayer(net, **fc_kwargs)
    d1, d2, d3 = last_conv_shape[1:]
    net = ReshapeLayer(net, shape=([0], d1, d2, d3))
    net = Deconv2DLayer(net, filter_size=(2, 2), stride=(2, 2), **dc_kwargs)
    net = Deconv2DLayer(net, filter_size=(2, 2), stride=(2, 2), **dc_kwargs)
    net = Deconv2DLayer(net, filter_size=(2, 2), stride=(2, 2), **edc_kwargs)

    return net
Example n. 15
def UNet_decoder_3(LR_conv1, LR_conv2, LR_conv3, LR_conv4, warp_conv1, warp_conv2, warp_conv3, warp_conv4):
    # 80
    mask4 = Conv2DLayer(ConcatLayer([LR_conv4, warp_conv4]), 64, 5, pad=2, W=W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv4_m = ElemwiseMergeLayer([warp_conv4, mask4], T.mul)
    warp_deconv4 = Deconv2DLayer(ConcatLayer([LR_conv4, warp_conv4_m]), num_filters=64, filter_size=4, stride=2, crop=1, W=W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    # 160
    mask3 = Conv2DLayer(ConcatLayer([warp_deconv4, LR_conv3, warp_conv3]), 64, 5, pad=2, W=W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv3_m = ElemwiseMergeLayer([warp_conv3, mask3], T.mul)
    warp_deconv3 = Deconv2DLayer(ConcatLayer([warp_deconv4, LR_conv3, warp_conv3_m]), num_filters=64, filter_size=4, stride=2, crop=1, W=W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    # 320
    mask2 = Conv2DLayer(ConcatLayer([warp_deconv3, LR_conv2, warp_conv2]), 64, 5, pad=2, W=W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv2_m = ElemwiseMergeLayer([warp_conv2, mask2], T.mul)
    warp_deconv2 = Deconv2DLayer(ConcatLayer([warp_deconv3, LR_conv2, warp_conv2_m]), num_filters=64, filter_size=4, stride=2, crop=1, W=W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    # final fusion: masked skip, two 5x5 convs, linear 3-channel output
    mask1 = Conv2DLayer(ConcatLayer([warp_deconv2, LR_conv1, warp_conv1]), 64, 5, pad=2, W=W_init_SELU, b=Constant(0.), nonlinearity=sigmoid)
    warp_conv1_m = ElemwiseMergeLayer([warp_conv1, mask1], T.mul)
    post_fusion1 = Conv2DLayer(ConcatLayer([warp_deconv2, LR_conv1, warp_conv1_m]), 64, 5, pad=2, W=W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    post_fusion2 = Conv2DLayer(post_fusion1, 64, 5, pad=2, W=W_init_SELU, b=Constant(0.), nonlinearity=SELU_activation)
    final = Conv2DLayer(post_fusion2, 3, 5, pad=2, W=W_init_linear, b=Constant(0.), nonlinearity=linear)
    return final
Example n. 16
def build_generator_128(noise=None, ngf=128):
    lrelu = LeakyRectify(0.2)
    # noise input
    InputNoise = InputLayer(shape=(None, 100), input_var=noise)
    #FC Layer
    gnet0 = DenseLayer(InputNoise,
                       ngf * 16 * 4 * 4,
                       W=Normal(0.02),
                       nonlinearity=lrelu)
    print("Gen fc1:", gnet0.output_shape)
    #Reshape Layer
    gnet1 = ReshapeLayer(gnet0, ([0], ngf * 16, 4, 4))
    print("Gen rs1:", gnet1.output_shape)
    # DeConv Layer
    gnet2 = Deconv2DLayer(gnet1,
                          ngf * 8, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=lrelu)
    print("Gen deconv1:", gnet2.output_shape)
    # DeConv Layer
    gnet3 = Deconv2DLayer(gnet2,
                          ngf * 8, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=lrelu)
    print("Gen deconv2:", gnet3.output_shape)
    # DeConv Layer
    gnet4 = Deconv2DLayer(gnet3,
                          ngf * 4, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=lrelu)
    print("Gen deconv3:", gnet4.output_shape)
    # DeConv Layer
    gnet5 = Deconv2DLayer(gnet4,
                          ngf * 4, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=lrelu)
    print("Gen deconv4:", gnet5.output_shape)
    # DeConv Layer
    gnet6 = Deconv2DLayer(gnet5,
                          ngf * 2, (4, 4), (2, 2),
                          crop=1,
                          W=Normal(0.02),
                          nonlinearity=lrelu)
    print("Gen deconv5:", gnet6.output_shape)
    # DeConv Layer
    gnet7 = Deconv2DLayer(gnet6,
                          3, (3, 3), (1, 1),
                          crop='same',
                          W=Normal(0.02),
                          nonlinearity=tanh)
    print("Gen output:", gnet7.output_shape)
    return gnet7
Example n. 17
def upsample_bn(input_layer, name='', num_filters=None, filter_size=None, stride=None, crop=None,
                activation='relu', use_bn=True, W_init=1, deconv_mode=None, **kwargs):
    if deconv_mode in (None, ''):
        # default: a single transposed convolution
        deconv = Deconv2DLayer(input_layer, name=name + '_linear', nonlinearity=linear, num_filters=num_filters,
                               filter_size=filter_size, stride=stride, crop=crop, W=W_init, b=Constant(0.),
                               flip_filters=False, **kwargs)
    elif deconv_mode == 'Subpixel':
        deconv = lasagne.layers.Conv2DLayer(input_layer, name=name + '_linear',
                                            num_filters=num_filters * stride * stride, filter_size=3,
                                            pad=(filter_size - 1) // 2, nonlinearity=linear,
                                            W=W_init, b=Constant(0.))
        deconv = lasagne.layers.SubpixelReshuffleLayer(deconv, num_filters, stride, name=name + '_linear_shuffle')

    if use_bn:
        bn = BatchNormLayer(deconv, name=name + '_bn')
        out = NonlinearityLayer(bn, name=name + '_activation', nonlinearity=activation)
    else:
        out = NonlinearityLayer(deconv, name=name + '_activation', nonlinearity=activation)
    return out
Example n. 18
def TransitionUp(skip_connection, block_to_upsample, n_filters_keep):
    """
    Performs upsampling on block_to_upsample by a factor 2 and concatenates it with the skip_connection """

    # Upsample
    l = ConcatLayer(block_to_upsample)
    l = Deconv2DLayer(l,
                      n_filters_keep,
                      filter_size=3,
                      stride=2,
                      crop='valid',
                      W=HeUniform(gain='relu'),
                      nonlinearity=linear)
    # Concatenate with skip connection
    l = ConcatLayer([l, skip_connection],
                    cropping=[None, None, 'center', 'center'])

    return l
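A worked size example under those settings (a sketch, assuming an 8x8 block and a 16x16 skip connection):

up_size = (8 - 1) * 2 + 3  # deconv output with crop='valid': 17
# ConcatLayer's 'center' cropping trims the 17x17 map to the 16x16 skip
# before stacking channels, so the odd extra row/column is discarded.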
Example n. 19
def synthesiser(input_var=None, configs=None):

    network = lasagne.layers.InputLayer(shape=(None, configs['GIN']),
                                        input_var=input_var)
    print('L0:' + str(lasagne.layers.get_output_shape(network)))
    network = batch_norm(DenseLayer(incoming=network, num_units=1024))
    print('L1:' + str(lasagne.layers.get_output_shape(network)))
    network = lasagne.layers.ReshapeLayer(network, (-1, 1024))
    # Project, reshape
    network = batch_norm(DenseLayer(
        incoming=network,
        num_units=128 * 7 * 7,
    ))
    print('L2:' + str(lasagne.layers.get_output_shape(network)))
    network = lasagne.layers.ReshapeLayer(network, (-1, 128, 7, 7))
    # Two fractional-stride convolutions
    network = batch_norm(
        Deconv2DLayer(incoming=network,
                      num_filters=64,
                      filter_size=(5, 5),
                      stride=2,
                      crop='same',
                      output_size=14))
    print('L3:' + str(lasagne.layers.get_output_shape(network)))
    network = lasagne.layers.Deconv2DLayer(
        incoming=network,
        num_filters=1,
        filter_size=(5, 5),
        stride=2,
        crop='same',
        output_size=(28, 28),
        nonlinearity=lasagne.nonlinearities.sigmoid)

    print('L4:' + str(lasagne.layers.get_output_shape(network)))
    network = lasagne.layers.ReshapeLayer(
        network, (-1, 1, configs['img_rows'], configs['img_cols']))
    print('L5:' + str(lasagne.layers.get_output_shape(network)))

    return network
Example n. 20
def construct_unet_recursive(channels=1, no_f_base=8, f_size=3, branches=[2, 2, 2, 2], dropout=0.2, bs=None,
                             class_nums=2, pad="same", nonlinearity=lasagne.nonlinearities.rectify, input_dim=[512, 512]):

    net= InputLayer((bs, channels, input_dim[0], input_dim[1]))
    # Moving downwards the U-shape:
    horizontal_pass=[]
    for i in range(len(branches)):
        net = conv_pool_down(net,no_f_base*2**(i),f_size,conv_depth=branches[i],
                             pad=pad,nonlinearity=nonlinearity,dropout=dropout)
        horizontal_pass.append(net)
        net = Pool2DLayer(net,pool_size=2)
    # Bottleneck
    net = Conv2DLayer(net,no_f_base*2**len(branches),f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net = Conv2DLayer(net,no_f_base*2**len(branches),f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net = Deconv2DLayer(net, no_f_base*2**(len(branches)-1), 2, 2)
    # Moving upwards the U-shape:
    for i in range(len(branches)):
        net = PadLayer(net,1)
        net = ConcatLayer([net,horizontal_pass[-(i+1)]],cropping=(None,None,"center","center"))
        if i==len(branches)-1:
            net = conv_pool_up(net,no_f_base*2**(len(branches)-1-i),f_size,
                           pad=pad,nonlinearity=nonlinearity,conv_depth=branches[i],halt=True)
        else:
            net = conv_pool_up(net,no_f_base*2**(len(branches)-1-i),f_size,
                           pad=pad,nonlinearity=nonlinearity,conv_depth=branches[i],halt=False)

    # Class layer: work around the standard softmax, which doesn't accept tensor4/tensor3.
    # Hence we reshape and feed the result to an external Nonlinearity layer.
    # imageout is the output in image-related shape.
    imageout = net = Conv2DLayer(net, class_nums, 1, nonlinearity=linear, W=lasagne.init.HeNormal(gain='relu'))
    net = DimshuffleLayer(net, (1, 0, 2, 3))
    net = ReshapeLayer(net, (class_nums, -1))
    net = DimshuffleLayer(net, (1, 0))
    # Flattened output that can be fed to lasagne.objectives.categorical_crossentropy.
    net = NonlinearityLayer(net, nonlinearity=lasagne.nonlinearities.softmax)

    return net, imageout
Example n. 21
def conv_pool_up(net, no_f_base, f_size, conv_depth, pad, nonlinearity, halt=False):
    for i in range(conv_depth):
        net = Conv2DLayer(net, no_f_base, f_size, pad=pad, nonlinearity=nonlinearity, W=lasagne.init.HeNormal(gain='relu'))
    if not halt:
        net = Deconv2DLayer(net, no_f_base // 2, 2, 2)
    return net
Example n. 22
def construct_unet(channels=1, no_f_base=8, f_size=3, dropout=False, bs=None, class_nums=2, pad="same", nonlinearity=lasagne.nonlinearities.rectify, input_dim=[512, 512]):
    net={}
    net["input"]= InputLayer(shape=(bs, channels, input_dim[0], input_dim[1]))

    # Moving downwards the U-shape. Simplified:
    net["conv_down11"] = Conv2DLayer(net["input"],no_f_base,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_down12"] = Conv2DLayer(net["conv_down11"],no_f_base,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["pool1"]      = Pool2DLayer(net["conv_down12"],pool_size=2)

    net["conv_down21"] = Conv2DLayer(net["pool1"],no_f_base*2,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_down22"] = Conv2DLayer(net["conv_down21"],no_f_base*2,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["pool2"]      = Pool2DLayer(net["conv_down22"],pool_size=2)

    net["conv_down31"] = Conv2DLayer(net["pool2"],no_f_base*4,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_down32"] = Conv2DLayer(net["conv_down31"],no_f_base*4,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["pool3"]      = Pool2DLayer(net["conv_down32"],pool_size=2)

    net["conv_down41"] = Conv2DLayer(net["pool3"],no_f_base*8,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_down42"] = Conv2DLayer(net["conv_down41"],no_f_base*8,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    to_drop1 = net["pool4"]      = Pool2DLayer(net["conv_down42"],pool_size=2)

    if dropout:
        to_drop1 = DropoutLayer(to_drop1, p=0.5)

    #vvvv bottom vvvv
    net["conv_bottom1"] = Conv2DLayer(to_drop1,no_f_base*16,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_bottom2"] = Conv2DLayer(net["conv_bottom1"],no_f_base*16,f_size,pad=pad,nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["deconv_bottom1"]      = Deconv2DLayer(net["conv_bottom2"], no_f_base*8, 2, 2)
    #^^^^ bottom ^^^^

    # Moving upwards the U-shape. Simplified:
    net["concat1"] = concat([net["deconv_bottom1"], net["conv_down42"]], cropping=(None, None, "center", "center"))
    net["conv_up11"]= Conv2DLayer(net["concat1"], no_f_base*8, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_up11"]= Conv2DLayer(net["conv_up11"], no_f_base*8, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["deconv_up1"] = Deconv2DLayer(net["conv_up11"], no_f_base*4, 2, 2)

    net["concat2"] = concat([net["deconv_up1"], net["conv_down32"]], cropping=(None, None, "center", "center"))
    net["conv_up21"]= Conv2DLayer(net["concat2"], no_f_base*4, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_up22"]= Conv2DLayer(net["conv_up21"], no_f_base*4, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["deconv_up2"] = Deconv2DLayer(net["conv_up22"], no_f_base*2, 2, 2)

    net["concat3"] = concat([net["deconv_up2"], net["conv_down22"]], cropping=(None, None, "center", "center"))
    net["conv_up31"]= Conv2DLayer(net["concat3"], no_f_base*2, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_up32"]= Conv2DLayer(net["conv_up31"], no_f_base*2, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["deconv_up3"] = Deconv2DLayer(net["conv_up32"], no_f_base, 2, 2)

    net["concat4"] = concat([net["deconv_up3"], net["conv_down12"]], cropping=(None, None, "center", "center"))
    net["conv_up41"]= Conv2DLayer(net["concat4"], no_f_base, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    net["conv_up42"]= Conv2DLayer(net["conv_up41"], no_f_base, f_size, pad=pad, nonlinearity=nonlinearity,W=lasagne.init.HeNormal(gain='relu'))
    # Class layer: Work around standard softmax bc. it doesn't work with tensor4/3.
    # Hence, we reshape and feed it to an external Nonlinearity layer.
    # net["class_ns"] is the output in image-related shape.

    net["out"] = Conv2DLayer(net["conv_up42"], class_nums, 1, nonlinearity=None,W=lasagne.init.HeNormal(gain='relu'))
    net["layer_shuffle_dim"] = DimshuffleLayer(net["out"], (1, 0, 2, 3))
    net["reshape_layer"] = ReshapeLayer(net["layer_shuffle_dim"], (class_nums, -1))
    net["layer_shuffle_dim2"] = DimshuffleLayer(net["reshape_layer"], (1, 0))
    # Flattened output to be able to feed it to lasagne.objectives.categorical_crossentropy.
    net["out_optim"] = NonlinearityLayer(net["layer_shuffle_dim2"], nonlinearity=lasagne.nonlinearities.softmax)

    return net
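A sketch of the training-side usage that the flattened softmax output enables (illustrative; the variable names are assumptions). The dimshuffle/reshape chain orders rows as batch-major pixels, so integer pixel labels flatten in the same order:

import theano.tensor as T
import lasagne

x_var = T.tensor4('x')   # (batch, channels, H, W)
y_var = T.itensor3('y')  # (batch, H, W) integer class labels

net = construct_unet(channels=1, class_nums=2, input_dim=[512, 512])
probs = lasagne.layers.get_output(net["out_optim"], inputs=x_var)  # (batch*H*W, class_nums)
loss = lasagne.objectives.categorical_crossentropy(probs, y_var.flatten()).mean()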
Example n. 23
def tiramisu_transistion_up(net, f_size_base, stride):
    net = Deconv2DLayer(net, net.output_shape[1], f_size_base, stride)
    return net
Example n. 24
def TransitionUp(skip_connection, block_to_upsample, n_filters_keep):
    l = ConcatLayer(block_to_upsample)
    l = Deconv2DLayer(l, n_filters_keep, filter_size=3, stride=2,
                      crop='valid', W=HeUniform(gain='relu'), nonlinearity=linear)
    l = ConcatLayer([l, skip_connection], cropping=[None, None, 'center', 'center'])
    return l
Example n. 25
              3,
              nonlinearity=nonlinearity,
              pad=pad,
              W=HeNormal(gain="relu")))
net['encode_2'] = batch_norm(
    ConvLayer(net['encode_1'],
              base_n_filters * 16,
              3,
              nonlinearity=nonlinearity,
              pad=pad,
              W=HeNormal(gain="relu")))
net['upscale1'] = batch_norm(
    Deconv2DLayer(net['encode_2'],
                  base_n_filters * 16,
                  2,
                  2,
                  crop="valid",
                  nonlinearity=nonlinearity,
                  W=HeNormal(gain="relu")))

net['concat1'] = ConcatLayer([net['upscale1'], net['contr_4_2']],
                             cropping=(None, None, "center", "center"))
net['expand_1_1'] = batch_norm(
    ConvLayer(net['concat1'],
              base_n_filters * 8,
              3,
              nonlinearity=nonlinearity,
              pad=pad,
              W=HeNormal(gain="relu")))
net['expand_1_2'] = batch_norm(
    ConvLayer(net['expand_1_1'],
Example n. 26
def build_generator(input_var, noise_size, cond_var=None, n_conds=0, arch=0,
                    with_BatchNorm=True, batch_size=None, n_steps=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, concat
    from lasagne.layers import Upscale2DLayer, Conv2DLayer
    from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    from lasagne.nonlinearities import LeakyRectify, rectify
    from lasagne.init import GlorotUniform, Normal, Orthogonal

    # non_lin = LeakyRectify(0.01)
    non_lin = rectify
    # init = Orthogonal(np.sqrt(2/(1+0.01**2)))
    init = Normal(0.02, 0.0)
    # init = GlorotUniform()

    layer = InputLayer(shape=(batch_size, noise_size), input_var=input_var)
    if cond_var is not None:
        layer = BatchNorm(DenseLayer(
            layer, noise_size, nonlinearity=non_lin), with_BatchNorm)
        layer = concat([
            layer, InputLayer(shape=(batch_size, n_conds), input_var=cond_var)])
    if arch == 'dcgan':
        # DCGAN
        layer = BatchNorm(DenseLayer(
            layer, 1024*4*4, W=init, b=None, nonlinearity=non_lin))
        layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=8, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=16, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=32, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 64, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=64, nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=128, nonlinearity=tanh_temperature)
    elif arch == 'mnist':
        # Jan Schluechter MNIST generator
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(
            layer, 1024*8*8, W=init, b=None), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 8, 8))
        # fractional-stride convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 5, stride=2, crop='same', W=init, b=None,
            output_size=16, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', W=init, b=None,
            output_size=32, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 5, stride=2, crop='same', W=init, b=None,
            output_size=64, nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop='same', W=init, b=None,
            output_size=128, nonlinearity=tanh_temperature)
    elif arch == 'cont-enc':
        # build generator from concatenated prefix and noise features
        layer = ReshapeLayer(layer, ([0], layer.output_shape[1], 1, 1))
        layer = BatchNorm(Deconv2DLayer(
            layer, 1024, 4, stride=1, crop=0, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 4, stride=2, crop=1, W=init,
            nonlinearity=tanh_temperature)
    elif arch == 'lsgan':
        layer = batch_norm(DenseLayer(layer, 1024))
        layer = batch_norm(DenseLayer(layer, 1024*8*8))
        layer = ReshapeLayer(layer, ([0], 1024, 8, 8))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=16))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=32))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=64))
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop='same', output_size=128,
            nonlinearity=tanh_temperature)
    elif arch == 2:
        # non-overlapping transposed convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 256*36*36), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 256, 36, 36))
        # two fractional-stride convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 4, stride=2, crop='full', b=None, nonlinearity=non_lin),
            with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 8, stride=2, crop='full', b=None,
            nonlinearity=tanh_temperature)
    elif arch == 3:
        # resize-convolution, more full layer weights less convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 32*68*68), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 32, 68, 68))
        # resize-convolutions
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid'), with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2))
        layer = Conv2DLayer(
            layer, 1, 5, stride=1, pad='valid', nonlinearity=tanh_temperature)
    elif arch == 4:
        # resize-convolution, less full layer weights more convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 128*18*18), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 128, 18, 18))
        # resize-convolutions
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid', nonlinearity=non_lin),
            with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid', nonlinearity=non_lin),
            with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = Conv2DLayer(
            layer, 1, 5, stride=1, pad='valid',
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_up':
        # CREPE transposed with upscaling
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 2**15*1*3), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 2**15, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Upscale2DLayer(layer, (1, 3), mode='repeat')
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, (1, 9), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Upscale2DLayer(layer, (1, 3), mode='repeat')
        layer = Deconv2DLayer(
            layer, 1, (128, 6), stride=1, crop=0, W=init, b=None,
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_noup_a':
        # CREPE transposed no upscaling
        # fully-connected layer
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(
            layer, 1024*1*3, W=init, b=None), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, (1, 7), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 1024, (128, 7), stride=3, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, (1, 8), stride=1, crop=0, W=init, b=None,
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_noup_b':
        # CREPE transposed no upscaling
        # fully-connected layer
        layer = BatchNorm(DenseLayer(layer, 1024))
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 1024*1*3))
        layer = ReshapeLayer(layer, ([0], 1024, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0,
            nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=3, crop=0, nonlinearity=non_lin))
        layer = Deconv2DLayer(
            layer, 512, (1, 9), stride=1, crop=0, nonlinearity=non_lin)
        layer = Deconv2DLayer(
            layer, 1, (128, 8), stride=3, crop=0, nonlinearity=tanh_temperature)
    else:
        return None

    print("Generator output:", layer.output_shape)
    return layer
Example n. 27
def build_model(nBaseFilters=64, fs1=3, fs2=3):
    net = OrderedDict()
    net['input'] = InputLayer((None, 1, 540, 960))
    net['econv1_1'] = batch_norm(
        ConvLayer(net['input'],
                  num_filters=nBaseFilters,
                  filter_size=fs1,
                  pad='same',
                  nonlinearity=ReLU))
    net['econv1_2'] = batch_norm(
        ConvLayer(net['econv1_1'],
                  num_filters=nBaseFilters,
                  filter_size=fs2,
                  pad='same',
                  nonlinearity=ReLU))
    net['pool1'] = PoolLayer(net['econv1_2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False,
                             mode='max')
    net['econv2_1'] = batch_norm(
        ConvLayer(net['pool1'],
                  num_filters=nBaseFilters * 2,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['econv2_2'] = batch_norm(
        ConvLayer(net['econv2_1'],
                  num_filters=nBaseFilters * 2,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['pool2'] = PoolLayer(net['econv2_2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False,
                             mode='max')
    net['econv3_1'] = batch_norm(
        ConvLayer(net['pool2'],
                  num_filters=nBaseFilters * 4,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['econv3_2'] = batch_norm(
        ConvLayer(net['econv3_1'],
                  num_filters=nBaseFilters * 4,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['pool3'] = PoolLayer(net['econv3_2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False,
                             mode='max')
    net['econv4_1'] = batch_norm(
        ConvLayer(net['pool3'],
                  num_filters=nBaseFilters * 8,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['econv4_2'] = batch_norm(
        ConvLayer(net['econv4_1'],
                  num_filters=nBaseFilters * 8,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['pool4'] = PoolLayer(net['econv4_2'],
                             pool_size=2,
                             stride=2,
                             ignore_border=False,
                             mode='max')
    net['econv5_1'] = batch_norm(
        ConvLayer(net['pool4'],
                  num_filters=nBaseFilters * 16,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['econv5_2'] = batch_norm(
        ConvLayer(net['econv5_1'],
                  num_filters=nBaseFilters * 16,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['upconv1'] = batch_norm(
        Deconv2DLayer(net['econv5_2'],
                      num_filters=nBaseFilters * 8,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=ReLU))
    net['concat1'] = ConcatLayer([net['upconv1'], net['econv4_2']],
                                 cropping=(None, None, "center", "center"))
    net['dconv1_1'] = batch_norm(
        ConvLayer(net['concat1'],
                  num_filters=nBaseFilters * 8,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['dconv1_2'] = batch_norm(
        ConvLayer(net['dconv1_1'],
                  num_filters=nBaseFilters * 8,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['upconv2'] = batch_norm(
        Deconv2DLayer(net['dconv1_2'],
                      num_filters=nBaseFilters * 4,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=ReLU))
    net['concat2'] = ConcatLayer([net['upconv2'], net['econv3_2']],
                                 cropping=(None, None, "center", "center"))
    net['dconv2_1'] = batch_norm(
        ConvLayer(net['concat2'],
                  num_filters=nBaseFilters * 4,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['dconv2_2'] = batch_norm(
        ConvLayer(net['dconv2_1'],
                  num_filters=nBaseFilters * 4,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['upconv3'] = batch_norm(
        Deconv2DLayer(net['dconv2_2'],
                      num_filters=nBaseFilters * 2,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=ReLU))
    net['concat3'] = ConcatLayer([net['upconv3'], net['econv2_2']],
                                 cropping=(None, None, "center", "center"))
    net['dconv3_1'] = batch_norm(
        ConvLayer(net['concat3'],
                  num_filters=nBaseFilters * 2,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['dconv3_2'] = batch_norm(
        ConvLayer(net['dconv3_1'],
                  num_filters=nBaseFilters * 2,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['upconv4'] = batch_norm(
        Deconv2DLayer(net['dconv3_2'],
                      num_filters=nBaseFilters,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=ReLU))
    net['concat4'] = ConcatLayer([net['upconv4'], net['econv1_2']],
                                 cropping=(None, None, "center", "center"))
    net['dconv4_1'] = batch_norm(
        ConvLayer(net['concat4'],
                  num_filters=nBaseFilters,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['dconv4_2'] = batch_norm(
        ConvLayer(net['dconv4_1'],
                  num_filters=nBaseFilters,
                  filter_size=3,
                  pad='same',
                  nonlinearity=ReLU))
    net['output_segmentation'] = ConvLayer(net['dconv4_2'],
                                           num_filters=2,
                                           filter_size=1,
                                           nonlinearity=None)
    net['dimshuffle'] = DimshuffleLayer(net['output_segmentation'],
                                        (1, 0, 2, 3))
    net['reshapeSeg'] = ReshapeLayer(net['dimshuffle'], (2, -1))
    net['dimshuffle2'] = DimshuffleLayer(net['reshapeSeg'], (1, 0))
    net['output_flattened'] = NonlinearityLayer(net['dimshuffle2'],
                                                nonlinearity=softmax)
    return net
Example n. 28
def create_model(input_var, input_shape, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()

    input = InputLayer(shape=input_shape, input_var=input_var, name='input')
    conv2d1 = Conv2DLayer(input,
                          num_filters=conv_num_filters1,
                          filter_size=filter_size1,
                          pad=pad_in,
                          name='conv2d1',
                          nonlinearity=scaled_tanh)
    maxpool2d2 = MaxPool2DLayer(conv2d1,
                                pool_size=pool_size,
                                name='maxpool2d2')
    conv2d3 = Conv2DLayer(maxpool2d2,
                          num_filters=conv_num_filters2,
                          filter_size=filter_size2,
                          pad=pad_in,
                          name='conv2d3',
                          nonlinearity=scaled_tanh)
    maxpool2d4 = MaxPool2DLayer(conv2d3,
                                pool_size=pool_size,
                                name='maxpool2d4',
                                pad=(1, 0))
    conv2d5 = Conv2DLayer(maxpool2d4,
                          num_filters=conv_num_filters3,
                          filter_size=filter_size3,
                          pad=pad_in,
                          name='conv2d5',
                          nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # 3000
    reshape6_output = reshape6.output_shape[1]
    dense7 = DenseLayer(reshape6,
                        num_units=dense_mid_size,
                        name='dense7',
                        nonlinearity=scaled_tanh)
    bottleneck = DenseLayer(dense7,
                            num_units=encode_size,
                            name='bottleneck',
                            nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck,
                        num_units=dense_mid_size,
                        W=bottleneck.W.T,
                        name='dense8',
                        nonlinearity=linear)
    dense9 = DenseLayer(dense8,
                        num_units=reshape6_output,
                        W=dense7.W.T,
                        nonlinearity=scaled_tanh,
                        name='dense9')
    reshape10 = ReshapeLayer(dense9,
                             shape=([0], conv_num_filters3, 3, 5),
                             name='reshape10')  # conv_num_filters3 x 3 x 5
    deconv2d11 = Deconv2DLayer(reshape10,
                               conv2d5.input_shape[1],
                               conv2d5.filter_size,
                               stride=conv2d5.stride,
                               W=conv2d5.W,
                               flip_filters=not conv2d5.flip_filters,
                               name='deconv2d11',
                               nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11,
                                 scale_factor=pool_size,
                                 name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12,
                               conv2d3.input_shape[1],
                               conv2d3.filter_size,
                               stride=conv2d3.stride,
                               W=conv2d3.W,
                               flip_filters=not conv2d3.flip_filters,
                               name='deconv2d13',
                               nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13,
                                 scale_factor=pool_size,
                                 name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14,
                               conv2d1.input_shape[1],
                               conv2d1.filter_size,
                               stride=conv2d1.stride,
                               crop=(1, 0),
                               W=conv2d1.W,
                               flip_filters=not conv2d1.flip_filters,
                               name='deconv2d15',
                               nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    print_network(reshape16)
    return reshape16
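
A minimal usage sketch for create_model; the input shape, option values, and
variable names here are illustrative assumptions, not part of the original
snippet (with a 1x28x40 input, conv2d5 comes out at 200 x 3 x 5 = 3000,
matching the hard-coded reshape above):

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
options = {'BOTTLENECK': 64, 'DENSE': 1024}        # hypothetical settings
network = create_model(input_var, (None, 1, 28, 40), options)

# fetch the reconstruction and the bottleneck code in one pass
layers = {l.name: l for l in lasagne.layers.get_all_layers(network)}
recon, code = lasagne.layers.get_output([network, layers['bottleneck']])
encode_fn = theano.function([input_var], code)
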
Example 29
# the head of this snippet is cut off by the listing; l_fc6 is reconstructed
# here to mirror l_fc6_deconv below (VGG-16's fc6 expressed as a 7x7 convolution)
l_fc6 = Conv2DLayer(l_pool5,
                    num_filters=4096,
                    filter_size=(7, 7),
                    stride=1,
                    pad=0,
                    flip_filters=False,
                    nonlinearity=rectify,
                    W=lasagne.init.Normal(0.01))  ###
l_fc7 = Conv2DLayer(l_fc6,
                    num_filters=4096,
                    filter_size=(1, 1),
                    stride=1,
                    pad=0,
                    flip_filters=False,
                    nonlinearity=rectify,
                    W=lasagne.init.Normal(0.01))  ###
l_fc6_deconv = Deconv2DLayer(l_fc7,
                             num_filters=512,
                             filter_size=(7, 7),
                             stride=1,
                             nonlinearity=rectify,
                             W=lasagne.init.Normal(0.01))

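# InverseLayer inverts the max-pooling step that produced l_pool5
# (unpooling via the stored pooling switches)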
l_unpool5 = InverseLayer(l_fc6_deconv, l_pool5)

l_deconv5_1 = Deconv2DLayer(l_unpool5,
                            num_filters=512,
                            filter_size=(3, 3),
                            stride=1,
                            crop='same',
                            nonlinearity=rectify,
                            W=lasagne.init.Normal(0.01))
l_deconv5_2 = Deconv2DLayer(l_deconv5_1,
                            num_filters=512,
                            filter_size=(3, 3),
                            stride=1,
                            crop='same',
                            nonlinearity=rectify,
                            W=lasagne.init.Normal(0.01))  # completed to mirror l_deconv5_1; the original snippet truncates here
Example 30
def build_UNet(n_input_channels=3,
               BATCH_SIZE=None,
               num_output_classes=2,
               pad='same',
               nonlinearity=elu,
               input_dim=(128, 128),
               base_n_filters=64,
               do_dropout=False,
               weights=None):
    net = OrderedDict()
    net['input'] = InputLayer(
        (BATCH_SIZE, n_input_channels, input_dim[0], input_dim[1]))

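    # contracting path: two batch-normalized 3x3 convolutions per level,
    # followed by 2x2 max-pooling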
    net['contr_1_1'] = batch_norm(
        ConvLayer(
            net['input'],
            num_filters=base_n_filters,
            filter_size=3,
            nonlinearity=nonlinearity,
            pad=pad,
            W=GlorotNormal(),
        ))
    net['contr_1_2'] = batch_norm(
        ConvLayer(net['contr_1_1'],
                  num_filters=base_n_filters,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['pool1'] = Pool2DLayer(net['contr_1_2'], pool_size=2)

    net['contr_2_1'] = batch_norm(
        ConvLayer(net['pool1'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['contr_2_2'] = batch_norm(
        ConvLayer(net['contr_2_1'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['pool2'] = Pool2DLayer(net['contr_2_2'], pool_size=2)

    net['contr_3_1'] = batch_norm(
        ConvLayer(net['pool2'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['contr_3_2'] = batch_norm(
        ConvLayer(net['contr_3_1'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['pool3'] = Pool2DLayer(net['contr_3_2'], pool_size=2)

    net['contr_4_1'] = batch_norm(
        ConvLayer(net['pool3'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['contr_4_2'] = batch_norm(
        ConvLayer(net['contr_4_1'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    l = net['pool4'] = Pool2DLayer(net['contr_4_2'], pool_size=2)

    if do_dropout:
        l = DropoutLayer(l, p=0.4)

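    # bottleneck: deepest features (16 x base_n_filters channels) at 1/16 of
    # the input resolution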
    net['encode_1'] = batch_norm(
        ConvLayer(l,
                  num_filters=base_n_filters * 16,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['encode_2'] = batch_norm(
        ConvLayer(net['encode_1'],
                  num_filters=base_n_filters * 16,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale1'] = batch_norm(
        Deconv2DLayer(net['encode_2'],
                      num_filters=base_n_filters * 16,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

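    # expanding path: stride-2 2x2 deconvolution, center-cropped skip
    # concatenation with the matching contracting level, then two 3x3 convolutions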
    net['concat1'] = ConcatLayer([net['upscale1'], net['contr_4_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_1_1'] = batch_norm(
        ConvLayer(net['concat1'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_1_2'] = batch_norm(
        ConvLayer(net['expand_1_1'],
                  num_filters=base_n_filters * 8,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale2'] = batch_norm(
        Deconv2DLayer(net['expand_1_2'],
                      num_filters=base_n_filters * 8,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat2'] = ConcatLayer([net['upscale2'], net['contr_3_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_2_1'] = batch_norm(
        ConvLayer(net['concat2'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_2_2'] = batch_norm(
        ConvLayer(net['expand_2_1'],
                  num_filters=base_n_filters * 4,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale3'] = batch_norm(
        Deconv2DLayer(net['expand_2_2'],
                      num_filters=base_n_filters * 4,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat3'] = ConcatLayer([net['upscale3'], net['contr_2_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_3_1'] = batch_norm(
        ConvLayer(net['concat3'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_3_2'] = batch_norm(
        ConvLayer(net['expand_3_1'],
                  num_filters=base_n_filters * 2,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['upscale4'] = batch_norm(
        Deconv2DLayer(net['expand_3_2'],
                      num_filters=base_n_filters * 2,
                      filter_size=2,
                      stride=2,
                      crop="valid",
                      nonlinearity=nonlinearity,
                      W=GlorotNormal()))

    net['concat4'] = ConcatLayer([net['upscale4'], net['contr_1_2']],
                                 cropping=(None, None, "center", "center"))
    net['expand_4_1'] = batch_norm(
        ConvLayer(net['concat4'],
                  num_filters=base_n_filters,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))
    net['expand_4_2'] = batch_norm(
        ConvLayer(net['expand_4_1'],
                  num_filters=base_n_filters,
                  filter_size=3,
                  nonlinearity=nonlinearity,
                  pad=pad,
                  W=GlorotNormal()))

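    # 1x1 convolution yields per-pixel class scores; the dimshuffle/reshape
    # pair flattens them to (B*H*W, num_output_classes) so a standard softmax
    # applies per pixel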
    net['output_segmentation'] = ConvLayer(net['expand_4_2'],
                                           num_filters=num_output_classes,
                                           filter_size=1,
                                           nonlinearity=None)
    net['dimshuffle'] = DimshuffleLayer(net['output_segmentation'],
                                        (1, 0, 2, 3))
    net['reshapeSeg'] = ReshapeLayer(net['dimshuffle'],
                                     (num_output_classes, -1))
    net['dimshuffle2'] = DimshuffleLayer(net['reshapeSeg'], (1, 0))
    net['output_flattened'] = NonlinearityLayer(
        net['dimshuffle2'], nonlinearity=lasagne.nonlinearities.softmax)

    if weights is not None:
        lasagne.layers.set_all_param_values(net['output_flattened'], weights)

    return net
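
The flattened softmax output pairs with an equally flattened integer label
vector. A minimal training sketch for build_UNet; the hyperparameters and
variable names below are assumptions, not part of the original code:

import theano
import theano.tensor as T
import lasagne

net = build_UNet(n_input_channels=3, num_output_classes=2, input_dim=(128, 128))
input_var = net['input'].input_var   # the InputLayer created its own variable
target_var = T.ivector('targets')    # one label per pixel, flattened to (B*128*128,)

prediction = lasagne.layers.get_output(net['output_flattened'])
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var).mean()
params = lasagne.layers.get_all_params(net['output_flattened'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-4)
train_fn = theano.function([input_var, target_var], loss, updates=updates)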