def createGenerator2(input_var=None):

	_ = InputLayer(shape=(None, 64), input_var=input_var)
	_ = batch_norm(DenseLayer(_, num_units=1000, nonlinearity=lasagne.nonlinearities.rectify))
	_ = batch_norm(DenseLayer(_, num_units=64*16*16, nonlinearity=lasagne.nonlinearities.rectify))
	_ = ReshapeLayer(_, ([0], 64, 16, 16))
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = Upscale2DLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 128, 3, pad='same'))
	_ = Upscale2DLayer(_, 2)
	_ = batch_norm(Conv2DDNNLayer(_, 256, 3, pad='same'))
	_ = batch_norm(Conv2DDNNLayer(_, 256, 3, pad='same'))
	l_generator = batch_norm(Conv2DDNNLayer(_, 3, 3, pad='same', nonlinearity=lasagne.nonlinearities.sigmoid))


	print('--------------------')
	print('Generator architecture: \n')

	# print the shape of every layer
	allLayers = lasagne.layers.get_all_layers(l_generator)
	for l in allLayers:
		print(lasagne.layers.get_output_shape(l))

	print("Generator output:", l_generator.output_shape)
	return l_generator
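A minimal usage sketch (my addition, not part of the original snippet): it compiles the generator into a callable that maps 64-dimensional noise vectors to images. Note that Conv2DDNNLayer requires a cuDNN-enabled GPU.

import numpy as np
import theano
import theano.tensor as T
import lasagne

z_var = T.fmatrix('z')  # (batch, 64) latent codes
l_gen = createGenerator2(input_var=z_var)
generate = theano.function(
    [z_var], lasagne.layers.get_output(l_gen, deterministic=True))

samples = generate(np.random.rand(8, 64).astype(np.float32))
print(samples.shape)  # (8, 3, 64, 64): 16x16 feature maps upscaled twice by 2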
Example #2
    def get_UNet(n_input_channels=1, BATCH_SIZE=None, num_output_classes=2, pad='same', nonlinearity=L.nonlinearities.leaky_rectify,
                   input_dim=(128, 128), base_n_filters=128):

        net = OrderedDict()
        net['input'] = InputLayer((BATCH_SIZE, n_input_channels, input_dim[0], input_dim[1]))

        net['contr_1_1'] = batch_norm(ConvLayer(net['input'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad))
        net['contr_1_2'] = batch_norm(ConvLayer(net['contr_1_1'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad))
        net['pool1'] = Pool2DLayer(net['contr_1_2'], 2)

        net['contr_2_1'] = batch_norm(ConvLayer(net['pool1'], base_n_filters * 2, 3, nonlinearity=nonlinearity, pad=pad))
        net['contr_2_2'] = batch_norm(ConvLayer(net['contr_2_1'], base_n_filters * 2, 3, nonlinearity=nonlinearity, pad=pad))
        net['pool2'] = Pool2DLayer(net['contr_2_2'], 2)

        net['contr_3_1'] = batch_norm(ConvLayer(net['pool2'], base_n_filters * 4, 3, nonlinearity=nonlinearity, pad=pad))
        net['contr_3_2'] = batch_norm(ConvLayer(net['contr_3_1'], base_n_filters * 4, 3, nonlinearity=nonlinearity, pad=pad))
        net['pool3'] = Pool2DLayer(net['contr_3_2'], 2)

        net['contr_4_1'] = batch_norm(ConvLayer(net['pool3'], base_n_filters * 8, 3, nonlinearity=nonlinearity, pad=pad))
        net['contr_4_2'] = batch_norm(ConvLayer(net['contr_4_1'], base_n_filters * 8, 3, nonlinearity=nonlinearity, pad=pad))
        l = net['pool4'] = Pool2DLayer(net['contr_4_2'], 2)

        # the paper does not really describe where and how dropout is added. Feel free to try more options
        l = DropoutLayer(l, p=0.4)

        net['encode_1'] = batch_norm(ConvLayer(l, base_n_filters * 16, 3, nonlinearity=nonlinearity, pad=pad))
        net['encode_2'] = batch_norm(ConvLayer(net['encode_1'], base_n_filters * 16, 3, nonlinearity=nonlinearity, pad=pad))
        net['deconv1'] = Upscale2DLayer(net['encode_2'], 2)

        net['concat1'] = ConcatLayer([net['deconv1'], net['contr_4_2']], cropping=(None, None, "center", "center"))
        net['expand_1_1'] = batch_norm(ConvLayer(net['concat1'], base_n_filters * 8, 3, nonlinearity=nonlinearity, pad=pad))
        net['expand_1_2'] = batch_norm(ConvLayer(net['expand_1_1'], base_n_filters * 8, 3, nonlinearity=nonlinearity, pad=pad))
        net['deconv2'] = Upscale2DLayer(net['expand_1_2'], 2)

        net['concat2'] = ConcatLayer([net['deconv2'], net['contr_3_2']], cropping=(None, None, "center", "center"))
        net['expand_2_1'] = batch_norm(ConvLayer(net['concat2'], base_n_filters * 4, 3, nonlinearity=nonlinearity, pad=pad))
        net['expand_2_2'] = batch_norm(ConvLayer(net['expand_2_1'], base_n_filters * 4, 3, nonlinearity=nonlinearity, pad=pad))
        net['deconv3'] = Upscale2DLayer(net['expand_2_2'], 2)

        net['concat3'] = ConcatLayer([net['deconv3'], net['contr_2_2']], cropping=(None, None, "center", "center"))
        net['expand_3_1'] = batch_norm(ConvLayer(net['concat3'], base_n_filters * 2, 3, nonlinearity=nonlinearity, pad=pad))
        net['expand_3_2'] = batch_norm(ConvLayer(net['expand_3_1'], base_n_filters * 2, 3, nonlinearity=nonlinearity, pad=pad))
        net['deconv4'] = Upscale2DLayer(net['expand_3_2'], 2)

        net['concat4'] = ConcatLayer([net['deconv4'], net['contr_1_2']], cropping=(None, None, "center", "center"))
        net['expand_4_1'] = batch_norm(ConvLayer(net['concat4'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad))
        net['expand_4_2'] = batch_norm(ConvLayer(net['expand_4_1'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad))

        net['conv_5'] = ConvLayer(net['expand_4_2'], num_output_classes, 1, nonlinearity=None)  # (bs, nrClasses, x, y)
        net['dimshuffle'] = DimshuffleLayer(net['conv_5'], (1, 0, 2, 3))  # (nrClasses, bs, x, y)
        net['reshapeSeg'] = ReshapeLayer(net['dimshuffle'], (num_output_classes, -1))  # (nrClasses, bs*x*y)
        net['dimshuffle2'] = DimshuffleLayer(net['reshapeSeg'], (1, 0))  # (bs*x*y, nrClasses)

        # Watch out: the final nonlinearity is applied here -> take outputs from this layer onwards, not from the layers before it!
        net['output_flat'] = NonlinearityLayer(net['dimshuffle2'], nonlinearity=L.nonlinearities.sigmoid)  # (bs*x*y, nrClasses)
        img_shape = net["conv_5"].output_shape
        net['output'] = ReshapeLayer(net['output_flat'], (-1, img_shape[2], img_shape[3], img_shape[1]))  # (bs, x, y, nrClasses)

        return net
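A quick sanity-check sketch (assuming get_UNet and the layer classes above are available at module level, which the snippet's indentation does not show):

import lasagne

net = get_UNet(n_input_channels=1, input_dim=(128, 128), base_n_filters=16)
for layer in lasagne.layers.get_all_layers(net['output']):
    print(type(layer).__name__, lasagne.layers.get_output_shape(layer))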
Example #3
def build_decoder(net):
    net['uconv5_3'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    print("uconv5_3: {}".format(net['uconv5_3'].output_shape[1:]))

    net['uconv5_2'] = ConvLayer(net['uconv5_3'], 512, 3, pad=1)
    print("uconv5_2: {}".format(net['uconv5_2'].output_shape[1:]))

    net['uconv5_1'] = ConvLayer(net['uconv5_2'], 512, 3, pad=1)
    print("uconv5_1: {}".format(net['uconv5_1'].output_shape[1:]))

    net['upool4'] = Upscale2DLayer(net['uconv5_1'], scale_factor=2)
    print("upool4: {}".format(net['upool4'].output_shape[1:]))

    net['uconv4_3'] = ConvLayer(net['upool4'], 512, 3, pad=1)
    print("uconv4_3: {}".format(net['uconv4_3'].output_shape[1:]))

    net['uconv4_2'] = ConvLayer(net['uconv4_3'], 512, 3, pad=1)
    print("uconv4_2: {}".format(net['uconv4_2'].output_shape[1:]))

    net['uconv4_1'] = ConvLayer(net['uconv4_2'], 512, 3, pad=1)
    print("uconv4_1: {}".format(net['uconv4_1'].output_shape[1:]))

    net['upool3'] = Upscale2DLayer(net['uconv4_1'], scale_factor=2)
    print("upool3: {}".format(net['upool3'].output_shape[1:]))

    net['uconv3_3'] = ConvLayer(net['upool3'], 256, 3, pad=1)
    print("uconv3_3: {}".format(net['uconv3_3'].output_shape[1:]))

    net['uconv3_2'] = ConvLayer(net['uconv3_3'], 256, 3, pad=1)
    print("uconv3_2: {}".format(net['uconv3_2'].output_shape[1:]))

    net['uconv3_1'] = ConvLayer(net['uconv3_2'], 256, 3, pad=1)
    print("uconv3_1: {}".format(net['uconv3_1'].output_shape[1:]))

    net['upool2'] = Upscale2DLayer(net['uconv3_1'], scale_factor=2)
    print("upool2: {}".format(net['upool2'].output_shape[1:]))

    net['uconv2_2'] = ConvLayer(net['upool2'], 128, 3, pad=1)
    print("uconv2_2: {}".format(net['uconv2_2'].output_shape[1:]))

    net['uconv2_1'] = ConvLayer(net['uconv2_2'], 128, 3, pad=1)
    print("uconv2_1: {}".format(net['uconv2_1'].output_shape[1:]))

    net['upool1'] = Upscale2DLayer(net['uconv2_1'], scale_factor=2)
    print("upool1: {}".format(net['upool1'].output_shape[1:]))

    net['uconv1_2'] = ConvLayer(net['upool1'], 64, 3, pad=1)
    print("uconv1_2: {}".format(net['uconv1_2'].output_shape[1:]))

    net['uconv1_1'] = ConvLayer(net['uconv1_2'], 64, 3, pad=1)
    print("uconv1_1: {}".format(net['uconv1_1'].output_shape[1:]))

    net['output'] = ConvLayer(net['uconv1_1'], 1, 1, pad=0, nonlinearity=sigmoid)
    print("output: {}".format(net['output'].output_shape[1:]))

    return net
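Since build_decoder only reads net['conv5_3'], a stand-in encoder is enough to exercise it (a sketch; the InputLayer below replaces the real VGG encoder the project uses, and the snippet's own ConvLayer/Upscale2DLayer/sigmoid imports are assumed to be in scope):

from lasagne.layers import InputLayer

net = build_decoder({'conv5_3': InputLayer((None, 512, 8, 8))})
print(net['output'].output_shape)  # (None, 1, 128, 128) after four 2x upscalings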
Example #4
def build_UNet(n_input_channels=1, BATCH_SIZE=None, num_output_classes=2, pad='same', nonlinearity=lasagne.nonlinearities.elu, input_dim=(128, 128), base_n_filters=64, do_dropout=False):
    net = OrderedDict()
    net['input'] = InputLayer((BATCH_SIZE, n_input_channels, input_dim[0], input_dim[1]))

    net['contr_1_1'] = ConvLayer(net['input'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad)
    net['contr_1_2'] = ConvLayer(net['contr_1_1'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad)
    net['pool1'] = Pool2DLayer(net['contr_1_2'], 2)

    net['contr_2_1'] = ConvLayer(net['pool1'], base_n_filters*2, 3, nonlinearity=nonlinearity, pad=pad)
    net['contr_2_2'] = ConvLayer(net['contr_2_1'], base_n_filters*2, 3, nonlinearity=nonlinearity, pad=pad)
    net['pool2'] = Pool2DLayer(net['contr_2_2'], 2)

    net['contr_3_1'] = ConvLayer(net['pool2'], base_n_filters*4, 3, nonlinearity=nonlinearity, pad=pad)
    net['contr_3_2'] = ConvLayer(net['contr_3_1'], base_n_filters*4, 3, nonlinearity=nonlinearity, pad=pad)
    net['pool3'] = Pool2DLayer(net['contr_3_2'], 2)

    net['contr_4_1'] = ConvLayer(net['pool3'], base_n_filters*8, 3, nonlinearity=nonlinearity, pad=pad)
    net['contr_4_2'] = ConvLayer(net['contr_4_1'], base_n_filters*8, 3, nonlinearity=nonlinearity, pad=pad)
    l = net['pool4'] = Pool2DLayer(net['contr_4_2'], 2)
    # the paper does not really describe where and how dropout is added. Feel free to try more options
    if do_dropout:
        l = DropoutLayer(l, p=0.4)

    net['encode_1'] = ConvLayer(l, base_n_filters*16, 3, nonlinearity=nonlinearity, pad=pad)
    net['encode_2'] = ConvLayer(net['encode_1'], base_n_filters*16, 3, nonlinearity=nonlinearity, pad=pad)
    net['deconv1'] = Upscale2DLayer(net['encode_2'], 2)

    net['concat1'] = ConcatLayer([net['deconv1'], net['contr_4_2']], cropping=(None, None, "center", "center"))
    net['expand_1_1'] = ConvLayer(net['concat1'], base_n_filters*8, 3, nonlinearity=nonlinearity, pad=pad)
    net['expand_1_2'] = ConvLayer(net['expand_1_1'], base_n_filters*8, 3, nonlinearity=nonlinearity, pad=pad)
    net['deconv2'] = Upscale2DLayer(net['expand_1_2'], 2)

    net['concat2'] = ConcatLayer([net['deconv2'], net['contr_3_2']], cropping=(None, None, "center", "center"))
    net['expand_2_1'] = ConvLayer(net['concat2'], base_n_filters*4, 3, nonlinearity=nonlinearity, pad=pad)
    net['expand_2_2'] = ConvLayer(net['expand_2_1'], base_n_filters*4, 3, nonlinearity=nonlinearity, pad=pad)
    net['deconv3'] = Upscale2DLayer(net['expand_2_2'], 2)

    net['concat3'] = ConcatLayer([net['deconv3'], net['contr_2_2']], cropping=(None, None, "center", "center"))
    net['expand_3_1'] = ConvLayer(net['concat3'], base_n_filters*2, 3, nonlinearity=nonlinearity, pad=pad)
    net['expand_3_2'] = ConvLayer(net['expand_3_1'], base_n_filters*2, 3, nonlinearity=nonlinearity, pad=pad)
    net['deconv4'] = Upscale2DLayer(net['expand_3_2'], 2)

    net['concat4'] = ConcatLayer([net['deconv4'], net['contr_1_2']], cropping=(None, None, "center", "center"))
    net['expand_4_1'] = ConvLayer(net['concat4'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad)
    net['expand_4_2'] = ConvLayer(net['expand_4_1'], base_n_filters, 3, nonlinearity=nonlinearity, pad=pad)

    net['output_segmentation'] = ConvLayer(net['expand_4_2'], num_output_classes, 1, nonlinearity=None)
    net['dimshuffle'] = DimshuffleLayer(net['output_segmentation'], (1, 0, 2, 3))
    net['reshapeSeg'] = ReshapeLayer(net['dimshuffle'], (num_output_classes, -1))
    net['dimshuffle2'] = DimshuffleLayer(net['reshapeSeg'], (1, 0))
    net['output_flattened'] = NonlinearityLayer(net['dimshuffle2'], nonlinearity=lasagne.nonlinearities.softmax)

    return net
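A training-loss sketch for the flattened softmax output (my addition; representing pixel labels as a flattened int vector of length batch*x*y is an assumption that matches the (bs*x*y, classes) prediction layout):

import theano
import theano.tensor as T
import lasagne

net = build_UNet(input_dim=(128, 128), base_n_filters=16)
prediction = lasagne.layers.get_output(net['output_flattened'])
targets = T.ivector('targets')  # one class index per pixel, flattened
loss = lasagne.objectives.categorical_crossentropy(prediction, targets).mean()
params = lasagne.layers.get_all_params(net['output_flattened'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=1e-4)
train_fn = theano.function([net['input'].input_var, targets], loss, updates=updates)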
Example #5
def build_model_L(ratio=[2, 3], mode='repeat'):
    input_var = tensor.ftensor4('x')  # (B, C, H, W)
    input0 = InputLayer(shape=(None, None, None, None),
                        input_var=input_var,
                        name='input0')
    x = Upscale2DLayer(input0, scale_factor=ratio, mode=mode)
    return x
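A shape check for the anisotropic ratio (a sketch; assumes theano/numpy/lasagne imports as in the surrounding snippets). ratio=[2, 3] scales height by 2 and width by 3:

import numpy as np
import theano
import lasagne

l_out = build_model_L(ratio=[2, 3], mode='repeat')
fn = theano.function([l_out.input_layer.input_var],
                     lasagne.layers.get_output(l_out))
print(fn(np.zeros((1, 1, 4, 4), dtype='float32')).shape)  # (1, 1, 8, 12)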
Example #6
def build_baseline5_fan(input_var):
    """ Using Baseline 1 with the novel FAN layer.

    VGG conv4_1 is used for feature extraction
    """
    # TODO remove these imports + move relevant parts to layers.py once
    # everything is up and running
    import theano.tensor as T
    import numpy as np
    net = OrderedDict()

    # Input, standardization
    last = net['input'] = InputLayer(
        (None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    net['features_s8'] = get_features(last)["conv4_1"]
    net['features'] = Upscale2DLayer(net["features_s8"], 8)
    net['mask'] = ExpressionLayer(
        net["features"], lambda x: 1. * T.eq(x, x.max(axis=1, keepdims=True)))

    last = net["middle"] = ConvLayer(last, 3, 1, nonlinearity=linear)
    last = net["fan"] = FeatureAwareNormLayer(
        (last, net['mask']),
        beta=nn.init.Constant(np.float32(128.)),
        gamma=nn.init.Constant(np.float32(25.)))

    return last, net
Example #7
def upsample(layer, scale, mode="repeat"):
    """ Upsampling by repetition or bilinear upsampling
    """
    if mode in ["repeat", "dilate"]:
        return Upscale2DLayer(layer, scale, mode=mode)
    elif mode in ["bilinear"]:
        nb_kernels = nn.layers.get_output_shape(layer)[1]
        return upsample_bilinear(layer, nb_kernels, ratio=scale)
    raise ValueError("Invalid mode: " + str(mode))
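Usage sketch for the two Lasagne-native modes ('bilinear' goes through the project's own upsample_bilinear helper, which is not shown here):

from lasagne.layers import InputLayer

l_in = InputLayer((None, 16, 8, 8))
print(upsample(l_in, 2, mode='repeat').output_shape)  # (None, 16, 16, 16)
print(upsample(l_in, 2, mode='dilate').output_shape)  # same shape, zeros inserted between pixels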
Example #8
def build_model(Zs):
    net = dict()

    net['Z1'] = InputLayer(input_var=Zs['Z1'], shape=(None, 1, 256, 256))
    net['Z2'] = InputLayer(input_var=Zs['Z2'], shape=(None, 1, 128, 128))
    net['Z3'] = InputLayer(input_var=Zs['Z3'], shape=(None, 1, 64, 64))
    net['Z4'] = InputLayer(input_var=Zs['Z4'], shape=(None, 1, 32, 32))
    net['Z5'] = InputLayer(input_var=Zs['Z5'], shape=(None, 1, 16, 16))

    # first block
    net.update(build_conv(net['Z5'], '5', 8))
    net.update(build_conv(net['Z4'], '4', 8))
    net.update(build_conv(net['Z3'], '3', 8))
    net.update(build_conv(net['Z2'], '2', 8))
    net.update(build_conv(net['Z1'], '1', 8))

    # merge 4 & 5
    net['upsample_5'] = Upscale2DLayer(net['conv5_3'], 2)
    net['concat_45'] = ConcatLayer([net['upsample_5'], net['conv4_3']])
    net.update(build_conv(net['concat_45'], '45', 16))

    # merge 3 & 45
    net['upsample_45'] = Upscale2DLayer(net['conv45_3'], 2)
    net['concat_345'] = ConcatLayer([net['upsample_45'], net['conv3_3']])
    net.update(build_conv(net['concat_345'], '345', 24))

    # merge 2 & 345
    net['upsample_345'] = Upscale2DLayer(net['conv345_3'], 2)
    net['concat_2345'] = ConcatLayer([net['upsample_345'], net['conv2_3']])
    net.update(build_conv(net['concat_2345'], '2345', 32))

    # merge 1 & 2345
    net['upsample_2345'] = Upscale2DLayer(net['conv2345_3'], 2)
    net['concat_12345'] = ConcatLayer([net['upsample_2345'], net['conv1_3']])
    net.update(build_conv(net['concat_12345'], '12345', 32))

    net['output'] = ConvLayer(net['conv12345_3'],
                              3,
                              1,
                              pad=0,
                              flip_filters=False)

    return net
Example #9
def create_model(incoming, options):
    input_p = 0.2
    hidden_p = 0.5
    conv_num_filters1 = int(100 / (1.0 - input_p))
    conv_num_filters2 = int(150 / (1.0 - hidden_p))
    conv_num_filters3 = int(200 / (1.0 - hidden_p))
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = int(options['BOTTLENECK'] / 0.5)
    dense_mid_size = int(options['DENSE'] / 0.5)
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()
    dropout0 = DropoutLayer(incoming, p=0.2, name='dropout0')
    conv2d1 = Conv2DLayer(dropout0, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d2 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d2')
    dropout1 = DropoutLayer(maxpool2d2, name='dropout1')
    conv2d3 = Conv2DLayer(dropout1, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d3', nonlinearity=scaled_tanh)
    maxpool2d4 = MaxPool2DLayer(conv2d3, pool_size=pool_size, name='maxpool2d4', pad=(1,0))
    dropout2 = DropoutLayer(maxpool2d4, name='dropout2')
    conv2d5 = Conv2DLayer(dropout2, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d5', nonlinearity=scaled_tanh)
    reshape6 = ReshapeLayer(conv2d5, shape=([0], -1), name='reshape6')  # flatten conv feature maps
    reshape6_output = reshape6.output_shape[1]
    dropout3 = DropoutLayer(reshape6, name='dropout3')
    dense7 = DenseLayer(dropout3, num_units=dense_mid_size, name='dense7', nonlinearity=scaled_tanh)
    dropout4 = DropoutLayer(dense7, name='dropout4')
    bottleneck = DenseLayer(dropout4, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense8 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense8', nonlinearity=linear)
    dense9 = DenseLayer(dense8, num_units=reshape6_output, W=dense7.W.T, nonlinearity=scaled_tanh, name='dense9')
    reshape10 = ReshapeLayer(dense9, shape=([0], conv_num_filters3, 3, 5), name='reshape10')  # (batch, conv_num_filters3, 3, 5)
    deconv2d11 = Deconv2DLayer(reshape10, conv2d5.input_shape[1], conv2d5.filter_size, stride=conv2d5.stride,
                               W=conv2d5.W, flip_filters=not conv2d5.flip_filters, name='deconv2d11', nonlinearity=scaled_tanh)
    upscale2d12 = Upscale2DLayer(deconv2d11, scale_factor=pool_size, name='upscale2d12')
    deconv2d13 = Deconv2DLayer(upscale2d12, conv2d3.input_shape[1], conv2d3.filter_size, stride=conv2d3.stride,
                               W=conv2d3.W, flip_filters=not conv2d3.flip_filters, name='deconv2d13', nonlinearity=scaled_tanh)
    upscale2d14 = Upscale2DLayer(deconv2d13, scale_factor=pool_size, name='upscale2d14')
    deconv2d15 = Deconv2DLayer(upscale2d14, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    reshape16 = ReshapeLayer(deconv2d15, ([0], -1), name='reshape16')
    return reshape16, bottleneck
Example #10
def G_mnist_mode_recovery(
    num_channels        = 1,
    resolution          = 32,
    fmap_base           = 64,
    fmap_decay          = 1.0,
    fmap_max            = 256,
    latent_size         = None,
    label_size          = 10,
    normalize_latents   = True,
    use_wscale          = False,
    use_pixelnorm       = False,
    use_batchnorm       = True,
    tanh_at_end         = True,
    progressive         = False,
    **kwargs):

    R = int(np.log2(resolution))
    assert resolution == 2**R and resolution >= 4
    cur_lod = theano.shared(np.float32(0.0))
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    def PN(layer): return PixelNormLayer(layer, name=layer.name+'pn') if use_pixelnorm else layer
    def BN(layer): return lasagne.layers.batch_norm(layer) if use_batchnorm else layer
    def WS(layer): return WScaleLayer(layer, name=layer.name+'S') if use_wscale else layer
    if latent_size is None: latent_size = nf(0)

    input_layers = [InputLayer(name='Glatents', shape=[None, latent_size])]
    net = input_layers[-1]
    if normalize_latents:
        net = PixelNormLayer(net, name='Glnorm')
    if label_size:
        input_layers += [InputLayer(name='Glabels', shape=[None, label_size])]
        net = ConcatLayer (name='Gina', incomings=[net, input_layers[-1]])

    net = ReshapeLayer(name='Ginb', incoming=net, shape=[[0], [1], 1, 1])
    net = PN(BN(WS(Conv2DLayer(net, name='G1a', num_filters=64, filter_size=4, pad='full', nonlinearity=vlrelu, W=irelu))))

    lods  = [net]
    for I in range(2, R):  # I = 2, 3, ..., R-1
        net = Upscale2DLayer(net, name='G%dup' % I, scale_factor=2)
        net = PN(BN(WS(Conv2DLayer(net, name='G%da'  % I, num_filters=nf(I-1), filter_size=3, pad=1, nonlinearity=vlrelu, W=irelu))))
        lods += [net]

    if progressive:
        lods = [WS(Conv2DLayer(l, name='Glod%d' % i, num_filters=num_channels, filter_size=3, pad=1, nonlinearity=linear, W=ilinear)) for i, l in enumerate(reversed(lods))]        # Should be this
        #lods = [WS(NINLayer(l, name='Glod%d' % i, num_units=num_channels, nonlinearity=linear, W=ilinear)) for i, l in enumerate(reversed(lods))]                                  # .. but this is better
        output_layer = LODSelectLayer(name='Glod', incomings=lods, cur_lod=cur_lod, first_incoming_lod=0)
    else:
        net = WS(Conv2DLayer(net, name='toRGB', num_filters=num_channels, filter_size=3, pad=1, nonlinearity=linear, W=ilinear))                                                    # Should be this
        #net = WS(NINLayer(net, name='toRGB', num_units=num_channels, nonlinearity=linear, W=ilinear))                                                                              # .. but this is better
        output_layer = net

    if tanh_at_end:
        output_layer = NonlinearityLayer(output_layer, name='Gtanh', nonlinearity=tanh)

    return dict(input_layers=input_layers, output_layers=[output_layer], cur_lod=cur_lod)
Example #11
    def network_generator(self, input_var, network_weights=None):

        # Input layer
        layers = []
        n_blocks = int(np.log2(
            self.input_size / 8)) + 1  # end up with 8x8 output
        layers.append(
            InputLayer(shape=(None, self.hidden_size),
                       input_var=input_var,
                       name='generator/input'))

        # Dense layer up (from h to n*8*8)
        layers.append(
            dense_layer(layers[-1],
                        n_units=(8 * 8 * self.n_filters),
                        name='generator/dense%d' % len(layers),
                        network_weights=network_weights))
        layers.append(
            ReshapeLayer(layers[-1], (-1, self.n_filters, 8, 8),
                         name='generator/reshape%d' % len(layers)))

        # Convolutional blocks (decoder)
        for i_block in range(1, n_blocks + 1):
            layers.append(
                conv_layer(layers[-1],
                           n_filters=self.n_filters,
                           stride=1,
                           name='generator/conv%d' % len(layers),
                           network_weights=network_weights))
            layers.append(
                conv_layer(layers[-1],
                           n_filters=self.n_filters,
                           stride=1,
                           name='generator/conv%d' % len(layers),
                           network_weights=network_weights))
            if i_block != n_blocks:
                layers.append(
                    Upscale2DLayer(layers[-1],
                                   scale_factor=2,
                                   name='generator/upsample%d' % len(layers)))

        # Final layer (make sure input images are in the range [-1, 1] if tanh used)
        layers.append(
            conv_layer(layers[-1],
                       n_filters=3,
                       stride=1,
                       name='generator/output',
                       network_weights=network_weights,
                       nonlinearity=sigmoid))

        # Network in dictionary form
        network = {layer.name: layer for layer in layers}

        return network
Example #12
def create_model(incoming, options):
    conv_num_filters1 = 100
    conv_num_filters2 = 150
    conv_num_filters3 = 200
    filter_size1 = 5
    filter_size2 = 5
    filter_size3 = 3
    pool_size = 2
    encode_size = options['BOTTLENECK']
    dense_mid_size = options['DENSE']
    pad_in = 'valid'
    pad_out = 'full'
    scaled_tanh = create_scaled_tanh()

    conv2d1 = Conv2DLayer(incoming, num_filters=conv_num_filters1, filter_size=filter_size1, pad=pad_in, name='conv2d1', nonlinearity=scaled_tanh)
    maxpool2d3 = MaxPool2DLayer(conv2d1, pool_size=pool_size, name='maxpool2d3')
    bn2 = BatchNormLayer(maxpool2d3, name='batchnorm2')
    conv2d4 = Conv2DLayer(bn2, num_filters=conv_num_filters2, filter_size=filter_size2, pad=pad_in, name='conv2d4', nonlinearity=scaled_tanh)
    maxpool2d6 = MaxPool2DLayer(conv2d4, pool_size=pool_size, name='maxpool2d6', pad=(1,0))
    bn3 = BatchNormLayer(maxpool2d6, name='batchnorm3')
    conv2d7 = Conv2DLayer(bn3, num_filters=conv_num_filters3, filter_size=filter_size3, pad=pad_in, name='conv2d7', nonlinearity=scaled_tanh)
    reshape9 = ReshapeLayer(conv2d7, shape=([0], -1), name='reshape9')  # 3000
    reshape9_output = reshape9.output_shape[1]
    bn8 = BatchNormLayer(reshape9, name='batchnorm8')
    dense10 = DenseLayer(bn8, num_units=dense_mid_size, name='dense10', nonlinearity=scaled_tanh)
    bn11 = BatchNormLayer(dense10, name='batchnorm11')
    bottleneck = DenseLayer(bn11, num_units=encode_size, name='bottleneck', nonlinearity=linear)
    # print_network(bottleneck)
    dense12 = DenseLayer(bottleneck, num_units=dense_mid_size, W=bottleneck.W.T, name='dense12', nonlinearity=linear)
    dense13 = DenseLayer(dense12, num_units=reshape9_output, W=dense10.W.T, nonlinearity=scaled_tanh, name='dense13')
    reshape14 = ReshapeLayer(dense13, shape=([0], conv_num_filters3, 3, 5), name='reshape14')  # (batch, conv_num_filters3, 3, 5)
    deconv2d15 = Deconv2DLayer(reshape14, conv2d7.input_shape[1], conv2d7.filter_size, stride=conv2d7.stride,
                               W=conv2d7.W, flip_filters=not conv2d7.flip_filters, name='deconv2d15', nonlinearity=scaled_tanh)
    upscale2d16 = Upscale2DLayer(deconv2d15, scale_factor=pool_size, name='upscale2d16')
    deconv2d17 = Deconv2DLayer(upscale2d16, conv2d4.input_shape[1], conv2d4.filter_size, stride=conv2d4.stride,
                               W=conv2d4.W, flip_filters=not conv2d4.flip_filters, name='deconv2d17', nonlinearity=scaled_tanh)
    upscale2d18 = Upscale2DLayer(deconv2d17, scale_factor=pool_size, name='upscale2d18')
    deconv2d19 = Deconv2DLayer(upscale2d18, conv2d1.input_shape[1], conv2d1.filter_size, stride=conv2d1.stride,
                               crop=(1, 0), W=conv2d1.W, flip_filters=not conv2d1.flip_filters, name='deconv2d19', nonlinearity=scaled_tanh)
    reshape20 = ReshapeLayer(deconv2d19, ([0], -1), name='reshape20')
    return reshape20, bottleneck
Example #13
def build_fcn_segmenter(input_var, shape, version=2):
    ret = {}

    if version == 2:
        ret['input'] = la = InputLayer(shape, input_var)
        ret['conv%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['conv%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=16, filter_size=3))
        ret['pool%d' % len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=32, filter_size=3))
        ret['pool%d' % len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['pool%d' % len(ret)] = la = MaxPool2DLayer(la, pool_size=2)
        ret['conv%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=64, filter_size=3))
        ret['dec%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=64, filter_size=3, pad='full'))
        ret['ups%d' % len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=64, filter_size=3, pad='full'))
        ret['ups%d' % len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=32, filter_size=7, pad='full'))
        ret['ups%d' % len(ret)] = la = Upscale2DLayer(la, scale_factor=2)
        ret['dec%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=16, filter_size=3, pad='full'))
        ret['conv%d' % len(ret)] = la = bn(
            Conv2DLayer(la, num_filters=8, filter_size=7))
        ret['output'] = la = Conv2DLayer(
            la,
            num_filters=1,
            filter_size=7,
            pad='full',
            nonlinearity=nn.nonlinearities.sigmoid)

    return ret, nn.layers.get_output(ret['output']), \
            nn.layers.get_output(ret['output'], deterministic=True)
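A compile sketch (assumes bn aliases lasagne.layers.batch_norm and nn aliases lasagne, as the snippet's names suggest). The third return value is built with deterministic=True, so batch-norm statistics are fixed at inference time:

import theano
import theano.tensor as T

x = T.tensor4('x')
ret, out_train, out_test = build_fcn_segmenter(x, shape=(None, 1, 64, 64))
predict = theano.function([x], out_test)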
Example #14
def build_net(IMAGE_W):
    net = {}
    l = InputLayer((None, 3, IMAGE_W, IMAGE_W))
    net['input'] = l

    l = Conv(l, 16, 3, pad='same')
    net['T4'] = l

    l = Conv(Pool(l, 2), 32, 3, pad='same')
    net['T3'] = l

    l = Conv(Pool(l, 2), 48, 3, pad='same')
    net['T2'] = l

    l = Conv(Pool(l, 2), 48, 3, pad='same')
    net['T1'] = l

    l = Conv(Conv(net['T1'], 48, 3, pad='same'), 48, 3, pad='same')
    l = Upscale2DLayer(l, 2)
    net['M1'] = l

    l = ConcatLayer((net['T2'], net['M1']))
    l = Conv(Conv(l, 48, 3, pad='same'), 32, 3, pad='same')
    l = Upscale2DLayer(l, 2)
    net['M2'] = l

    l = ConcatLayer((net['T3'], net['M2']))
    l = Conv(Conv(l, 32, 3, pad='same'), 16, 3, pad='same')
    l = Upscale2DLayer(l, 2)
    net['M3'] = l

    l = ConcatLayer((net['T4'], net['M3']))
    l = Conv(Conv(l, 16, 3, pad='same'), 2, 3, pad='same', nonlinearity=None)

    l = lasagne.layers.ReshapeLayer(l, (-1, IMAGE_W*IMAGE_W))
    l = lasagne.layers.NonlinearityLayer(l, softmax)
    l = lasagne.layers.ReshapeLayer(l, (-1, 2, IMAGE_W, IMAGE_W))
    net['M4'] = l

    return net
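The snippet leaves Conv, Pool, softmax, and lasagne to module-level aliases; a minimal set that makes it run (an assumption about the original project's imports; these must live in the module where build_net is defined):

import lasagne
from lasagne.layers import Conv2DLayer as Conv, Pool2DLayer as Pool
from lasagne.nonlinearities import softmax

net = build_net(IMAGE_W=128)
print(net['M4'].output_shape)  # (None, 2, 128, 128)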
Example #15
def G_paper(
    num_channels        = 1,        # Overridden based on dataset.
    resolution          = 32,       # Overridden based on dataset.
    label_size          = 0,        # Overridden based on dataset.
    fmap_base           = 4096,
    fmap_decay          = 1.0,
    fmap_max            = 256,
    latent_size         = None,
    normalize_latents   = True,
    use_wscale          = True,
    use_pixelnorm       = True,
    use_leakyrelu       = True,
    use_batchnorm       = False,
    tanh_at_end         = None,
    **kwargs):

    R = int(np.log2(resolution))
    assert resolution == 2**R and resolution >= 4
    cur_lod = theano.shared(np.float32(0.0))
    def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    def PN(layer): return PixelNormLayer(layer, name=layer.name+'pn') if use_pixelnorm else layer
    def BN(layer): return lasagne.layers.batch_norm(layer) if use_batchnorm else layer
    def WS(layer): return WScaleLayer(layer, name=layer.name+'S') if use_wscale else layer
    if latent_size is None: latent_size = nf(0)
    (act, iact) = (lrelu, ilrelu) if use_leakyrelu else (relu, irelu)

    input_layers = [InputLayer(name='Glatents', shape=[None, latent_size])]
    net = input_layers[-1]
    if normalize_latents:
        net = PixelNormLayer(net, name='Glnorm')
    if label_size:
        input_layers += [InputLayer(name='Glabels', shape=[None, label_size])]
        net = ConcatLayer(name='Gina', incomings=[net, input_layers[-1]])

    net = ReshapeLayer(name='Ginb', incoming=net, shape=[[0], [1], 1, 1])
    net = PN(BN(WS(Conv2DLayer(net, name='G1a', num_filters=nf(1), filter_size=4, pad='full', nonlinearity=act, W=iact))))
    net = PN(BN(WS(Conv2DLayer(net, name='G1b', num_filters=nf(1), filter_size=3, pad=1,      nonlinearity=act, W=iact))))
    lods  = [net]

    for I in range(2, R):  # I = 2, 3, ..., R-1
        net = Upscale2DLayer(net, name='G%dup' % I, scale_factor=2)
        net = PN(BN(WS(Conv2DLayer(net, name='G%da'  % I, num_filters=nf(I), filter_size=3, pad=1, nonlinearity=act, W=iact))))
        net = PN(BN(WS(Conv2DLayer(net, name='G%db'  % I, num_filters=nf(I), filter_size=3, pad=1, nonlinearity=act, W=iact))))
        lods += [net]

    lods = [WS(NINLayer(l, name='Glod%d' % i, num_units=num_channels, nonlinearity=linear, W=ilinear)) for i, l in enumerate(reversed(lods))]
    output_layer = LODSelectLayer(name='Glod', incomings=lods, cur_lod=cur_lod, first_incoming_lod=0)
    if tanh_at_end is not None:
        output_layer = NonlinearityLayer(output_layer, name='Gtanh', nonlinearity=tanh)
        if tanh_at_end != 1.0:
            output_layer = non_trainable(ScaleLayer(output_layer, name='Gtanhs', scales=lasagne.init.Constant(tanh_at_end)))
    return dict(input_layers=input_layers, output_layers=[output_layer], cur_lod=cur_lod)
Example #16
def build_nets(input_var, channels=1, do_batchnorm=True, z_dim=100):
    
    def ns(shape):
        ret=list(shape)
        ret[0]=[0]
        return tuple(ret)
    
    ret = {}
    bn = batch_norm if do_batchnorm else lambda x:x
    ret['ae_in'] = layer = InputLayer(shape=(None,channels,28,28), input_var=input_var)
    ret['ae_conv1'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=5))
    ret['ae_pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['ae_conv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['ae_pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['ae_enc'] = layer = DenseLayer(layer, num_units=z_dim,
            nonlinearity=nn.nonlinearities.tanh)
    ret['ae_unenc'] = layer = bn(nn.layers.DenseLayer(layer,
        num_units = np.product(nn.layers.get_output_shape(ret['ae_pool2'])[1:])))
    ret['ae_resh'] = layer = ReshapeLayer(layer,
            shape=ns(nn.layers.get_output_shape(ret['ae_pool2'])))
    ret['ae_depool2'] = layer = Upscale2DLayer(layer, scale_factor=2)
    ret['ae_deconv2'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=3,
        pad='full'))
    ret['ae_depool1'] = layer = Upscale2DLayer(layer, scale_factor=2)
    ret['ae_out'] = Conv2DLayer(layer, num_filters=1, filter_size=5, pad='full',
            nonlinearity=nn.nonlinearities.sigmoid)
    
    ret['disc_in'] = layer = InputLayer(shape=(None,channels,28,28), input_var=input_var)
    ret['disc_conv1'] = layer = bn(Conv2DLayer(layer, num_filters=64, filter_size=5))
    ret['disc_pool1'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['disc_conv2'] = layer = bn(Conv2DLayer(layer, num_filters=128, filter_size=3))
    ret['disc_pool2'] = layer = MaxPool2DLayer(layer, pool_size=2)
    ret['disc_hid'] = layer = bn(DenseLayer(layer, num_units=100))
    ret['disc_out'] = DenseLayer(layer, num_units=1, nonlinearity=nn.nonlinearities.sigmoid)
    
    return ret
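A reconstruction sketch (assumes nn aliases lasagne, as the snippet suggests):

import theano
import theano.tensor as T
import lasagne

x = T.tensor4('x')
nets = build_nets(x, channels=1, z_dim=100)
reconstruct = theano.function(
    [x], lasagne.layers.get_output(nets['ae_out'], deterministic=True))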
Example #17
File: unet.py  Project: DataForces/CV_LUNA
    def expansion(depth, deepest):
        n_filters = filter_for_depth(depth)

        incoming = net['conv{}_2'.format(depth + 1)] if deepest else net[
            '_conv{}_2'.format(depth + 1)]

        upscaling = Upscale2DLayer(incoming, 4)
        net['upconv{}'.format(depth)] = Conv2DLayer(upscaling,
                                                    num_filters=n_filters,
                                                    filter_size=2,
                                                    stride=2,
                                                    W=HeNormal(gain='relu'),
                                                    nonlinearity=nonlinearity)

        if P.SPATIAL_DROPOUT > 0:
            bridge_from = DropoutLayer(net['conv{}_2'.format(depth)],
                                       P.SPATIAL_DROPOUT)
        else:
            bridge_from = net['conv{}_2'.format(depth)]

        net['bridge{}'.format(depth)] = ConcatLayer(
            [net['upconv{}'.format(depth)], bridge_from],
            axis=1,
            cropping=[None, None, 'center', 'center'])

        net['_conv{}_1'.format(depth)] = Conv2DLayer(
            net['bridge{}'.format(depth)],
            num_filters=n_filters,
            filter_size=3,
            pad='valid',
            W=HeNormal(gain='relu'),
            nonlinearity=nonlinearity)

        #if P.BATCH_NORMALIZATION:
        #    net['_conv{}_1'.format(depth)] = batch_norm(net['_conv{}_1'.format(depth)])

        if P.DROPOUT > 0:
            net['_conv{}_1'.format(depth)] = DropoutLayer(
                net['_conv{}_1'.format(depth)], P.DROPOUT)

        net['_conv{}_2'.format(depth)] = Conv2DLayer(
            net['_conv{}_1'.format(depth)],
            num_filters=n_filters,
            filter_size=3,
            pad='valid',
            W=HeNormal(gain='relu'),
            nonlinearity=nonlinearity)
Example #18
def build_cnnae_network(input_shape):
    conv_filters = 16
    filter_size = 3
    pool_size = 2
    encode_size = input_shape[2] * 2

    l_in = InputLayer(shape=(None, input_shape[1], input_shape[2],
                             input_shape[3]))

    l_conv1 = Conv2DLayer(l_in,
                          num_filters=conv_filters,
                          filter_size=(filter_size, filter_size),
                          nonlinearity=None)

    l_pool1 = MaxPool2DLayer(l_conv1, pool_size=(pool_size, pool_size))

    l_dropout1 = DropoutLayer(l_pool1, p=0.5)

    l_reshape1 = ReshapeLayer(l_dropout1, shape=([0], -1))

    l_encode = DenseLayer(l_reshape1, name='encode', num_units=encode_size)

    l_decode = DenseLayer(l_encode,
                          W=l_encode.W.T,
                          num_units=l_reshape1.output_shape[1])

    l_reshape2 = ReshapeLayer(
        l_decode,
        shape=([0], conv_filters,
               int(np.sqrt(l_reshape1.output_shape[1] / conv_filters)),
               int(np.sqrt(l_reshape1.output_shape[1] / conv_filters))))

    l_unpool1 = Upscale2DLayer(l_reshape2, scale_factor=pool_size)

    l_de = TransposedConv2DLayer(l_unpool1,
                                 num_filters=l_conv1.input_shape[1],
                                 W=l_conv1.W,
                                 filter_size=l_conv1.filter_size,
                                 stride=l_conv1.stride,
                                 crop=l_conv1.pad,
                                 flip_filters=not l_conv1.flip_filters)

    l_output = ReshapeLayer(l_de, shape=([0], -1))

    return l_output
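A shape sketch (the (None, 1, 28, 28) input is an illustrative assumption; the function is usually called with the shape of the training array):

l_out = build_cnnae_network((None, 1, 28, 28))
print(l_out.output_shape)  # (None, 784): the flattened reconstruction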
Example #19
def build_model():
    net = {}
    net['input'] = InputLayer((None, 512 * 20, 3, 3))

    au_fc_layers = []
    for i in range(20):
        net['roi_AU_N_' + str(i)] = SliceLayer(net['input'],
                                               indices=slice(
                                                   i * 512, (i + 1) * 512),
                                               axis=1)

        # try adding upsampling here so more convolutions can be applied

        net['Roi_upsample_' + str(i)] = Upscale2DLayer(net['roi_AU_N_' +
                                                           str(i)],
                                                       scale_factor=2)

        net['conv_roi_' + str(i)] = ConvLayer(net['Roi_upsample_' + str(i)],
                                              512, 3)

        net['au_fc_' + str(i)] = DenseLayer(net['conv_roi_' + str(i)],
                                            num_units=150)

        au_fc_layers += [net['au_fc_' + str(i)]]

    #
    net['local_fc'] = concat(au_fc_layers)
    net['local_fc2'] = DenseLayer(net['local_fc'], num_units=2048)

    net['local_fc_dp'] = DropoutLayer(net['local_fc2'], p=0.5)

    # net['fc_comb']=concat([net['au_fc_layer'],net['local_fc_dp']])

    # net['fc_dense']=DenseLayer(net['fc_comb'],num_units=1024)

    # net['fc_dense_dp']=DropoutLayer(net['fc_dense'],p=0.3)

    net['real_out'] = DenseLayer(net['local_fc_dp'],
                                 num_units=12,
                                 nonlinearity=sigmoid)

    # net['final']=concat([net['pred_pos_layer'],net['output_layer']])

    return net
Example #20
def build_baseline3_vgg(input_var, nb_filter=64):
    net = OrderedDict()

    def get_weights(file):
        with open(file, "rb") as f:
            vgg16 = pickle.load(f, encoding="latin-1")
            weights = vgg16['param values']
        return weights[0], weights[1], weights[2], weights[3]

    # Input, standardization
    last = net['input'] = InputLayer(
        (None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    # load feature encoder
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]

    # Pretrained Encoder as before
    W1, b1, W2, b2 = get_weights("vgg16.pkl")
    last = net["conv1_1"] = ConvLayer(last,
                                      nb_filter,
                                      3,
                                      pad=1,
                                      flip_filters=False,
                                      nonlinearity=linear,
                                      W=W1,
                                      b=b1)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last,
                                      nb_filter,
                                      3,
                                      pad=1,
                                      flip_filters=False,
                                      nonlinearity=linear,
                                      W=W2,
                                      b=b2)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["pool"] = PoolLayer(last, 2, mode="average_exc_pad")

    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)

    # feature aggregation at multiple scales
    last = fan_module_simple(last,
                             net,
                             "s8",
                             net['features_s8'],
                             nb_filter=64,
                             scale=4)
    last = fan_module_simple(last,
                             net,
                             "s4",
                             net['features_s4'],
                             nb_filter=64,
                             scale=2)

    # Decoder as before
    last = net["unpool"] = Upscale2DLayer(last, 2)
    last = net["deconv1_2"] = transpose(last,
                                        net["conv1_2"],
                                        nonlinearity=None)
    last = net["deconv1_1"] = transpose(last,
                                        net["conv1_1"],
                                        nonlinearity=None)

    last = net["bn"] = BatchNormLayer(last,
                                      beta=nn.init.Constant(128.),
                                      gamma=nn.init.Constant(25.))

    return last, net
Example #21
def build_stereo_cnn(input_var=None):
    
    conv_num_filters1 = 16
    conv_num_filters2 = 32
    conv_num_filters3 = 64
    conv_num_filters4 = 128
    filter_size1 = 7
    filter_size2 = 5
    filter_size3 = 3
    filter_size4 = 3
    pool_size = 2
    scale_factor = 2
    pad_in = 'valid'
    pad_out = 'full'

    # Input layer, as usual. X_train is expected to be a module-level array of
    # shape (N, 2, H, W); only its spatial dimensions are read here.
    network = InputLayer(shape=(None, 2, X_train.shape[2], X_train.shape[3]),
                         input_var=input_var, name="input_layer")
        
    network = batch_norm(Conv2DLayer(
            network, num_filters=conv_num_filters1, filter_size=(filter_size1, filter_size1),pad=pad_in,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="conv1"))
    
    network = MaxPool2DLayer(network, pool_size=(pool_size, pool_size),name="pool1")
    
    network = batch_norm(Conv2DLayer(
            network, num_filters=conv_num_filters2, filter_size=(filter_size2, filter_size2),pad=pad_in,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="conv2"))
    
    network = MaxPool2DLayer(network, pool_size=(pool_size, pool_size),name="pool2")
                                                                                                                                     
    network = batch_norm(Conv2DLayer(
            network, num_filters=conv_num_filters3, filter_size=(filter_size3, filter_size3),pad=pad_in,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="conv3"))
    
    network = MaxPool2DLayer(network, pool_size=(pool_size, pool_size),name="pool3")
                                                                                                                                     
    network = batch_norm(Conv2DLayer(
            network, num_filters=conv_num_filters4, filter_size=(filter_size4, filter_size4),pad=pad_in,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="conv4"))
    
    network = batch_norm(Conv2DLayer(
            network, num_filters=32, filter_size=(filter_size4, filter_size4),pad=pad_out,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="deconv1"))
    
    network = Upscale2DLayer(network, scale_factor=(pool_size, pool_size),name="upscale1")
    
    network = batch_norm(Conv2DLayer(
            network, num_filters=16, filter_size=(filter_size3, filter_size3),pad=pad_out,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="deconv2"))
    
    network = Upscale2DLayer(network, scale_factor=(pool_size, pool_size),name="upscale2")
    
    network = batch_norm(Conv2DLayer(
            network, num_filters=8, filter_size=(filter_size2, filter_size2),pad=pad_out,
            nonlinearity=lasagne.nonlinearities.rectify,
            W=lasagne.init.GlorotUniform(),name="deconv3"))
    
    network = Upscale2DLayer(network, scale_factor=(pool_size, pool_size),name="upscale3")
    
    network = batch_norm(Conv2DLayer(
            network, num_filters=1, filter_size=(filter_size1, filter_size1),pad=pad_out,
            nonlinearity=lasagne.nonlinearities.sigmoid,
            W=lasagne.init.GlorotUniform(),name="deconv4"))
                                 
    return network
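A compile sketch; X_train below is a stand-in for the module-level array the snippet expects, and must be defined in the same module as build_stereo_cnn, since the function reads it at build time:

import numpy as np
import theano.tensor as T

X_train = np.zeros((10, 2, 64, 64), dtype='float32')  # placeholder data
x = T.tensor4('x')
network = build_stereo_cnn(x)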
Example #22
lae_dense5 = DenseLayer(lae_dense4,
                        dflat,
                        W=GlorotUniform(),
                        nonlinearity=rectify)
print(get_output_shape(lae_dense5))

lae_dense5_reshape = ReshapeLayer(lae_dense5, ([0], nf, dc1, -1))  # lae_conv3
print(get_output_shape(lae_dense5_reshape))

lae_deconv6 = Conv2DLayerFast(lae_dense5_reshape,
                              150, (3, 3),
                              pad=(2, 2),
                              W=GlorotUniform(),
                              nonlinearity=rectify)
print(get_output_shape(lae_deconv6))
lae_unpool6 = Upscale2DLayer(lae_deconv6, (2, 2))
print(get_output_shape(lae_unpool6))

lae_deconv7 = Conv2DLayerFast(lae_unpool6,
                              100, (5, 5),
                              pad=(2, 2),
                              W=GlorotUniform(),
                              nonlinearity=rectify)
print(get_output_shape(lae_deconv7))
lae_unpool7 = Upscale2DLayer(lae_deconv7, (2, 2))
print(get_output_shape(lae_unpool7))

convae = Conv2DLayerFast(lae_unpool7,
                         1, (5, 5),
                         pad=(2, 2),
                         W=GlorotUniform(),
Example #23
def build_model(x=None, layer='fc8', shape=(None, 3, 227, 227), up_scale=4):
    net = {'data': InputLayer(shape=shape, input_var=x)}
    net['data_s'] = Upscale2DLayer(net['data'], up_scale)
    net['conv1'] = Conv2DLayer(net['data_s'],
                               num_filters=96,
                               filter_size=(11, 11),
                               stride=4,
                               nonlinearity=lasagne.nonlinearities.rectify)

    if layer == 'conv1':
        return net

    # pool1
    net['pool1'] = MaxPool2DLayer(net['conv1'], pool_size=(3, 3), stride=2)

    # norm1
    net['norm1'] = LocalResponseNormalization2DLayer(net['pool1'],
                                                     n=5,
                                                     alpha=0.0001 / 5.0,
                                                     beta=0.75,
                                                     k=1)

    # conv2
    # before conv2 split the data
    net['conv2_data1'] = SliceLayer(net['norm1'], indices=slice(0, 48), axis=1)
    net['conv2_data2'] = SliceLayer(net['norm1'],
                                    indices=slice(48, 96),
                                    axis=1)

    # now do the convolutions
    net['conv2_part1'] = Conv2DLayer(net['conv2_data1'],
                                     num_filters=128,
                                     filter_size=(5, 5),
                                     pad=2)
    net['conv2_part2'] = Conv2DLayer(net['conv2_data2'],
                                     num_filters=128,
                                     filter_size=(5, 5),
                                     pad=2)

    # now combine
    net['conv2'] = concat((net['conv2_part1'], net['conv2_part2']), axis=1)
    if layer == 'conv2':
        return net
    # pool2
    net['pool2'] = MaxPool2DLayer(net['conv2'], pool_size=(3, 3), stride=2)

    # norm2
    net['norm2'] = LocalResponseNormalization2DLayer(net['pool2'],
                                                     n=5,
                                                     alpha=0.0001 / 5.0,
                                                     beta=0.75,
                                                     k=1)
    # conv3
    # no group
    net['conv3'] = Conv2DLayer(net['norm2'],
                               num_filters=384,
                               filter_size=(3, 3),
                               pad=1)
    if layer == 'conv3':
        return net

    # conv4
    net['conv4_data1'] = SliceLayer(net['conv3'],
                                    indices=slice(0, 192),
                                    axis=1)
    net['conv4_data2'] = SliceLayer(net['conv3'],
                                    indices=slice(192, 384),
                                    axis=1)
    net['conv4_part1'] = Conv2DLayer(net['conv4_data1'],
                                     num_filters=192,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv4_part2'] = Conv2DLayer(net['conv4_data2'],
                                     num_filters=192,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv4'] = concat((net['conv4_part1'], net['conv4_part2']), axis=1)
    if layer == 'conv4':
        return net

    # conv5
    # group 2
    net['conv5_data1'] = SliceLayer(net['conv4'],
                                    indices=slice(0, 192),
                                    axis=1)
    net['conv5_data2'] = SliceLayer(net['conv4'],
                                    indices=slice(192, 384),
                                    axis=1)
    net['conv5_part1'] = Conv2DLayer(net['conv5_data1'],
                                     num_filters=128,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv5_part2'] = Conv2DLayer(net['conv5_data2'],
                                     num_filters=128,
                                     filter_size=(3, 3),
                                     pad=1)
    net['conv5'] = concat((net['conv5_part1'], net['conv5_part2']), axis=1)
    if layer == 'conv5':
        return net

    # pool 5
    net['pool5'] = MaxPool2DLayer(net['conv5'], pool_size=(3, 3), stride=2)

    # fc6
    net['fc6'] = DenseLayer(net['pool5'],
                            num_units=4096,
                            nonlinearity=lasagne.nonlinearities.rectify)
    if layer == 'fc6':
        return net

    # fc7
    net['fc7'] = DenseLayer(net['fc6'],
                            num_units=4096,
                            nonlinearity=lasagne.nonlinearities.rectify)
    if layer == 'fc7':
        return net

    # fc8
    net['fc8'] = DenseLayer(net['fc7'],
                            num_units=1000,
                            nonlinearity=lasagne.nonlinearities.softmax)
    if layer == 'fc8':
        return net
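A usage sketch (my addition; note 'data_s' upscales the input by up_scale before conv1, so the default 227x227 input reaches conv1 at 908x908):

import theano
import theano.tensor as T
import lasagne

x = T.tensor4('x')
net = build_model(x, layer='fc8')
predict = theano.function(
    [x], lasagne.layers.get_output(net['fc8'], deterministic=True))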
Example #24
def build_baseline6_fan_fan(input_var, nb_filter=96):
    net = OrderedDict()

    import theano.tensor as T
    import numpy as np

    # Input, standardization
    last = net['input'] = InputLayer(
        (None, 3, tools.INP_PSIZE, tools.INP_PSIZE), input_var=input_var)
    last = net['norm'] = ExpressionLayer(last, lambda x: normalize(x))

    # load feature encoder
    net['features_s8'] = get_features(last)["conv4_1"]
    net['features_s4'] = get_features(last)["conv3_3"]
    net['mask'] = ExpressionLayer(
        Upscale2DLayer(net["features_s8"], 8),
        lambda x: 1. * T.eq(x, x.max(axis=1, keepdims=True)))

    # Pretrained Encoder as before
    last = net["conv1_1"] = ConvLayer(last,
                                      nb_filter,
                                      1,
                                      pad=0,
                                      flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_1"] = BatchNormLayer(last)
    last = net["relu1_1"] = NonlinearityLayer(last, nonlinearity=rectify)
    last = net["conv1_2"] = ConvLayer(last,
                                      nb_filter,
                                      1,
                                      pad=0,
                                      flip_filters=False,
                                      nonlinearity=linear)
    last = net["bn1_2"] = BatchNormLayer(last)
    last = net["relu1_2"] = NonlinearityLayer(last, nonlinearity=rectify)

    # Modified Middle Part
    last = net["middle"] = ConvLayer(last, nb_filter, 1, nonlinearity=linear)

    # feature aggregation at multiple scales
    last = net["fan1"] = FeatureAwareNormLayer((last, net['mask']))
    last = fan_module_simple(last,
                             net,
                             "s8",
                             net['features_s8'],
                             nb_filter=nb_filter,
                             scale=8)
    last = net["fan2"] = FeatureAwareNormLayer((last, net['mask']))
    last = fan_module_simple(last,
                             net,
                             "s4",
                             net['features_s4'],
                             nb_filter=nb_filter,
                             scale=4)

    # Decoder as before
    last = net["deconv1_2"] = transpose(last,
                                        net["conv1_2"],
                                        nonlinearity=None)
    last = net["deconv1_1"] = transpose(last,
                                        net["conv1_1"],
                                        nonlinearity=None)

    last = net["fan"] = FeatureAwareNormLayer(
        (last, net['mask']),
        beta=nn.init.Constant(np.float32(128.)),
        gamma=nn.init.Constant(np.float32(25.)))

    return last, net
Example #25
def main():

    print("Building model and compiling functions...")
    X_batch = [T.tensor4('x')]
    y_batch = [T.tensor4('y')]

    net = {}
    net['input'] = InputLayer((None, 3, 256, 256), input_var= X_batch[0])
    net['conv1_1'] = ConvLayer(
        net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(
        net['conv1_1'], 64, 3, pad=1, flip_filters=False)

    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(
        net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(
        net['conv2_1'], 128, 3, pad=1, flip_filters=False)

    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(
        net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(
        net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(
        net['conv3_2'], 256, 3, pad=1, flip_filters=False)

    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(
        net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(
        net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(
        net['conv4_2'], 512, 3, pad=1, flip_filters=False)

    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(
        net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(
        net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(
        net['conv5_2'], 512, 3, pad=1, flip_filters=False)

    initSal = {}
    initSal['up'] = Upscale2DLayer(net['conv5_3'], (2, 2))
    initSal['concat'] = ConcatLayer([initSal['up'], net['conv4_3']])

    initSal['conv1'] = ConvLayer(initSal['concat'], 1024, (3, 3), pad=1, flip_filters=False)
    initSal['conv2'] = ConvLayer(initSal['conv1'], 512, (1, 1), pad=0, flip_filters=False)
    initSal['conv3'] = ConvLayer(initSal['conv2'], 256, (5, 5), pad=2, flip_filters=False)
    initSal['output'] = ConvLayer(initSal['conv3'], 1, (1, 1), nonlinearity=lasagne.nonlinearities.sigmoid, flip_filters=False)

    # ***************************************************************************************************
    recurrent = {}
    recurrent['1-sal'] = BatchNormLayer(Upscale2DLayer(initSal['conv3'], (2, 2)))
    recurrent['1-vgg'] = BatchNormLayer(ConvLayer(net['conv3_3'], 256, 1))
    recurrent['1-input'] = ConcatLayer([recurrent['1-sal'], recurrent['1-vgg']])

    recurrent['1-NIN1'] = ConvLayer(recurrent['1-input'], 256, 1, pad=0, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['1-NIN1'] = NonlinearityLayer(BatchNormLayer(recurrent['1-NIN1']))
    recurrent['1-rconv1'] = ConvLayer(recurrent['1-NIN1'], 256, 3, pad=1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['1-rconv1'] = NonlinearityLayer(BatchNormLayer(recurrent['1-rconv1']))
    recurrent['1-rconv2'] = ConvLayer(recurrent['1-rconv1'], 256, 3, pad=1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['1-rconv2'] = NonlinearityLayer(BatchNormLayer(recurrent['1-rconv2']))
    recurrent['1-rconv3'] = ConvLayer(recurrent['1-rconv2'], 256, 1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['1-sum'] = ElemwiseSumLayer([recurrent['1-rconv3'], recurrent['1-sal']])
    recurrent['1-sum'] = NonlinearityLayer(recurrent['1-sum'])
    # ***************************************************************************************************
    recurrent['2-sal'] = BatchNormLayer(Upscale2DLayer(recurrent['1-sum'], (2, 2)))
    recurrent['2-vgg'] = BatchNormLayer(ConvLayer(net['conv2_2'], 256, 1))
    recurrent['2-input'] = ConcatLayer([recurrent['2-sal'], recurrent['2-vgg']])

    recurrent['2-NIN1'] = ConvLayer(recurrent['2-input'], 256, 1, pad=0, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['2-NIN1'] = NonlinearityLayer(BatchNormLayer(recurrent['2-NIN1']))
    recurrent['2-rconv1'] = ConvLayer(recurrent['2-NIN1'], 256, 3, pad=1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['2-rconv1'] = NonlinearityLayer(BatchNormLayer(recurrent['2-rconv1']))
    recurrent['2-rconv2'] = ConvLayer(recurrent['2-rconv1'], 256, 3, pad=1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['2-rconv2'] = NonlinearityLayer(BatchNormLayer(recurrent['2-rconv2']))
    recurrent['2-rconv3'] = ConvLayer(recurrent['2-rconv2'], 256, 1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['2-sum'] = ElemwiseSumLayer([recurrent['2-rconv3'], recurrent['2-sal']])
    recurrent['2-sum'] = NonlinearityLayer(recurrent['2-sum'])
    # ***************************************************************************************************
    recurrent['3-sal'] = BatchNormLayer(Upscale2DLayer(recurrent['2-sum'], (2, 2)))
    recurrent['3-vgg'] = ConvLayer(net['conv1_2'], 128, 3, pad=1)
    recurrent['3-vgg'] = BatchNormLayer(ConvLayer(recurrent['3-vgg'], 256, 1))
    recurrent['3-input'] = ConcatLayer([recurrent['3-sal'], recurrent['3-vgg']])
    recurrent['3-NIN1'] = ConvLayer(recurrent['3-input'], 256, 1, pad=0, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['3-NIN1'] = NonlinearityLayer(BatchNormLayer(recurrent['3-NIN1']))
    recurrent['3-rconv1'] = ConvLayer(recurrent['3-NIN1'], 256, 3, pad=1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['3-rconv1'] = NonlinearityLayer(BatchNormLayer(recurrent['3-rconv1']))
    recurrent['3-rconv2'] = ConvLayer(recurrent['3-rconv1'], 256, 3, pad=1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['3-rconv2'] = NonlinearityLayer(BatchNormLayer(recurrent['3-rconv2']))
    recurrent['3-rconv3'] = ConvLayer(recurrent['3-rconv2'], 256, 1, nonlinearity=lasagne.nonlinearities.linear, flip_filters=False)
    recurrent['3-sum'] = ElemwiseSumLayer([recurrent['3-rconv3'], recurrent['3-sal']])
    recurrent['3-sum'] = NonlinearityLayer(recurrent['3-sum'])
    recurrent['3-output'] = ConvLayer(recurrent['3-sum'], 1, (1, 1), nonlinearity=lasagne.nonlinearities.sigmoid, flip_filters=False)



    prediction = []
    # loss_train = []
    # all_params = []
    # accuracy = []
    output_layer = recurrent['3-output']
    prediction.append(lasagne.layers.get_output(output_layer))
    # loss_train.append(T.mean(lasagne.objectives.binary_crossentropy(prediction[0], y_batch[0])))
    # all_params.append(lasagne.layers.get_all_params(output_layer, trainable=True))
    # accuracy.append(T.mean(T.square(prediction[0]-y_batch[0]))),
    # updates = OrderedDict()
    # update = lasagne.updates.sgd(loss_train[0], all_params[0][32::], LEARNING_RATE)
    # updates.update(update)
    # updates = lasagne.updates.apply_nesterov_momentum(updates, momentum=MOMENTUM)
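    # Input_Shape, Output_Shape and saveParamName below are assumed to be
    # module-level constants defined elsewhere in this project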

    FitSetting = dict(
        Input_Shape=Input_Shape,
        Output_Shape=Output_Shape,
        output_layer=output_layer,
        saveParamName=saveParamName,
    )
    output_layer = [output_layer]
    output = lasagne.layers.get_output(output_layer)
    Fun_test(['./image/'], ['.jpg'], FitSetting, X_batch, output, writeimg=['TPRPN'])
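The commented-out lines above are the training setup this example ships with disabled. A minimal completion, offered only as a sketch, reuses the prediction, recurrent, X_batch and y_batch names from main() and assumes LEARNING_RATE and MOMENTUM are module-level constants:

import theano
import lasagne

# binary cross-entropy between the predicted saliency map and the target
loss = lasagne.objectives.binary_crossentropy(prediction[0], y_batch[0]).mean()
params = lasagne.layers.get_all_params(recurrent['3-output'], trainable=True)
updates = lasagne.updates.sgd(loss, params, LEARNING_RATE)
updates = lasagne.updates.apply_nesterov_momentum(updates, momentum=MOMENTUM)
train_fn = theano.function([X_batch[0], y_batch[0]], loss, updates=updates)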
Example #26
def build_fcn(input_var, inner_size):
    l_in = InputLayer(shape=(None, 1) + inner_size, input_var=input_var)

    # stage 1
    conv1_1 = batch_norm(
        Conv2DLayer(l_in,
                    num_filters=32,
                    filter_size=(5, 5),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=2))
    conv1_2 = batch_norm(
        Conv2DLayer(conv1_1,
                    num_filters=32,
                    filter_size=(5, 5),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=2))
    conv1_3 = batch_norm(
        Conv2DLayer(conv1_2,
                    num_filters=32,
                    filter_size=(5, 5),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=2))
    pool1 = MaxPool2DLayer(conv1_3, pool_size=(2, 2))

    # stage 2
    conv2_1 = batch_norm(
        Conv2DLayer(pool1,
                    num_filters=64,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    conv2_2 = batch_norm(
        Conv2DLayer(conv2_1,
                    num_filters=64,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    conv2_3 = batch_norm(
        Conv2DLayer(conv2_2,
                    num_filters=64,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    pool2 = MaxPool2DLayer(conv2_3, pool_size=(2, 2))

    # stage 3
    conv3_1 = batch_norm(
        Conv2DLayer(pool2,
                    num_filters=64,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    conv3_2 = batch_norm(
        Conv2DLayer(conv3_1,
                    num_filters=64,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    pool3 = MaxPool2DLayer(conv3_2, pool_size=(2, 2))

    # stage 4
    conv4_1 = batch_norm(
        Conv2DLayer(pool3,
                    num_filters=128,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    conv4_2 = batch_norm(
        Conv2DLayer(conv4_1,
                    num_filters=128,
                    filter_size=(3, 3),
                    nonlinearity=rectify,
                    W=HeNormal(),
                    pad=1))
    pool4 = MaxPool2DLayer(conv4_2, pool_size=(2, 2))

    # top-down stage 0
    up4 = Upscale2DLayer(pool4, (2, 2))
    up4_conv = batch_norm(
        Conv2DLayer(up4,
                    num_filters=2,
                    filter_size=(1, 1),
                    nonlinearity=my_softmax,
                    W=HeNormal()))
    pool3_conv = batch_norm(
        Conv2DLayer(pool3,
                    num_filters=2,
                    filter_size=(1, 1),
                    nonlinearity=my_softmax,
                    W=HeNormal()))
    concat4 = ElemwiseSumLayer([up4_conv, pool3_conv])  # elementwise sum, despite the name

    # top-down stage 1
    up3 = Upscale2DLayer(concat4, (2, 2))
    pool2_conv = batch_norm(
        Conv2DLayer(pool2,
                    num_filters=2,
                    filter_size=(1, 1),
                    nonlinearity=my_softmax,
                    W=HeNormal()))
    concat3 = ElemwiseSumLayer([up3, pool2_conv])

    # top-down stage 2
    pool1_conv = batch_norm(
        Conv2DLayer(pool1,
                    num_filters=2,
                    filter_size=(1, 1),
                    nonlinearity=my_softmax,
                    W=HeNormal()))
    up2 = Upscale2DLayer(concat3, (2, 2))
    concat2 = ElemwiseSumLayer([up2, pool1_conv])

    pred = averageLayer(Upscale2DLayer(concat2, (2, 2)))
    sli = SliceLayer(pred, indices=slice(0, 1), axis=1)
    area = GlobalPoolLayer(sli)
    return pred, sli, area
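build_fcn depends on two helpers that are not defined in this example: my_softmax and averageLayer (the latter presumably averages or rescales the upsampled score maps). Purely as an assumption, my_softmax is most plausibly a channel-wise softmax, so that the two output maps form a per-pixel probability pair:

import theano.tensor as T

def my_softmax(x):
    # hypothetical: numerically stable softmax over the channel axis of a
    # (batch, channels, rows, cols) activation tensor
    e = T.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)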
Example #27
def build_mitosis_encoder(input_shape, encoding_size=32, withst=False):
    # Parameters
    filter_size = (3, 3)
    num_filters = 32
    pool_size = (2, 2)
    # Localization Network

    l_input = InputLayer(shape=(None, input_shape[1], input_shape[2],
                                input_shape[3]))
    l_conv1 = Conv2DLayer(l_input,
                          num_filters=num_filters,
                          filter_size=filter_size)
    l_conv2 = Conv2DLayer(l_conv1,
                          num_filters=num_filters,
                          filter_size=filter_size)
    l_pool1 = MaxPool2DLayer(l_conv2, pool_size=pool_size)
    l_pipe1_layer = l_pool1  # default pipeline output; replaced when the ST branch is used

    # ST Network
    if withst:
        # ST Params
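        # initialize the 2x3 affine matrix to the identity transform so the
        # spatial transformer starts as a no-op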
        b = np.zeros((2, 3), dtype=theano.config.floatX)
        b[0, 0] = 1
        b[1, 1] = 1
        b = b.flatten()

        # ST Layers
        st_encode1 = DenseLayer(l_pool1,
                                num_units=50,
                                W=lasagne.init.HeUniform('relu'))
        st_encode2 = DenseLayer(st_encode1,
                                num_units=6,
                                b=b,
                                W=lasagne.init.Constant(0.0))
        l_trans1 = TransformerLayer(l_input, st_encode2, downsample_factor=1.0)

        # Localization Network

        st_conv1 = Conv2DLayer(l_trans1,
                               num_filters=num_filters,
                               filter_size=filter_size)
        st_conv2 = Conv2DLayer(st_conv1,
                               num_filters=num_filters,
                               filter_size=filter_size)
        st_pool1 = MaxPool2DLayer(st_conv2, pool_size=pool_size)
        l_pipe1_layer = st_pool1

    # Encoding Step
    l_reshape1 = ReshapeLayer(l_pipe1_layer, shape=([0], -1))
    l_encode = DenseLayer(l_reshape1,
                          num_units=encoding_size,
                          W=lasagne.init.HeUniform('relu'),
                          name='encoder')

    # Decoding Step
    l_decode = DenseLayer(l_encode,
                          W=l_encode.W.T,
                          num_units=l_reshape1.output_shape[1])
    l_reshape2 = ReshapeLayer(
        l_decode,
        shape=([0], num_filters,
               int(np.sqrt(l_reshape1.output_shape[1] / num_filters)),
               int(np.sqrt(l_reshape1.output_shape[1] / num_filters))))

    # Deconv Network
    l_unpool1 = Upscale2DLayer(l_reshape2, scale_factor=pool_size)
    l_deconv2 = TransposedConv2DLayer(l_unpool1,
                                      num_filters=l_conv2.input_shape[1],
                                      W=l_conv2.W,
                                      filter_size=l_conv2.filter_size,
                                      stride=l_conv2.stride,
                                      crop=l_conv2.pad,
                                      flip_filters=not l_conv2.flip_filters)

    l_deconv1 = TransposedConv2DLayer(l_deconv2,
                                      num_filters=l_conv1.input_shape[1],
                                      W=l_conv1.W,
                                      filter_size=l_conv1.filter_size,
                                      stride=l_conv1.stride,
                                      crop=l_conv1.pad,
                                      flip_filters=not l_conv1.flip_filters)

    return l_deconv1
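Note that the decoder above ties its parameters to the encoder: the dense decode layer reuses W=l_encode.W.T, and both transposed convolutions reuse the encoder kernels (with flipped filters), so only one shared set of weights is learned; the ReshapeLayer also assumes square post-pooling feature maps. A minimal reconstruction objective, sketched under the assumption of 3-channel 64x64 inputs and an Adam optimizer (neither is specified by this example), could be compiled as:

import theano
import theano.tensor as T
import lasagne

x = T.tensor4('x')
net = build_mitosis_encoder((None, 3, 64, 64))  # hypothetical input shape
recon = lasagne.layers.get_output(net, x)       # map x onto the InputLayer
loss = lasagne.objectives.squared_error(recon, x).mean()
params = lasagne.layers.get_all_params(net, trainable=True)
updates = lasagne.updates.adam(loss, params)
train_fn = theano.function([x], loss, updates=updates)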
Example #28
def build_generator(input_var, noise_size, cond_var=None, n_conds=0, arch=0,
                    with_BatchNorm=True, batch_size=None, n_steps=None):
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, concat
    from lasagne.layers import Upscale2DLayer, Conv2DLayer, batch_norm
    from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    from lasagne.nonlinearities import LeakyRectify, rectify
    from lasagne.init import GlorotUniform, Normal, Orthogonal

    # non_lin = LeakyRectify(0.01)
    non_lin = rectify
    # init = Orthogonal(np.sqrt(2/(1+0.01**2)))
    init = Normal(0.02, 0.0)
    # init = GlorotUniform()

    layer = InputLayer(shape=(batch_size, noise_size), input_var=input_var)
    if cond_var is not None:
        layer = BatchNorm(DenseLayer(
            layer, noise_size, nonlinearity=non_lin), with_BatchNorm)
        layer = concat([
            layer, InputLayer(shape=(batch_size, n_conds), input_var=cond_var)])
    if arch == 'dcgan':
        # DCGAN
        layer = BatchNorm(DenseLayer(
            layer, 1024*4*4, W=init, b=None, nonlinearity=non_lin),
            with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 4, 4))
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=8, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=16, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=32, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 64, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=64, nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop=(2, 2), W=init, b=None,
            output_size=128, nonlinearity=tanh_temperature)
    elif arch == 'mnist':
        # Jan Schluechter MNIST generator
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(
            layer, 1024*8*8, W=init, b=None), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 8, 8))
        # fractional-stride convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 5, stride=2, crop='same', W=init, b=None,
            output_size=16, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', W=init, b=None,
            output_size=32, nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 5, stride=2, crop='same', W=init, b=None,
            output_size=64, nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop='same', W=init, b=None,
            output_size=128, nonlinearity=tanh_temperature)
    elif arch == 'cont-enc':
        # build generator from concatenated prefix and noise features
        layer = ReshapeLayer(layer, ([0], layer.output_shape[1], 1, 1))
        layer = BatchNorm(Deconv2DLayer(
            layer, 1024, 4, stride=1, crop=0, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 128, 4, stride=2, crop=1, W=init), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 4, stride=2, crop=1, W=init,
            nonlinearity=tanh_temperature)
    elif arch == 'lsgan':
        layer = batch_norm(DenseLayer(layer, 1024))
        layer = batch_norm(DenseLayer(layer, 1024*8*8))
        layer = ReshapeLayer(layer, ([0], 1024, 8, 8))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=16))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=32))
        layer = batch_norm(Deconv2DLayer(
            layer, 256, 5, stride=2, crop='same', output_size=64))
        layer = Deconv2DLayer(
            layer, 1, 5, stride=2, crop='same', output_size=128,
            nonlinearity=tanh_temperature)
    elif arch == 2:
        # non-overlapping transposed convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 256*36*36), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 256, 36, 36))
        # two fractional-stride convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, 4, stride=2, crop='full', b=None, nonlinearity=non_lin),
            with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, 8, stride=2, crop='full', b=None,
            nonlinearity=tanh_temperature)
    elif arch == 3:
        # resize-convolution, more full layer weights less convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 32*68*68), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 32, 68, 68))
        # resize-convolutions
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid'), with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2))
        layer = Conv2DLayer(
            layer, 1, 5, stride=1, pad='valid', nonlinearity=tanh_temperature)
    elif arch == 4:
        # resize-convolution, less full layer weights more convolutions
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 128*18*18), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 128, 18, 18))
        # resize-convolutions
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid', nonlinearity=non_lin),
            with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = BatchNorm(Conv2DLayer(
            layer, 256, 3, stride=1, pad='valid', nonlinearity=non_lin),
            with_BatchNorm)
        layer = Upscale2DLayer(layer, (2, 2), mode='bilinear')
        layer = Conv2DLayer(
            layer, 1, 5, stride=1, pad='valid',
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_up':
        # CREPE transposed with upscaling
        # fully-connected layers
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 2**15*1*3), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 2**15, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Upscale2DLayer(layer, (1, 3), mode='repeat')
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, (1, 9), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Upscale2DLayer(layer, (1, 3), mode='repeat')
        layer = Deconv2DLayer(
            layer, 1, (128, 6), stride=1, crop=0, W=init, b=None,
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_noup_a':
        # CREPE transposed no upscaling
        # fully-connected layer
        layer = BatchNorm(DenseLayer(
            layer, 1024, W=init, b=None), with_BatchNorm)
        # project and reshape
        layer = BatchNorm(DenseLayer(
            layer, 1024*1*3, W=init, b=None), with_BatchNorm)
        layer = ReshapeLayer(layer, ([0], 1024, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 512, (1, 7), stride=1, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = BatchNorm(Deconv2DLayer(
            layer, 1024, (128, 7), stride=3, crop=0, W=init, b=None,
            nonlinearity=non_lin), with_BatchNorm)
        layer = Deconv2DLayer(
            layer, 1, (1, 8), stride=1, crop=0, W=init, b=None,
            nonlinearity=tanh_temperature)
    elif arch == 'crepe_noup_b':
        # CREPE transposed no upscaling
        # fully-connected layer
        layer = BatchNorm(DenseLayer(layer, 1024))
        # project and reshape
        layer = BatchNorm(DenseLayer(layer, 1024*1*3))
        layer = ReshapeLayer(layer, ([0], 1024, 1, 3))
        # temporal convolutions
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0,
            nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=1, crop=0, nonlinearity=non_lin))
        layer = BatchNorm(Deconv2DLayer(
            layer, 256, (1, 3), stride=3, crop=0, nonlinearity=non_lin))
        layer = Deconv2DLayer(
            layer, 512, (1, 9), stride=1, crop=0, nonlinearity=non_lin)
        layer = Deconv2DLayer(
            layer, 1, (128, 8), stride=3, crop=0, nonlinearity=tanh_temperature)
    else:
        return None

    print("Generator output:", layer.output_shape)
    return layer
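A hedged usage sketch for the generator above, assuming the custom BatchNorm wrapper and tanh_temperature nonlinearity it references are defined elsewhere in this project; the noise size and batch size are arbitrary:

import numpy as np
import theano
import theano.tensor as T
import lasagne

noise = T.matrix('noise')
generator = build_generator(noise, noise_size=100, arch='mnist')
sample = lasagne.layers.get_output(generator, deterministic=True)
generate = theano.function([noise], sample)
images = generate(
    np.random.normal(0, 1, (16, 100)).astype(theano.config.floatX))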
Example #29
File: models.py Project: htyao89/drcn
    def create_architecture(self,
                            input_shape,
                            dense_dim=1024,
                            input_var_=None,
                            output_var_=None,
                            convnet_=None,
                            is_enc_fixed=False):

        print('[ConvAE: create_architecture]')
        if input_var_ is not None:
            self.X_ = input_var_

        if output_var_ is not None:
            self.Y_ = output_var_

        (c, d1, d2) = input_shape

        self.lin = InputLayer((None, c, d1, d2), self.X_)
        def enc_W(layer_name):
            # reuse the matching pretrained encoder weights when a convnet
            # is supplied, otherwise initialize with GlorotUniform
            if convnet_ is not None:
                return getattr(convnet_, layer_name).W
            return GlorotUniform()

        self.lconv1 = Conv2DLayerFast(self.lin, 100, (5, 5), pad=(2, 2),
                                      W=enc_W('lconv1'), nonlinearity=rectify)
        self.lpool1 = MaxPool2DLayerFast(self.lconv1, (2, 2))

        self.lconv2 = Conv2DLayerFast(self.lpool1, 150, (5, 5), pad=(2, 2),
                                      W=enc_W('lconv2'), nonlinearity=rectify)
        self.lpool2 = MaxPool2DLayerFast(self.lconv2, (2, 2))

        self.lconv3 = Conv2DLayerFast(self.lpool2, 200, (3, 3),
                                      W=enc_W('lconv3'), nonlinearity=rectify)
        [nd, nf, dc1, dc2] = get_output_shape(self.lconv3)

        self.lconv3_flat = FlattenLayer(self.lconv3)
        [_, dflat] = get_output_shape(self.lconv3_flat)

        self.ldense1 = DenseLayer(self.lconv3_flat, dense_dim,
                                  W=enc_W('ldense1'), nonlinearity=rectify)
        self.ldense2 = DenseLayer(self.ldense1, dense_dim,
                                  W=enc_W('ldense2'), nonlinearity=rectify)

        self.ldense3 = DenseLayer(self.ldense2,
                                  dflat,
                                  W=GlorotUniform(),
                                  nonlinearity=rectify)
        self.ldense3_reshape = ReshapeLayer(self.ldense3,
                                            ([0], nf, dc1, -1))  # lae_conv3

        self.ldeconv1 = Conv2DLayerFast(self.ldense3_reshape,
                                        150, (3, 3),
                                        pad=(2, 2),
                                        W=GlorotUniform(),
                                        nonlinearity=rectify)
        self.lunpool1 = Upscale2DLayer(self.ldeconv1, (2, 2))

        self.ldeconv2 = Conv2DLayerFast(self.lunpool1,
                                        100, (5, 5),
                                        pad=(2, 2),
                                        W=GlorotUniform(),
                                        nonlinearity=rectify)
        self.lunpool2 = Upscale2DLayer(self.ldeconv2, (2, 2))

        self.model_ = Conv2DLayerFast(self.lunpool2,
                                      1, (5, 5),
                                      pad=(2, 2),
                                      W=GlorotUniform(),
                                      nonlinearity=linear)

        self.is_enc_fixed = is_enc_fixed
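How the reconstruction objective is compiled is not shown in this excerpt. A sketch, written as a hypothetical extra method of the same class (compile_train_fn, train_fn_ and the rmsprop choice are all assumptions, not the project's API), follows; when is_enc_fixed is set, one would presumably exclude the encoder layers from params:

    def compile_train_fn(self, learning_rate=3e-4):
        # hypothetical helper, not part of the original class
        import theano
        import lasagne
        recon = lasagne.layers.get_output(self.model_)
        loss = lasagne.objectives.squared_error(recon, self.Y_).mean()
        params = lasagne.layers.get_all_params(self.model_, trainable=True)
        updates = lasagne.updates.rmsprop(loss, params, learning_rate)
        self.train_fn_ = theano.function([self.X_, self.Y_], loss,
                                         updates=updates)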
Example #30
def buildnet(weight_file, z_hid=50):

    conv_num_filters = 16
    filter_size = 3
    pool_size = 2
    pad_in = 'valid'
    pad_out = 'full'
    input_var = T.tensor4('inputs')
    #    target_var = T.matrix('targets')
    encode_hid = 1000
    decode_hid = encode_hid
    ii1 = 45
    ii2 = 36
    dense_upper_mid_size = conv_num_filters * (ii1 - 2) * (ii2 - 2) * 2
    relu_shift = 10
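    # the log-sigma nonlinearity below computes max(a, -relu_shift), i.e. a
    # lower bound of -relu_shift on the predicted log-variance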
    input_layer = InputLayer(shape=(None, 27, 32, 30), input_var=input_var)
    conv1 = Conv2DLayer(input_layer,
                        num_filters=conv_num_filters,
                        filter_size=filter_size,
                        pad=pad_in)
    conv2 = Conv2DLayer(conv1,
                        num_filters=conv_num_filters,
                        filter_size=filter_size,
                        pad=pad_in)
    pool1 = MaxPool2DLayer(conv2, pool_size=pool_size)
    conv3 = Conv2DLayer(pool1,
                        num_filters=2 * conv_num_filters,
                        filter_size=filter_size,
                        pad=pad_in)
    pool2 = MaxPool2DLayer(conv3, pool_size=pool_size)
    reshape1 = ReshapeLayer(pool2, shape=([0], -1))
    encode_h_layer = DenseLayer(reshape1,
                                num_units=encode_hid,
                                nonlinearity=None)
    mu_layer = DenseLayer(encode_h_layer, num_units=z_hid, nonlinearity=None)
    log_sigma_layer = DenseLayer(
        encode_h_layer,
        num_units=z_hid,
        nonlinearity=lambda a: T.nnet.relu(a + relu_shift) - relu_shift)
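    # Q_Layer is presumably a custom reparameterization layer drawing
    # z = mu + eps * exp(log_sigma) (an assumption; it is defined elsewhere)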
    q_layer = Q_Layer([mu_layer, log_sigma_layer])
    decode_h_layer = DenseLayer(q_layer,
                                num_units=decode_hid,
                                nonlinearity=tanh)
    decode_h_layer_second = DenseLayer(decode_h_layer,
                                       num_units=dense_upper_mid_size,
                                       nonlinearity=None)
    reshape2 = ReshapeLayer(decode_h_layer_second,
                            shape=([0], 2 * conv_num_filters, (ii1 - 2),
                                   (ii2 - 2)))
    upscale1 = Upscale2DLayer(reshape2, scale_factor=pool_size)
    deconv1 = Conv2DLayer(upscale1,
                          num_filters=conv_num_filters,
                          filter_size=filter_size,
                          pad=pad_out)
    upscale2 = Upscale2DLayer(deconv1, scale_factor=pool_size)
    deconv2 = Conv2DLayer(upscale2,
                          num_filters=conv_num_filters,
                          filter_size=filter_size,
                          pad=pad_out)
    deconv3 = Conv2DLayer(deconv2,
                          num_filters=1,
                          filter_size=filter_size,
                          pad=pad_out,
                          nonlinearity=sigmoid)
    network = ReshapeLayer(deconv3, shape=([0], -1))

    with open(weight_file, 'rb') as f:
        updated_param_values = pickle.load(f)
        lasagne.layers.set_all_param_values(network, updated_param_values)

    encoded_mu = lasagne.layers.get_output(mu_layer)
    ae_encode_mu = theano.function([input_var], encoded_mu)
    encoded_log_sigma = lasagne.layers.get_output(log_sigma_layer)
    ae_encode_log_sigma = theano.function([input_var], encoded_log_sigma)
    x = theano.tensor.matrix()
    mu = theano.tensor.matrix()
    log_sigma = theano.tensor.matrix()
    noise_adjust = theano.function([x, mu, log_sigma],
                                   x * T.exp(log_sigma) + mu)
    noise_var = T.matrix()
    gen = get_output(network, {q_layer: noise_var})
    gen_from_noise = theano.function([noise_var], gen)

    def gen_model_from_enc(noise_input,
                           n_steps,
                           gen_from_noise,
                           noise_adjust,
                           ae_encode_mu,
                           ae_encode_log_sigma,
                           threshold=False):
        generated_i = gen_from_noise(noise_input)
        generated = generated_i.reshape(-1, 27, 32, 30)
        for ii in range(0, n_steps):
            mu = ae_encode_mu(generated)
            log_sigma = ae_encode_log_sigma(generated)
            noise_adj = noise_adjust(noise_input, mu, log_sigma)
            generated = gen_from_noise(noise_adj)
            generated = generated.reshape(-1, 27, 32, 30)
        if threshold:
            generated[generated < 0.5] = 0
            generated[generated >= 0.5] = 1
        return generated

    return ae_encode_mu, ae_encode_log_sigma, noise_adjust, gen_from_noise, gen_model_from_enc
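A usage sketch for the functions returned above; the weight-file path, batch size and step count are assumptions:

import numpy as np

(ae_encode_mu, ae_encode_log_sigma, noise_adjust,
 gen_from_noise, gen_model_from_enc) = buildnet('vae_weights.pkl')  # assumed path
z = np.random.normal(0, 1, (4, 50)).astype('float32')
volumes = gen_model_from_enc(z, 5, gen_from_noise, noise_adjust,
                             ae_encode_mu, ae_encode_log_sigma,
                             threshold=True)  # -> (4, 27, 32, 30) binary grids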