def build(input_height, input_width, concat_var):
    """
    Build the discriminator, with all weights initialized from scratch.
    :param input_height: height of the input images
    :param input_width: width of the input images
    :param concat_var: Theano symbolic tensor variable
    :return: dictionary that contains the discriminator layers
    """

    net = {'input': InputLayer((None, 4, input_height, input_width), input_var=concat_var)}
    print("Input: {}".format(net['input'].output_shape[1:]))

    net['merge'] = ConvLayer(net['input'], 3, 1, pad=0, flip_filters=False)
    print("merge: {}".format(net['merge'].output_shape[1:]))

    net['conv1'] = ConvLayer(net['merge'], 32, 3, pad=1)
    print("conv1: {}".format(net['conv1'].output_shape[1:]))

    net['pool1'] = PoolLayer(net['conv1'], 4)
    print("pool1: {}".format(net['pool1'].output_shape[1:]))

    net['conv2_1'] = ConvLayer(net['pool1'], 64, 3, pad=1)
    print("conv2_1: {}".format(net['conv2_1'].output_shape[1:]))

    net['conv2_2'] = ConvLayer(net['conv2_1'], 64, 3, pad=1)
    print("conv2_2: {}".format(net['conv2_2'].output_shape[1:]))

    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    print("pool2: {}".format(net['pool2'].output_shape[1:]))

    net['conv3_1'] = nn.weight_norm(ConvLayer(net['pool2'], 64, 3, pad=1))
    print("conv3_1: {}".format(net['conv3_1'].output_shape[1:]))

    net['conv3_2'] = nn.weight_norm(ConvLayer(net['conv3_1'], 64, 3, pad=1))
    print("conv3_2: {}".format(net['conv3_2'].output_shape[1:]))

    net['pool3'] = PoolLayer(net['conv3_2'], 2)
    print("pool3: {}".format(net['pool3'].output_shape[1:]))

    net['fc4'] = DenseLayer(net['pool3'], num_units=100, nonlinearity=tanh)
    print("fc4: {}".format(net['fc4'].output_shape[1:]))

    net['fc5'] = DenseLayer(net['fc4'], num_units=2, nonlinearity=tanh)
    print("fc5: {}".format(net['fc5'].output_shape[1:]))

    net['prob'] = DenseLayer(net['fc5'], num_units=1, nonlinearity=sigmoid)
    print("prob: {}".format(net['prob'].output_shape[1:]))

    return net
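A minimal usage sketch for build (assuming the Lasagne/Theano imports used above; the 64x64 size and variable names are illustrative):

import theano.tensor as T
import lasagne.layers as ll

concat_var = T.tensor4('concat')      # symbolic (batch, 4, height, width) input
net = build(64, 64, concat_var)       # 64x64 is an illustrative input size
d_prob = ll.get_output(net['prob'])   # symbolic probability that the input is real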
Example 2
def get_generator(batch_size, theano_rng, noise_length=100):

    noise_dim = (batch_size, noise_length)
    noise = theano_rng.uniform(size=noise_dim)
    gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    gen_layers.append(
        nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                    num_units=4 * 4 * 512,
                                    W=Normal(0.05),
                                    nonlinearity=nn.relu),
                      g=None))
    gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (batch_size, 512, 4, 4)))
    gen_layers.append(
        nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (batch_size, 256, 8, 8),
                                       (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 4 -> 8
    gen_layers.append(
        nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                       (batch_size, 128, 16, 16), (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 8 -> 16
    gen_layers.append(
        nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                        (batch_size, 3, 32, 32), (5, 5),
                                        W=Normal(0.05),
                                        nonlinearity=T.tanh),
                       train_g=True,
                       init_stdv=0.1))  # 16 -> 32

    return gen_layers
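A sketch of compiling a sampler from this generator, following the same get_output/theano.function pattern as _sample_trained_minibatch_gan below (the seed and batch size are illustrative):

import theano as th
import lasagne.layers as ll
from theano.sandbox.rng_mrg import MRG_RandomStreams

theano_rng = MRG_RandomStreams(42)
gen_layers = get_generator(batch_size=100, theano_rng=theano_rng)
sample_batch = th.function(inputs=[], outputs=ll.get_output(gen_layers[-1]))
samples = sample_batch()  # array of shape (100, 3, 32, 32), tanh outputs in [-1, 1]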
Example 3
    def g_architecture(self):
#        lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
#        gen_layers = [lasagne.layers.InputLayer(shape=self.size_input, input_var=self.distribution)]
#        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.DenseLayer(gen_layers[-1], num_units=4*4*512, nonlinearity=lrelu)))
#        gen_layers.append(lasagne.layers.ReshapeLayer(gen_layers[-1], (-1,512,4,4)))
#        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.Deconv2DLayer(gen_layers[-1], num_filters=256, filter_size=5, stride=2, crop=2,output_size=8,nonlinearity=lasagne.nonlinearities.rectify)))
#        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.Deconv2DLayer(gen_layers[-1], num_filters=128, filter_size=5, stride=2, crop=2,output_size=16,nonlinearity=lasagne.nonlinearities.rectify)))
#        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.Deconv2DLayer(gen_layers[-1], num_filters=64, filter_size=5, stride=2, crop=2,output_size=32,nonlinearity=lasagne.nonlinearities.rectify)))
#        #gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.Deconv2DLayer(gen_layers[-1], num_filters=3, filter_size=5, stride=2, crop=2,output_size=64,W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.tanh))) # 16 -> 32
#        gen_layers.append(nn.weight_norm(lasagne.layers.Deconv2DLayer(gen_layers[-1], num_filters=3, filter_size=5, stride=2, crop=2,output_size=64,W=lasagne.init.Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) 
#        self.architecture = gen_layers
###        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.Deconv2DLayer(gen_layers[-1], num_filters=3, filter_size=5, stride=2, crop=2,output_size=32,nonlinearity=lasagne.nonlinearities.sigmoid)))
#
#        self.architecture = gen_layers
#        #self.last_layer   = gen_layers[-1]
        #self.parameters = lasagne.layers.get_all_params(gen_layers,trainable=True)
#
        lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
        gen_layers = [lasagne.layers.InputLayer(shape=self.size_input, input_var=self.distribution)]
        #gen_layers.append(lasagne.layers.DropoutLayer(gen_layers[-1], p=0.5))
        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.DenseLayer(gen_layers[-1], num_units=4*4*512, nonlinearity=lasagne.nonlinearities.rectify)))
        gen_layers.append(lasagne.layers.ReshapeLayer(gen_layers[-1], (self.BATCH_SIZE,512,4,4)))
        gen_layers.append(lasagne.layers.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE,256,8,8), (5,5), W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.rectify)))# 4 -> 8
        gen_layers.append(lasagne.layers.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE,128,16,16), (5,5), W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.rectify))) # 8 -> 16
        gen_layers.append(lasagne.layers.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE,64,32,32), (5,5), W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.rectify))) # 16 -> 32
        #gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE,3,64,64), (5,5), W=init.Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1))  
        gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE,3,64,64), (5,5), W=init.Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1))  # 32 -> 64
        self.architecture = gen_layers

        return
Example 4
def _sample_trained_minibatch_gan(params_file, n, batch_size, rs):
    import lasagne
    from lasagne.init import Normal
    import lasagne.layers as ll
    import theano as th
    from theano.sandbox.rng_mrg import MRG_RandomStreams
    import theano.tensor as T

    import nn

    theano_rng = MRG_RandomStreams(rs.randint(2**15))
    lasagne.random.set_rng(np.random.RandomState(rs.randint(2**15)))

    noise_dim = (batch_size, 100)
    noise = theano_rng.uniform(size=noise_dim)
    ls = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    ls.append(
        nn.batch_norm(ll.DenseLayer(ls[-1],
                                    num_units=4 * 4 * 512,
                                    W=Normal(0.05),
                                    nonlinearity=nn.relu),
                      g=None))
    ls.append(ll.ReshapeLayer(ls[-1], (batch_size, 512, 4, 4)))
    ls.append(
        nn.batch_norm(nn.Deconv2DLayer(ls[-1], (batch_size, 256, 8, 8), (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 4 -> 8
    ls.append(
        nn.batch_norm(nn.Deconv2DLayer(ls[-1], (batch_size, 128, 16, 16),
                                       (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 8 -> 16
    ls.append(
        nn.weight_norm(nn.Deconv2DLayer(ls[-1], (batch_size, 3, 32, 32),
                                        (5, 5),
                                        W=Normal(0.05),
                                        nonlinearity=T.tanh),
                       train_g=True,
                       init_stdv=0.1))  # 16 -> 32
    gen_dat = ll.get_output(ls[-1])

    with np.load(params_file) as d:
        params = [d['arr_{}'.format(i)] for i in range(9)]
    ll.set_all_param_values(ls[-1], params, trainable=True)

    sample_batch = th.function(inputs=[], outputs=gen_dat)
    samps = []
    while len(samps) < n:
        samps.extend(sample_batch())
    samps = np.array(samps[:n])
    return samps
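A hypothetical call; 'gan_params.npz' stands in for an archive saved with np.savez from the nine trainable generator parameters:

rs = np.random.RandomState(0)
samps = _sample_trained_minibatch_gan('gan_params.npz', n=1000, batch_size=100, rs=rs)
print(samps.shape)  # (1000, 3, 32, 32)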
def discriminator(input_var):
    network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
                                        input_var=input_var)

    network = ll.DropoutLayer(network, p=0.5)

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 64, (4,4), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 32, (5,5), stride=2, pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))
    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 32, (5,5), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 32, (5,5), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(dnn.Conv2DDNNLayer(network, 16, (3,3), pad='valid', W=Normal(0.05), nonlinearity=nn.lrelu))

    network = nn.weight_norm(ll.DenseLayer(network, num_units=1, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1)

    return network
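Since the final DenseLayer above is linear (nonlinearity=None), a binary real/fake loss can apply the sigmoid explicitly; a minimal sketch with illustrative names:

import theano.tensor as T
import lasagne

input_var = T.tensor4('x')
disc = discriminator(input_var)
logits = lasagne.layers.get_output(disc)   # shape (batch, 1)
labels = T.matrix('labels')                # 1 = real, 0 = fake
loss = T.nnet.binary_crossentropy(T.nnet.sigmoid(logits), labels).mean()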
Example 6
gen_layers.append(
    nn.Deconv2DLayer(gen_layers[-1], (batch_size_g, 256, 8, 8), (5, 5),
                     W=Normal(0.05),
                     nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(
    nn.Deconv2DLayer(gen_layers[-1], (batch_size_g, 128, 16, 16), (5, 5),
                     W=Normal(0.05),
                     nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(
    nn.Deconv2DLayer(gen_layers[-1], (batch_size_g, 3, 32, 32), (5, 5),
                     W=Normal(0.05),
                     nonlinearity=T.tanh))
gen_layers.append(nn.weight_norm(gen_layers[-1], train_g=True, init_stdv=0.1))
# for layer in gen_layers:
#     print(layer.params)
# outputs
gen_out_x = ll.get_output(gen_layers[-1], {
    gen_in_y: sym_y_g,
    gen_in_z: sym_z_rand
},
                          deterministic=False)
gen_out_x_shared = ll.get_output(gen_layers[-1], {
    gen_in_y: sym_y_g,
    gen_in_z: sym_z_shared
},
                                 deterministic=False)
gen_out_x_interpolation = ll.get_output(gen_layers[-1], {
    gen_in_y: sym_y_g,
Example 7
nr_batches_train = int(trainx.shape[0]/args.batch_size)  # 50000/100 = 500
nr_batches_test = int(testx.shape[0]/args.batch_size)    # 10000/100 = 100

# specify generative model
noise_dim = (args.batch_size, 50)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 512, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=256, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=128, W=Normal(0.05), nonlinearity=nn.lrelu)))
Example 8
trainx = np.concatenate([d['x'] for d in train_data],axis=0)
trainy = np.concatenate([d['y'] for d in train_data])
test_data = unpickle('/home/ubuntu/data/cifar-10-python/cifar-10-batches-py/test_batch')
testx = test_data['x']
testy = test_data['y']
nr_batches_train = int(trainx.shape[0]/args.batch_size)
nr_batches_test = int(testx.shape[0]/args.batch_size)

# whitening
whitener = nn.ZCA(x=trainx)
trainx_white = whitener.apply(trainx)
testx_white = whitener.apply(testx)

# specify model
if args.norm_type=='weight_norm':
    normalizer = lambda l: nn.weight_norm(l)
elif args.norm_type=='batch_norm':
    normalizer = lambda l: nn.batch_norm(l)
elif args.norm_type=='mean_only_bn':
    normalizer = lambda l: nn.mean_only_bn(l)
elif args.norm_type=='no_norm':
    normalizer = lambda l: nn.no_norm(l)
else:
    raise NotImplementedError('incorrect norm type')

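The if/elif dispatch above could equally be a dict lookup; an equivalent sketch using the same nn helpers:

normalizers = {'weight_norm': nn.weight_norm, 'batch_norm': nn.batch_norm,
               'mean_only_bn': nn.mean_only_bn, 'no_norm': nn.no_norm}
if args.norm_type not in normalizers:
    raise NotImplementedError('incorrect norm type')
normalizer = normalizers[args.norm_type]
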
layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
layers.append(ll.GaussianNoiseLayer(layers[-1], sigma=0.15))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(ll.MaxPool2DLayer(layers[-1], 2))
Example 9
classifier = build_network()

# generator y2x: p_g(x, y) = p(y) p_g(x | y) where x = G(z, y), z follows p_g(z)
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]
gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-30'))
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None,3,32,32), (5,5), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32')) # 16 -> 32

# discriminator xy2p: test a pair of input comes from p(x, y) instead of p_c or p_g
dis_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
dis_in_y = ll.InputLayer(shape=(None,))
dis_layers = [dis_in_x]
dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-00'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-01'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-02'), name='dis-03'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-20'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-21'), name='dis-22'))
dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-23'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-30'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 64, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-31'), name='dis-32'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-40'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 64, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-41'), name='dis-42'))
Example 10
    def d_architecture2(self):
        ### Input
        input_shape = self.size_input  # 50000*1*28*28 (channel=1, length=28, width=28)
        print(input_shape)
        discriminator_layers = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=self.input_var)
        ]
        # we go from 3*32*32 to 96*32*32 because we have 96 filters
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05),
                                           (96, input_shape[1], 5, 5), "W_D_conv1")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (96, ),
                                           "b_D_conv1")
        discriminator_layers.append(
            lasagne.layers.Conv2DLayer(discriminator_layers[-1],
                                       num_filters=96,
                                       filter_size=(5, 5),
                                       nonlinearity=self.nonlinear,
                                       W=Tempw,
                                       b=Tempb))
        # check the convolution type (full, same, valid)
        #########################
        # we go from 96*32*32 to 96*16*16 because of 2*2 pooling
        #discriminator_layers.append(lasagne.layers.MaxPool2DLayer(discriminator_layers[-1], pool_size=(2,2)))
        # we go from 96*32*32 to 192*32*32
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05),
                                           (192, 96, 5, 5), "W_D_conv2")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (192, ),
                                           "b_D_conv2")
        #Tempw = theano.shared(self.fonction_ini(0.05,1,(1,192,32,32)).astype('float32'))
        #Tempb = theano.shared(self.fonction_ini(0.05,1,(192,)).astype('float32'))
        discriminator_layers.append(
            lasagne.layers.Conv2DLayer(discriminator_layers[-1],
                                       num_filters=192,
                                       filter_size=(5, 5),
                                       nonlinearity=self.nonlinear,
                                       W=Tempw,
                                       b=Tempb))
        # we go from 192*32*32 to 192
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05),
                                           (192, 192, 5, 5), "W_D_conv3")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (192, ),
                                           "b_D_conv3")
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=.5))
        discriminator_layers.append(
            lasagne.layers.Conv2DLayer(discriminator_layers[-1],
                                       num_filters=192,
                                       filter_size=(5, 5),
                                       nonlinearity=self.nonlinear,
                                       W=Tempw,
                                       b=Tempb))
        # we go from 192 to 192
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05), (192, 192),
                                           "W_D_conv4")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (192, ),
                                           "b_D_conv4")
        discriminator_layers.append(
            lasagne.layers.NINLayer(discriminator_layers[-1],
                                    num_units=192,
                                    nonlinearity=self.nonlinear,
                                    W=Tempw,
                                    b=Tempb))
        discriminator_layers.append(
            lasagne.layers.GlobalPoolLayer(discriminator_layers[-1]))
        # we go from 192 to 2
        Tempw = lasagne.utils.create_param(lasagne.init.Normal(0.05), (192, 2),
                                           "W_D_conv5")
        Tempb = lasagne.utils.create_param(lasagne.init.Constant(0.0), (2, ),
                                           "b_D_conv5")
        discriminator_layers.append(
            lasagne.layers.DenseLayer(discriminator_layers[-1],
                                      num_units=2,
                                      W=Tempw,
                                      b=Tempb,
                                      nonlinearity=None))
        print(input_shape)
        self.parameters = lasagne.layers.get_all_params(discriminator_layers,
                                                        trainable=True)
        self.last_layer = discriminator_layers[-1]
        ##########################
        self.architecture = discriminator_layers

        input_shape = self.size_input  # 50000*1*28*28 (channel=1, length=28, width=28)

        discriminator_layers = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=self.input_var)
        ]
        # we go from 3*32*32 to 96*32*32 because we have 96 filters
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=0.5))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(
                    discriminator_layers[-1],
                    192, (3, 3),
                    pad=1,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.leaky_rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(
                    discriminator_layers[-1],
                    192, (3, 3),
                    pad=1,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.leaky_rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(
                    discriminator_layers[-1],
                    192, (3, 3),
                    pad=1,
                    stride=2,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.leaky_rectify)))

        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=0.5))

        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (3, 3),
                               pad=0,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(
                    discriminator_layers[-1],
                    num_units=256,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(
                    discriminator_layers[-1],
                    num_units=256,
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.rectify)))
        discriminator_layers.append(
            lasagne.layers.GlobalPoolLayer(discriminator_layers[-1]))

        self.architecture = discriminator_layers

        return
Example 11
def get_discriminator_brown(num_feature=256):

    disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=0,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=num_feature,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=128,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
    disc_layers.append(
        nn.weight_norm(ll.DenseLayer(disc_layers[-1],
                                     num_units=2,
                                     W=Normal(0.05),
                                     nonlinearity=None),
                       train_g=True,
                       init_stdv=0.1))
    #disc_layers.append(ll.ReshapeLayer(disc_layers[-4], ([0], -1)))
    #disc_layers.append(ll.GlobalPoolLayer(disc_layers[-4]))
    disc_layer_features_low_dim = -4
    disc_layer_features_high_dim = -5

    return disc_layers, disc_layer_features_low_dim, disc_layer_features_high_dim
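A sketch of pulling out the feature layers this function returns alongside the network (x_input is an illustrative symbolic batch):

import theano.tensor as T
import lasagne.layers as ll

x_input = T.tensor4('x')
disc_layers, lo_idx, hi_idx = get_discriminator_brown()
feats_low = ll.get_output(disc_layers[lo_idx], x_input)   # NIN features before pooling
feats_high = ll.get_output(disc_layers[hi_idx], x_input)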
Example 12
trainx = trainx[ind]
trainy = trainy[ind]
testx, testy = ddsm_data.load(args.data_dir, subset='test')
nr_batches_train = int(trainx.shape[0] / args.batch_size)
nr_batches_test = int(testx.shape[0] / args.batch_size)
print("the train data is %d" % trainx.shape[0])
print("the test data is %d" % testx.shape[0])
print("compiling......")

disc_layers = [ll.InputLayer(shape=(None, 1, 224, 224))]
disc_layers.append(ll.GaussianNoiseLayer(disc_layers[-1], sigma=0.1))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           64, (5, 5),
                           stride=2,
                           pad=1,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           128, (5, 5),
                           stride=2,
                           pad=1,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           256, (5, 5),
                           stride=2,
                           pad=1,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
Example 13
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size, 512, 7, 7)))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 512, 14, 14), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 256, 28, 28), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 128, 56, 56), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 128, 112, 112), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 1, 224, 224), (5, 5), W=Normal(0.05), nonlinearity=T.tanh))
gen_layers.append(nn.weight_norm(gen_layers[-1], train_g=True, init_stdv=0.1))

# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 1, 224, 224))]
disc_layers.append(ll.GaussianNoiseLayer(disc_layers[-1], sigma=0.2))  # uncomment this line if testing without data augmentation
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 64, (5, 5), stride=2, pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 128, (5, 5), stride=2, pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 256, (5, 5), stride=2, pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 512, (5, 5), stride=2, pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=2 * num_classes, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1))

# costs
# init_updates = [u for l in disc_layers for u in getattr(l, 'init_updates', [])]
gen_dat = ll.get_output(gen_layers[-1], {gen_in_y: labels_gen, gen_in_z: noise})
output_before_softmax_lab = ll.get_output(disc_layers[-1], x_lab)
    def build_network(self, X, Y):
        # Define the layers

        lrelu = lasagne.nonlinearities.LeakyRectify(0.1)
        input_shape = self.Size_Input
        #temp = input_shape[1]*input_shape[2]*input_shape[3]
        Auto_Enc_Layer = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=X)
        ]
        # Encode lasagne.nonlinearities.rectify
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))

        Auto_Enc_Layer_Global0 = nn.weight_norm(
            lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                    num_units=32,
                                    W=lasagne.init.Normal(0.05),
                                    nonlinearity=lrelu))
        Auto_Enc_Layer_Global = lasagne.layers.GlobalPoolLayer(
            Auto_Enc_Layer_Global0)

        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            lasagne.layers.DropoutLayer(Auto_Enc_Layer[-1], p=0.5))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))

        Auto_Enc_Layer_Local0 = nn.weight_norm(
            lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                    num_units=128,
                                    W=lasagne.init.Normal(0.05),
                                    nonlinearity=lrelu))
        Auto_Enc_Layer_Local = lasagne.layers.GlobalPoolLayer(
            Auto_Enc_Layer_Local0)

        Auto_Enc_Layer.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                        num_units=512,
                                        W=lasagne.init.Normal(0.05),
                                        nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            lasagne.layers.GlobalPoolLayer(Auto_Enc_Layer[-1]))

        Auto_Enc_Layer.append(
            nn.weight_norm(
                DenseLayer(Auto_Enc_Layer_Local,
                           num_units=32 * 16 * 16,
                           W=lasagne.init.Normal(0.05),
                           nonlinearity=lasagne.nonlinearities.tanh)))
        # Decode

        Auto_Dec_Layer = [
            (lasagne.layers.ReshapeLayer(Auto_Enc_Layer[-1],
                                         (self.Batch_Size, 32, 16, 16)))
        ]
        Auto_Dec_Layer.append(
            nn.weight_norm(
                nn.Deconv2DLayer(Auto_Dec_Layer[-1],
                                 (self.Batch_Size, 32, 32, 32), (3, 3),
                                 W=lasagne.init.Normal(0.05),
                                 nonlinearity=lrelu)))  # 16 -> 32
        Auto_Dec_Layer.append(
            nn.weight_norm(
                nn.Deconv2DLayer(
                    Auto_Dec_Layer[-1], (self.Batch_Size, 3, 64, 64), (3, 3),
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.tanh)))  # 32 -> 64

        all_params = lasagne.layers.get_all_params(Auto_Dec_Layer,
                                                   trainable=True)
        network_output = lasagne.layers.get_output(Auto_Dec_Layer[-1],
                                                   X,
                                                   deterministic=False)

        encoded_output = lasagne.layers.get_output(Auto_Enc_Layer_Local,
                                                   X,
                                                   deterministic=True)
        encoded_output1 = lasagne.layers.get_output(Auto_Enc_Layer_Global,
                                                    X,
                                                    deterministic=True)
        #encoded_output1 = Auto_Enc_Layer_Global
        network_output1 = lasagne.layers.get_output(Auto_Dec_Layer[-1],
                                                    X,
                                                    deterministic=True)

        loss_A = T.mean(
            lasagne.objectives.squared_error(
                network_output[:, 0, :, :], Y[:, 0, :, :])) + T.mean(
                    lasagne.objectives.squared_error(
                        network_output[:, 1, :, :], Y[:, 1, :, :])) + T.mean(
                            lasagne.objectives.squared_error(
                                network_output[:, 2, :, :], Y[:, 2, :, :]))

        #loss_A = T.mean(lasagne.objectives.squared_error(network_output,Y))

        loss = [loss_A, encoded_output, network_output]
        #Autoencodeur_params_updates = lasagne.updates.momentum(loss_A,all_params,learning_rate = 0.05,momentum = 0.5)
        Autoencodeur_params_updates = lasagne.updates.adam(loss_A,
                                                           all_params,
                                                           learning_rate=0.001,
                                                           beta1=0.9)
        # Some Theano functions

        self.generate_fn_X = theano.function([X], network_output)
        self.train = theano.function([X, Y],
                                     loss,
                                     updates=Autoencodeur_params_updates,
                                     allow_input_downcast=True)
        self.predict = theano.function([X],
                                       network_output1,
                                       allow_input_downcast=True)
        self.encode_L = theano.function([X],
                                        encoded_output,
                                        allow_input_downcast=True)
        self.encode_G = theano.function([X],
                                        encoded_output1,
                                        allow_input_downcast=True)
        #self.encode_G = encoded_output1
        self.network = Auto_Enc_Layer

        return
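A hypothetical training-step call against the functions compiled above (model stands in for an instance of this class; batch shapes follow the decoder output, (Batch_Size, 3, 64, 64)):

# x_batch, y_batch: float32 arrays shaped like the autoencoder input/output
loss_A_val, codes, recon = model.train(x_batch, y_batch)  # one Adam update
preds = model.predict(x_batch)         # deterministic reconstruction
local_codes = model.encode_L(x_batch)  # pooled local encoder features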
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                   (args.batch_size, 256, 8, 8), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='g2'),
                  g=None))  # 4 -> 8
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                   (args.batch_size, 128, 16, 16), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='g3'),
                  g=None))  # 8 -> 16
gen_layers.append(
    nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                    (args.batch_size, 3, 32, 32), (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=T.tanh,
                                    name='g4'),
                   train_g=True,
                   init_stdv=0.1))  # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

genz_layers = [x_input]
genz_layers.append(
    dnn.Conv2DDNNLayer(genz_layers[-1],
                       128, (3, 3),
                       pad=1,
                       stride=2,
                       W=Normal(0.05),
                       nonlinearity=nn.lrelu,
                       name='gz1'))
genz_layers.append(
Example 16
# load CIFAR-10
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
trainx_unl = trainx.copy()
nr_batches_train = int(trainx.shape[0]/args.batch_size)


# specify generative model
noise_dim = (args.batch_size, 100)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
Example 17
gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-5'))
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=512*4*4, nonlinearity=ln.linear, name='gen-6'), g=None, name='gen-61'))

gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1, 512, 4, 4), name='gen-7'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-8'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 256, 8, 8), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-9'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 128, 16, 16), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 64, 32, 32), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-11'))
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 1, 64, 64), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32'))



########## Discriminators
dis_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
dis_in_y = ll.InputLayer(shape=(None,))
dis_layers = [dis_in_x]

dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-00'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-01'))
dis_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, filter_size=4, stride=(2,2), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu, name='dis-02'), name='dis-03'))

dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-20'))
dis_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 64, filter_size=4, stride=(2,2), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu, name='dis-02'), name='dis-03'))
Example 18
    def d_architecture(self):
        ### Input
        lrelu = lasagne.nonlinearities.LeakyRectify(0.2)
        input_shape = self.size_input  # 50000*1*28*28 (channel=1, length=28, width=28)
        discriminator_layers = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=self.input_var)
        ]
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               64, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               128, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               256, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               512, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            nn.weight_norm(
                Conv2DDNNLayer(discriminator_layers[-1],
                               1024, (5, 5),
                               pad=2,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        discriminator_layers.append(
            lasagne.layers.DropoutLayer(discriminator_layers[-1], p=0.2))
        discriminator_layers.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(discriminator_layers[-1],
                                        num_units=1024,
                                        W=lasagne.init.Normal(0.05),
                                        nonlinearity=lrelu)))
        discriminator_layers.append(
            lasagne.layers.GlobalPoolLayer(discriminator_layers[-1]))
        #discriminator_layers.append(lasagne.layers.batch_norm(lasagne.layers.DenseLayer(discriminator_layers[-1] , num_units=1, W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.sigmoid)))
        discriminator_layers.append(
            nn.weight_norm(lasagne.layers.DenseLayer(
                discriminator_layers[-1],
                num_units=1,
                W=lasagne.init.Normal(0.05),
                nonlinearity=lasagne.nonlinearities.sigmoid),
                           train_g=True,
                           init_stdv=0.1))
        self.architecture = discriminator_layers
Example 19
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))

# load data
ds_train = DataStream(d=args.data_dir, img_size=args.image_size,
                      batch_size=args.batch_size, h5_name="train_sub.hdf5")
ds_test = DataStream(d=args.data_dir, img_size=args.image_size,
                     batch_size=args.batch_size, h5_name="val_sub.hdf5")
print("DATA LOADERS CREATED")

# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 1, args.image_size, args.image_size))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 32, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 64, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
sym_z_shared = T.tile(theano_rng.uniform((batch_size_g/num_classes, n_z)), (num_classes, 1))

# generator y2x: p_g(x, y) = p(y) p_g(x | y) where x = G(z, y), z follows p_g(z)
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]
if args.dataset == 'svhn' or args.dataset == 'cifar10':
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
    gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
    gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
    gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
    gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-30'))
    gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None,3,32,32), (5,5), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32')) # 16 -> 32
elif args.dataset == 'mnist':
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-1'))
    gen_layers.append(ll.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=500, nonlinearity=ln.softplus, name='gen-2'), name='gen-3'))
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-4'))
    gen_layers.append(ll.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=500, nonlinearity=ln.softplus, name='gen-5'), name='gen-6'))
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-7'))
    gen_layers.append(nn.l2normalize(ll.DenseLayer(gen_layers[-1], num_units=28**2, nonlinearity=gen_final_non, name='gen-8')))

# outputs
gen_out_x = ll.get_output(gen_layers[-1], {gen_in_y:sym_y_g, gen_in_z:sym_z_rand}, deterministic=False)
gen_out_x_shared = ll.get_output(gen_layers[-1], {gen_in_y:sym_y_g, gen_in_z:sym_z_shared}, deterministic=False)
gen_out_x_interpolation = ll.get_output(gen_layers[-1], {gen_in_y:sym_y_g, gen_in_z:sym_z_input}, deterministic=False)
generate = theano.function(inputs=[sym_y_g], outputs=gen_out_x)
generate_shared = theano.function(inputs=[sym_y_g], outputs=gen_out_x_shared)
generate_interpolation = theano.function(inputs=[sym_y_g, sym_z_input], outputs=gen_out_x_interpolation)
Example 21
def get_discriminator_binary():

    disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=0,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=192,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=192,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
    disc_layers.append(
        nn.weight_norm(ll.DenseLayer(disc_layers[-1],
                                     num_units=2,
                                     W=Normal(0.05),
                                     nonlinearity=None),
                       train_g=True,
                       init_stdv=0.1))
    disc_layers.append(
        ll.DenseLayer(disc_layers[-2],
                      num_units=128,
                      W=Normal(0.05),
                      nonlinearity=T.nnet.sigmoid))

    return disc_layers
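A sketch of reading both heads of this discriminator: disc_layers[-2] is the weight-normed 2-unit linear head, disc_layers[-1] the 128-unit sigmoid code built on the pooled features (x is illustrative):

import theano.tensor as T
import lasagne.layers as ll

x = T.tensor4('x')
disc_layers = get_discriminator_binary()
class_out, binary_code = ll.get_output([disc_layers[-2], disc_layers[-1]], x)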