Code Example #1
def get_generator(batch_size, theano_rng, noise_length=100):

    noise_dim = (batch_size, noise_length)
    noise = theano_rng.uniform(size=noise_dim)
    gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    gen_layers.append(
        nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                    num_units=4 * 4 * 512,
                                    W=Normal(0.05),
                                    nonlinearity=nn.relu),
                      g=None))
    gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (batch_size, 512, 4, 4)))
    gen_layers.append(
        nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (batch_size, 256, 8, 8),
                                       (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 4 -> 8
    gen_layers.append(
        nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                       (batch_size, 128, 16, 16), (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 8 -> 16
    gen_layers.append(
        nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                        (batch_size, 3, 32, 32), (5, 5),
                                        W=Normal(0.05),
                                        nonlinearity=T.tanh),
                       train_g=True,
                       init_stdv=0.1))  # 16 -> 32

    return gen_layers
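
The function above only builds the layer stack; drawing images still requires compiling the symbolic output into a Theano function. A minimal sketch, assuming the same `nn` helper module, and noting that the noise is already baked into the `InputLayer`, so the compiled function takes no inputs (Code Example #4 below does exactly this):

import theano as th
import lasagne.layers as ll
from theano.sandbox.rng_mrg import MRG_RandomStreams

theano_rng = MRG_RandomStreams(42)
gen_layers = get_generator(batch_size=100, theano_rng=theano_rng)

# The uniform noise is part of the graph, so no inputs are needed.
gen_dat = ll.get_output(gen_layers[-1], deterministic=False)
sample_batch = th.function(inputs=[], outputs=gen_dat)

images = sample_batch()  # shape (100, 3, 32, 32), tanh output in [-1, 1]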
Code Example #2
    def g_architecture(self):
        gen_layers = [lasagne.layers.InputLayer(shape=self.size_input, input_var=self.distribution)]
        gen_layers.append(lasagne.layers.batch_norm(lasagne.layers.DenseLayer(gen_layers[-1], num_units=4*4*512, nonlinearity=lasagne.nonlinearities.rectify)))
        gen_layers.append(lasagne.layers.ReshapeLayer(gen_layers[-1], (self.BATCH_SIZE, 512, 4, 4)))
        gen_layers.append(lasagne.layers.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE, 256, 8, 8), (5, 5), W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.rectify)))   # 4 -> 8
        gen_layers.append(lasagne.layers.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE, 128, 16, 16), (5, 5), W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.rectify)))  # 8 -> 16
        gen_layers.append(lasagne.layers.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE, 64, 32, 32), (5, 5), W=lasagne.init.Normal(0.05), nonlinearity=lasagne.nonlinearities.rectify)))   # 16 -> 32
        gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (self.BATCH_SIZE, 3, 64, 64), (5, 5), W=lasagne.init.Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1))          # 32 -> 64
        self.architecture = gen_layers
        return
Code Example #3
File: train_mixgan.py  Project: yaserkl/MIX-plus-GANs
    def get_generator(self, meanx, z0, y_1hot):
        ''' specify generator G0, gen_x = G0(z0, y_1hot) '''
        gen_x_layer_z = LL.InputLayer(shape=(self.args.batch_size, self.args.z0dim), input_var=z0)  # noise input z0
        gen_x_layer_y = LL.InputLayer(shape=(self.args.batch_size, 10), input_var=y_1hot)  # one-hot class labels
        gen_x_layer_y_z = LL.ConcatLayer([gen_x_layer_y, gen_x_layer_z], axis=1)  # concatenate labels and noise: 10 + z0dim
        gen_x_layer_pool2 = LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen_x_layer_y_z, num_units=256*5*5)), (self.args.batch_size, 256, 5, 5))
        gen_x_layer_dconv2_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_pool2, (self.args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))
        gen_x_layer_dconv2_2 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_1, (self.args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=nn.relu))

        gen_x_layer_dconv1_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_2, (self.args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                         W=Normal(0.02),  nonlinearity=nn.relu))
        gen_x_layer_x = nn.Deconv2DLayer(gen_x_layer_dconv1_1, (self.args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                         W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)
        # gen_x_layer_x = dnn.Conv2DDNNLayer(gen_x_layer_dconv1_2, 3, (1,1), pad=0, stride=1, 
        #                  W=Normal(0.02), nonlinearity=T.nnet.sigmoid)

        gen_x_layers = [gen_x_layer_z, gen_x_layer_y, gen_x_layer_y_z, gen_x_layer_pool2, gen_x_layer_dconv2_1, 
            gen_x_layer_dconv2_2, gen_x_layer_dconv1_1, gen_x_layer_x]

        gen_x_pre = LL.get_output(gen_x_layer_x, deterministic=False)
        gen_x = gen_x_pre - meanx

        return gen_x_layers, gen_x 
Code Example #4
File: generate.py  Project: yux94/opt-mmd
def _sample_trained_minibatch_gan(params_file, n, batch_size, rs):
    import lasagne
    from lasagne.init import Normal
    import lasagne.layers as ll
    import theano as th
    from theano.sandbox.rng_mrg import MRG_RandomStreams
    import theano.tensor as T

    import nn

    theano_rng = MRG_RandomStreams(rs.randint(2**15))
    lasagne.random.set_rng(np.random.RandomState(rs.randint(2**15)))

    noise_dim = (batch_size, 100)
    noise = theano_rng.uniform(size=noise_dim)
    ls = [ll.InputLayer(shape=noise_dim, input_var=noise)]
    ls.append(
        nn.batch_norm(ll.DenseLayer(ls[-1],
                                    num_units=4 * 4 * 512,
                                    W=Normal(0.05),
                                    nonlinearity=nn.relu),
                      g=None))
    ls.append(ll.ReshapeLayer(ls[-1], (batch_size, 512, 4, 4)))
    ls.append(
        nn.batch_norm(nn.Deconv2DLayer(ls[-1], (batch_size, 256, 8, 8), (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 4 -> 8
    ls.append(
        nn.batch_norm(nn.Deconv2DLayer(ls[-1], (batch_size, 128, 16, 16),
                                       (5, 5),
                                       W=Normal(0.05),
                                       nonlinearity=nn.relu),
                      g=None))  # 8 -> 16
    ls.append(
        nn.weight_norm(nn.Deconv2DLayer(ls[-1], (batch_size, 3, 32, 32),
                                        (5, 5),
                                        W=Normal(0.05),
                                        nonlinearity=T.tanh),
                       train_g=True,
                       init_stdv=0.1))  # 16 -> 32
    gen_dat = ll.get_output(ls[-1])

    with np.load(params_file) as d:
        params = [d['arr_{}'.format(i)] for i in range(9)]
    ll.set_all_param_values(ls[-1], params, trainable=True)

    sample_batch = th.function(inputs=[], outputs=gen_dat)
    samps = []
    while len(samps) < n:
        samps.extend(sample_batch())
    samps = np.array(samps[:n])
    return samps
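
A usage sketch for the sampler above. The path 'gan_params.npz' is a placeholder; the file must hold the nine trainable parameter arrays (arr_0 .. arr_8) of the generator, saved with np.savez in layer order:

import numpy as np

rs = np.random.RandomState(0)
samples = _sample_trained_minibatch_gan('gan_params.npz',  # placeholder path
                                        n=1000, batch_size=100, rs=rs)
print(samples.shape)  # (1000, 3, 32, 32)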
Code Example #5
def generator(input_var):
    # NLAT, batch_size, Normal, nn, ll, and sigmoid are module-level names.
    network = ll.InputLayer(shape=(None, NLAT, 1, 1), input_var=input_var)
    network = ll.DenseLayer(network, num_units=4*4*64, W=Normal(0.05), nonlinearity=nn.relu)
    network = ll.ReshapeLayer(network, (batch_size, 64, 4, 4))
    network = nn.Deconv2DLayer(network, (batch_size, 32, 7, 7), (4, 4), stride=(1, 1), pad='valid', W=Normal(0.05), nonlinearity=nn.relu)    # 4 -> 7
    network = nn.Deconv2DLayer(network, (batch_size, 32, 11, 11), (5, 5), stride=(1, 1), pad='valid', W=Normal(0.05), nonlinearity=nn.relu)  # 7 -> 11
    network = nn.Deconv2DLayer(network, (batch_size, 32, 25, 25), (5, 5), stride=(2, 2), pad='valid', W=Normal(0.05), nonlinearity=nn.relu)  # 11 -> 25
    network = nn.Deconv2DLayer(network, (batch_size, 1, 28, 28), (4, 4), stride=(1, 1), pad='valid', W=Normal(0.05), nonlinearity=sigmoid)   # 25 -> 28
    return network
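
The `# 4 -> 7` style comments in these examples follow the transposed-convolution size rule: output = (in - 1) * stride - 2 * pad + filter, plus up to stride - 1 extra pixels when stride > 1, which is exactly why `nn.Deconv2DLayer` takes an explicit target shape. A pure-Python sanity check (`deconv_out_sizes` is a helper written for this note, not part of any example):

def deconv_out_sizes(in_size, filter_size, stride, pad):
    """All output side lengths a transposed convolution can produce."""
    base = (in_size - 1) * stride - 2 * pad + filter_size
    return [base + a for a in range(stride)]

# Code Example #5 ('valid' padding, pad = 0): 4 -> 7 -> 11 -> 25 -> 28
assert deconv_out_sizes(4, 4, 1, 0) == [7]
assert deconv_out_sizes(7, 5, 1, 0) == [11]
assert 25 in deconv_out_sizes(11, 5, 2, 0)   # stride 2 is ambiguous: 25 or 26
assert deconv_out_sizes(25, 4, 1, 0) == [28]

# Code Example #3 ('half' padding, pad = 5 // 2 = 2): 5 -> 10, 14 -> 28
assert 10 in deconv_out_sizes(5, 5, 2, 2)    # 9 or 10
assert 28 in deconv_out_sizes(14, 5, 2, 2)   # 27 or 28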
Code Example #6
File: train_joint.py  Project: zghyfbmw/SGAN
gen0_layers.append(
    LL.ConcatLayer([gen0_layer_fc3, gen0_layer_z_embed],
                   axis=1))  # concatenate noise and fc3 features
gen0_layers.append(
    LL.ReshapeLayer(
        nn.batch_norm(
            LL.DenseLayer(gen0_layers[-1],
                          num_units=128 * 4 * 4,
                          W=Normal(0.02),
                          nonlinearity=T.nnet.relu)),
        (args.batch_size, 128, 4, 4)))  # fc
gen0_layers.append(
    nn.batch_norm(
        nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size, 128, 8, 8), (5, 5),
                         stride=(2, 2),
                         padding='half',
                         W=Normal(0.02),
                         nonlinearity=nn.relu)))  # deconv
gen0_layers.append(
    nn.batch_norm(
        nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size, 64, 12, 12),
                         (5, 5),
                         stride=(1, 1),
                         padding='valid',
                         W=Normal(0.02),
                         nonlinearity=nn.relu)))  # deconv

gen0_layers.append(
    nn.batch_norm(
        nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size, 64, 24, 24),
                         (5, 5),
                         stride=(2, 2),
                         padding='half',
                         W=Normal(0.02),
                         nonlinearity=nn.relu)))  # deconv, 12 -> 24
Code Example #7
File: sampling.py  Project: zghyfbmw/SGAN
gen1_layers.append(LL.ConcatLayer([gen1_layer_z,gen1_layer_y],axis=1))
gen1_layers.append(nn.batch_norm(LL.DenseLayer(gen1_layers[-1], num_units=512, W=Normal(0.02), nonlinearity=T.nnet.relu)))
gen1_layers.append(nn.batch_norm(LL.DenseLayer(gen1_layers[-1], num_units=512, W=Normal(0.02), nonlinearity=T.nnet.relu))) 
gen1_layers.append(LL.DenseLayer(gen1_layers[-1], num_units=256, W=Normal(0.02), nonlinearity=T.nnet.relu)) 
                   
''' specify generator G0, gen_x = G0(z0, h1) '''
z0 = theano_rng.uniform(size=(args.batch_size, 16)) # uniform noise
gen0_layers = [LL.InputLayer(shape=(args.batch_size, 16), input_var=z0)] # Input layer for z0
gen0_layers.append(nn.batch_norm(LL.DenseLayer(nn.batch_norm(LL.DenseLayer(gen0_layers[0], num_units=128, W=Normal(0.02), nonlinearity=nn.relu)),
                  num_units=128, W=Normal(0.02), nonlinearity=nn.relu))) # noise embedding, 16 -> 128
gen0_layer_z_embed = gen0_layers[-1] 

gen0_layers.append(LL.ConcatLayer([gen1_layers[-1],gen0_layer_z_embed], axis=1)) # concatenate noise and fc3 features
gen0_layers.append(LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen0_layers[-1], num_units=256*5*5, W=Normal(0.02), nonlinearity=T.nnet.relu)),
                 (args.batch_size,256,5,5))) # fc
gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                 W=Normal(0.02),  nonlinearity=nn.relu))) # deconv
gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                 W=Normal(0.02),  nonlinearity=nn.relu))) # deconv

gen0_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                 W=Normal(0.02),  nonlinearity=nn.relu))) # deconv
gen0_layers.append(nn.Deconv2DLayer(gen0_layers[-1], (args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                 W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)) # deconv

gen_fc3, gen_x_pre = LL.get_output([gen1_layers[-1], gen0_layers[-1]], deterministic=True)
gen_x = gen_x_pre - meanx

weights_toload = np.load('pretrained/generator.npz')
weights_list_toload = [weights_toload['arr_{}'.format(k)] for k in range(len(weights_toload.files))]
LL.set_all_param_values(gen0_layers[-1], weights_list_toload)
Code Example #8
 gen_layers.append(
     nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                 num_units=4 * 4 * 512,
                                 W=Normal(0.05),
                                 nonlinearity=nn.relu,
                                 name='gen-01'),
                   g=None,
                   name='gen-02'))
 gen_layers.append(
     ll.ReshapeLayer(gen_layers[-1], (-1, 512, 4, 4), name='gen-03'))
 gen_layers.append(
     ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes,
                     name='gen-10'))
 gen_layers.append(
     nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 256, 8, 8),
                                    (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=nn.relu,
                                    name='gen-11'),
                   g=None,
                   name='gen-12'))  # 4 -> 8
 gen_layers.append(
     ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes,
                     name='gen-20'))
 gen_layers.append(
     nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 128, 16, 16),
                                    (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=nn.relu,
                                    name='gen-21'),
                   g=None,
                   name='gen-22'))  # 8 -> 16
Code Example #9
File: tgan_cifar.py  Project: wrccrwx/tripletGAN
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))

# load CIFAR-10
trainx, trainy = cifar10_data.load(args.data_dir, subset='train')
trainx_unl = trainx.copy()
nr_batches_train = int(trainx.shape[0]/args.batch_size)


# specify generative model
noise_dim = (args.batch_size, 100)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
Code Example #10
#y_pred, real_pool3 = LL.get_output([fc8, poo5], x, deterministic=False)
# real_pool3 = LL.get_output(poo5, x, deterministic=False)
#enc_error = T.mean(T.neq(T.argmax(y_pred,axis=1),y)) # classification error of the encoder, to make sure the encoder is working properly


# specify generator, gen_x = G(z, real_pool3)
z = theano_rng.uniform(size=(args.batch_size, 50)) # uniform noise
# y_1hot = T.matrix()
gen_x_layer_z = LL.InputLayer(shape=(args.batch_size, 50), input_var=z) # z, 50-dim uniform noise
# gen_x_layer_z_embed = nn.batch_norm(LL.DenseLayer(gen_x_layer_z, num_units=128), g=None) # 20 -> 64

gen_x_layer_y = LL.InputLayer(shape=(args.batch_size, 10), input_var=y_1hot) # conditioned on one-hot class labels
gen_x_layer_y_z = LL.ConcatLayer([gen_x_layer_y,gen_x_layer_z],axis=1) # 10 + 50 = 60
gen_x_layer_pool2 = LL.ReshapeLayer(nn.batch_norm(LL.DenseLayer(gen_x_layer_y_z, num_units=256*5*5)), (args.batch_size,256,5,5))
gen_x_layer_dconv2_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_pool2, (args.batch_size,256,10,10), (5,5), stride=(2, 2), padding = 'half',
                 W=Normal(0.02),  nonlinearity=nn.relu))
gen_x_layer_dconv2_2 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_1, (args.batch_size,128,14,14), (5,5), stride=(1, 1), padding = 'valid',
                 W=Normal(0.02),  nonlinearity=nn.relu))

gen_x_layer_dconv1_1 = nn.batch_norm(nn.Deconv2DLayer(gen_x_layer_dconv2_2, (args.batch_size,128,28,28), (5,5), stride=(2, 2), padding = 'half',
                 W=Normal(0.02),  nonlinearity=nn.relu))
gen_x_layer_x = nn.Deconv2DLayer(gen_x_layer_dconv1_1, (args.batch_size,3,32,32), (5,5), stride=(1, 1), padding = 'valid',
                 W=Normal(0.02),  nonlinearity=T.nnet.sigmoid)
# gen_x_layer_x = dnn.Conv2DDNNLayer(gen_x_layer_dconv1_2, 3, (1,1), pad=0, stride=1, 
#                  W=Normal(0.02), nonlinearity=T.nnet.sigmoid)

print(gen_x_layer_x.output_shape)

gen_x_layers = [gen_x_layer_z, gen_x_layer_y, gen_x_layer_y_z, gen_x_layer_pool2, gen_x_layer_dconv2_1, 
    gen_x_layer_dconv2_2, gen_x_layer_dconv1_1, gen_x_layer_x]
Code Example #11
File: train_cifar10.py  Project: hhkunming/SSL_LGAN
gen_layers.append(
    nn.batch_norm(dnn.Conv2DDNNLayer(gen_layers[-1],
                                     256, (3, 3),
                                     pad=1,
                                     W=Normal(0.05),
                                     nonlinearity=nn.lrelu),
                  g=None))
gen_layers.append(
    nn.batch_norm(
        ll.NINLayer(gen_layers[-1],
                    num_units=512,
                    W=Normal(0.05),
                    nonlinearity=nn.lrelu)))
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (n_batch, 256, 8, 8),
                                   (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu),
                  g=None))  # 4 -> 8
gen_layers.append(
    nn.batch_norm(dnn.Conv2DDNNLayer(gen_layers[-1],
                                     128, (3, 3),
                                     pad=1,
                                     W=Normal(0.05),
                                     nonlinearity=nn.lrelu),
                  g=None))
gen_layers.append(
    nn.batch_norm(
        ll.NINLayer(gen_layers[-1],
                    num_units=256,
                    W=Normal(0.05),
                    nonlinearity=nn.lrelu)))
Code Example #12
# specify generative model
noise_dim = (args.batch_size, 100)
print("Compiling......")
labels = T.ivector()
x_lab = T.tensor4()
labels_gen = T.ivector()
gen_in_z = ll.InputLayer(shape=noise_dim)
noise = theano_rng.uniform(size=noise_dim)
gen_in_y = ll.InputLayer(shape=(args.batch_size,))
gen_layers = [gen_in_z]
gen_layers.append(nn.MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(ll.DenseLayer(gen_layers[-1], num_units=7 * 7 * 512, W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size, 512, 7, 7)))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 512, 14, 14), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 256, 28, 28), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 128, 56, 56), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 128, 112, 112), (5, 5), W=Normal(0.05), nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size, 1, 224, 224), (5, 5), W=Normal(0.05), nonlinearity=T.tanh))
gen_layers.append(nn.weight_norm(gen_layers[-1], train_g=True, init_stdv=0.1))

# specify discriminative model
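
Examples #12 and #15-#20 condition the generator on class labels with `MLPConcatLayer` and `ConvConcatLayer` from the `nn` helper module, which is not shown on this page. In the common implementations these layers append a one-hot encoding of the label to the activations; the numpy sketch below assumes that behavior (both functions are written for this note):

import numpy as np

def mlp_concat(h, y, num_classes):
    # (N, D) -> (N, D + num_classes): append one-hot labels to dense activations.
    one_hot = np.eye(num_classes, dtype=h.dtype)[y]
    return np.concatenate([h, one_hot], axis=1)

def conv_concat(fmap, y, num_classes):
    # (N, C, H, W) -> (N, C + num_classes, H, W): one constant map per class.
    n, _, height, width = fmap.shape
    one_hot = np.eye(num_classes, dtype=fmap.dtype)[y]
    maps = np.tile(one_hot[:, :, None, None], (1, 1, height, width))
    return np.concatenate([fmap, maps], axis=1)

h = np.zeros((4, 512), dtype='float32')
fmap = np.zeros((4, 256, 8, 8), dtype='float32')
y = np.array([0, 1, 2, 3])
assert mlp_concat(h, y, 10).shape == (4, 522)
assert conv_concat(fmap, y, 10).shape == (4, 266, 8, 8)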
Code Example #13
    def build_network(self, X, Y):
        # Define the layers

        lrelu = lasagne.nonlinearities.LeakyRectify(0.1)
        input_shape = self.Size_Input
        #temp = input_shape[1]*input_shape[2]*input_shape[3]
        Auto_Enc_Layer = [
            lasagne.layers.InputLayer(shape=(None, input_shape[1],
                                             input_shape[2], input_shape[3]),
                                      input_var=X)
        ]
        # Encode lasagne.nonlinearities.rectify
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-2],
                               64, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))

        Auto_Enc_Layer_Global0 = nn.weight_norm(
            lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                    num_units=32,
                                    W=lasagne.init.Normal(0.05),
                                    nonlinearity=lrelu))
        Auto_Enc_Layer_Global = lasagne.layers.GlobalPoolLayer(
            Auto_Enc_Layer_Global0)

        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               64, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            lasagne.layers.DropoutLayer(Auto_Enc_Layer[-1], p=0.5))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            nn.weight_norm(
                Conv2DDNNLayer(Auto_Enc_Layer[-1],
                               128, (3, 3),
                               pad=1,
                               stride=2,
                               W=lasagne.init.Normal(0.05),
                               nonlinearity=lrelu)))

        Auto_Enc_Layer_Local0 = nn.weight_norm(
            lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                    num_units=128,
                                    W=lasagne.init.Normal(0.05),
                                    nonlinearity=lrelu))
        Auto_Enc_Layer_Local = lasagne.layers.GlobalPoolLayer(
            Auto_Enc_Layer_Local0)

        Auto_Enc_Layer.append(
            nn.weight_norm(
                lasagne.layers.NINLayer(Auto_Enc_Layer[-1],
                                        num_units=512,
                                        W=lasagne.init.Normal(0.05),
                                        nonlinearity=lrelu)))
        Auto_Enc_Layer.append(
            lasagne.layers.GlobalPoolLayer(Auto_Enc_Layer[-1]))

        Auto_Enc_Layer.append(
            nn.weight_norm(
                DenseLayer(Auto_Enc_Layer_Local,
                           num_units=32 * 16 * 16,
                           W=lasagne.init.Normal(0.05),
                           nonlinearity=lasagne.nonlinearities.tanh)))
        # Decode

        Auto_Dec_Layer = [
            (lasagne.layers.ReshapeLayer(Auto_Enc_Layer[-1],
                                         (self.Batch_Size, 32, 16, 16)))
        ]
        Auto_Dec_Layer.append(
            nn.weight_norm(
                nn.Deconv2DLayer(Auto_Dec_Layer[-1],
                                 (self.Batch_Size, 32, 32, 32), (3, 3),
                                 W=lasagne.init.Normal(0.05),
                                 nonlinearity=lrelu)))  # 4 -> 8
        Auto_Dec_Layer.append(
            nn.weight_norm(
                nn.Deconv2DLayer(
                    Auto_Dec_Layer[-1], (self.Batch_Size, 3, 64, 64), (3, 3),
                    W=lasagne.init.Normal(0.05),
                    nonlinearity=lasagne.nonlinearities.tanh)))  # 4 -> 8

        all_params = lasagne.layers.get_all_params(Auto_Dec_Layer,
                                                   trainable=True)
        network_output = lasagne.layers.get_output(Auto_Dec_Layer[-1],
                                                   X,
                                                   deterministic=False)

        encoded_output = lasagne.layers.get_output(Auto_Enc_Layer_Local,
                                                   X,
                                                   deterministic=True)
        encoded_output1 = lasagne.layers.get_output(Auto_Enc_Layer_Global,
                                                    X,
                                                    deterministic=True)
        #encoded_output1 = Auto_Enc_Layer_Global
        network_output1 = lasagne.layers.get_output(Auto_Dec_Layer[-1],
                                                    X,
                                                    deterministic=True)

        loss_A = T.mean(
            lasagne.objectives.squared_error(
                network_output[:, 0, :, :], Y[:, 0, :, :])) + T.mean(
                    lasagne.objectives.squared_error(
                        network_output[:, 1, :, :], Y[:, 1, :, :])) + T.mean(
                            lasagne.objectives.squared_error(
                                network_output[:, 2, :, :], Y[:, 2, :, :]))

        #loss_A = T.mean(lasagne.objectives.squared_error(network_output,Y))

        loss = [loss_A, encoded_output, network_output]
        #Autoencodeur_params_updates = lasagne.updates.momentum(loss_A,all_params,learning_rate = 0.05,momentum = 0.5)
        Autoencodeur_params_updates = lasagne.updates.adam(loss_A,
                                                           all_params,
                                                           learning_rate=0.001,
                                                           beta1=0.9)
        # Some Theano functions

        self.generate_fn_X = theano.function([X], network_output)
        self.train = theano.function([X, Y],
                                     loss,
                                     updates=Autoencodeur_params_updates,
                                     allow_input_downcast=True)
        self.predict = theano.function([X],
                                       network_output1,
                                       allow_input_downcast=True)
        self.encode_L = theano.function([X],
                                        encoded_output,
                                        allow_input_downcast=True)
        self.encode_G = theano.function([X],
                                        encoded_output1,
                                        allow_input_downcast=True)
        self.network = Auto_Enc_Layer

        return
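
Once `build_network` has run, training and inference reduce to the compiled Theano functions it stores on the instance. A hypothetical usage sketch; the class name `AutoEncoder`, its constructor, and the random data are placeholders, not part of the original project:

import numpy as np

model = AutoEncoder(...)  # hypothetical: the constructor must call build_network
X_batch = np.random.rand(32, 3, 64, 64).astype('float32')  # placeholder data

# self.train returns [loss_A, encoded_output, network_output].
loss_A, codes, recon = model.train(X_batch, X_batch)  # plain autoencoding: Y = X

local_codes = model.encode_L(X_batch)   # 128-unit pooled local code
global_codes = model.encode_G(X_batch)  # 32-unit pooled global code
images_out = model.predict(X_batch)     # deterministic reconstruction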
Code Example #14
    def build_generator(self, version=1, encode=False):

        #from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
        global mask

        if mask is None:
            mask = T.zeros(shape=(self.batch_size, 1, 64, 64),
                           dtype=theano.config.floatX)
            mask = T.set_subtensor(mask[:, :, 16:48, 16:48], 1.)
            self.mask = mask

        noise_dim = (self.batch_size, 100)
        theano_rng = MRG_RandomStreams(rng.randint(2**15))
        noise = theano_rng.uniform(size=noise_dim)
        # mask_color = T.cast(T.cast(theano_rng.uniform(size=(self.batch_size,), low=0., high=2.), 'int16').dimshuffle(0, 'x', 'x', 'x') * mask, dtype=theano.config.floatX)
        input = ll.InputLayer(shape=noise_dim, input_var=noise)

        cropped_image = T.cast(T.zeros_like(self.input_) * mask +
                               (1. - mask) * self.input_,
                               dtype=theano.config.floatX)
        encoder_input = T.concatenate([cropped_image, mask],
                                      axis=1)  # concatenate along the channel axis

        if version == 1:
            if encode:
                gen_layers = [
                    ll.InputLayer(shape=(self.batch_size, 4, 64, 64),
                                  input_var=encoder_input)
                ]  #  3 x 64 x 64 -->  64 x 32 x 32

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       64,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  #  64 x 32 x 32 --> 128 x 16 x 16

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       128,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 128 x 16 x 16 -->  256 x 8 x 8

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       256,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 256 x 8 x 8 --> 512 x 4 x 4

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       512,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 512 x 4 x 4 --> 1024 x 2 x 2

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       4000,
                                       4,
                                       4,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 1024 x 2 x 2 --> 2048 x 1 x 1

                #gen_layers.append(nn.batch_norm(ll.Conv2DLayer(gen_layers[-1], 2048, 4, 2, pad=1, nonlinearity=nn.lrelu)))
                # flatten this out
                #gen_layers.append(ll.FlattenLayer(gen_layers[-1]))

                gen_layers.append(
                    nn.batch_norm(
                        nn.Deconv2DLayer(gen_layers[-1],
                                         (self.batch_size, 128 * 4, 4, 4),
                                         (5, 5),
                                         stride=(4, 4))))
                # concat with noise
                latent_size = 2048

            else:
                gen_layers = [input]
                latent_size = 100

                # TODO: put batchnorm back on all layers, + g=None
                gen_layers.append(
                    ll.DenseLayer(gen_layers[-1],
                                  128 * 8 * 4 * 4,
                                  W=Normal(0.02)))
                gen_layers.append(
                    ll.ReshapeLayer(gen_layers[-1],
                                    (self.batch_size, 128 * 8, 4, 4)))

            # array of mixing coefficients (shared Theano floats) used to mix
            # the generated output and the input image at each layer
            mixing_coefs = [
                theano.shared(lasagne.utils.floatX(0.05)) for i in range(2)
            ]
            mixing_coefs.append(theano.shared(lasagne.utils.floatX(1)))
            border = 2
            gen_layers.append(
                nn.batch_norm(nn.Deconv2DLayer(
                    gen_layers[-1], (self.batch_size, 128 * 2, 8, 8), (5, 5),
                    W=Normal(0.02),
                    nonlinearity=nn.relu),
                              g=None))  # 4 -> 8
            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            #gen_layers.append(nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[0], border=border))

            #layer_a = nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[0]) # all new
            #layer_concat_a = ll.ConcatLayer([layer_a, gen_layers[-1]], axis=1)
            #gen_layers.append(layer_concat_a)

            gen_layers.append(
                nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                               (self.batch_size, 128, 16, 16),
                                               (5, 5),
                                               W=Normal(0.02),
                                               nonlinearity=nn.relu),
                              g=None))  # 8 -> 16

            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            #gen_layers.append(nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[1], border=border*2))
            #layer_b = nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[1]) # all new
            #layer_concat_b = ll.ConcatLayer([layer_b, gen_layers[-1]], axis=1)
            #gen_layers.append(layer_concat_b)

            gen_layers.append(
                nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                               (self.batch_size, 64, 32, 32),
                                               (5, 5),
                                               W=Normal(0.02),
                                               nonlinearity=nn.relu),
                              g=None))  # 16 -> 32

            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            #gen_layers.append(nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[2], border=border*2*2))
            #layer_c = nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[1]) # all new
            #layer_concat_c = ll.ConcatLayer([layer_c, gen_layers[-1]], axis=1)
            #gen_layers.append(layer_concat_c)

            gen_layers.append(
                nn.Deconv2DLayer(
                    gen_layers[-1], (self.batch_size, 3, 64, 64), (5, 5),
                    W=Normal(0.02),
                    nonlinearity=lasagne.nonlinearities.sigmoid))  # 32 -> 64

#gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
#gen_layers.append(nn.ResetDeconvLayer(gen_layers[-1], cropped_image, mixing_coefs[3], border=border*2*2*2, trainable=False))

        for layer in gen_layers:
            print(layer.output_shape)
        print('')

        GAN.mixing_coefs = mixing_coefs

        return gen_layers
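
The mask above blanks the centre 32x32 square of a 64x64 image and hands the generator the remaining border plus the mask itself as a fourth channel. The same arithmetic in plain numpy, to make the `cropped_image` and `encoder_input` expressions concrete (variable names chosen for this sketch):

import numpy as np

images = np.random.rand(2, 3, 64, 64).astype('float32')

mask = np.zeros((2, 1, 64, 64), dtype='float32')
mask[:, :, 16:48, 16:48] = 1.0             # 1 inside the hole, 0 on the border

cropped = (1.0 - mask) * images            # 0 * mask + (1 - mask) * input_
assert np.all(cropped[:, :, 16:48, 16:48] == 0)

encoder_input = np.concatenate([cropped, mask], axis=1)  # mask as 4th channel
assert encoder_input.shape == (2, 4, 64, 64)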
Code Example #15
File: generation.py  Project: superman97/DADA
gen_in_z = ll.InputLayer(shape=(batch_size_g, n_z))
gen_in_y = ll.InputLayer(shape=(batch_size_g, ))
gen_layers = [gen_in_z]
# gen_layers = [(nn.MoGLayer(gen_in_z, noise_dim=(batch_size_g, n_z)))]
gen_layers.append(nn.MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(
    ll.DenseLayer(gen_layers[-1],
                  num_units=4 * 4 * 512,
                  W=Normal(0.05),
                  nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (batch_size_g, 512, 4, 4)))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(
    nn.Deconv2DLayer(gen_layers[-1], (batch_size_g, 256, 8, 8), (5, 5),
                     W=Normal(0.05),
                     nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(
    nn.Deconv2DLayer(gen_layers[-1], (batch_size_g, 128, 16, 16), (5, 5),
                     W=Normal(0.05),
                     nonlinearity=nn.relu))
gen_layers.append(nn.batch_norm(gen_layers[-1], g=None))
gen_layers.append(nn.ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes))
gen_layers.append(
    nn.Deconv2DLayer(gen_layers[-1], (batch_size_g, 3, 32, 32), (5, 5),
                     W=Normal(0.05),
                     nonlinearity=T.tanh))
gen_layers.append(nn.weight_norm(gen_layers[-1], train_g=True, init_stdv=0.1))
# for layer in gen_layers:
Code Example #16
# specify generative model
gen_layers = [z_input]
gen_layers.append(
    nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                num_units=4 * 4 * 512,
                                W=Normal(0.05),
                                nonlinearity=nn.relu,
                                name='g1'),
                  g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1],
                                  (args.batch_size, 512, 4, 4)))
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                   (args.batch_size, 256, 8, 8), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='g2'),
                  g=None))  # 4 -> 8
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                   (args.batch_size, 128, 16, 16), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='g3'),
                  g=None))  # 8 -> 16
gen_layers.append(
    nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                    (args.batch_size, 3, 32, 32), (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=T.tanh,
                                    name='g4'),
                   train_g=True,
                   init_stdv=0.1))  # 16 -> 32
Code Example #17
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(
    nn.MoGLayer(gen_layers[-1], noise_dim=noise_dim, z=Z, sig=sig)
)  #  Comment this line when testing/training baseline GAN model
gen_layers.append(
    nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                num_units=4 * 4 * gen_dim * 4,
                                W=Normal(0.05),
                                nonlinearity=nn.relu),
                  g=None))
gen_layers.append(
    ll.ReshapeLayer(gen_layers[-1], (args.batch_size, gen_dim * 4, 4, 4)))
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                   (args.batch_size, gen_dim * 2, 8, 8),
                                   (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu),
                  g=None))  # 4 -> 8
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                   (args.batch_size, gen_dim, 16, 16), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu),
                  g=None))  # 8 -> 16
gen_layers.append(
    nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1],
                                    (args.batch_size, 1, 32, 32), (5, 5),
                                    W=Normal(0.05),
                                    nonlinearity=T.tanh),
                   train_g=True,
                   init_stdv=0.1))  # 16 -> 32
Code Example #18
File: ICAT_Htru_Subints.py  Project: gzmtzly/ICAT
gen_layers.append(
    nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                num_units=512 * 4 * 4,
                                nonlinearity=ln.linear,
                                name='gen-6'),
                  g=None,
                  name='gen-61'))

gen_layers.append(
    ll.ReshapeLayer(gen_layers[-1], (-1, 512, 4, 4), name='gen-7'))

gen_layers.append(
    ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-8'))
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 256, 8, 8),
                                   filter_size=(4, 4),
                                   stride=(2, 2),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='gen-11'),
                  g=None,
                  name='gen-12'))

gen_layers.append(
    ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-9'))
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 128, 16, 16),
                                   filter_size=(4, 4),
                                   stride=(2, 2),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='gen-11'),
                  g=None,
                  name='gen-12'))  # 8 -> 16
Code Example #19
cla_layers.append(ll.DenseLayer(cla_layers[-1], num_units=num_classes, W=lasagne.init.Normal(1e-2, 0),
                                nonlinearity=ln.softmax, name='cla-6'))


################# Generator
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]

gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-5'))
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=512*4*4, nonlinearity=ln.linear, name='gen-6'), g=None, name='gen-61'))

gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1, 512, 4, 4), name='gen-7'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-8'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 256, 8, 8), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-9'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 128, 16, 16), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 64, 32, 32), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12'))

gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-11'))
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 1, 64, 64), filter_size=(4,4), stride=(2, 2), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32'))



########## Discriminators
dis_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
dis_in_y = ll.InputLayer(shape=(None,))
Code Example #20
# symbols
sym_y_g = T.ivector()
sym_z_input = T.matrix()
sym_z_rand = theano_rng.uniform(size=(batch_size_g, n_z))
sym_z_shared = T.tile(theano_rng.uniform((batch_size_g/num_classes, n_z)), (num_classes, 1))

# generator y2x: p_g(x, y) = p(y) p_g(x | y) where x = G(z, y), z follows p_g(z)
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]
if args.dataset == 'svhn' or args.dataset == 'cifar10':
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
    gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
    gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
    gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
    gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-30'))
    gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None,3,32,32), (5,5), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32')) # 16 -> 32
elif args.dataset == 'mnist':
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-1'))
    gen_layers.append(ll.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=500, nonlinearity=ln.softplus, name='gen-2'), name='gen-3'))
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-4'))
    gen_layers.append(ll.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=500, nonlinearity=ln.softplus, name='gen-5'), name='gen-6'))
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-7'))
    gen_layers.append(nn.l2normalize(ll.DenseLayer(gen_layers[-1], num_units=28**2, nonlinearity=gen_final_non, name='gen-8')))

# outputs
gen_out_x = ll.get_output(gen_layers[-1], {gen_in_y:sym_y_g, gen_in_z:sym_z_rand}, deterministic=False)
gen_out_x_shared = ll.get_output(gen_layers[-1], {gen_in_y:sym_y_g, gen_in_z:sym_z_shared}, deterministic=False)
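
`sym_z_shared` above tiles one block of `batch_size_g / num_classes` noise vectors `num_classes` times, so each class block in the batch reuses exactly the same z; fed together with `sym_y_g`, this yields a class grid with matched noise. The tiling in plain numpy (shapes chosen for this sketch):

import numpy as np

num_classes, batch_size_g, n_z = 10, 100, 100
z_block = np.random.uniform(size=(batch_size_g // num_classes, n_z))

z_shared = np.tile(z_block, (num_classes, 1))  # same as T.tile(..., (num_classes, 1))
assert z_shared.shape == (batch_size_g, n_z)
assert np.allclose(z_shared[:10], z_shared[10:20])  # identical noise per class block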
Code Example #21
    def build_generator(self, version=1, encode=False):

        #from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
        global mask

        if mask is None:
            mask = T.zeros(shape=(self.batch_size, 1, 64, 64),
                           dtype=theano.config.floatX)
            mask = T.set_subtensor(mask[:, :, 16:48, 16:48], 1.)
            self.mask = mask

        noise_dim = (self.batch_size, 100)
        theano_rng = MRG_RandomStreams(rng.randint(2**15))
        noise = theano_rng.uniform(size=noise_dim)
        input = ll.InputLayer(shape=noise_dim, input_var=noise)

        cropped_image = T.cast(T.zeros_like(self.input_) * mask +
                               (1. - mask) * self.input_,
                               dtype=theano.config.floatX)
        encoder_input = T.concatenate([cropped_image, mask], axis=1)

        if version == 1:
            if encode:
                gen_layers = [
                    ll.InputLayer(shape=(self.batch_size, 4, 64, 64),
                                  input_var=encoder_input)
                ]  #  3 x 64 x 64 -->  64 x 32 x 32

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       64,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  #  64 x 32 x 32 --> 128 x 16 x 16

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       128,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 128 x 16 x 16 -->  256 x 8 x 8

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       256,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 256 x 8 x 8 --> 512 x 4 x 4

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       512,
                                       4,
                                       2,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 512 x 4 x 4 --> 1024 x 2 x 2

                gen_layers.append(
                    nn.batch_norm(
                        ll.Conv2DLayer(gen_layers[-1],
                                       4000,
                                       4,
                                       4,
                                       pad=1,
                                       nonlinearity=nn.lrelu))
                )  # 1024 x 2 x 2 --> 2048 x 1 x 1

                #gen_layers.append(nn.batch_norm(ll.Conv2DLayer(gen_layers[-1], 2048, 4, 2, pad=1, nonlinearity=nn.lrelu)))
                # flatten this out
                #gen_layers.append(ll.FlattenLayer(gen_layers[-1]))

                gen_layers.append(
                    nn.batch_norm(
                        nn.Deconv2DLayer(gen_layers[-1],
                                         (self.batch_size, 128 * 4, 4, 4),
                                         (5, 5),
                                         stride=(4, 4))))
                # concat with noise
                latent_size = 2048

            else:
                gen_layers = [input]
                latent_size = 100

            gen_layers.append(
                nn.batch_norm(
                    ll.DenseLayer(gen_layers[-1],
                                  128 * 8 * 4 * 4,
                                  W=Normal(0.02))))
            gen_layers.append(
                ll.ReshapeLayer(gen_layers[-1],
                                (self.batch_size, 128 * 8, 4, 4)))

            # creating array of mixing coefficients (shared Theano floats) that will be used for mixing generated_output and image at each layer
            mixing_coefs = [
                theano.shared(lasagne.utils.floatX(0.25)) for i in range(3)
            ]
            mixing_coefs.append(theano.shared(lasagne.utils.floatX(0.9)))
            border = 2

            gen_layers.append(
                nn.batch_norm(nn.Deconv2DLayer(
                    gen_layers[-1], (self.batch_size, 128 * 2, 8, 8), (5, 5),
                    W=Normal(0.02),
                    nonlinearity=nn.relu),
                              g=None))  # 4 -> 8
            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            if reset:  # 'reset' is presumably a module-level flag enabling the ResetDeconvLayer mixing
                gen_layers.append(
                    nn.ResetDeconvLayer(gen_layers[-1], cropped_image,
                                        mixing_coefs[0]))

            gen_layers.append(
                nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                               (self.batch_size, 128, 16, 16),
                                               (5, 5),
                                               W=Normal(0.02),
                                               nonlinearity=nn.relu),
                              g=None))  # 8 -> 16
            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            if reset:
                gen_layers.append(
                    nn.ResetDeconvLayer(gen_layers[-1], cropped_image,
                                        mixing_coefs[1]))

            gen_layers.append(
                nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1],
                                               (self.batch_size, 64, 32, 32),
                                               (5, 5),
                                               W=Normal(0.02),
                                               nonlinearity=nn.relu),
                              g=None))  # 16 -> 32
            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            if reset:
                gen_layers.append(
                    nn.ResetDeconvLayer(gen_layers[-1], cropped_image,
                                        mixing_coefs[2]))

            gen_layers.append(
                nn.Deconv2DLayer(gen_layers[-1], (self.batch_size, 3, 64, 64),
                                 (5, 5),
                                 W=Normal(0.02),
                                 nonlinearity=T.tanh))  # 32 -> 64

            #gen_layers.append(ll.DropoutLayer(gen_layers[-1],p=0.5))
            if reset:
                gen_layers.append(
                    nn.ResetDeconvLayer(gen_layers[-1],
                                        cropped_image,
                                        mixing_coefs[3],
                                        trainable=False))

        for layer in gen_layers:
            print(layer.output_shape)
        print('')

        GAN.mixing_coefs = mixing_coefs

        return gen_layers