'''
# --- Symbolic inputs ---------------------------------------------------
# Class labels for the samples to generate (int32 vector).
sym_y_g = T.ivector()
# Externally supplied latent codes, one row per sample.
sym_z_input = T.matrix()
# Fresh latent codes drawn uniformly, one per generated sample.
sym_z_rand = theano_rng.uniform(size=(batch_size_g, n_z))
# One latent block shared across all classes: draw batch_size_g//num_classes
# codes and tile them num_classes times along the batch axis.
# `//` makes the integer division explicit (identical to Python 2's int `/`,
# and still correct under Python 3).
sym_z_shared = T.tile(theano_rng.uniform((batch_size_g // num_classes, n_z)), (num_classes, 1))

# generator y2x: p_g(x, y) = p(y) p_g(x | y) where x = G(z, y), z follows p_g(z)
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]
if args.dataset in ('svhn', 'cifar10'):
    # Conditional DCGAN-style generator for 32x32x3 images: project z (with
    # the label y concatenated by the *ConcatLayer helpers before every
    # stage) to a 4x4x512 map, then upsample with three deconvolutions.
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
    gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
    gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
    gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
    gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
    gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
    # Final layer uses weight norm with a trainable gain instead of batch norm.
    gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None,3,32,32), (5,5), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32')) # 16 -> 32
elif args.dataset == 'mnist':
    # Fully-connected conditional generator for 28x28 images: two
    # label-conditioned 500-unit softplus layers, then an l2-normalized
    # output layer producing a flat 784-dim image.
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-1'))
    gen_layers.append(ll.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=500, nonlinearity=ln.softplus, name='gen-2'), name='gen-3'))
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-4'))
    gen_layers.append(ll.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=500, nonlinearity=ln.softplus, name='gen-5'), name='gen-6'))
    gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-7'))
    gen_layers.append(nn.l2normalize(ll.DenseLayer(gen_layers[-1], num_units=28**2, nonlinearity=gen_final_non, name='gen-8')))

# outputs
# Generated images for random z and the symbolic labels; deterministic=False
# keeps stochastic layers (dropout/noise, if any) active.
gen_out_x = ll.get_output(gen_layers[-1], {gen_in_y:sym_y_g, gen_in_z:sym_z_rand}, deterministic=False)
# ===== Example #2 (Пример #2) — next scraped snippet =====
shared_unlabel = theano.shared(x_unlabelled, borrow=True)
slice_x_u_d = T.ivector()
slice_x_u_c = T.ivector()
slice_x_u_i = T.ivector()

classifier = build_network()

# generator y2x: p_g(x, y) = p(y) p_g(x | y) where x = G(z, y), z follows p_g(z)
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]
gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-30'))
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (None,3,32,32), (5,5), W=Normal(0.05), nonlinearity=gen_final_non, name='gen-31'), train_g=True, init_stdv=0.1, name='gen-32')) # 16 -> 32

# discriminator xy2p: test a pair of input comes from p(x, y) instead of p_c or p_g
dis_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
dis_in_y = ll.InputLayer(shape=(None,))
dis_layers = [dis_in_x]
dis_layers.append(ll.DropoutLayer(dis_layers[-1], p=0.2, name='dis-00'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-01'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-02'), name='dis-03'))
dis_layers.append(ConvConcatLayer([dis_layers[-1], dis_in_y], num_classes, name='dis-20'))
dis_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(dis_layers[-1], 32, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu, name='dis-21'), name='dis-22'))
# ===== Example #3 (Пример #3) — next scraped snippet =====
    feature = lasagne.layers.GlobalPoolLayer(l_cla[-1])  # NOTE(review): stray indent — this line was cut out of an enclosing block by the scrape; l_cla is defined elsewhere
# Linear classifier head on top of the globally pooled features.
classifier = lasagne.layers.DenseLayer(
    feature,
    num_units=num_classes,
    nonlinearity=lasagne.nonlinearities.identity,
    W=lasagne.init.Normal(1e-2, 0),
    name="classifier")

# encoder xy2z
# Label-conditioned convolutional encoder: y is concatenated before each of
# the nlayers_enc blocks built by the project helper `encodelayer` (its
# per-layer hyperparameters come from the dr/ps/nk/dk/nonlin/pad/str lists).
l_in_x = lasagne.layers.InputLayer((None, in_channels) + dim_input)
l_in_y = lasagne.layers.InputLayer((None, ))
l_enc = [
    l_in_x,
]
for i in xrange(nlayers_enc):  # Python 2 (`xrange`, print statements below)
    l_enc.append(ConvConcatLayer([l_enc[-1], l_in_y], num_classes))
    # encodelayer returns (layer, something-unused); only the layer is kept.
    l, _ = encodelayer(l_enc[-1], bn_dgm, dr_enc[i], ps_enc[i], nk_enc[i],
                       dk_enc[i], nonlin_enc[i], pad_enc[i], str_enc[i],
                       'ENC-' + str(i + 1), False, 0)
    l_enc.append(l)
    print lasagne.layers.get_output_shape(l_enc[-1])

# reshape
# Flatten the final conv feature map to a vector per sample.
after_conv_shape = lasagne.layers.get_output_shape(l_enc[-1])
after_conv_size = int(np.prod(after_conv_shape[1:]))
l_enc.append(lasagne.layers.FlattenLayer(l_enc[-1]))
print lasagne.layers.get_output_shape(l_enc[-1])

# compute parameters and sample z
# Linear head producing the mean of q(z|x, y); nz-dimensional.
l_mu = mlplayer(l_enc[-1], False, 0, nz, lasagne.nonlinearities.identity,
                'ENC-MU')
# ===== Example #4 (Пример #4) — next scraped snippet =====
# Conditional generator (continuation — gen_layers/gen_in_y are defined
# above this snippet in the original source): concatenate the label, project
# to 4x4x512, then upsample with stride-2 deconvolutions.
gen_layers.append(
    MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-5'))
gen_layers.append(
    nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                num_units=512 * 4 * 4,
                                nonlinearity=ln.linear,
                                name='gen-6'),
                  g=None,
                  name='gen-61'))

# Reshape the flat projection into a 512-channel 4x4 feature map.
gen_layers.append(
    ll.ReshapeLayer(gen_layers[-1], (-1, 512, 4, 4), name='gen-7'))

gen_layers.append(
    ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-8'))
# 4x4 -> 8x8 deconvolution.
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 256, 8, 8),
                                   filter_size=(4, 4),
                                   stride=(2, 2),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='gen-11'),
                  g=None,
                  name='gen-12'))

gen_layers.append(
    ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-9'))
# NOTE(review): the following statement is TRUNCATED by the scrape — the
# Deconv2DLayer call is never closed; the next lines belong to a different
# snippet. This fragment is not runnable as-is.
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 128, 16, 16),
                                   filter_size=(4, 4),
# New snippet begins mid-setup (gen_in_z is defined above it in the original
# source). Conditional generator: label concatenated before every stage,
# projection to 4x4x512, then 5x5 deconvolutions doubling the resolution.
gen_in_y = ll.InputLayer(shape=(None, ))
gen_layers = [gen_in_z]
gen_layers.append(
    MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
gen_layers.append(
    nn.batch_norm(ll.DenseLayer(gen_layers[-1],
                                num_units=4 * 4 * 512,
                                W=Normal(0.05),
                                nonlinearity=nn.relu,
                                name='gen-01'),
                  g=None,
                  name='gen-02'))
gen_layers.append(
    ll.ReshapeLayer(gen_layers[-1], (-1, 512, 4, 4), name='gen-03'))
gen_layers.append(
    ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 256, 8, 8), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='gen-11'),
                  g=None,
                  name='gen-12'))  # 4 -> 8
gen_layers.append(
    ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
# NOTE(review): the following statement is TRUNCATED by the scrape — the
# batch_norm call is never closed. This fragment is not runnable as-is.
gen_layers.append(
    nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None, 128, 16, 16), (5, 5),
                                   W=Normal(0.05),
                                   nonlinearity=nn.relu,
                                   name='gen-21'),
                  g=None,