Example No. 1
    def get_discriminator(self):
        ''' specify the discriminator on x (the triple-quoted block below is a
        disabled D0 variant kept for reference) '''
        """
        disc0_layers = [LL.InputLayer(shape=(self.args.batch_size, 3, 32, 32))]
        disc0_layers.append(LL.GaussianNoiseLayer(disc0_layers[-1], sigma=0.05))
        disc0_layers.append(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 16x16
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, W=Normal(0.02), nonlinearity=nn.lrelu)))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.02), nonlinearity=nn.lrelu))) # 8x8
        disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
        disc0_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc0_layers[-1], 192, (3,3), pad=0, W=Normal(0.02), nonlinearity=nn.lrelu))) # 6x6
        disc0_layer_shared = LL.NINLayer(disc0_layers[-1], num_units=192, W=Normal(0.02), nonlinearity=nn.lrelu) # 6x6
        disc0_layers.append(disc0_layer_shared)

        disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared, num_units=50, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_z_recon) # also need to recover z from x

        disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
        disc0_layer_adv = LL.DenseLayer(disc0_layers[-1], num_units=10, W=Normal(0.02), nonlinearity=None)
        disc0_layers.append(disc0_layer_adv)

        return disc0_layers, disc0_layer_adv, disc0_layer_z_recon
        """
        disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
        disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
        disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
        disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))
        disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
        disc_x_layers.append(disc_x_layers_shared)

        disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=self.args.z0dim, nonlinearity=None)
        disc_x_layers.append(disc_x_layer_z_recon) # also need to recover z from x

        # disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
        disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
        disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
        disc_x_layers.append(disc_x_layer_adv)

        #output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
        #output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)

        # temp = LL.get_output(gen_x_layers[-1], deterministic=False, init=True)
        # temp = LL.get_output(disc_x_layers[-1], x, deterministic=False, init=True)
        # init_updates = [u for l in LL.get_all_layers(gen_x_layers)+LL.get_all_layers(disc_x_layers) for u in getattr(l,'init_updates',[])]
        return disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon
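The commented-out lines above hint at how the returned handles are consumed. A minimal usage sketch, assuming a `model` instance, a symbolic input `x`, and the usual imports (none of which are part of the snippet):

import theano.tensor as T
import lasagne.layers as LL

# assumed: `model` is an instance of the class this method belongs to
disc_x_layers, disc_x_layer_adv, disc_x_layer_z_recon = model.get_discriminator()

x = T.tensor4()  # minibatch of real images, shape (batch, 3, 32, 32)
logits = LL.get_output(disc_x_layer_adv, x, deterministic=False)        # pre-softmax class scores
z_recon = LL.get_output(disc_x_layer_z_recon, x, deterministic=False)   # reconstructed latent code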
Example No. 2
def build_1Dregression_v1(input_var=None,
                          input_width=None,
                          nin_units=12,
                          h_num_units=[64, 64],
                          h_grad_clip=1.0,
                          output_width=1):
    """
    A stacked bidirectional RNN network for regression, alternating
    with dense layers and merging of the two directions, followed by
    a feature mean pooling in the time direction, with a linear
    dim-reduction layer at the start
    
    Args:
        input_var (theano 3-tensor): minibatch of input sequence vectors
        input_width (int): length of input sequences
        nin_units (list): number of NIN features
        h_num_units (int list): no. of units in hidden layer in each stack
                                from bottom to top
        h_grad_clip (float): gradient clipping maximum value 
        output_width (int): size of output layer (e.g. =1 for 1D regression)
    Returns:
        output layer (Lasagne layer object)
    """

    # Non-linearity hyperparameter
    nonlin = lasagne.nonlinearities.LeakyRectify(leakiness=0.15)

    # Input layer
    l_in = LL.InputLayer(shape=(None, 22, input_width), input_var=input_var)
    batchsize = l_in.input_var.shape[0]

    # NIN-layer
    l_in = LL.NINLayer(l_in,
                       num_units=nin_units,
                       nonlinearity=lasagne.nonlinearities.linear)

    l_in_1 = LL.DimshuffleLayer(l_in, (0, 2, 1))

    # RNN layers
    for h in h_num_units:
        # Forward layers
        l_forward_0 = LL.RecurrentLayer(l_in_1,
                                        nonlinearity=nonlin,
                                        num_units=h,
                                        backwards=False,
                                        learn_init=True,
                                        grad_clipping=h_grad_clip,
                                        unroll_scan=True,
                                        precompute_input=True)

        l_forward_0a = LL.ReshapeLayer(l_forward_0, (-1, h))
        l_forward_0b = LL.DenseLayer(l_forward_0a,
                                     num_units=h,
                                     nonlinearity=nonlin)
        l_forward_0c = LL.ReshapeLayer(l_forward_0b,
                                       (batchsize, input_width, h))

        # Backward layers
        l_backward_0 = LL.RecurrentLayer(l_in_1,
                                         nonlinearity=nonlin,
                                         num_units=h,
                                         backwards=True,
                                         learn_init=True,
                                         grad_clipping=h_grad_clip,
                                         unroll_scan=True,
                                         precompute_input=True)

        l_backward_0a = LL.ReshapeLayer(l_backward_0, (-1, h))
        l_backward_0b = LL.DenseLayer(l_backward_0a,
                                      num_units=h,
                                      nonlinearity=nonlin)
        l_backward_0c = LL.ReshapeLayer(l_backward_0b,
                                        (batchsize, input_width, h))

        l_in_1 = LL.ElemwiseSumLayer([l_forward_0c, l_backward_0c])

    # Output layers
    network_0a = LL.ReshapeLayer(l_in_1, (-1, h_num_units[-1]))
    network_0b = LL.DenseLayer(network_0a,
                               num_units=output_width,
                               nonlinearity=nonlin)
    network_0c = LL.ReshapeLayer(network_0b,
                                 (batchsize, input_width, output_width))

    output_net_1 = LL.FlattenLayer(network_0c, outdim=2)
    output_net_2 = LL.FeaturePoolLayer(output_net_1,
                                       pool_size=input_width,
                                       pool_function=T.mean)

    return output_net_2
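A minimal usage sketch (the sequence length of 128 and the imports are assumptions, not part of the snippet): build the network and compile a deterministic prediction function.

import theano
import theano.tensor as T
import lasagne.layers as LL

X = T.tensor3()  # minibatch of sequences, shape (batch, 22, input_width)
network = build_1Dregression_v1(input_var=X, input_width=128)  # 128 is an assumed length
prediction = LL.get_output(network, deterministic=True)
predict_fn = theano.function([X], prediction)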
Example No. 3
                                     nonlinearity=nn.lrelu,
                                     name='gz2'),
                  g=None))
genz_layers.append(
    nn.batch_norm(dnn.Conv2DDNNLayer(genz_layers[-1],
                                     512, (3, 3),
                                     pad=1,
                                     stride=2,
                                     W=Normal(0.05),
                                     nonlinearity=nn.lrelu,
                                     name='gz2'),
                  g=None))
genz_layers.append(
    nn.batch_norm(ll.NINLayer(genz_layers[-1],
                              num_units=512,
                              W=Normal(0.05),
                              nonlinearity=nn.lrelu,
                              name='gz5'),
                  g=None))
genz_layers.append(ll.GlobalPoolLayer(genz_layers[-1], name='gz6'))
genz_layers.append(
    ll.DenseLayer(genz_layers[-1],
                  num_units=100,
                  W=Normal(0.05),
                  nonlinearity=lasagne.nonlinearities.sigmoid,
                  name='gz7'))

# specify discriminative model

# for z
discz_layers = [z_input]
Example No. 4
gen_layers.append(nn.weight_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,3,32,32), (5,5), W=Normal(0.05), nonlinearity=T.tanh), train_g=True, init_stdv=0.1)) # 16 -> 32
gen_dat = ll.get_output(gen_layers[-1])

# specify discriminative model
disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(nn.weight_norm(dnn.Conv2DDNNLayer(disc_layers[-1], 192, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=16, W=Normal(0.05), nonlinearity=None), train_g=True, init_stdv=0.1))
disc_params = ll.get_all_params(disc_layers, trainable=True)

x_temp = T.tensor4()

temp = ll.get_output(gen_layers[-1], deterministic=False, init=True)
temp = ll.get_output(disc_layers[-1], x_temp, deterministic=False, init=True)
init_updates = [u for l in gen_layers+disc_layers for u in getattr(l,'init_updates',[])]

init_param = th.function(inputs=[x_temp], outputs=None, updates=init_updates)

# costs
labels = T.ivector()
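The snippet stops right after declaring the label vector. A hedged sketch of a typical continuation (the training array `x_train` and the plain softmax cross-entropy are assumptions, not the original cost): trigger the one-off data-dependent initialization, then define a labeled-data cost on the discriminator logits.

init_param(x_train[:500])  # assumed x_train; one-time init of the weight-norm scales

output_lab = ll.get_output(disc_layers[-1], x_temp, deterministic=False)
loss_lab = T.mean(T.nnet.categorical_crossentropy(T.nnet.softmax(output_lab), labels))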
Example No. 5
# classifier x2y: p_c(x, y) = p(x) p_c(y | x)
cla_in_x = ll.InputLayer(shape=(None, in_channels) + dim_input)
cla_layers = [cla_in_x]
cla_layers.append(ll.DropoutLayer(cla_layers[-1], p=0.2, name='cla-00'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-02'), name='cla-03'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-11'), name='cla-12'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 128, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-21'), name='cla-22'))
cla_layers.append(dnn.MaxPool2DDNNLayer(cla_layers[-1], pool_size=(2, 2)))
cla_layers.append(ll.DropoutLayer(cla_layers[-1], p=0.5, name='cla-23'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-31'), name='cla-32'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-41'), name='cla-42'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 256, (3,3), pad=1, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-51'), name='cla-52'))
cla_layers.append(dnn.MaxPool2DDNNLayer(cla_layers[-1], pool_size=(2, 2)))
cla_layers.append(ll.DropoutLayer(cla_layers[-1], p=0.5, name='cla-53'))
cla_layers.append(ll.batch_norm(dnn.Conv2DDNNLayer(cla_layers[-1], 512, (3,3), pad=0, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-61'), name='cla-62'))
cla_layers.append(ll.batch_norm(ll.NINLayer(cla_layers[-1], num_units=256, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-71'), name='cla-72'))
cla_layers.append(ll.batch_norm(ll.NINLayer(cla_layers[-1], num_units=128, W=Normal(0.05), nonlinearity=nn.lrelu, name='cla-81'), name='cla-82'))
cla_layers.append(ll.GlobalPoolLayer(cla_layers[-1], name='cla-83'))
cla_layers.append(ll.batch_norm(ll.DenseLayer(cla_layers[-1], num_units=num_classes, W=Normal(0.05), nonlinearity=ln.softmax, name='cla-91'), name='cla-92'))
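A hedged usage sketch (the symbolic `sym_x` is an assumption, not part of the snippet): the classifier stack ends in a softmax DenseLayer, so its output is the conditional p_c(y | x) from the comment above.

sym_x = T.tensor4()  # assumed symbolic minibatch of images
cla_out_y = ll.get_output(cla_layers[-1], {cla_in_x: sym_x}, deterministic=False)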

# generator y2x: p_g(x, y) = p(y) p_g(x | y) where x = G(z, y), z follows p_g(z)
gen_in_z = ll.InputLayer(shape=(None, n_z))
gen_in_y = ll.InputLayer(shape=(None,))
gen_layers = [gen_in_z]
gen_layers.append(MLPConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-00'))
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu, name='gen-01'), g=None, name='gen-02'))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (-1,512,4,4), name='gen-03'))
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-10'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-11'), g=None, name='gen-12')) # 4 -> 8
gen_layers.append(ConvConcatLayer([gen_layers[-1], gen_in_y], num_classes, name='gen-20'))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (None,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu, name='gen-21'), g=None, name='gen-22')) # 8 -> 16
Example No. 6
disc0_layers.append(
    nn.batch_norm(
        dnn.Conv2DDNNLayer(disc0_layers[-1],
                           192, (3, 3),
                           pad=1,
                           stride=2,
                           W=Normal(0.02),
                           nonlinearity=nn.lrelu)))  # 8x8
disc0_layers.append(LL.DropoutLayer(disc0_layers[-1], p=0.1))
disc0_layers.append(
    nn.batch_norm(
        dnn.Conv2DDNNLayer(disc0_layers[-1],
                           192, (3, 3),
                           pad=0,
                           W=Normal(0.02),
                           nonlinearity=nn.lrelu)))  # 6x6
disc0_layer_shared = LL.NINLayer(disc0_layers[-1],
                                 num_units=192,
                                 W=Normal(0.02),
                                 nonlinearity=nn.lrelu)  # 6x6
disc0_layers.append(disc0_layer_shared)

disc0_layer_z_recon = LL.DenseLayer(disc0_layer_shared,
                                    num_units=16,
                                    W=Normal(0.02),
                                    nonlinearity=None)
disc0_layers.append(disc0_layer_z_recon)  # also need to recover z from x

disc0_layers.append(LL.GlobalPoolLayer(disc0_layer_shared))
disc0_layer_adv = LL.DenseLayer(disc0_layers[-1],
                                num_units=10,
                                W=Normal(0.02),
                                nonlinearity=None)
disc0_layers.append(disc0_layer_adv)
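A hedged sketch (the symbolic `x`, `z`, and the squared-error form are assumptions, not part of the snippet): the z-reconstruction head is typically trained to recover the latent code behind each image, as the comment above notes.

x = T.tensor4()   # assumed images
z = T.matrix()    # assumed latent codes, 16-dimensional to match disc0_layer_z_recon
z_recon = LL.get_output(disc0_layer_z_recon, x, deterministic=False)
recon_loss = T.mean(T.sum(T.sqr(z_recon - z), axis=1))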
Example No. 7
def get_discriminator_binary():

    disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               192, (3, 3),
                               pad=0,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=192,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=192,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
    disc_layers.append(
        nn.weight_norm(ll.DenseLayer(disc_layers[-1],
                                     num_units=2,
                                     W=Normal(0.05),
                                     nonlinearity=None),
                       train_g=True,
                       init_stdv=0.1))
    disc_layers.append(
        ll.DenseLayer(disc_layers[-2],
                      num_units=128,
                      W=Normal(0.05),
                      nonlinearity=T.nnet.sigmoid))

    return disc_layers
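A hedged usage sketch (the symbolic `x` is an assumption): after the final append, disc_layers[-2] holds the 2-way real/fake logits and disc_layers[-1] a 128-dimensional sigmoid feature vector, both built on the globally pooled features.

x = T.tensor4()
disc_layers = get_discriminator_binary()
logits, features = ll.get_output([disc_layers[-2], disc_layers[-1]], x, deterministic=False)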
Example No. 8
                           pad=1,
                           stride=2,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           512, (3, 3),
                           pad=0,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        ll.NINLayer(disc_layers[-1],
                    num_units=256,
                    W=Normal(0.05),
                    nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        ll.NINLayer(disc_layers[-1],
                    num_units=128,
                    W=Normal(0.05),
                    nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(
    nn.weight_norm(ll.DenseLayer(disc_layers[-1],
                                 num_units=10,
                                 W=Normal(0.05),
                                 nonlinearity=None),
                   train_g=True,
                   init_stdv=0.1))
Example No. 9
                           stride=2,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
disc_layers.append(
    nn.weight_norm(
        dnn.Conv2DDNNLayer(disc_layers[-1],
                           disc_dim * 4, (3, 3),
                           pad=0,
                           stride=2,
                           W=Normal(0.05),
                           nonlinearity=nn.lrelu)))
disc_layers.append(
    nn.weight_norm(
        ll.NINLayer(disc_layers[-1],
                    num_units=disc_dim * 4,
                    W=Normal(0.05),
                    nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.MinibatchLayer(disc_layers[-1], num_kernels=50))
disc_layers.append(
    nn.weight_norm(ll.DenseLayer(disc_layers[-1],
                                 num_units=1,
                                 W=Normal(0.05),
                                 nonlinearity=None),
                   train_g=True,
                   init_stdv=0.1))

# costs
x_lab = T.tensor4()
temp = ll.get_output(gen_layers[-1], deterministic=False, init=True)
temp = ll.get_output(disc_layers[-1], x_lab, deterministic=False, init=True)
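A hedged continuation sketch (the `th` alias for theano follows Example No. 4 and is not shown in this snippet): the init-mode outputs requested above only serve to trigger the data-dependent weight-norm initialization, which is collected and compiled into a one-off init function.

init_updates = [u for l in gen_layers + disc_layers for u in getattr(l, 'init_updates', [])]
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)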
Example No. 10
# disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers[-1],))
# #disc_x_layers.append(nn.MinibatchLayer(disc_x_layers[-1], num_kernels=100))
# disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
# disc_x_layers.append(disc_x_layer_adv)

# specify discriminative model
disc_x_layers = [LL.InputLayer(shape=(None, 3, 32, 32))]
disc_x_layers.append(LL.GaussianNoiseLayer(disc_x_layers[-1], sigma=0.2))
disc_x_layers.append(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 96, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=1, stride=2, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers.append(LL.DropoutLayer(disc_x_layers[-1], p=0.5))
disc_x_layers.append(nn.batch_norm(dnn.Conv2DDNNLayer(disc_x_layers[-1], 192, (3,3), pad=0, W=Normal(0.01), nonlinearity=nn.lrelu)))
disc_x_layers_shared = LL.NINLayer(disc_x_layers[-1], num_units=192, W=Normal(0.01), nonlinearity=nn.lrelu)
disc_x_layers.append(disc_x_layers_shared)

disc_x_layer_z_recon = LL.DenseLayer(disc_x_layers_shared, num_units=50, nonlinearity=None)
disc_x_layers.append(disc_x_layer_z_recon) # also need to recover z from x

# disc_x_layers.append(nn.MinibatchLayer(disc_x_layers_shared, num_kernels=100))
disc_x_layers.append(LL.GlobalPoolLayer(disc_x_layers_shared))
disc_x_layer_adv = LL.DenseLayer(disc_x_layers[-1], num_units=10, W=Normal(0.01), nonlinearity=None)
disc_x_layers.append(disc_x_layer_adv)

#output_before_softmax_x = LL.get_output(disc_x_layer_adv, x, deterministic=False)
#output_before_softmax_gen = LL.get_output(disc_x_layer_adv, gen_x, deterministic=False)

# temp = LL.get_output(gen_x_layers[-1], deterministic=False, init=True)
# temp = LL.get_output(disc_x_layers[-1], x, deterministic=False, init=True)
Example No. 11
def get_discriminator_brown(num_feature=256):

    disc_layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.2))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               96, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=1,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=1,
                               stride=2,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(ll.DropoutLayer(disc_layers[-1], p=0.5))
    disc_layers.append(
        nn.weight_norm(
            dnn.Conv2DDNNLayer(disc_layers[-1],
                               128, (3, 3),
                               pad=0,
                               W=Normal(0.05),
                               nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=num_feature,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(
        nn.weight_norm(
            ll.NINLayer(disc_layers[-1],
                        num_units=128,
                        W=Normal(0.05),
                        nonlinearity=nn.lrelu)))
    disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
    disc_layers.append(
        nn.weight_norm(ll.DenseLayer(disc_layers[-1],
                                     num_units=2,
                                     W=Normal(0.05),
                                     nonlinearity=None),
                       train_g=True,
                       init_stdv=0.1))
    #disc_layers.append(ll.ReshapeLayer(disc_layers[-4], ([0], -1)))
    #disc_layers.append(ll.GlobalPoolLayer(disc_layers[-4]))
    disc_layer_features_low_dim = -4
    disc_layer_features_high_dim = -5

    return disc_layers, disc_layer_features_low_dim, disc_layer_features_high_dim
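A hedged usage sketch (the symbolic `x` is an assumption): the negative indices returned above select intermediate layers whose activations can serve as feature descriptors.

x = T.tensor4()
disc_layers, idx_low, idx_high = get_discriminator_brown()
feat_low = ll.get_output(disc_layers[idx_low], x, deterministic=True)    # NIN features (num_feature units)
feat_high = ll.get_output(disc_layers[idx_high], x, deterministic=True)  # preceding conv feature maps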
Example No. 12
layers.append(
    batch_norm(
        dnn.Conv2DDNNLayer(layers[-1], 192, (3, 3), pad=1, nonlinearity=f)))
layers.append(
    batch_norm(
        dnn.Conv2DDNNLayer(layers[-1], 192, (3, 3), pad=1, nonlinearity=f)))
layers.append(
    batch_norm(
        dnn.Conv2DDNNLayer(layers[-1], 192, (3, 3), pad=1, nonlinearity=f)))
layers.append(ll.MaxPool2DLayer(layers[-1], 2))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(
    batch_norm(
        dnn.Conv2DDNNLayer(layers[-1], 192, (3, 3), pad=0, nonlinearity=f)))
layers.append(
    batch_norm(ll.NINLayer(layers[-1], num_units=192, nonlinearity=f)))
layers.append(
    batch_norm(ll.NINLayer(layers[-1], num_units=192, nonlinearity=f)))
layers.append(nn.GlobalAvgLayer(layers[-1]))
layers.append(
    batch_norm(ll.DenseLayer(layers[-1], num_units=10, nonlinearity=None)))

# discriminative cost & updates
output_before_softmax = ll.get_output(layers[-1], x)
y = T.ivector()
cost = nn.softmax_loss(y, output_before_softmax)
train_err = T.mean(T.neq(T.argmax(output_before_softmax, axis=1), y))
params = ll.get_all_params(layers, trainable=True)
lr = T.scalar()
mom1 = T.scalar()
param_updates = nn.adam_updates(params, cost, lr=lr, mom1=mom1)
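A hedged sketch (the `th` alias follows Example No. 4, and `x` is defined earlier in the original file): compile the training step that applies the Adam updates defined above.

train_batch = th.function(inputs=[x, y, lr, mom1],
                          outputs=[cost, train_err],
                          updates=param_updates)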
Example No. 13
    raise NotImplementedError('incorrect norm type')

layers = [ll.InputLayer(shape=(None, 3, 32, 32))]
layers.append(ll.GaussianNoiseLayer(layers[-1], sigma=0.15))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 96, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(ll.MaxPool2DLayer(layers[-1], 2))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=1, nonlinearity=nn.lrelu)))
layers.append(ll.MaxPool2DLayer(layers[-1], 2))
layers.append(ll.DropoutLayer(layers[-1], p=0.5))
layers.append(normalizer(dnn.Conv2DDNNLayer(layers[-1], 192, (3,3), pad=0, nonlinearity=nn.lrelu)))
layers.append(normalizer(ll.NINLayer(layers[-1], num_units=192, nonlinearity=nn.lrelu)))
layers.append(normalizer(ll.NINLayer(layers[-1], num_units=192, nonlinearity=nn.lrelu)))
layers.append(nn.GlobalAvgLayer(layers[-1]))
layers.append(normalizer(ll.DenseLayer(layers[-1], num_units=10, nonlinearity=None)))

# initialization
x = T.tensor4()
temp = ll.get_output(layers[-1], x, init=True)
init_updates = [u for l in layers for u in getattr(l,'init_updates',[])]

# discriminative cost & updates
output_before_softmax = ll.get_output(layers[-1], x)
bn_updates = [u for l in layers for u in getattr(l,'bn_updates',[])]
y = T.ivector()
cost = nn.softmax_loss(y, output_before_softmax)
train_err = T.mean(T.neq(T.argmax(output_before_softmax,axis=1),y))
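A hedged continuation sketch (the snippet stops here; the `th` alias, the momentum value, and the exact wiring of the updates are assumptions): a typical next step mirrors Example No. 12, feeding the cost into Adam updates and also applying the collected batch-norm statistic updates.

params = ll.get_all_params(layers, trainable=True)
lr = T.scalar()
param_updates = nn.adam_updates(params, cost, lr=lr, mom1=0.5)  # mom1=0.5 is an assumed value
train_fn = th.function(inputs=[x, y, lr], outputs=[cost, train_err],
                       updates=param_updates + bn_updates)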