Example No. 1
0
def discriminator(inp):
    """Two-headed discriminator: a patch-wise adversarial map and a scalar age output.

    Builds (or reuses, via the module-level ``reuse_dis`` flag) the layers
    under the 'discriminator' variable scope. Batch-norm mode follows the
    module-level ``bn_training`` flag. Returns ``(adv, age)``.
    """
    global reuse_dis, bn_training, blknum
    blknum = 0
    with tf.variable_scope('discriminator', reuse=reuse_dis):
        net = Model(inp)
        net.set_bn_training(bn_training)
        # Shared trunk: strided convolutions downsample the input.
        net.convLayer(7, 16, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 64
        net.convLayer(5, 32, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 32
        net.SelfAttention(4)
        # Keep the pre-batch-norm feature map so the age head can branch from it.
        feat = net.convLayer(5, 64, stride=2, activation=M.PARAM_LRELU)  # 16
        net.batch_norm()
        # Adversarial head: two more strided convs, then a 1-channel patch map.
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 8
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)
        adv = net.convLayer(3, 1)
        # Age head: restart from the shared feature map captured above.
        net.set_current_layer(feat)
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)
        block(net, 128, 1)  # 8
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)
        block(net, 256, 1)  # 4
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)
        block(net, 256, 1)  # 2
        net.flatten()
        age = net.fcLayer(1)
        # Subsequent calls share the same variables.
        reuse_dis = True
    return adv, age
Example No. 2
0
def discriminator_f(inp, id_num):
    """Feature-space discriminator with an adversarial head and an identity head.

    ``inp`` is flattened and shared by both heads; ``id_num`` is the number of
    identity classes for the classification head. Variable reuse is governed
    by the module-level ``reuse_dis_f`` flag. Returns ``(adv, ip)``.
    """
    global reuse_dis_f, bn_training
    with tf.variable_scope('dis_f', reuse=reuse_dis_f):
        net = Model(inp)
        net.set_bn_training(bn_training)
        net.flatten()
        # Remember the flattened features so the identity head can branch off.
        feat = net.get_current_layer()
        # Adversarial head: two hidden FC layers, then a single logit.
        net.fcLayer(512, activation=M.PARAM_LRELU, batch_norm=True)
        net.fcLayer(256, activation=M.PARAM_LRELU, batch_norm=True)
        adv = net.fcLayer(1)
        # Identity head: a single FC layer over the shared features.
        net.set_current_layer(feat)
        ip = net.fcLayer(id_num)
        reuse_dis_f = True
    return adv, ip
Example No. 3
0
def generator_att(inp):
    """Attention generator: encoder-decoder producing an attention mask and a color map.

    Downsamples ``inp`` twice, applies residual blocks with self-attention in
    the bottleneck, upsamples back to input resolution, then emits a sigmoid
    attention map ``A`` (1 channel) and a tanh color image ``C`` (3 channels)
    from the same decoder features. Returns ``(A, C)``.
    """
    global reuse_genatt, bn_training, blknum
    blknum = 0
    with tf.variable_scope('gen_att', reuse=reuse_genatt):
        net = Model(inp)
        net.set_bn_training(bn_training)
        # Encoder.
        net.convLayer(5, 32, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 64
        block(net, 64, 1)
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 32
        block(net, 128, 1)
        # Bottleneck: residual blocks around a residual self-attention layer.
        block(net, 256, 1)
        net.SelfAttention(64, residual=True)
        block(net, 256, 1)
        block(net, 128, 1)
        # Decoder.
        net.deconvLayer(3, 64, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 64
        block(net, 64, 1)
        # Shared decoder features feed both output heads.
        feat = net.deconvLayer(5, 64, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 128
        A = net.convLayer(5, 1, activation=M.PARAM_SIGMOID)  # output_attention
        net.set_current_layer(feat)
        C = net.convLayer(5, 3, activation=M.PARAM_TANH)
        reuse_genatt = True
    return A, C
Example No. 4
0
def generator_att(inp):
    """Upsampling generator: decodes ``inp`` into an attention map and a color map.

    A stack of strided deconvolutions (with a self-attention layer partway up)
    progressively upsamples the input; the final feature map feeds two heads —
    a sigmoid map ``A`` (3 channels) and a tanh color image ``C`` (3 channels).
    Variable reuse is governed by the module-level ``reuse_genatt`` flag.
    Returns ``(A, C)``.
    """
    global reuse_genatt, bn_training
    with tf.variable_scope('gen_att', reuse=reuse_genatt):
        net = Model(inp)
        net.set_bn_training(bn_training)
        # Progressive upsampling; trailing comments note the spatial size.
        net.deconvLayer(3, 512, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 4
        net.deconvLayer(3, 256, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 8
        net.deconvLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 16
        net.SelfAttention(32)
        net.deconvLayer(5, 64, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 32
        net.deconvLayer(5, 32, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 64
        # Final shared features feed both output heads.
        feat = net.deconvLayer(5, 16, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 128
        A = net.convLayer(5, 3, activation=M.PARAM_SIGMOID)  # output_attention
        net.set_current_layer(feat)
        C = net.convLayer(5, 3, activation=M.PARAM_TANH)
        reuse_genatt = True
    return A, C