def age_classify(inp, age_size):
    """Age-classification head: flatten `inp` and map it to `age_size` logits.

    Variables live in scope 'age_cls'; the module-global `reuse_agecls` flag
    is flipped after the first call so later calls reuse the same weights.
    """
    global reuse_agecls, bn_training
    with tf.variable_scope('age_cls', reuse=reuse_agecls):
        net = Model(inp)
        net.set_bn_training(bn_training)
        net.flatten()
        # two hidden FC layers, then a linear logit layer
        net.fcLayer(512, activation=M.PARAM_LRELU)
        net.fcLayer(256, activation=M.PARAM_LRELU)
        net.fcLayer(age_size)
        reuse_agecls = True
    return net.get_current_layer()
def age_encoder(inp, ind):
    """Age-conditioned encoder branch for decoder index `ind`.

    Builds (or reuses) variables in a per-index scope 'decoder<ind>',
    tracked via the module-global `reuse_age_enc` dict.
    Returns the current layer of the built sub-network.
    """
    global reuse_age_enc
    name = 'decoder' + str(ind)
    # Idiom fix: the original used `if not name in ...` with a 4-branch
    # if/else; a plain membership test expresses the same reuse decision.
    reuse = name in reuse_age_enc
    with tf.variable_scope(name, reuse=reuse):
        mod = Model(inp)
        mod.fcLayer(2 * 2 * 512, activation=M.PARAM_RELU)
        mod.SelfAttention(is_fc=True, residual=True)
        # mark this scope as built so the next call reuses its variables
        reuse_age_enc[name] = 1
    return mod.get_current_layer()
def discriminator_f(inp, id_num):
    """Feature-space discriminator with two heads.

    Returns (adv, ip): a 1-unit adversarial logit and `id_num` identity
    logits, both branching from the flattened input features.
    """
    global reuse_dis_f, bn_training
    with tf.variable_scope('dis_f', reuse=reuse_dis_f):
        net = Model(inp)
        net.set_bn_training(bn_training)
        net.flatten()
        feat = net.get_current_layer()
        # adversarial head
        net.fcLayer(512, activation=M.PARAM_LRELU, batch_norm=True)
        net.fcLayer(256, activation=M.PARAM_LRELU, batch_norm=True)
        adv = net.fcLayer(1)
        # identity head branches from the flattened features
        net.set_current_layer(feat)
        ip = net.fcLayer(id_num)
        reuse_dis_f = True
    return adv, ip
def attention_blk(features):
    """Query-attention block: attend a learned query vector over `features`.

    `features` is a tensor whose last dimension is the feature size
    (it is queried via `get_shape()` below). Returns the attended output.
    """
    global reuse_att
    with tf.variable_scope('attention_blk', reuse=reuse_att):
        f_dim = features.get_shape().as_list()[-1]
        # learned query, tiled to one copy per batch element
        q0 = tf.get_variable('q0', [1, f_dim],
                             initializer=tf.random_normal_initializer(),
                             dtype=tf.float32)
        # BUG FIX: original used tf.shape(features[0]), which is the shape
        # *vector* of the first batch element; tf.tile's multiples argument
        # needs the scalar batch size, i.e. tf.shape(features)[0].
        BSIZE = tf.shape(features)[0]
        q0 = tf.tile(q0, [BSIZE, 1])
        mod = Model(q0)
        mod.QAttention(features)
        mod.fcLayer(f_dim, activation=M.PARAM_TANH)
        mod.QAttention(features)
        reuse_att = True
    return mod.get_current_layer()
def discriminator(inp, age_size):
    """Image discriminator with a shared conv trunk and two heads.

    Returns (adv, age): a patch-wise adversarial map from a shallow conv
    head, and `age_size` logits from a deeper residual-block head that
    branches off the trunk at the 16x16 stage.
    """
    global reuse_dis, bn_training, blknum
    blknum = 0  # reset the block counter used by block() for scope naming
    with tf.variable_scope('discriminator', reuse=reuse_dis):
        net = Model(inp)
        net.set_bn_training(bn_training)
        # shared convolutional trunk (spatial size noted per layer)
        net.convLayer(7, 16, stride=2, activation=M.PARAM_LRELU, batch_norm=True)   # 64
        net.convLayer(5, 32, stride=2, activation=M.PARAM_LRELU, batch_norm=True)   # 32
        net.SelfAttention(4)
        feat = net.convLayer(5, 64, stride=2, activation=M.PARAM_LRELU)             # 16
        net.batch_norm()
        # adversarial head
        net.convLayer(3, 128, stride=2, activation=M.PARAM_LRELU, batch_norm=True)  # 8
        adv = net.convLayer(3, 1)
        # age head: residual blocks branching from `feat`
        net.set_current_layer(feat)
        block(net, 128, 1)
        block(net, 128, 2)   # 8
        block(net, 256, 1)
        net.SelfAttention(32)
        block(net, 256, 1)
        block(net, 256, 2)   # 4
        block(net, 256, 1)
        net.SelfAttention(32)
        block(net, 256, 2)   # 2
        block(net, 256, 1)
        net.flatten()
        net.fcLayer(512, activation=M.PARAM_LRELU)
        age = net.fcLayer(age_size)
        reuse_dis = True
    return adv, age
def discriminator_feature(inp):
    """Small FC discriminator over feature vectors; one real/fake logit."""
    global reuse_dis2, bn_training
    with tf.variable_scope('discriminator_feature', reuse=reuse_dis2):
        net = Model(inp)
        net.set_bn_training(bn_training)
        # two batch-normed hidden layers, then the scalar logit
        net.fcLayer(256, activation=M.PARAM_LRELU, batch_norm=True)
        net.fcLayer(64, activation=M.PARAM_LRELU, batch_norm=True)
        net.fcLayer(1)
        reuse_dis2 = True
    return net.get_current_layer()