Example #1
    def build_loss_adv2(self, adv2, adv2_real, grad_penalty):
        adv2_loss1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adv2, labels=tf.zeros_like(adv2)))
        adv2_loss2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adv2_real, labels=tf.ones_like(adv2_real)))
        adv2_loss_d = 0.5 * (adv2_loss1 + adv2_loss2)

        adv2_loss_g = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=adv2,
                                                    labels=tf.ones_like(adv2)))

        # adv2_loss_g = -tf.reduce_mean(adv2)
        # adv2_loss_d = -tf.reduce_mean(adv2_real) + tf.reduce_mean(adv2)

        # adv2_loss_d += grad_penalty
        print('-----build loss finished, start optimizer')

        train_adv2_g = tf.train.AdamOptimizer(0.00005, beta1=0.5).minimize(
            adv2_loss_g, var_list=M.get_all_vars('gen_att'))
        print('-----opt _g')
        with tf.variable_scope('adam_adv2_d', reuse=tf.AUTO_REUSE):
            train_adv2_d = tf.train.AdamOptimizer(0.00005, beta1=0.5).minimize(
                adv2_loss_d, var_list=M.get_all_vars('discriminator'))
        print('-----opt _d')

        with tf.control_dependencies([train_adv2_d, train_adv2_g]):
            train_adv2 = tf.no_op()

        return adv2_loss_d, adv2_loss_g, train_adv2
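The commented-out lines above sketch a Wasserstein alternative to the sigmoid cross-entropy losses, and the otherwise unused grad_penalty argument matches the gradient-penalty block that is commented out in Example #13. A minimal illustration of how that branch could be wired, assuming the same tensor names; a sketch under those assumptions, not the author's shipped code:

    def build_loss_adv2_wgan(adv2, adv2_real, generated, real, discriminator_fn):
        # Wasserstein critic/generator losses (the commented alternative above)
        adv2_loss_g = -tf.reduce_mean(adv2)
        adv2_loss_d = -tf.reduce_mean(adv2_real) + tf.reduce_mean(adv2)

        # gradient penalty on a random interpolate (the block commented out in
        # Example #13): push the critic's gradient norm toward 1
        gamma = tf.random_uniform([], 0.0, 1.0)
        interp = gamma * generated + (1. - gamma) * real
        interp_y = discriminator_fn(interp)
        grad = tf.gradients(interp_y, interp)[0]
        grad_norm = tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3]))
        adv2_loss_d += tf.reduce_mean(tf.square(grad_norm - 1.) * 10.)
        return adv2_loss_d, adv2_loss_g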
Example #2
	def __init__(self,class_num):
		img_holder = tf.placeholder(tf.float32,[None,460,460,3])
		with tf.variable_scope('bg_fg'):
			net_bgfg = network_bg_fg(img_holder)
		with tf.variable_scope('seg_part'):
			# softmax over the channel axis, then take the foreground probability
			fg_prob = tf.nn.softmax(
				tf.image.resize_images(net_bgfg.seg_layer, [460, 460]), -1)[:, :, :, 1]
			net_seg = network_seg(class_num, img_holder, fg_prob)
		with tf.variable_scope('inst_part'):
			net_inst = network_inst(class_num, img_holder, fg_prob,
				tf.image.resize_images(net_seg.seg_layer, [460, 460]))
		
		self.network_bg_fg = net_bgfg
		self.network_seg = net_seg
		self.network_inst = net_inst

		self.img_holder = img_holder
		self.mask_holder = net_bgfg.lab_holder
		self.seg_holder = net_seg.lab_holder
		self.coord_holder = net_inst.coord_holder
		self.inst_holder = net_inst.inst_holder

		self.mask_out = net_bgfg.seg_layer
		self.seg_out = net_seg.seg_layer
		self.inst_num_out = net_inst.inst_layer
		self.coord_out = net_inst.coord_layer

		self.build_loss()

		self.sess = tf.Session()
		M.loadSess('./savings_bgfg/',sess=self.sess,init=True,var_list=M.get_all_vars('bg_fg'))
		M.loadSess('./savings_seg/',sess=self.sess,var_list=M.get_all_vars('seg_part'))
		M.loadSess('./savings_inst/',sess=self.sess,var_list=M.get_all_vars('inst_part'))
Example #3
    def build_loss_ae(self, age_pred, age_pred_real):
        self.age_cls_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=age_pred_real,
                                                    labels=self.age_holder))
        self.age_generate_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=age_pred,
                                                    labels=self.age_holder))

        train_classifier = tf.train.AdamOptimizer(0.0001).minimize(
            self.age_cls_loss, var_list=M.get_all_vars('discriminator'))
        train_generator = tf.train.AdamOptimizer(0.0001).minimize(
            self.age_generate_loss, var_list=M.get_all_vars('gen_att'))

        with tf.control_dependencies([train_classifier, train_generator]):
            self.train_ae = tf.no_op()
Example #4
 def build_loss_A(self, A):
     # total-variation smoothness term plus an L2 penalty on the attention map A
     tv_loss = tf.reduce_mean(tf.image.total_variation(A))
     l2_reg = tf.reduce_mean(tf.square(A))
     loss_A = tv_loss / (128. * 128 * 10) + l2_reg * 1.0
     train_A = tf.train.AdamOptimizer(0.00006, beta1=0.5).minimize(
         loss_A, var_list=M.get_all_vars('gen_att'))
     return loss_A, train_A
Example #5
    def build_loss_ae_dis(self, age_pred, age_holder):
        # age_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=age_pred,labels=age_holder))
        age_loss = tf.reduce_mean(tf.abs(age_pred - age_holder))

        train_ae = tf.train.AdamOptimizer(0.0001, beta1=0.5).minimize(
            age_loss, var_list=M.get_all_vars('discriminator'))
        return age_loss, train_ae
Example #6
    def build_loss_ai2(self, ai2, age_size):
        self.ai2_cls_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=ai2,
                                                    labels=self.age_holder))
        # push the encoder output toward a uniform distribution over the
        # age_size classes (every label entry is 1 / age_size)
        self.ai2_enc_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=ai2,
                                                    labels=tf.ones_like(ai2) /
                                                    age_size))

        # note: the learning rate is literally 0.0, so these minimize ops
        # compute gradients but leave the trainable variables unchanged
        train_cls = tf.train.AdamOptimizer(0.000).minimize(
            self.ai2_cls_loss, var_list=M.get_all_vars('age_cls'))
        train_enc = tf.train.AdamOptimizer(0.000).minimize(
            self.ai2_enc_loss, var_list=M.get_all_vars('encoder'))

        with tf.control_dependencies([train_cls, train_enc]):
            self.train_ai2 = tf.no_op()
Example #7
 def build_loss_mc(self, generated, target):
     mc_loss = tf.reduce_mean(tf.abs(generated - target))
     # note: the learning rate is literally 0.0, so this update op computes
     # gradients but does not change 'gen_att'
     train_mc = tf.train.AdamOptimizer(0.0000, beta1=0.5).minimize(
         mc_loss, var_list=M.get_all_vars('gen_att'))
     # train_mc = tf.no_op()
     with tf.control_dependencies([train_mc]):
         train_mc = tf.no_op()
     return mc_loss, train_mc
Example #8
    def __init__(self, class_num):
        img_holder = tf.placeholder(tf.float32, [None, 460, 460, 3])
        with tf.variable_scope('bg_fg'):
            net_bgfg = network_bg_fg(img_holder)
        with tf.variable_scope('seg_part'):
            # softmax over the channel axis, then take the foreground probability
            bg_fg_upsample = tf.nn.softmax(
                tf.image.resize_images(net_bgfg.seg_layer,
                                       img_holder.get_shape().as_list()[1:3]),
                -1)[:, :, :, 1]
            net_seg = network_seg(img_holder, class_num, bg_fg_upsample)
        with tf.variable_scope('inst_part'):
            net_inst = network_inst(
                img_holder, class_num, bg_fg_upsample,
                tf.image.resize_images(net_seg.seg_layer,
                                       tf.shape(img_holder)[1:3]))

        self.net_bgfg = net_bgfg
        self.net_seg = net_seg
        self.net_inst = net_inst

        self.img_holder = img_holder

        self.mask_out = tf.image.resize_images(net_bgfg.seg_layer,
                                               tf.shape(img_holder)[1:3])
        self.seg_out = tf.image.resize_images(net_seg.seg_layer,
                                              tf.shape(img_holder)[1:3])
        self.inst_num_out = net_inst.inst_layer
        self.coord_out = net_inst.coord_layer

        self.sess = tf.Session()
        M.loadSess('./savings_bgfg/',
                   sess=self.sess,
                   init=True,
                   var_list=M.get_all_vars('bg_fg'))
        M.loadSess('./savings_seg/',
                   sess=self.sess,
                   var_list=M.get_all_vars('seg_part'))
        M.loadSess('./savings_inst/',
                   sess=self.sess,
                   var_list=M.get_all_vars('inst_part'))
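A hedged usage sketch for the wrapper above; the class name SegNetwork, the class count, and the imgs batch (float32, shaped [batch, 460, 460, 3]) are all illustrative assumptions:

    net = SegNetwork(class_num=21)  # class count is an arbitrary example
    mask, seg = net.sess.run([net.mask_out, net.seg_out],
                             feed_dict={net.img_holder: imgs})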
Example #9
    def build_loss_mc(self):
        self.mc_loss = tf.reduce_mean(
            tf.abs(self.generated - self.target_holder))
        # train_mc = tf.train.AdamOptimizer(0.001).minimize(
        #     self.mc_loss,
        #     var_list=M.get_all_vars('encoder') + M.get_all_vars('gen_att'))
        train_mc = tf.train.AdamOptimizer(0.001).minimize(
            self.mc_loss, var_list=M.get_all_vars('gen_att'))
        with tf.control_dependencies([train_mc]):
            self.train_mc = tf.no_op()
Example #10
    def build_loss_adv2(self, adv2, adv2_real):
        adv2_loss1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adv2, labels=tf.zeros_like(adv2)))
        adv2_loss2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adv2_real, labels=tf.ones_like(adv2_real)))
        self.adv2_loss_d = 0.5 * (adv2_loss1 + adv2_loss2)

        self.adv2_loss_g = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=adv2,
                                                    labels=tf.ones_like(adv2)))

        train_adv2_d = tf.train.AdamOptimizer(0.0001).minimize(
            self.adv2_loss_d, var_list=M.get_all_vars('discriminator'))

        train_adv2_g = tf.train.AdamOptimizer(0.0001).minimize(
            self.adv2_loss_g, var_list=M.get_all_vars('gen_att'))

        with tf.control_dependencies([train_adv2_d, train_adv2_g]):
            self.train_adv2 = tf.no_op()
Example #11
    def build_loss_adv1(self, adv1, adv1_uni):
        adv1_loss1 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adv1, labels=tf.zeros_like(adv1)))
        adv1_loss2 = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adv1_uni, labels=tf.ones_like(adv1_uni)))
        self.adv1_loss_d = 0.5 * (adv1_loss1 + adv1_loss2)

        self.adv1_loss_g = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=adv1,
                                                    labels=tf.ones_like(adv1)))

        # opt_adv1 = M.get_update_ops('encoder') + M.get_update_ops('dis_f')

        train_adv1_d = tf.train.AdamOptimizer(0.00001).minimize(
            self.adv1_loss_d, var_list=M.get_all_vars('dis_f'))
        train_adv1_g = tf.train.AdamOptimizer(0.00005).minimize(
            self.adv1_loss_g, var_list=M.get_all_vars('encoder'))

        with tf.control_dependencies([train_adv1_g, train_adv1_d]):
            self.train_adv1 = tf.no_op()
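The control_dependencies + no_op pattern used here (and in Examples #1, #3, #6, #7, #9, #10) groups both update ops so a single session call performs one generator step and one discriminator step together. A usage sketch, with model and feed standing in for the enclosing object and its placeholder feeds:

    loss_d, loss_g, _ = model.sess.run(
        [model.adv1_loss_d, model.adv1_loss_g, model.train_adv1],
        feed_dict=feed)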
Example #12
def get_assign_tensors():
	v = M.get_all_vars()
	print(len(v))

	with open('buffer_weights.pickle','rb') as f:
		fmat = pickle.load(f)

	assign_tensor = []
	# fmat = sio.loadmat('layer1')
	for i in range(640):
		buff = fmat[str(i)]
		# buff = np.float32(buff)
		if len(buff.shape) == 2:
			# some entries carry an extra leading dimension; take the flat vector
			buff = buff[0]
		bufftensor = tf.assign(v[i], buff)
		assign_tensor.append(bufftensor)
	return assign_tensor
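A usage sketch for the helper above: the assign nodes it returns are only graph ops, so nothing is copied until they are run in a session:

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(get_assign_tensors())  # copy the pickled weights into the variables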
Example #13
    def __init__(self, model_path='./aim_model_gen/'):
        self.model_path = model_path
        self.inp_holder = tf.placeholder(tf.float32, [None, 128, 128, 3])
        self.age_holder = tf.placeholder(tf.float32, [None, 1])
        self.age_holder2 = tf.placeholder(tf.float32, [None, 1])

        # get attention A and C
        age_expanded = self.expand(self.age_holder, self.inp_holder)
        aged_feature = tf.concat([age_expanded, self.inp_holder], -1)
        A, C = N.generator_att(aged_feature)
        # construct synthesized image
        generated = A * C + (1. - A) * self.inp_holder

        # get attention A2 and C2
        age_expanded2 = self.expand(self.age_holder2, generated)
        aged_feature2 = tf.concat([age_expanded2, generated], -1)
        A2, C2 = N.generator_att(aged_feature2)
        generated2 = A2 * C2 + (1. - A2) * generated

        # retrieve tensor for adv2 and ae
        adv2, age_pred = N.discriminator(generated)
        adv2_real, age_pred_real = N.discriminator(self.inp_holder)

        adv2_2, age_pred2 = N.discriminator(generated2)

        feat = N.feat_encoder(self.inp_holder)
        feat1 = N.feat_encoder(generated)
        feat2 = N.feat_encoder(generated2)

        self.feat_loss = tf.reduce_mean(
            tf.square(feat - feat1) + tf.square(feat - feat2))
        self.train_feat = tf.train.AdamOptimizer(0.00001).minimize(
            self.feat_loss, var_list=M.get_all_vars('gen_att'))

        # get gradient penalty

        # gamma1 = tf.random_uniform([],0.0,1.0)
        # interp1 = gamma1 * generated + (1. - gamma1) * self.inp_holder
        # interp1_y, _ = N.discriminator(interp1, 7)
        # grad_p1 = tf.gradients(interp1_y, interp1)[0]
        # grad_p1 = tf.sqrt(tf.reduce_sum(tf.square(grad_p1),axis=[1,2,3]))
        # grad_p1 = tf.reduce_mean(tf.square(grad_p1 - 1.) * 10.)

        # gamma2 = tf.random_uniform([],0.0,1.0)
        # interp2 = gamma2 * generated + (1. - gamma2) * self.inp_holder
        # interp2_y, _ = N.discriminator(interp2, 7)
        # grad_p2 = tf.gradients(interp2_y, interp2)[0]
        # grad_p2 = tf.sqrt(tf.reduce_sum(tf.square(grad_p2),axis=[1,2,3]))
        # grad_p2 = tf.reduce_mean(tf.square(grad_p2 - 1.) * 10.)

        grad_p1 = grad_p2 = 0.

        # call loss builder functions
        self.mc_loss, self.train_mc = self.build_loss_mc(
            generated2, self.inp_holder)
        self.adv2_loss_d1, self.adv2_loss_g1, self.train_adv2_1 = self.build_loss_adv2(
            adv2, adv2_real, grad_p1)
        self.adv2_loss_d2, self.adv2_loss_g2, self.train_adv2_2 = self.build_loss_adv2(
            adv2_2, adv2_real, grad_p2)
        self.age_cls_loss_dis, self.train_ae_dis = self.build_loss_ae_dis(
            age_pred_real, self.age_holder2)
        self.age_cls_loss_gen, self.train_ae_gen = self.build_loss_ae_gen(
            age_pred, self.age_holder)
        self.age_cls_loss_gen2, self.train_ae_gen2 = self.build_loss_ae_gen(
            age_pred2, self.age_holder2)
        self.loss_A, self.train_A = self.build_loss_A(A)
        self.loss_A2, self.train_A2 = self.build_loss_A(A2)
        self.update_ops()
        self.accuracy = M.accuracy(age_pred_real,
                                   tf.argmax(self.age_holder2, -1))
        self.A1_l, self.A2_l = tf.reduce_mean(tf.square(A)), tf.reduce_mean(
            tf.square(A2))

        self.generated = generated
        self.A, self.C = A, C

        self.sess = tf.Session()
        M.loadSess(model_path, self.sess, init=True)
        M.loadSess('./aim_model/',
                   self.sess,
                   var_list=M.get_all_vars('encoder'))
        self.saver = tf.train.Saver()
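self.expand is called above but not defined in any of these examples. A minimal sketch of what it plausibly does, assuming it tiles the [batch, 1] age label across the spatial dimensions so it can be concatenated with the 128x128 image along the channel axis; the behavior is inferred, not confirmed:

    def expand(self, label, ref_img):
        # label: [batch, 1]; ref_img: [batch, H, W, C] with static H and W
        h, w = ref_img.get_shape().as_list()[1:3]
        label = tf.reshape(label, [-1, 1, 1, 1])
        return tf.tile(label, [1, h, w, 1])  # -> [batch, H, W, 1]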
Example #14
 def __init__(self, inp_holder):
     self.feature_maps = self.build_model(inp_holder)
     self.feature_layer = self.feature_maps[-1]
     self.var = M.get_all_vars('WideRes')
Example #15
 def build_loss_A(self):
     tv_loss = tf.reduce_mean(tf.image.total_variation(self.A))
     l2_reg = tf.reduce_mean(tf.square(self.A))
     self.loss_A = tv_loss / (128 * 128) + l2_reg * 0.01
     self.train_A = tf.train.AdamOptimizer(0.000001).minimize(
         self.loss_A, var_list=M.get_all_vars('gen_att'))