def __init__(self, sess):
    self.sess = sess
    self.global_step = tf.Variable(0.0, name='global_step', dtype=tf.float32, trainable=False)
    self.G_para = []
    self.D_para = []

    # for debug
    self.cnt_tep = 0
    self.deb_kep = 0
    self.deb_kep2 = 0

    # for data input
    self.pipline_data_train = cdata.get_pipline_data_train(img_size, batchsize)
    self.pipline_data_test = cdata.get_pipline_data_test(img_size, batchsize_test)

    # placeholders: input image stack and the training flag
    self.imgs_pla = tf.placeholder(
        tf.float32,
        [batchsize, img_size_h, img_size_w, G_group_img_num * img_channel],
        name='imgs_in')
    self.training = tf.placeholder(tf.bool, name='training_in')
    print('placeholders:\n', 'img_placeholder:', self.imgs_pla, '\ntraining:', self.training)
    '''
    placeholders:
    img_placeholder: Tensor("imgs_in:0", shape=(12, 180, 320, 9), dtype=float32)
    training: Tensor("training_in:0", dtype=bool)
    '''

    self.frame0 = self.imgs_pla[:, :, :, :img_channel]
    self.frame1 = self.imgs_pla[:, :, :, img_channel:img_channel * 2]
    self.frame2 = self.imgs_pla[:, :, :, img_channel * 2:]

    # squared difference between frame 0 and frame 2, used during evaluation
    self.frame_0_2_squareloss = tf.reduce_mean(
        tf.squared_difference(self.frame0, self.frame2), [1, 2, 3],
        name='frame_0_2_squareloss')

    # frame0and2 = tf.concat([self.frame0, self.frame2], -1)  # concatenate along the channel axis
    # print('after concat:', frame0and2)

    # NOTE: differs from v1 -- adding the original frame to the generator output
    # would make the generator's task easier; here the network output is used directly.
    self.G_net = self.Generator_net(self.frame0, self.frame2)  # + self.frame0
    print('self.G_net:', self.G_net)
    # self.G_net: Tensor("G_Net/G_tanh:0", shape=(12, 180, 320, 3), dtype=float32)

    # outputs of D_1 (the "linear"/temporal discriminator)
    frame0_False_2 = tf.concat([self.frame0, self.G_net, self.frame2], -1)
    self.D_linear_net_F, self.D_linear_net_F_logit = self.Discriminator_net_linear(frame0_False_2)
    self.D_linear_net_T, self.D_linear_net_T_logit = self.Discriminator_net_linear(self.imgs_pla)

    # D_1 loss terms
    self.D_linear_net_loss_sum, self.D_linear_net_loss_T, self.D_linear_net_loss_F = \
        self.D_loss_TandF_logits(self.D_linear_net_T_logit, self.D_linear_net_F_logit, "D_linear_net")
    print('D1 form finished..')

    # outputs of D_2 (disabled in this version)
    '''
    self.D_clear_net_F, self.D_clear_net_F_logit = self.Discriminator_net_clear(self.G_net)
    self.D_clear_net_T, self.D_clear_net_T_logit = self.Discriminator_net_clear(self.frame1)

    # D_2 loss terms
    self.D_clear_net_loss_sum, self.D_clear_net_loss_T, self.D_clear_net_loss_F = \
        self.D_loss_TandF_logits(self.D_clear_net_T_logit, self.D_clear_net_F_logit, "D_clear_net")
    '''

    self.G_loss_mean_Square = tf.reduce_mean(
        tf.squared_difference(self.G_net, self.frame1), name='G_clear_square_loss')
    print('G_loss_mean_Square form finished..')

    # no special weighting of the discriminator losses, just a simple sum
    self.D_loss_all = self.D_linear_net_loss_sum  # + self.D_clear_net_loss_sum

    # generator losses
    self.G_loss_mean_D1 = self.G_loss_F_logits(self.D_linear_net_F_logit, 'G_loss_D1')
    # self.G_loss_mean_D2 = self.G_loss_F_logits(self.D_clear_net_F_logit, 'G_loss_D2')

    # no special weighting of the generator losses, just a simple sum;
    # the square-loss weight grows with the global step
    self.G_loss_all = self.G_loss_mean_D1 + self.G_loss_mean_Square * (
        1 + self.global_step / G_squareloss_rate_globalstep)  # + self.G_loss_mean_D2

    # rely on tf.trainable_variables() as the authoritative variable list
    t_vars = tf.trainable_variables()
    print("trainable vars cnt:", len(t_vars))
    self.G_para = [var for var in t_vars if var.name.startswith('G')]
    self.D_para = [var for var in t_vars if var.name.startswith('D')]

    # weight clipping (WGAN-style) on the discriminator parameters
    self.clip_D = [
        p.assign(tf.clip_by_value(p, weightclip_min, weightclip_max))
        for p in self.D_para
    ]

    # training ops
    self.train_D = self.train_op_D(decay_steps, decay_rate)
    self.train_G = self.train_op_G(decay_steps, decay_rate)

    '''
    print('\nshow all trainable vars:', len(tf.trainable_variables()))
    for i in tf.trainable_variables():
        print(i)
    '''
    print('\nfirst show G params')
    for ind, i in enumerate(self.G_para):
        print(ind, i)
    print('\nnext is D:\n')
    for ind, i in enumerate(self.D_para):
        print(ind, i)
    print('\nnext is tf.GraphKeys.UPDATE_OPS:')
    print(tf.get_collection(tf.GraphKeys.UPDATE_OPS))

    self.summary_all = tf.summary.merge_all()
    init = tf.global_variables_initializer()  # initialize all tf.Variables
    self.sess.run(init)
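# --- Reference sketch (not part of the original class) ----------------------
# Minimal illustration of what the loss helpers wired up above are assumed to
# compute: standard sigmoid cross-entropy GAN losses on discriminator logits.
# The helper names below are prefixed with _sketch_ to mark them as
# hypothetical; the class's actual D_loss_TandF_logits / G_loss_F_logits
# methods may differ (e.g. WGAN-style losses, given the weight clipping above).
# Uses the module's existing `import tensorflow as tf`.

def _sketch_D_loss_TandF_logits(T_logit, F_logit, name):
    # discriminator loss: push real (T) logits toward 1 and fake (F) logits toward 0
    loss_T = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(T_logit), logits=T_logit),
        name=name + '_loss_T')
    loss_F = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(F_logit), logits=F_logit),
        name=name + '_loss_F')
    loss_sum = tf.add(loss_T, loss_F, name=name + '_loss_sum')
    return loss_sum, loss_T, loss_F


def _sketch_G_loss_F_logits(F_logit, name):
    # generator loss: push the discriminator's fake logits toward 1
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(F_logit), logits=F_logit),
        name=name)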
def __init__(self, sess):
    self.sess = sess
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self.G_para = []
    self.D_para = []

    # for debug
    self.cnt_tep = 0
    self.deb_kep = 0
    self.deb_kep2 = 0

    # for data input
    self.pipline_data_train = cdata.get_pipline_data_train(img_size, batchsize)
    self.pipline_data_test = cdata.get_pipline_data_test(img_size, batchsize_test)

    # placeholders: input image stack and the training flag
    self.imgs_pla = tf.placeholder(
        tf.float32,
        [batchsize, img_size_h, img_size_w, G_group_img_num * img_channel],
        name='imgs_in')
    self.training = tf.placeholder(tf.bool, name='training_in')

    self.frame0 = self.imgs_pla[:, :, :, :img_channel]
    self.frame1 = self.imgs_pla[:, :, :, img_channel:img_channel * 2]
    self.frame2 = self.imgs_pla[:, :, :, img_channel * 2:]

    # concatenate frame 0 and frame 2 along the channel axis
    frame0and2 = tf.concat([self.frame0, self.frame2], 3)
    # print('after concat:', frame0and2)
    self.G_net = self.Generator_net(frame0and2)

    # outputs of D_1 (the "linear"/temporal discriminator)
    frame0_False_2 = tf.concat([self.frame0, self.G_net, self.frame2], 3)
    self.D_linear_net_F, self.D_linear_net_F_logit = self.Discriminator_net_linear(frame0_False_2)
    self.D_linear_net_T, self.D_linear_net_T_logit = self.Discriminator_net_linear(self.imgs_pla)

    # D_1 loss terms
    self.D_linear_net_loss_sum, self.D_linear_net_loss_T, self.D_linear_net_loss_F = \
        self.D_loss_TandF_logits(self.D_linear_net_T_logit, self.D_linear_net_F_logit, "D_linear_net")
    print('D1 form finished..')

    # outputs of D_2 (the "clear"/sharpness discriminator)
    self.D_clear_net_F, self.D_clear_net_F_logit = self.Discriminator_net_clear(self.G_net)
    self.D_clear_net_T, self.D_clear_net_T_logit = self.Discriminator_net_clear(self.frame1)

    # D_2 loss terms
    self.D_clear_net_loss_sum, self.D_clear_net_loss_T, self.D_clear_net_loss_F = \
        self.D_loss_TandF_logits(self.D_clear_net_T_logit, self.D_clear_net_F_logit, "D_clear_net")
    print('D2 form finished..')

    # no special weighting of the two discriminator losses, just a simple sum
    self.D_loss_all = self.D_clear_net_loss_sum + self.D_linear_net_loss_sum

    # generator losses, again a simple sum
    self.G_loss_mean_D1 = self.G_loss_F_logits(self.D_linear_net_F_logit, 'G_loss_D1')
    self.G_loss_mean_D2 = self.G_loss_F_logits(self.D_clear_net_F_logit, 'G_loss_D2')
    self.G_loss_all = self.G_loss_mean_D1 + self.G_loss_mean_D2

    # rely on tf.trainable_variables() as the authoritative variable list
    t_vars = tf.trainable_variables()
    print("trainable vars cnt:", len(t_vars))
    self.G_para = [var for var in t_vars if var.name.startswith('G')]
    self.D_para = [var for var in t_vars if var.name.startswith('D')]

    # training ops
    self.train_D = self.train_op_D(decay_steps, decay_rate)
    self.train_G = self.train_op_G(decay_steps, decay_rate)

    '''
    print('\nshow all trainable vars:', len(tf.trainable_variables()))
    for i in tf.trainable_variables():
        print(i)
    '''
    print('\nfirst show G params')
    for ind, i in enumerate(self.G_para):
        print(ind, i)
    print('\nnext is D:\n')
    for ind, i in enumerate(self.D_para):
        print(ind, i)
    print('\nnext is tf.GraphKeys.UPDATE_OPS:')
    print(tf.get_collection(tf.GraphKeys.UPDATE_OPS))

    self.summary_all = tf.summary.merge_all()
    init = tf.global_variables_initializer()  # initialize all tf.Variables
    self.sess.run(init)
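# --- Usage sketch (assumptions) ----------------------------------------------
# The enclosing class name is not shown in this section; `FrameGAN` below is a
# placeholder, and the exact return type of the cdata input pipeline is assumed
# to be runnable with sess.run(). The loop is a minimal sketch of one training
# iteration against the first (v2) constructor above: a D step followed by the
# WGAN-style weight clipping, then a G step, feeding a batch of shape
# [batchsize, img_size_h, img_size_w, G_group_img_num * img_channel].
#
# with tf.Session() as sess:
#     model = FrameGAN(sess)
#     batch = sess.run(model.pipline_data_train)          # assumed to yield an image batch
#     _ = sess.run([model.train_D, model.clip_D],
#                  feed_dict={model.imgs_pla: batch, model.training: True})
#     _ = sess.run(model.train_G,
#                  feed_dict={model.imgs_pla: batch, model.training: True})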