def generate(self, z_var, pg=1, t=False, alpha_trans=0.0):
    """Progressive-growing generator (2-channel to-RGB variant).

    Args:
        z_var: latent tensor; pixel-normalized and reshaped to
            [batch_size, 1, 1, get_nf(1)] — assumes its total size
            matches that shape (TODO confirm at call sites).
        pg: current growth stage; pg=1 returns the base 4x4 output.
        t: True while the newest resolution block is being faded in.
        alpha_trans: fade-in blend factor in [0, 1]; 0 = only the
            upscaled previous-stage image, 1 = only the new block.

    Returns:
        Generated image tensor with output_dim=2 channels.
    """
    with tf.variable_scope('generator'):
        # Project the normalized latent into a 4x4 feature map.
        de = tf.reshape(Pixl_Norm(z_var),
                        [self.batch_size, 1, 1, int(self.get_nf(1))])
        de = conv2d(de, output_dim=self.get_nf(1), k_h=4, k_w=4, d_w=1, d_h=1,
                    use_wscale=self.use_wscale, gain=np.sqrt(2) / 4,
                    padding='Other', name='gen_n_1_conv')
        de = Pixl_Norm(lrelu(de))
        de = tf.reshape(de, [self.batch_size, 4, 4, int(self.get_nf(1))])
        de = conv2d(de, output_dim=self.get_nf(1), d_w=1, d_h=1,
                    use_wscale=self.use_wscale, name='gen_n_2_conv')
        de = Pixl_Norm(lrelu(de))

        # Guard: de_iden is only produced inside the loop; initialize so a
        # bad (t, pg) combination fails loudly on None rather than NameError.
        de_iden = None
        for i in range(pg - 1):
            if i == pg - 2 and t:
                # To RGB: skip path from the previous resolution, upscaled so
                # it can be linearly blended with the new block's output.
                de_iden = conv2d(de, output_dim=2, k_w=1, k_h=1, d_w=1, d_h=1,
                                 use_wscale=self.use_wscale,
                                 name='gen_y_rgb_conv_{}'.format(de.shape[1]))
                de_iden = upscale(de_iden, 2)

            de = upscale(de, 2)
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       use_wscale=self.use_wscale,
                       name='gen_n_conv_1_{}'.format(de.shape[1]))))
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       use_wscale=self.use_wscale,
                       name='gen_n_conv_2_{}'.format(de.shape[1]))))

        # To RGB (gain=1 on the output projection).
        de = conv2d(de, output_dim=2, k_w=1, k_h=1, d_w=1, d_h=1,
                    use_wscale=self.use_wscale, gain=1,
                    name='gen_y_rgb_conv_{}'.format(de.shape[1]))

        if pg == 1:
            return de

        if t:
            # Fade in the newest block's output against the upscaled skip path.
            de = (1 - alpha_trans) * de_iden + alpha_trans * de

        return de
def generate(self, z_var, t_text_embedding, pg=1, t=False, alpha_trans=0.0):
    """Text-conditioned progressive-growing generator (3-channel output).

    Args:
        z_var: latent noise tensor, shape [batch_size, z_dim].
        t_text_embedding: text embedding, projected down to self.tdim and
            concatenated with z_var as the conditioning input.
        pg: current growth stage; pg=1 returns the base 4x4 output.
        t: True while the newest resolution block is being faded in.
        alpha_trans: fade-in blend factor in [0, 1].

    Returns:
        Generated image tensor with 3 channels.
    """
    with tf.variable_scope('generator'):
        # Compress the text embedding, then condition by concatenation.
        reduced_text_embedding = lrelu(
            linear(t_text_embedding, self.tdim, 'g_embeddings'))
        z_concat = tf.concat([z_var, reduced_text_embedding], 1)
        # NOTE(review): assumes z_dim + tdim == get_nf(1) so this reshape
        # is valid — confirm against the caller's dimensions.
        de = tf.reshape(z_concat,
                        [self.batch_size, 1, 1, tf.cast(self.get_nf(1), tf.int32)])
        de = conv2d(de, output_dim=self.get_nf(1), k_h=4, k_w=4, d_w=1, d_h=1,
                    padding='Other', name='gen_n_1_conv')
        de = Pixl_Norm(lrelu(de))
        de = tf.reshape(de,
                        [self.batch_size, 4, 4, tf.cast(self.get_nf(1), tf.int32)])
        de = conv2d(de, output_dim=self.get_nf(1), d_w=1, d_h=1,
                    name='gen_n_2_conv')
        de = Pixl_Norm(lrelu(de))

        # Guard: de_iden is only produced inside the loop.
        de_iden = None
        for i in range(pg - 1):
            if i == pg - 2 and t:
                # To RGB: skip path from the previous resolution, upscaled so
                # it can be blended with the new block's output.
                de_iden = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1,
                                 name='gen_y_rgb_conv_{}'.format(de.shape[1]))
                de_iden = upscale(de_iden, 2)

            de = upscale(de, 2)
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       name='gen_n_conv_1_{}'.format(de.shape[1]))))
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       name='gen_n_conv_2_{}'.format(de.shape[1]))))

        # To RGB
        de = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1,
                    name='gen_y_rgb_conv_{}'.format(de.shape[1]))

        if pg == 1:
            return de

        if t:
            # Fade in the newest block's output against the upscaled skip path.
            de = (1 - alpha_trans) * de_iden + alpha_trans * de

        # NOTE(review): the text-to-image reference applies tanh here; this
        # variant returns the raw conv output — confirm which is intended.
        return de
def generate(self, z_var, pg=1, t=False, alpha_trans=0.0):
    """Progressive-growing generator (3-channel to-RGB variant).

    Args:
        z_var: latent tensor; pixel-normalized and reshaped to
            [batch_size, 1, 1, get_nf(1)].
        pg: current resolution stage; pg=1 returns the base 4x4 output.
        t: True while the newest resolution block is being faded in.
        alpha_trans: fade-in blend factor in [0, 1].

    Returns:
        Generated image tensor with 3 channels.
    """
    with tf.variable_scope('generator'):
        # BUGFIX: this initialization was commented out, leaving `de`
        # undefined in the first conv2d call (NameError at graph build).
        de = tf.reshape(Pixl_Norm(z_var),
                        [self.batch_size, 1, 1, int(self.get_nf(1))])
        de = conv2d(de, output_dim=self.get_nf(1), k_h=4, k_w=4, d_w=1, d_h=1,
                    use_wscale=self.use_wscale, gain=np.sqrt(2) / 4,
                    padding='Other', name='gen_n_1_conv')
        de = Pixl_Norm(lrelu(de))
        de = tf.reshape(de, [self.batch_size, 4, 4, int(self.get_nf(1))])
        de = conv2d(de, output_dim=self.get_nf(1), d_w=1, d_h=1,
                    use_wscale=self.use_wscale, name='gen_n_2_conv')
        de = Pixl_Norm(lrelu(de))

        # pg is the current resolution stage; at pg=1 (the first stage)
        # the growth loop is skipped entirely — no upscaling needed.
        de_iden = None
        for i in range(pg - 1):
            # t marks the fade-in transition; the skip path is built only
            # on the final loop iteration (the newest stage).
            if i == pg - 2 and t:
                # To RGB: skip path from the previous resolution.
                de_iden = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1,
                                 use_wscale=self.use_wscale,
                                 name='gen_y_rgb_conv_{}'.format(de.shape[1]))
                de_iden = upscale(de_iden, 2)

            de = upscale(de, 2)
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       use_wscale=self.use_wscale,
                       name='gen_n_conv_1_{}'.format(de.shape[1]))))
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       use_wscale=self.use_wscale,
                       name='gen_n_conv_2_{}'.format(de.shape[1]))))

        # To RGB
        de = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1,
                    use_wscale=self.use_wscale, gain=1,
                    name='gen_y_rgb_conv_{}'.format(de.shape[1]))

        if pg == 1:
            return de

        # During transition, linearly combine the upscaled previous-stage
        # image and the new block's output via alpha_trans.
        if t:
            de = (1 - alpha_trans) * de_iden + alpha_trans * de

        return de
def generate(self, z_var, pg=1, t=False, alpha_trans=0.0):
    """Progressive-growing generator (3-channel to-RGB variant).

    Args:
        z_var: latent vector, e.g. (batch_size, 512); reshaped to
            (batch_size, 1, 1, get_nf(1)).
        pg: current resolution stage; pg=1 returns the base 4x4 output.
        t: True while the newest resolution block is being faded in.
        alpha_trans: fade-in blend factor in [0, 1].

    Returns:
        Generated image tensor with 3 channels.
    """
    with tf.variable_scope('generator'):
        # Reshape the latent (batch_size, nf) to (batch_size, 1, 1, nf).
        de = tf.reshape(
            z_var, [self.batch_size, 1, 1, tf.cast(self.get_nf(1), tf.int32)])
        de = conv2d(de, output_dim=self.get_nf(1), k_h=4, k_w=4, d_w=1, d_h=1,
                    padding='Other', name='gen_n_1_conv')
        de = Pixl_Norm(lrelu(de))
        # Reshape to the (batch_size, 4, 4, nf) base feature map.
        de = tf.reshape(
            de, [self.batch_size, 4, 4, tf.cast(self.get_nf(1), tf.int32)])
        de = conv2d(de, output_dim=self.get_nf(1), d_w=1, d_h=1,
                    name='gen_n_2_conv')
        de = Pixl_Norm(lrelu(de))

        # pg=2 -> loop runs once (i=0); pg=3 -> twice (i=0,1), and so on.
        de_iden = None
        for i in range(pg - 1):
            # The skip path is built only on the final iteration and only
            # while transitioning (e.g. pg=2,i=0,t=True or pg=3,i=1,t=True).
            if i == pg - 2 and t:
                # To RGB. The paper upscales before the toRGB conv, but the
                # conv is done first here to avoid TensorFlow variable-name
                # collisions with the final toRGB conv below.
                de_iden = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1,
                                 name='gen_y_rgb_conv_{}'.format(de.shape[1]))
                de_iden = upscale(de_iden, 2)

            de = upscale(de, 2)
            # Channel count shrinks as resolution grows,
            # e.g. get_nf(1)=512, get_nf(2)=256.
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       name='gen_n_conv_1_{}'.format(de.shape[1]))))
            de = Pixl_Norm(lrelu(
                conv2d(de, output_dim=self.get_nf(i + 1), d_w=1, d_h=1,
                       name='gen_n_conv_2_{}'.format(de.shape[1]))))

        # To RGB
        de = conv2d(de, output_dim=3, k_w=1, k_h=1, d_w=1, d_h=1,
                    name='gen_y_rgb_conv_{}'.format(de.shape[1]))

        if pg == 1:
            return de

        # While transitioning, blend the upscaled previous-stage image with
        # the new block's output.
        if t:
            de = (1 - alpha_trans) * de_iden + alpha_trans * de

        return de
def generator(hparams, z_var, train, reuse):
    """Fixed-depth generator: latent z_var -> 64x64 output with 2 channels.

    Args:
        hparams: object exposing batch_size.
        z_var: latent tensor; pixel-normalized and reshaped to
            [batch_size, 1, 1, 128].
        train: accepted for interface compatibility; not used in the body.
        reuse: when True, reuse the variables of the 'generator' scope.

    Returns:
        The final 1x1-conv ("to RGB") output tensor, 2 channels.
    """
    with tf.variable_scope("generator") as scope:
        if reuse:
            tf.get_variable_scope().reuse_variables()

        wscale = True

        # Latent -> 4x4 base feature map.
        de = tf.reshape(Pixl_Norm(z_var), [hparams.batch_size, 1, 1, 128])
        de = conv2d(de, output_dim=128, k_h=4, k_w=4, d_w=1, d_h=1,
                    use_wscale=wscale, gain=np.sqrt(2) / 4,
                    padding='Other', name='gen_n_1_conv')
        de = Pixl_Norm(lrelu(de))
        de = tf.reshape(de, [hparams.batch_size, 4, 4, 128])
        de = Pixl_Norm(lrelu(conv2d(de, output_dim=128, d_w=1, d_h=1,
                                    use_wscale=wscale, name='gen_n_2_conv')))

        # Three upscale + double-conv stages: 8x8/128ch, 16x16/64ch, 32x32/32ch.
        for channels, res in ((128, 8), (64, 16), (32, 32)):
            de = upscale(de, 2)
            for idx in (1, 2):
                de = Pixl_Norm(lrelu(conv2d(
                    de, output_dim=channels, d_w=1, d_h=1, use_wscale=wscale,
                    name='gen_n_conv_{}_{}'.format(idx, res))))

        # 32x32 to-RGB skip path: computed here (so its variables are created
        # in this scope) but its result is not blended into the return value.
        de_iden = upscale(conv2d(de, output_dim=2, k_w=1, k_h=1, d_w=1, d_h=1,
                                 use_wscale=wscale,
                                 name='gen_y_rgb_conv_32'), 2)

        # Final 64x64 stage.
        de = upscale(de, 2)
        for idx in (1, 2):
            de = Pixl_Norm(lrelu(conv2d(
                de, output_dim=16, d_w=1, d_h=1, use_wscale=wscale,
                name='gen_n_conv_{}_64'.format(idx))))

        # To RGB (gain=1 on the output projection).
        de = conv2d(de, output_dim=2, k_w=1, k_h=1, d_w=1, d_h=1,
                    use_wscale=wscale, gain=1, name='gen_y_rgb_conv_64')
        return de