def gen_net(self, z, y):
    with tf.variable_scope('generator') as scope:
        # Reshape the label y (the CGAN condition) into a [batch_size, 1, 1, y_dim] map, e.g. 64x1x1x10.
        yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        z = tf.concat([z, y], 1)
        # Intermediate spatial sizes; y_dim (10 for MNIST) is the number of labels.
        c1, c2 = int(self.output_size / 4), int(self.output_size / 2)
        d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))
        # Re-attach the condition after the first fully connected layer.
        d1 = tf.concat([d1, y], 1)
        d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=7 * 7 * 2 * 64, scope='gen_fully2'), scope='gen_bn2'))
        d2 = tf.reshape(d2, [self.batch_size, c1, c1, 64 * 2])
        d2 = conv_cond_concat(d2, yb)
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'),
                                     scope='gen_bn3'))
        d3 = conv_cond_concat(d3, yb)
        d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel],
                     name='gen_deconv2', initializer=xavier_initializer())
        return tf.nn.sigmoid(d4)
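# The conv_cond_concat helper used above is assumed to follow the standard
# DCGAN-tensorflow pattern: broadcast the [N, 1, 1, y_dim] label map across
# the spatial dims of x and concatenate on the channel axis. A minimal sketch:
def conv_cond_concat(x, y):
    """Concatenate a conditioning map onto a feature map along channels:
    [N, H, W, C] + [N, 1, 1, y_dim] -> [N, H, W, C + y_dim]."""
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    # Multiplying y by a ones tensor tiles it over the H and W dimensions.
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)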
def sample_net(batch_size, z, y, output_size):
    z = tf.concat([z, y], 1)
    # y_dim (10) is the number of labels.
    yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
    # Integer division: c1 and c2 feed tf.reshape, which needs ints.
    c1, c2 = output_size // 4, output_size // 2
    d1 = fully_connect(z, weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1", reuse=True)
    d1 = tf.nn.relu(d1)
    d1 = tf.concat([d1, y], 1)
    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2", reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
    d2 = conv_cond_concat(d2, yb)
    d3 = de_conv(d2, weights2['wc2'], biases2['bc2'], out_shape=[batch_size, c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3", reuse=True)
    d3 = tf.nn.relu(d3)
    d3 = conv_cond_concat(d3, yb)
    d4 = de_conv(d3, weights2['wc3'], biases2['bc3'],
                 out_shape=[batch_size, output_size, output_size, channel])
    return tf.nn.sigmoid(d4)
def sample_net(batch_size, z, y, output_size):
    z = tf.concat([z, y], 1)  # tf.concat takes values first, axis second in TF >= 1.0
    # MNIST images have shape (28, 28, 1); in the paper, s = 28.
    c1, c2 = output_size // 4, output_size // 2
    d1 = fully_connect(z, weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1", reuse=True)
    d1 = tf.nn.relu(d1)
    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2", reuse=True)
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
    d3 = de_conv(d2, weights2['wc2'], biases2['bc2'], out_shape=[batch_size, c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3", reuse=True)
    d3 = tf.nn.relu(d3)
    d4 = de_conv(d3, weights2['wc3'], biases2['bc3'],
                 out_shape=[batch_size, output_size, output_size, 1])
    return tf.nn.sigmoid(d4)
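# A hypothetical driver for the conditional generators in this file (the
# placeholder names, the 100-d noise vector, and the 10-class one-hot labels
# are assumptions for an MNIST-like setup, not part of the original code):
batch_size = 64
z_ph = tf.placeholder(tf.float32, [batch_size, 100], name='z')
y_ph = tf.placeholder(tf.float32, [batch_size, 10], name='y')
fake = gern_net(batch_size, z_ph, y_ph, output_size=28)       # training graph
samples = sample_net(batch_size, z_ph, y_ph, output_size=28)  # reuses the BN variables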
def encode_decode(self, x_var, x_exemplar, reuse=False):
    with tf.variable_scope("encode_decode") as scope:
        if reuse:
            scope.reuse_variables()
        x_var = tf.concat([x_var, x_exemplar], axis=3)
        conv1 = tf.nn.relu(instance_norm(conv2d(x_var, output_dim=64, k_w=7, k_h=7, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = tf.nn.relu(instance_norm(conv2d(conv1, output_dim=128, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c2'), scope='e_in2'))
        conv3 = tf.nn.relu(instance_norm(conv2d(conv2, output_dim=256, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c3'), scope='e_in3'))
        r1 = Residual(conv3, residual_name='re_1')
        r2 = Residual(r1, residual_name='re_2')
        r3 = Residual(r2, residual_name='re_3')
        r4 = Residual(r3, residual_name='re_4')
        r5 = Residual(r4, residual_name='re_5')
        r6 = Residual(r5, residual_name='re_6')
        # Integer division keeps the deconv output shape integral under Python 3.
        g_deconv1 = tf.nn.relu(instance_norm(de_conv(r6,
            output_shape=[self.batch_size, self.output_size // 2, self.output_size // 2, 128],
            name='gen_deconv1'), scope="gen_in"))
        # for 1
        g_deconv_1_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 32],
            name='g_deconv_1_1'), scope='gen_in_1_1'))
        g_deconv_1_1_x = tf.concat([g_deconv_1_1, x_var], axis=3)
        x_tilde1 = conv2d(g_deconv_1_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_1_2')
        return tf.nn.tanh(x_tilde1)
def generate_mnist(self, z_var, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=7 * 7 * 32, scope='gen_fully1'),
                                     scope='gen_bn1', reuse=reuse))
        d2 = tf.reshape(d1, [self.batch_size, 7, 7, 32])
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 14, 14, 16], name='gen_deconv2'),
                                     scope='gen_bn2', reuse=reuse))
        d3 = de_conv(d2, output_shape=[self.batch_size, 28, 28, 1], name='gen_deconv3')
        return tf.nn.sigmoid(d3)
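# The de_conv helper is assumed to be the usual thin wrapper around
# tf.nn.conv2d_transpose with 5x5 kernels and stride-2 upsampling. A minimal
# sketch (the signature mirrors the scope-based calls above, but is an assumption):
def de_conv(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, name='deconv2d',
            initializer=tf.random_normal_initializer(stddev=0.02)):
    with tf.variable_scope(name):
        # Filter layout for conv2d_transpose: [height, width, out_ch, in_ch].
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
                            initializer=initializer)
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
                                        strides=[1, d_h, d_w, 1])
        b = tf.get_variable('biases', [output_shape[-1]],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(deconv, b)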
def gern_net(self, z, y):
    # No batch norm on G's output layer.
    with tf.variable_scope('generator') as scope:
        # (?, 1, 1, 10): reshape the label into a conditioning map.
        yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        # (?, 110): concatenate z and y along axis 1, i.e. (64, 100 + 10).
        z = tf.concat([z, y], 1)
        # Intermediate spatial sizes: 7 and 14 for 28x28 output.
        c1, c2 = int(self.output_size / 4), int(self.output_size / 2)
        # (?, 1024)
        d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))
        # (?, 1034): concatenate y again after the first fully connected layer.
        d1 = tf.concat([d1, y], 1)
        # Second fully connected layer, (?, 7*7*128). The original computed this as
        # c1 * c2 * self.batch_size, which only equals 7*7*2*64 when batch_size == 64;
        # the channel count should not depend on the batch size.
        d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=c1 * c1 * 128, scope='gen_fully2'), scope='gen_bn2'))
        # (?, 7, 7, 128)
        d2 = tf.reshape(d2, [self.batch_size, c1, c1, 128])
        # (?, 7, 7, 138): attach y once more.
        d2 = conv_cond_concat(d2, yb)
        # (?, 14, 14, 128)
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2, c2, 128], name='gen_deconv1'),
                                     scope='gen_bn3'))
        # (?, 14, 14, 138)
        d3 = conv_cond_concat(d3, yb)
        # Output layer (no BN): (?, 28, 28, 1)
        d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size, self.output_size, self.channel],
                     name='gen_deconv2', initializer=xavier_initializer())
        return tf.nn.sigmoid(d4)
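# fully_connect is assumed to be the standard dense-layer helper that creates
# its weight and bias under a variable scope. A minimal sketch of the
# scope-based variant called above (the signature is an assumption):
def fully_connect(input_, output_size, scope='fc',
                  initializer=tf.random_normal_initializer(stddev=0.02)):
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [shape[1], output_size], tf.float32, initializer=initializer)
        b = tf.get_variable('biases', [output_size], initializer=tf.constant_initializer(0.0))
        return tf.matmul(input_, w) + b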
def generate(self, z_var, y, weights, biases):
    # First layer: concatenate the condition onto the noise vector.
    z_var = tf.concat([z_var, y], 1)
    d1 = tf.nn.relu(batch_normal(fully_connect(z_var, weights['wd'], biases['bd']), scope='gen_bn1'))
    # Second layer: re-attach the condition.
    d1 = tf.concat([d1, y], 1)
    d2 = tf.nn.relu(batch_normal(fully_connect(d1, weights['wc1'], biases['bc1']), scope='gen_bn2'))
    d2 = tf.reshape(d2, [self.batch_size, 7, 7, 128])
    y = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
    d2 = conv_cond_concat(d2, y)
    d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc2'], biases['bc2'],
                                         out_shape=[self.batch_size, 14, 14, 64]), scope='gen_bn3'))
    d3 = conv_cond_concat(d3, y)
    output = de_conv(d3, weights['wc3'], biases['bc3'], out_shape=[self.batch_size, 28, 28, 1])
    return tf.nn.sigmoid(output)
def Style_generate(self, z_var, reuse=False):
    with tf.variable_scope('sty_generator') as scope:
        # The original never applied its reuse flag to the scope, so a second
        # call would raise a variable-exists error; reuse the scope here.
        if reuse:
            scope.reuse_variables()
        d2 = tf.reshape(z_var, [self.batch_size, 215, 16, 256])
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 430, 64, 128],
                                             name='gen_deconv2', d_h=2), scope='gen_bn2'))
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 1720, 256, 64],
                                             name='gen_deconv3'), scope='gen_bn3'))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 6880, 1024, 1],
                                             name='gen_deconv4'), scope='gen_bn4', reuse=reuse))
        d5 = de_conv(d4, output_shape=[self.batch_size, 6880, 1024, 1], name='gen_deconv5', d_h=1, d_w=1)
        return tf.nn.relu(d5)
def generate(self, z_var, batch_size=64, resnet=False, is_train=True, reuse=False):
    with tf.variable_scope('generator') as scope:
        s = 4
        if reuse:
            scope.reuse_variables()
        if self.output_size == 32:
            s = 4
        elif self.output_size == 48:
            s = 6
        d1 = fully_connect(z_var, output_size=s * s * 256, scope='gen_fully1')
        d1 = tf.reshape(d1, [-1, s, s, 256])
        if not resnet:
            d1 = tf.nn.relu(d1)
            d2 = tf.nn.relu(batch_normal(de_conv(d1, output_shape=[batch_size, s * 2, s * 2, 256], name='gen_deconv2'),
                                         scope='bn1', is_training=is_train))
            d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[batch_size, s * 4, s * 4, 128], name='gen_deconv3'),
                                         scope='bn2', is_training=is_train))
            d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[batch_size, s * 8, s * 8, 64], name='gen_deconv4'),
                                         scope='bn3', is_training=is_train))
            d5 = conv2d(d4, output_dim=self.channel, stride=1, kernel=3, name='gen_conv')
        else:
            d2 = Residual_G(d1, output_dims=256, up_sampling=True, residual_name='in1')
            d3 = Residual_G(d2, output_dims=256, up_sampling=True, residual_name='in2')
            d4 = Residual_G(d3, output_dims=256, up_sampling=True, residual_name='in3')
            d4 = tf.nn.relu(batch_normal(d4, scope='in4'))
            d5 = conv2d(d4, output_dim=self.channel, kernel=3, stride=1, name='gen_conv')
        return tf.tanh(d5)
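# Residual_G above is assumed to be the usual upsampling generator block
# (BN-ReLU-conv twice, nearest-neighbour upsampling, plus a projected skip).
# A minimal sketch under those assumptions, not the repo's exact code:
def Residual_G(x, output_dims=256, up_sampling=False, residual_name='resi'):
    def upsample(t):
        h, w = t.get_shape().as_list()[1:3]
        return tf.image.resize_nearest_neighbor(t, [h * 2, w * 2])
    with tf.variable_scope(residual_name):
        h = tf.nn.relu(batch_normal(x, scope='bn1'))
        if up_sampling:
            h = upsample(h)
            x = upsample(x)
        h = conv2d(h, output_dim=output_dims, kernel=3, stride=1, name='c1')
        h = tf.nn.relu(batch_normal(h, scope='bn2'))
        h = conv2d(h, output_dim=output_dims, kernel=3, stride=1, name='c2')
        # Project the shortcut if the channel count changes.
        if x.get_shape().as_list()[-1] != output_dims:
            x = conv2d(x, output_dim=output_dims, kernel=1, stride=1, name='c_sc')
        return x + h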
def generate(self, z_var, y, weights, biases):
    g_prob = 1  # dropout keep probability; 1 disables dropout
    # Concatenate z_var and y.
    z_var = tf.concat([z_var, y], 1)
    d0 = lrelu(batch_normal(fully_connect(z_var, weights['wc0'], biases['bc0']), scope='gen_bn0'))
    z_var = tf.reshape(d0, shape=[d0.shape[0], 1, 1, d0.shape[1]])
    # z_var = tf.reshape(z_var, shape=[z_var.shape[0], 1, 1, z_var.shape[1]])
    print('z_var', z_var.shape)
    # The first layer.
    z_var = tf.nn.dropout(z_var, g_prob)
    d1 = tf.nn.relu(batch_normal(de_conv(z_var, weights['wc1'], biases['bc1'],
                                         out_shape=[self.batch_size, 4, 4, 512],
                                         s=[1, 2, 2, 1], padding_='VALID'), scope='gen_bn1'))
    print('d1', d1.shape)
    d1 = tf.nn.dropout(d1, g_prob)
    d2 = tf.nn.relu(batch_normal(de_conv(d1, weights['wc2'], biases['bc2'],
                                         out_shape=[self.batch_size, 8, 8, 256]), scope='gen_bn2'))
    d2 = tf.nn.dropout(d2, g_prob)
    d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc3'], biases['bc3'],
                                         out_shape=[self.batch_size, 16, 16, 128]), scope='gen_bn3'))
    d3 = tf.nn.dropout(d3, g_prob)
    d4 = tf.nn.relu(batch_normal(de_conv(d3, weights['wc4'], biases['bc4'],
                                         out_shape=[self.batch_size, 32, 32, 64]), scope='gen_bn4'))
    d5 = tf.tanh(de_conv(d4, weights['wc5'], biases['bc5'],
                         out_shape=[self.batch_size, 64, 64, self.channel]))
    print('d5', d5.shape)
    return d5
def generate(self, z_var, conv1, conv2, conv3, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=4 * 4 * 256, scope='gen_fully1'),
                                     scope='gen_bn1', reuse=reuse, isTrain=self.isTrain))
        d2 = tf.reshape(d1, [self.batch_size, 4, 4, 256])
        # Skip connections: add the encoder features at matching resolutions.
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 8, 8, 256], name='gen_deconv2'),
                                     scope='gen_bn2', reuse=reuse, isTrain=self.isTrain)) + conv3
        print('d2_shape', d2.get_shape())
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 16, 16, 128], name='gen_deconv3'),
                                     scope='gen_bn3', reuse=reuse, isTrain=self.isTrain)) + conv2
        print('d3_shape', d3.get_shape())
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 32, 32, 64], name='gen_deconv4'),
                                     scope='gen_bn4', reuse=reuse, isTrain=self.isTrain)) + conv1
        print('d4_shape', d4.get_shape())
        d5 = tf.nn.relu(batch_normal(de_conv(d4, output_shape=[self.batch_size, 64, 64, 64], name='gen_deconv5'),
                                     scope='gen_bn5', reuse=reuse, isTrain=self.isTrain))
        print('d5_shape', d5.get_shape())
        d6 = conv2d(d5, output_dim=3, d_h=1, d_w=1, name='gen_conv6')
        print('d6_shape', d6.get_shape())
        return tf.nn.tanh(d6)
def encode_decode_2(self, x, reuse=False):
    with tf.variable_scope("encode_decode_2") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # for x_{1}
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               name='e_d1', k_h=3, k_w=3), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               name='e_d2', k_w=3, k_h=3), scope='e_in5'))
        x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde1
def generate(self, z_var, reuse=False):
    # z_var has shape (batch_size / 2) x 128, e.g. 32 x 128.
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=4 * 4 * 256, scope='gen_fully1'),
                                     scope='gen_bn1', reuse=reuse))
        d2 = tf.reshape(d1, [int(self.batch_size / 2), 4, 4, 256])
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[int(self.batch_size / 2), 8, 8, 256], name='gen_deconv2'),
                                     scope='gen_bn2', reuse=reuse))
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[int(self.batch_size / 2), 16, 16, 128], name='gen_deconv3'),
                                     scope='gen_bn3', reuse=reuse))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[int(self.batch_size / 2), 32, 32, 64], name='gen_deconv4'),
                                     scope='gen_bn4', reuse=reuse))
        d5 = tf.nn.relu(batch_normal(de_conv(d4, output_shape=[int(self.batch_size / 2), 64, 64, 32], name='gen_deconv5'),
                                     scope='gen_bn5', reuse=reuse))
        d6 = de_conv(d5, output_shape=[int(self.batch_size / 2), 64, 64, 3], name='gen_deconv6', d_h=1, d_w=1)
        return tf.nn.tanh(d6)
def generate(self, z_var, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=8 * 8 * 256, scope='gen_fully1'),
                                     scope='gen_bn1', reuse=reuse))
        d2 = tf.reshape(d1, [self.batch_size, 8, 8, 256])
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 16, 16, 256], name='gen_deconv2'),
                                     scope='gen_bn2', reuse=reuse))
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 32, 32, 128], name='gen_deconv3'),
                                     scope='gen_bn3', reuse=reuse))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 64, 64, 32], name='gen_deconv4'),
                                     scope='gen_bn4', reuse=reuse))
        d5 = de_conv(d4, output_shape=[self.batch_size, 64, 64, 3], name='gen_deconv5', d_h=1, d_w=1)
        return tf.nn.tanh(d5)
def gern_net(self, z, y):
    with tf.variable_scope('generator') as scope:
        yb = tf.reshape(y, shape=[self.batch_size, 1, 1, self.y_dim])
        z = tf.concat([z, y], 1)
        # Separate row/column sizes support non-square outputs.
        # c1, c2 = self.output_size / 4, self.output_size / 2
        c1_row, c2_row = int(self.output_size_row / 4), int(self.output_size_row / 2)
        c1_col, c2_col = int(self.output_size_col / 4), int(self.output_size_col / 2)
        d1 = tf.nn.relu(batch_normal(fully_connect(z, output_size=1024, scope='gen_fully'), scope='gen_bn1'))
        d1 = tf.concat([d1, y], 1)
        d2 = tf.nn.relu(batch_normal(fully_connect(d1, output_size=c1_row * c1_col * 2 * 64, scope='gen_fully2'),
                                     scope='gen_bn2'))
        d2 = tf.reshape(d2, [self.batch_size, c1_row, c1_col, 64 * 2])
        d2 = conv_cond_concat(d2, yb)
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, c2_row, c2_col, 128],
                                             name='gen_deconv1'), scope='gen_bn3'))
        d3 = conv_cond_concat(d3, yb)
        d4 = de_conv(d3, output_shape=[self.batch_size, self.output_size_row, self.output_size_col, self.channel],
                     name='gen_deconv2')
        return tf.nn.sigmoid(d4)
def generate(self, z_var, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        # d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=64*2*44100, scope='gen_fully1'), scope='gen_bn1', reuse=reuse))
        d2 = tf.reshape(z_var, [self.batch_size, 860, 128, 64])
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 1720, 256, 64],
                                             name='gen_deconv2', d_h=2), scope='gen_bn2', reuse=reuse))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 3440, 512, 64],
                                             name='gen_deconv3'), scope='gen_bn3', reuse=reuse))
        d5 = tf.nn.relu(batch_normal(de_conv(d4, output_shape=[self.batch_size, 6880, 1024, 1],
                                             name='gen_deconv4'), scope='gen_bn4', reuse=reuse))
        d6 = de_conv(d5, output_shape=[self.batch_size, 6880, 1024, 1], name='gen_deconv5', d_h=1, d_w=1)
        return tf.nn.relu(d6), d2, d3, d4
def encode_decode_1(self, x, reuse=False):
    with tf.variable_scope("encode_decode_1") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = lrelu(instance_norm(conv2d(x, output_dim=64, k_w=5, k_h=5, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = lrelu(instance_norm(conv2d(conv1, output_dim=128, name='e_c2'), scope='e_in2'))
        conv3 = lrelu(instance_norm(conv2d(conv2, output_dim=256, name='e_c3'), scope='e_in3'))
        # for x_{1}
        de_conv1 = lrelu(instance_norm(de_conv(conv3, output_shape=[self.batch_size, 64, 64, 128],
                                               name='e_d1', k_h=3, k_w=3), scope='e_in4'))
        de_conv2 = lrelu(instance_norm(de_conv(de_conv1, output_shape=[self.batch_size, 128, 128, 64],
                                               name='e_d2', k_w=3, k_h=3), scope='e_in5'))
        x_tilde1 = conv2d(de_conv2, output_dim=3, d_h=1, d_w=1, name='e_c4')
        return x_tilde1
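# instance_norm, used throughout the encoder-decoder networks here, is
# assumed to be the standard per-sample, per-channel normalisation with a
# learned scale and offset. A minimal sketch under that assumption:
def instance_norm(x, scope='instance_norm'):
    with tf.variable_scope(scope):
        depth = x.get_shape()[-1]
        scale = tf.get_variable('scale', [depth],
                                initializer=tf.random_normal_initializer(1.0, 0.02))
        offset = tf.get_variable('offset', [depth],
                                 initializer=tf.constant_initializer(0.0))
        # Normalise over the spatial dimensions only (per sample, per channel).
        mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
        return scale * (x - mean) * tf.rsqrt(variance + 1e-5) + offset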
def generate(self, z_var, y, weights, biases):
    # First layer: concatenate z_var and y, then lift to a 1x1 feature map.
    z_var = tf.concat([z_var, y], 1)
    z_var = tf.reshape(z_var, shape=[z_var.shape[0], 1, 1, z_var.shape[1]])
    print('z_var', z_var.shape)
    d1 = tf.nn.relu(batch_normal(de_conv(z_var, weights['wc1'], biases['bc1'],
                                         out_shape=[self.batch_size, 2, 2, 512],
                                         s=[1, 2, 2, 1], padding_='SAME'), scope='gen_bn1'))
    print('d1', d1.shape)
    d2 = tf.nn.relu(batch_normal(de_conv(d1, weights['wc2'], biases['bc2'],
                                         out_shape=[self.batch_size, 4, 4, 256]), scope='gen_bn2'))
    d3 = tf.nn.relu(batch_normal(de_conv(d2, weights['wc3'], biases['bc3'],
                                         out_shape=[self.batch_size, 7, 7, 128]), scope='gen_bn3'))
    d4 = tf.nn.relu(batch_normal(de_conv(d3, weights['wc4'], biases['bc4'],
                                         out_shape=[self.batch_size, 14, 14, 64]), scope='gen_bn4'))
    d5 = de_conv(d4, weights['wc5'], biases['bc5'], out_shape=[self.batch_size, 28, 28, self.channel])
    print('generator is done!!!')
    return tf.nn.sigmoid(d5)
def generate(self, z_var, reuse=False):
    with tf.variable_scope('generator') as scope:
        if reuse:
            scope.reuse_variables()
        # d1 = tf.nn.relu(batch_normal(fully_connect(z_var, output_size=16*2*11025, scope='gen_fully1'), scope='gen_bn1', reuse=reuse))
        d2 = tf.reshape(z_var, [self.batch_size, 2, 110250, 6])
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 2, 220500, 4],
                                             name='gen_deconv2', d_w=2), scope='gen_bn2', reuse=reuse))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 2, 441000, 2],
                                             name='gen_deconv3', d_w=2), scope='gen_bn3', reuse=reuse))
        d5 = tf.nn.relu(batch_normal(de_conv(d4, output_shape=[self.batch_size, 2, 882000, 1],
                                             name='gen_deconv4', d_w=2), scope='gen_bn4', reuse=reuse))
        # d6 = tf.nn.relu(batch_normal(de_conv(d5, output_shape=[self.batch_size, 2, 176400, 2], name='gen_deconv6'), scope='gen_bn6', reuse=reuse))
        # d7 = tf.nn.relu(batch_normal(de_conv(d6, output_shape=[self.batch_size, 2, 882000, 1], name='gen_deconv7'), scope='gen_bn7', reuse=reuse))
        d8 = de_conv(d5, output_shape=[self.batch_size, 2, 882000, 1], name='gen_deconv5', d_h=1, d_w=1)
        return tf.nn.relu(d8), d2, d3, d4  # , d5, d6
def Style_generate(self, z_var, reuse=False):
    with tf.variable_scope('sty_generator') as scope:
        # As in Style_generate above, apply the reuse flag to the whole scope
        # so a second call does not raise a variable-exists error.
        if reuse:
            scope.reuse_variables()
        d2 = tf.reshape(z_var, [self.batch_size, 2, 110250, 6])
        d2 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 2, 220500, 4],
                                             name='gen_deconv2', d_w=2), scope='gen_bn2'))
        d3 = tf.nn.relu(batch_normal(de_conv(d2, output_shape=[self.batch_size, 2, 441000, 2],
                                             name='gen_deconv3', d_w=2), scope='gen_bn3'))
        d4 = tf.nn.relu(batch_normal(de_conv(d3, output_shape=[self.batch_size, 2, 882000, 1],
                                             name='gen_deconv4', d_w=2), scope='gen_bn4', reuse=reuse))
        # d6 = tf.nn.relu(batch_normal(de_conv(d4, output_shape=[self.batch_size, 2, 176400, 2], name='gen_deconv6'), scope='gen_bn6', reuse=reuse))
        # d7 = tf.nn.relu(batch_normal(de_conv(d6, output_shape=[self.batch_size, 2, 882000, 1], name='gen_deconv7'), scope='gen_bn7', reuse=reuse))
        d8 = de_conv(d4, output_shape=[self.batch_size, 2, 882000, 1], name='gen_deconv5', d_h=1, d_w=1)
        return tf.nn.relu(d8)
def gern_net(batch_size, z, y, output_size):
    z = tf.concat([z, y], 1)  # tf.concat takes values first, axis second in TF >= 1.0
    # Integer division: these sizes feed tf.reshape, which needs ints.
    c1, c2 = output_size // 4, output_size // 2
    d1 = fully_connect(z, weights2['wd'], biases2['bd'])
    d1 = batch_normal(d1, scope="genbn1")
    d1 = tf.nn.relu(d1)
    d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
    d2 = batch_normal(d2, scope="genbn2")
    d2 = tf.nn.relu(d2)
    d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
    d3 = de_conv(d2, weights2['wc2'], biases2['bc2'], out_shape=[batch_size, c2, c2, 128])
    d3 = batch_normal(d3, scope="genbn3")
    d3 = tf.nn.relu(d3)
    d4 = de_conv(d3, weights2['wc3'], biases2['bc3'], out_shape=[batch_size, output_size, output_size, 1])
    return tf.nn.sigmoid(d4)
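# batch_normal, called in every generator above, is assumed to wrap
# tf.contrib.layers.batch_norm (TF 1.x) so that BN statistics can be shared
# between the training net and the sampling net via the reuse flag. A sketch:
def batch_normal(x, scope='bn', reuse=False, is_training=True):
    return tf.contrib.layers.batch_norm(x, decay=0.9, epsilon=1e-5, scale=True,
                                        is_training=is_training, reuse=reuse,
                                        updates_collections=None, scope=scope)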
def encode_decode(self, input_x, img_mask, guided_fp_left, guided_fp_right, use_sp=False, reuse=False):
    with tf.variable_scope("ed") as scope:
        if reuse:
            scope.reuse_variables()
        # Encode
        x = tf.concat([input_x, img_mask], axis=3)
        for i in range(6):
            c_dim = np.minimum(16 * np.power(2, i), 256)
            if i == 0:
                x = tf.nn.relu(instance_norm(conv2d(x, output_dim=c_dim, k_w=7, k_h=7, d_w=1, d_h=1,
                                                    use_sp=use_sp, name='e_c{}'.format(i)),
                                             scope='e_in_{}'.format(i)))
            else:
                x = tf.nn.relu(instance_norm(conv2d(x, output_dim=c_dim, k_w=4, k_h=4, d_w=2, d_h=2,
                                                    use_sp=use_sp, name='e_c{}'.format(i)),
                                             scope='e_in_{}'.format(i)))
        bottleneck = tf.reshape(x, shape=[self.batch_size, -1])
        bottleneck = fully_connect(bottleneck, output_size=256, use_sp=use_sp, scope='e_ful1')
        bottleneck = tf.concat([bottleneck, guided_fp_left, guided_fp_right], axis=1)
        de_x = tf.nn.relu(fully_connect(bottleneck, output_size=256 * 8 * 8, use_sp=use_sp, scope='d_ful1'))
        de_x = tf.reshape(de_x, shape=[self.batch_size, 8, 8, 256])
        # de_x = tf.tile(de_x, (1, 8, 8, 1), name='tile')
        # Decode. Integer division keeps the channel counts as ints under Python 3.
        for i in range(5):
            c_dim = np.maximum(256 // np.power(2, i), 16)
            output_dim = 16 * np.power(2, i)
            print(de_x)  # Python 3 print
            de_x = tf.nn.relu(instance_norm(de_conv(de_x,
                output_shape=[self.batch_size, output_dim, output_dim, c_dim],
                use_sp=use_sp, name='g_deconv_{}'.format(i)), scope='g_in_{}'.format(i)))
            # de_x = tf.concat([de_x, input_x], axis=3)
        x_tilde1 = conv2d(de_x, output_dim=3, k_w=7, k_h=7, d_h=1, d_w=1, use_sp=use_sp, name='g_conv1')
        return tf.nn.tanh(x_tilde1)
def generator(self, input_x, img_mask, guided_fp_left, guided_fp_right, use_sp=False, reuse=False):
    with tf.variable_scope("generator") as scope:
        if reuse:
            scope.reuse_variables()
        x = tf.concat([input_x, img_mask], axis=3)
        u_fp_list = []
        # Encoder: channels grow 16 -> 256, halving resolution after the first layer.
        for i in range(6):
            c_dim = np.minimum(16 * np.power(2, i), 256)
            if i == 0:
                x = tf.nn.relu(instance_norm(conv2d(x, output_dim=c_dim, k_w=7, k_h=7, d_w=1, d_h=1,
                                                    use_sp=use_sp, name='conv_{}'.format(i)),
                                             scope='conv_IN_{}'.format(i)))
            else:
                x = tf.nn.relu(instance_norm(conv2d(x, output_dim=c_dim, k_w=4, k_h=4, d_w=2, d_h=2,
                                                    use_sp=use_sp, name='conv_{}'.format(i)),
                                             scope='conv_IN_{}'.format(i)))
            if i < 5:
                u_fp_list.append(x)  # keep features for U-Net style skips
        bottleneck = tf.reshape(x, shape=[self.batch_size, -1])
        bottleneck = fully_connect(bottleneck, output_size=256, use_sp=use_sp, scope='FC1')
        bottleneck = tf.concat([bottleneck, guided_fp_left, guided_fp_right], axis=1)
        de_x = tf.nn.relu(fully_connect(bottleneck, output_size=256 * 8 * 8, use_sp=use_sp, scope='FC2'))
        de_x = tf.reshape(de_x, shape=[self.batch_size, 8, 8, 256])
        # Decoder. Integer division keeps the channel counts as ints under Python 3.
        for i in range(5):
            c_dim = np.maximum(256 // np.power(2, i), 16)
            output_dim = 16 * np.power(2, i)
            de_x = tf.nn.relu(instance_norm(de_conv(de_x,
                output_shape=[self.batch_size, output_dim, output_dim, c_dim],
                use_sp=use_sp, name='deconv_{}'.format(i)), scope='deconv_IN_{}'.format(i)))
            if i < 4:
                de_x = tf.concat([de_x, u_fp_list[len(u_fp_list) - (i + 1)]], axis=3)
        recon_img1 = conv2d(de_x, output_dim=3, k_w=7, k_h=7, d_h=1, d_w=1, use_sp=use_sp, name='output_conv')
        return tf.nn.tanh(recon_img1)
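# The Residual block used by the encode_decode networks in this file is
# assumed to be the plain two-conv block with instance norm and an identity
# skip. A minimal sketch under that assumption:
def Residual(x, residual_name='resi'):
    with tf.variable_scope(residual_name):
        dim = x.get_shape().as_list()[-1]
        h = tf.nn.relu(instance_norm(conv2d(x, output_dim=dim, k_w=3, k_h=3,
                                            d_w=1, d_h=1, name='c1'), scope='in1'))
        h = instance_norm(conv2d(h, output_dim=dim, k_w=3, k_h=3,
                                 d_w=1, d_h=1, name='c2'), scope='in2')
        return x + h  # identity shortcut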
def encode_decode(self, x, reuse=False):
    with tf.variable_scope("encode_decode") as scope:
        if reuse:
            scope.reuse_variables()
        conv1 = tf.nn.relu(instance_norm(conv2d(x, output_dim=64, k_w=7, k_h=7, d_w=1, d_h=1, name='e_c1'), scope='e_in1'))
        conv2 = tf.nn.relu(instance_norm(conv2d(conv1, output_dim=128, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c2'), scope='e_in2'))
        conv3 = tf.nn.relu(instance_norm(conv2d(conv2, output_dim=256, k_w=4, k_h=4, d_w=2, d_h=2, name='e_c3'), scope='e_in3'))
        r1 = Residual(conv3, residual_name='re_1')
        r2 = Residual(r1, residual_name='re_2')
        r3 = Residual(r2, residual_name='re_3')
        r4 = Residual(r3, residual_name='re_4')
        r5 = Residual(r4, residual_name='re_5')
        r6 = Residual(r5, residual_name='re_6')
        # Integer division keeps the deconv output shape integral under Python 3.
        g_deconv1 = tf.nn.relu(instance_norm(de_conv(r6,
            output_shape=[self.batch_size, self.output_size // 2, self.output_size // 2, 128],
            name='gen_deconv1'), scope="gen_in"))
        # Eight parallel decoding heads share g_deconv1; heads 1, 2, 7 and 8
        # finish with a single 7x7 conv, heads 3-6 add an extra 3x3 conv stage.
        # for 1
        g_deconv_1_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='g_deconv_1_1'), scope='gen_in_1_1'))
        g_deconv_1_1_x = tf.concat([g_deconv_1_1, x], axis=3)
        x_tilde1 = conv2d(g_deconv_1_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_1_2')
        # for 2
        g_deconv_2_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='g_deconv_2_1'), scope='gen_in_2_1'))
        g_deconv_2_1_x = tf.concat([g_deconv_2_1, x], axis=3)
        x_tilde2 = conv2d(g_deconv_2_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_2_2')
        # for 3
        g_deconv_3_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='gen_deconv3_1'), scope='gen_in_3_1'))
        g_deconv_3_1_x = tf.concat([g_deconv_3_1, x], axis=3)
        g_deconv_3_2 = conv2d(g_deconv_3_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1, name='gen_conv_3_2')
        x_tilde3 = conv2d(g_deconv_3_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_3_3')
        # for 4
        g_deconv_4_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='gen_deconv4_1'), scope='gen_in_4_1'))
        g_deconv_4_1_x = tf.concat([g_deconv_4_1, x], axis=3)
        g_deconv_4_2 = conv2d(g_deconv_4_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1, name='gen_conv_4_2')
        x_tilde4 = conv2d(g_deconv_4_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_4_3')
        # for 5
        g_deconv_5_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='gen_deconv5_1'), scope='gen_in_5_1'))
        g_deconv_5_1_x = tf.concat([g_deconv_5_1, x], axis=3)
        g_deconv_5_2 = conv2d(g_deconv_5_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1, name='gen_conv_5_2')
        x_tilde5 = conv2d(g_deconv_5_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_5_3')
        # for 6
        g_deconv_6_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='gen_deconv6_1'), scope='gen_in_6_1'))
        g_deconv_6_1_x = tf.concat([g_deconv_6_1, x], axis=3)
        g_deconv_6_2 = conv2d(g_deconv_6_1_x, output_dim=32, k_w=3, k_h=3, d_h=1, d_w=1, name='gen_conv_6_2')
        x_tilde6 = conv2d(g_deconv_6_2, output_dim=3, k_h=3, k_w=3, d_h=1, d_w=1, name='gen_conv_6_3')
        # for 7
        g_deconv_7_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='g_deconv_7_1'), scope='gen_in_7_1'))
        g_deconv_7_1_x = tf.concat([g_deconv_7_1, x], axis=3)
        x_tilde7 = conv2d(g_deconv_7_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_7_2')
        # for 8
        g_deconv_8_1 = tf.nn.relu(instance_norm(de_conv(g_deconv1,
            output_shape=[self.batch_size, self.output_size, self.output_size, 64],
            name='g_deconv_8_1'), scope='gen_in_8_1'))
        g_deconv_8_1_x = tf.concat([g_deconv_8_1, x], axis=3)
        x_tilde8 = conv2d(g_deconv_8_1_x, output_dim=self.channel, k_w=7, k_h=7, d_h=1, d_w=1, name='gen_conv_8_2')
        return tf.nn.tanh(x_tilde1), tf.nn.tanh(x_tilde2), tf.nn.tanh(x_tilde3), \
               tf.nn.tanh(x_tilde4), tf.nn.tanh(x_tilde5), tf.nn.tanh(x_tilde6), \
               tf.nn.tanh(x_tilde7), tf.nn.tanh(x_tilde8)