def discriminator(images, layers, spectral, activation, reuse, normalization=None):
    """Plain DCGAN-style discriminator.

    Applies `layers` stride-2 convolutions (channel widths taken from a fixed
    schedule), flattens, and finishes with two dense layers.

    Args:
        images: Input image tensor, NHWC.
        layers: Number of downsampling conv blocks (indexes the width schedule).
        spectral: Whether helper layers apply spectral normalization.
        activation: Callable applied after each (normalized) layer.
        reuse: Passed to tf.variable_scope for weight sharing.
        normalization: Optional callable `f(inputs, training)`; skipped when None.

    Returns:
        (output, logits): sigmoid probability and raw adversarial logits.
    """
    widths = [32, 64, 128, 256, 512, 1024]
    if display:
        print('Discriminator Information.')
        print('Channels: ', widths[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print()
    x = images
    with tf.variable_scope('discriminator', reuse=reuse):
        # 'SAME' padding with stride 2 halves H and W at every block.
        for idx in range(layers):
            # Downsampling convolution.
            x = convolutional(inputs=x, output_channels=widths[idx], filter_size=5,
                              stride=2, padding='SAME', conv_type='convolutional',
                              spectral=spectral, scope=idx + 1)
            if normalization is not None:
                x = normalization(inputs=x, training=True)
            x = activation(x)
        # Collapse spatial dimensions for the dense head.
        x = tf.layers.flatten(inputs=x)
        # Hidden dense layer at the widest channel count.
        x = dense(inputs=x, out_dim=widths[-1], spectral=spectral, scope=1)
        if normalization is not None:
            x = normalization(inputs=x, training=True)
        x = activation(x)
        # Single adversarial logit.
        logits = dense(inputs=x, out_dim=1, spectral=spectral, scope=2)
        output = sigmoid(logits)
    print()
    return output, logits
def discriminator_encoder(enconding, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, name='dis_encoding'):
    """Discriminator over a latent encoding (vector input, no convolutions).

    Each stage is a residual dense block followed by a dense projection down to
    the next width in the schedule; a final dense layer produces one logit.

    NOTE(review): the parameter name 'enconding' (sic) is preserved because
    callers may pass it by keyword.

    Returns:
        (output, logits_net): sigmoid probability and raw logits.
    """
    widths = [150, 100, 50, 25, 12]
    # widths = [200, 150, 100, 50, 24]
    if display:
        print('DISCRIMINATOR-ENCODER INFORMATION:')
        print('Channels: ', widths[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print()
    h = enconding
    with tf.variable_scope(name, reuse=reuse):
        for layer in range(layers):
            # Residual dense block at the current width.
            h = residual_block_dense(inputs=h, scope=layer, is_training=True,
                                     normalization=normalization, spectral=spectral,
                                     activation=activation, init=init,
                                     regularizer=regularizer, display=True)
            # Dense projection down to the next width.
            h = dense(inputs=h, out_dim=widths[layer], spectral=spectral,
                      init=init, regularizer=regularizer, scope=layer)
            if normalization is not None:
                h = normalization(inputs=h, training=True)
            h = activation(h)
        # Final logit. Relies on the leaked loop index for its scope id,
        # so this function requires layers >= 1.
        logits_net = dense(inputs=h, out_dim=1, spectral=spectral,
                           init=init, regularizer=regularizer, scope=layer + 1)
        output = sigmoid(logits_net)
    print()
    return output, logits_net
def discriminator_resnet_contrastive(images, z_dim, layers, spectral, activation, is_train, reuse, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', name='contrastive_discriminator'):
    """ResNet discriminator with SimCLR-style projection head.

    Stacks `layers` of (residual block -> optional attention -> downsampling
    conv), then exposes several intermediate representations:
      conv_space — pooled+flattened conv features,
      h          — hidden representation after the first dense layer,
      z          — 128-d projection head output.
    The adversarial logit is computed from z inside an 'unused' sub-scope
    (the original author marked it legacy).

    NOTE(review): the `z_dim` parameter is not used — the projection output
    is hard-coded to 128 units ('z_rep'); confirm whether z_dim was intended.

    Returns:
        (output, logits_net, conv_space, h, z)
    """
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('CONTRASTIVE DISCRIMINATOR INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    with tf.variable_scope(name, reuse=reuse):
        for layer in range(layers):
            # ResBlock.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
            # Attention layer, inserted once the spatial size matches `attention`.
            # NOTE(review): scope=layers is a constant — if the condition fired at
            # two resolutions this would collide; presumably it fires at most once.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Down.
            net = convolutional(inputs=net, output_channels=channels[layer], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            if normalization is not None:
                net = normalization(inputs=net, training=is_train)
            net = activation(net)
        # Feature space extraction: 2x2 max-pool then flatten.
        conv_space = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=[2, 2])
        conv_space = tf.layers.flatten(inputs=conv_space)
        # Flatten.
        net = tf.layers.flatten(inputs=net)
        # H Representation Layer.
        net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=1)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        h = activation(net)
        net = dense(inputs=h, out_dim=int((channels[-1]) / 2), spectral=spectral, init=init, regularizer=regularizer, scope=2)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        net = activation(net)
        # Z Representation Layer (projection head).
        z = dense(inputs=net, out_dim=128, spectral=spectral, init=init, regularizer=regularizer, scope='z_rep')
        # NOTE(review): this activation result is never consumed — z above is
        # taken pre-activation and nothing reads `net` afterwards. Dead statement.
        net = activation(net)
        # Unused part, legacy.
        with tf.variable_scope('unused', reuse=reuse):
            logits_net = dense(inputs=z, out_dim=1, spectral=spectral, init=init, regularizer=regularizer, scope='Adversarial')
        output = sigmoid(logits_net)
    print()
    return output, logits_net, conv_space, h, z
def discriminator_resnet_mask_class_tran(images, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', label=None, name='discriminator'):
    """ResNet discriminator whose logit is a label-masked class projection.

    Stacks `layers` of (residual block -> optional attention -> downsampling
    conv), flattens, and produces per-class logits; the scalar adversarial
    logit is the class logit selected by the one-hot `label`
    (sum over class_logits * label).

    Args:
        images: Input image tensor, NHWC.
        layers: Number of downsampling stages.
        spectral: Whether helper layers apply spectral normalization.
        activation: Activation callable.
        reuse: Variable-scope reuse flag.
        init, regularizer: Forwarded to the weight-creating helpers.
        normalization: Optional callable `f(inputs, training)`.
        attention: Spatial size at which to insert an attention block, or None.
        down: Conv type used for downsampling.
        label: One-hot label tensor of shape (batch, label_dim). Required —
            despite the keyword default, this architecture cannot run without it.
        name: Variable scope name.

    Returns:
        (output, logits, feature_space)

    Raises:
        ValueError: if `label` is None (previously an opaque AttributeError).
    """
    # Fail fast with a clear message instead of AttributeError on label.shape.
    if label is None:
        raise ValueError('discriminator_resnet_mask_class_tran requires a one-hot `label` tensor; got None.')
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('DISCRIMINATOR INFORMATION:', name)
        print('Total Channels: ', channels)
        print('Chosen Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    with tf.variable_scope(name, reuse=reuse):
        # Discriminator with conditional projection.
        # Only the label dimensionality is needed (batch size was unused).
        label_dim = label.shape.as_list()[-1]
        for layer in range(layers):
            # ResBlock.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=True, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Down.
            net = convolutional(inputs=net, output_channels=channels[layer], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            if normalization is not None:
                net = normalization(inputs=net, training=True)
            net = activation(net)
        # Flatten.
        net = tf.layers.flatten(inputs=net)
        # Dense Feature Space.
        net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=1)
        feature_space = activation(net)
        # Dense.
        net = dense(inputs=feature_space, out_dim=channels[-2], spectral=spectral, init=init, regularizer=regularizer, scope=2)
        net = activation(net)
        # Dense Classes.
        class_logits = dense(inputs=net, out_dim=label_dim, spectral=spectral, init=init, regularizer=regularizer, scope=3)
        # One-hot label input masks out all but the labelled class's logit.
        logits = class_logits * label
        logits = tf.reduce_sum(logits, axis=-1)
        output = sigmoid(logits)
    print()
    return output, logits, feature_space
def discriminator_resnet_class2(images, layers, spectral, activation, reuse, l_dim, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', name='discriminator'):
    """ResNet discriminator with an auxiliary classification head.

    Produces both a scalar adversarial logit and `l_dim` class logits from a
    shared convolutional trunk.

    NOTE(review): `layers` is incremented internally (the '# New' experiment),
    so one more downsampling stage is built than the caller asked for; with the
    6-entry channel list this raises IndexError if the incoming layers >= 6.

    Returns:
        (output, logits, feature_space, class_logits)
    """
    net = images
    # channels = [32, 64, 128, 256, 512, 1024, 2048]
    channels = [32, 64, 128, 256, 512, 1024]
    # New
    layers = layers + 1
    if display:
        print('DISCRIMINATOR INFORMATION:', name)
        print('Total Channels: ', channels)
        print('Chosen Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    with tf.variable_scope(name, reuse=reuse):
        for layer in range(layers):
            # ResBlock.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=True, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Down.
            net = convolutional(inputs=net, output_channels=channels[layer], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            if normalization is not None:
                net = normalization(inputs=net, training=True)
            net = activation(net)
        # New
        # Flatten.
        net = tf.layers.flatten(inputs=net)
        # Dense shared feature space (pre-activation tensor is returned).
        feature_space = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=2)
        net = activation(feature_space)
        # Dense adversarial logit.
        logits = dense(inputs=net, out_dim=1, spectral=spectral, init=init, regularizer=regularizer, scope=3)
        output = sigmoid(logits)
        # Separate branch for classification, also fed from the shared features.
        net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=4)
        net = activation(net)
        # Dense Classes.
        class_logits = dense(inputs=net, out_dim=l_dim, spectral=spectral, init=init, regularizer=regularizer, scope=5)
    print()
    return output, logits, feature_space, class_logits
def encoder_resnet_incr(images, z_dim, layers, spectral, activation, reuse, is_train, init='xavier', regularizer=None, normalization=None, attention=None, stack_layers=False, concat_img=False, down='downscale', name='encoder'):
    """ResNet encoder with optional per-resolution outputs and image re-injection.

    A stem conv is followed by `layers` of (residual block -> optional
    attention -> downsampling conv). With `concat_img`, a bilinearly resized
    copy of the input image is concatenated onto the features at matching
    resolutions. With `stack_layers`, intermediate feature maps are collected
    and returned alongside the latent.

    Returns:
        w_latent, or (w_latent, out_stack_layers) when stack_layers is True.
    """
    out_stack_layers = list()
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('ENCODER INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    _, height, width, _ = images.shape.as_list()
    with tf.variable_scope(name, reuse=reuse):
        # Stem: stride-1 conv to the first channel width.
        layer = 0
        net = convolutional(inputs=net, output_channels=channels[layer], filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope=layer)
        for layer in range(layers):
            # ResBlock.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
            # Re-inject the input image at the current resolution (skipped at
            # full resolution, layer 0).
            if concat_img and layer != 0:
                down_sample = tf.image.resize_images(images=images, size=(int(height / (2**layer)), int(width / (2**layer))), method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
                print('down_sample', down_sample.shape)
                print('net', net.shape)
                net = tf.concat([net, down_sample], axis=-1)
                print('net', net.shape)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            if stack_layers:
                print('Adding layer output to stack layer output.')
                out_stack_layers.append(net)
            # Down. The last stage jumps to channels[-2] instead of continuing
            # the schedule ("incr" variant quirk).
            layer_channel = layer + 1
            if layer == layers - 1:
                layer_channel = -2
            net = convolutional(inputs=net, output_channels=channels[layer_channel], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer + 1)
            if normalization is not None:
                net = normalization(inputs=net, training=is_train)
            net = activation(net)
        # NOTE(review): the following two blocks rely on the leaked loop index
        # (layer == layers-1); they add the final post-downsampling features /
        # image concat. Reconstructed as post-loop — confirm against history.
        if stack_layers:
            print('Adding layer output to stack layer output.')
            out_stack_layers.append(net)
        if concat_img and layer != 0:
            down_sample = tf.image.resize_images(images=images, size=(int(height / (2**(layer + 1))), int(width / (2**(layer + 1)))), method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
            print('down_sample', down_sample.shape)
            print('net', net.shape)
            net = tf.concat([net, down_sample], axis=-1)
            print('net', net.shape)
        # Flatten.
        net = tf.layers.flatten(inputs=net)
        # shape = int(np.product(net.shape.as_list()[1:3])/2)
        # # # Dense.
        # net = dense(inputs=net, out_dim=shape, spectral=spectral, init=init, regularizer=regularizer, scope=1)
        # if normalization is not None: net = normalization(inputs=net, training=True)
        # net = activation(net)
        # Dense.
        net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=2)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        net = activation(net)
        # Dense latent projection.
        w_latent = dense(inputs=net, out_dim=z_dim, spectral=spectral, init=init, regularizer=regularizer, scope=3)
    print()
    if stack_layers:
        return w_latent, out_stack_layers
    return w_latent
def discriminator_resnet(images, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', label=None, feature_space_flag=False, name='discriminator', realness=1):
    """ResNet discriminator, optionally conditioned via label projection.

    Trunk: `layers` of (residual block -> optional attention -> downsampling
    conv). When `label` is given, an extra dense layer maps features to the
    label dimension and the inner product <features, label> is added to the
    adversarial logit (projection discriminator).

    NOTE(review): `realness` is accepted but never used; kept for interface
    compatibility.

    Returns:
        (output, logits), plus `feature_space` first-pooled features when
        feature_space_flag is True.
    """
    widths = [32, 64, 128, 256, 512, 1024]
    if display:
        print('DISCRIMINATOR INFORMATION:')
        print('Channels: ', widths[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    x = images
    with tf.variable_scope(name, reuse=reuse):
        for idx in range(layers):
            # Residual block at the current resolution.
            x = residual_block(inputs=x, filter_size=3, stride=1, padding='SAME',
                               scope=idx, is_training=True, normalization=normalization,
                               use_bias=True, spectral=spectral, init=init,
                               regularizer=regularizer, activation=activation)
            # Self-attention once the spatial size matches `attention`.
            if attention is not None and x.shape.as_list()[1] == attention:
                x = attention_block(x, spectral=True, init=init,
                                    regularizer=regularizer, scope=layers)
            # Strided downsampling.
            x = convolutional(inputs=x, output_channels=widths[idx], filter_size=4,
                              stride=2, padding='SAME', conv_type=down,
                              spectral=spectral, init=init, regularizer=regularizer,
                              scope=idx)
            if normalization is not None:
                x = normalization(inputs=x, training=True)
            x = activation(x)
        # Pooled + flattened conv features, exposed on request.
        feature_space = tf.layers.max_pooling2d(inputs=x, pool_size=[2, 2], strides=[2, 2])
        feature_space = tf.layers.flatten(inputs=feature_space)
        # Dense head.
        x = tf.layers.flatten(inputs=x)
        x = dense(inputs=x, out_dim=widths[-1], spectral=spectral, init=init,
                  regularizer=regularizer, scope=1)
        if normalization is not None:
            x = normalization(inputs=x, training=True)
        x = activation(x)
        if label is not None:
            # Map features to the label dimension for the projection term.
            print(label.shape)
            x = dense(inputs=x, out_dim=label.shape[-1], spectral=spectral,
                      init=init, regularizer=regularizer, scope=3)
            if normalization is not None:
                x = normalization(inputs=x, training=True)
            x = activation(x)
        # Unconditional logit.
        logits_net = dense(inputs=x, out_dim=1, spectral=spectral, init=init,
                           regularizer=regularizer, scope=2)
        if label is not None:
            # Projection-discriminator conditioning: add <features, label>.
            inner_prod = tf.reduce_sum(x * label, axis=-1, keepdims=True)
            logits = logits_net + inner_prod
        else:
            logits = logits_net
        output = sigmoid(logits)
    print()
    if feature_space_flag:
        return output, logits, feature_space
    return output, logits
def encoder_resnet(images, z_dim, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', name='encoder'):
    """ResNet encoder mapping images to a z_dim latent vector.

    NOTE(review): the loop runs `layers + 1` times but indexes the 6-entry
    channel list with `channels[layer]` — this raises IndexError once
    layers >= 6, and the banner prints only channels[:layers]. Presumably
    always called with layers <= 5; confirm before reuse.

    Returns:
        w_latent: (batch, z_dim) latent tensor.
    """
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('ENCODER INFORMATION:', name)
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    with tf.variable_scope(name, reuse=reuse):
        for layer in range(layers + 1):
            # ResBlock.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=True, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Down.
            net = convolutional(inputs=net, output_channels=channels[layer], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            # NOTE(review): unlike the sibling encoders, this normalization call
            # receives scope=layer — the normalization helper here must accept a
            # scope argument.
            if normalization is not None:
                net = normalization(inputs=net, training=True, scope=layer)
            net = activation(net)
        # Flatten.
        net = tf.layers.flatten(inputs=net)
        # Dense.
        net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=1)
        if normalization is not None:
            net = normalization(inputs=net, training=True)
        net = activation(net)
        # Dense latent projection.
        w_latent = dense(inputs=net, out_dim=z_dim, spectral=spectral, init=init, regularizer=regularizer, scope=2)
    print()
    return w_latent
def encoder_resnet_instnorm(images, latent_dim, layers, spectral, activation, reuse, is_train, init='xavier', regularizer=None, normalization=instance_norm, attention=None, down='downscale', name='encoder'):
    """ResNet encoder that accumulates a style vector from every stage.

    Each stage extracts a `latent_dim` style vector (via `style_extract` or the
    residual block's style output) and the vectors are summed tensor-wise into
    a single `styles` accumulator, which is the return value.

    NOTE(review): `height`/`width` are unpacked but never used here; likely a
    leftover from the concat_img variant above.

    Returns:
        styles: (batch, latent_dim) accumulated style tensor.
    """
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('ENCODER INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention: ', attention)
        print()
    _, height, width, _ = images.shape.as_list()
    with tf.variable_scope(name, reuse=reuse):
        # Stem conv at full resolution.
        layer = 0
        net = convolutional(inputs=net, output_channels=channels[layer], filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope=layer)
        # Style extraction from the stem features initializes the accumulator.
        styles = style_extract(inputs=net, latent_dim=latent_dim, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        net = activation(net)
        for layer in range(layers):
            # ResBlock, also returning this stage's style vector.
            net, style = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, style_extract_f=True, latent_dim=latent_dim, is_training=is_train, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
            styles += style
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Down. The last stage jumps to channels[-2] instead of continuing
            # the schedule (matches encoder_resnet_incr).
            layer_channel = layer + 1
            if layer == layers - 1:
                layer_channel = -2
            net = convolutional(inputs=net, output_channels=channels[layer_channel], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer + 1)
            # Style extraction from the downsampled features.
            style = style_extract(inputs=net, latent_dim=latent_dim, spectral=spectral, init=init, regularizer=regularizer, scope=layer + 1)
            styles += style
            if normalization is not None:
                net = normalization(inputs=net, training=is_train)
            net = activation(net)
        # Flatten.
        net = tf.layers.flatten(inputs=net)
        # Dense.
        net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=2)
        net = activation(net)
        # Final dense style contribution.
        style = dense(inputs=net, out_dim=latent_dim, spectral=spectral, init=init, regularizer=regularizer, scope=3)
        styles += style
    print()
    return styles
def generator_resnet(z_input, image_channels, layers, spectral, activation, reuse, is_train, normalization, init='xavier', noise_input_f=False, regularizer=None, cond_label=None, attention=None, up='upscale', bigGAN=False, name='generator'):
    """Conditional ResNet generator (BigGAN-style hierarchical z optional).

    With bigGAN=True the latent is split along the last axis: chunk 0 feeds
    the first dense layer and chunks 1, 2, 3+layer condition each
    normalization via `c=label`. Otherwise the full z conditions every stage.
    `cond_label`, when given, either replaces the label ('training_gate' in
    its name) or is concatenated onto it. Starts from a 7x7x256 map and
    upsamples `layers` times.

    NOTE(review): in the bigGAN branch, when z_dim is exactly divisible by
    (2 + layers) the remainder is 0 and z_sets becomes
    [block_dims] * (blocks + 1), whose sum exceeds z_dim — tf.split requires
    the sizes to sum to the axis length, so this path should fail at graph
    build. Presumably only ever exercised with a non-zero remainder; confirm.

    Returns:
        output: sigmoid image tensor, NHWC with `image_channels` channels.
    """
    channels = [32, 64, 128, 256, 512, 1024]
    reversed_channel = list(reversed(channels[:layers]))
    # Question here: combine z dims for upscale and the conv after, or make them independent.
    if bigGAN:
        z_dim = z_input.shape.as_list()[-1]
        blocks = 2 + layers
        block_dims = math.floor(z_dim / blocks)
        remainder = z_dim - block_dims * blocks
        if remainder == 0:
            z_sets = [block_dims] * (blocks + 1)
        else:
            z_sets = [block_dims] * blocks + [remainder]
        z_splits = tf.split(z_input, num_or_size_splits=z_sets, axis=-1)
    if display:
        print('GENERATOR INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention H/W: ', attention)
        print()
    with tf.variable_scope(name, reuse=reuse):
        # Conditioning label for the first dense stage.
        if bigGAN:
            z_input_block = z_splits[0]
            label = z_splits[1]
        else:
            z_input_block = z_input
            label = z_input
        if cond_label is not None:
            if 'training_gate' in cond_label.name:
                label = cond_label
            else:
                label = tf.concat([cond_label, label], axis=-1)
        # Dense.
        net = dense(inputs=z_input_block, out_dim=1024, spectral=spectral, init=init, regularizer=regularizer, scope=1)
        net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_1')
        net = activation(net)
        # Conditioning label for the second dense stage.
        if bigGAN:
            label = z_splits[2]
        else:
            label = z_input
        if cond_label is not None:
            if 'training_gate' in cond_label.name:
                label = cond_label
            else:
                label = tf.concat([cond_label, label], axis=-1)
        # Dense.
        net = dense(inputs=net, out_dim=256 * 7 * 7, spectral=spectral, init=init, regularizer=regularizer, scope=2)
        net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_2')
        net = activation(net)
        # Reshape to the initial 7x7 feature map.
        net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')
        for layer in range(layers):
            # Per-stage conditioning label.
            if bigGAN:
                label = z_splits[3 + layer]
            else:
                label = z_input
            if cond_label is not None:
                if 'training_gate' in cond_label.name:
                    label = cond_label
                else:
                    label = tf.concat([cond_label, label], axis=-1)
            # ResBlock.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, spectral=spectral, init=init, regularizer=regularizer, noise_input_f=noise_input_f, activation=activation, normalization=normalization, cond_label=label)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Up.
            net = convolutional(inputs=net, output_channels=reversed_channel[layer], filter_size=2, stride=2, padding='SAME', conv_type=up, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            if noise_input_f:
                net = noise_input(inputs=net, scope=layer)
            net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope=layer)
            net = activation(net)
        # Final RGB(-like) projection and sigmoid to image range.
        logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope='logits')
        output = sigmoid(logits)
    print()
    return output
def generator_msg(w_input, image_channels, layers, spectral, activation, reuse, is_train, normalization, init='xavier', noise_input_f=False, regularizer=None, cond_label=None, attention=None, up='upscale'):
    """Multi-scale-gradient (MSG) generator conditioned on per-layer w vectors.

    `w_input` is indexed as w_input[:, :, k] to pick the conditioning vector
    for stage k. At each resolution >= 64 an auxiliary image is emitted via a
    1x1 conv and collected in `msg_layers` (MSG-GAN style).

    NOTE(review): after the loop, `logits = residual_block(...)` is immediately
    overwritten by `logits = convolutional(inputs=net, ...)` — the
    'resnet_logits' block's output never reaches `output` (it still creates
    variables). Looks like the second call was meant to consume `logits`, not
    `net`; left as-is to preserve the trained graph.

    NOTE(review): `cond_label` is accepted but unused here.

    Returns:
        (output, msg_layers): final sigmoid image and list of per-scale images.
    """
    channels = [32, 64, 128, 256, 512, 1024, 2048]
    # channels = [32, 64, 128, 256, 512, 1024]
    i_pixel = 4
    msg_layers = list()
    reversed_channel = list(reversed(channels[:layers]))
    if display:
        print('GENERATOR INFORMATION:')
        print('Total Channels: ', channels)
        print('Chosen Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention H/W: ', attention)
        print()
    with tf.variable_scope('generator', reuse=reuse):
        w_input_block = w_input[:, :, 0]
        # Dense.
        label = w_input[:, :, 0]
        # net = dense(inputs=w_input_block, out_dim=2048, spectral=spectral, init=init, regularizer=regularizer, scope=1)
        net = dense(inputs=w_input_block, out_dim=1024, spectral=spectral, init=init, regularizer=regularizer, scope=1)
        net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_1')
        net = activation(net)
        # Dense.
        # net = dense(inputs=net, out_dim=512*i_pixel*i_pixel, spectral=spectral, init=init, regularizer=regularizer, scope=2)
        net = dense(inputs=net, out_dim=256 * i_pixel * i_pixel, spectral=spectral, init=init, regularizer=regularizer, scope=2)
        net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_2')
        net = activation(net)
        # Reshape to the initial i_pixel x i_pixel feature map.
        # net = tf.reshape(tensor=net, shape=(-1, i_pixel, i_pixel, 512), name='reshape')
        net = tf.reshape(tensor=net, shape=(-1, i_pixel, i_pixel, 256), name='reshape')
        # Loop for convolutional layers.
        for layer in range(layers):
            # ResBlock conditioned on this stage's w vector.
            label = w_input[:, :, layer]
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, spectral=spectral, init=init, regularizer=regularizer, noise_input_f=noise_input_f, activation=activation, normalization=normalization, cond_label=label)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # MSG layer: auxiliary image output at this scale (only >= 64 px).
            if net.shape.as_list()[1] >= 64:
                msg_i = convolutional(inputs=net, output_channels=image_channels, filter_size=1, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope='msg_%s' % layer)
                msg_layers.append(msg_i)
            # Convolutional Up, conditioned on the next stage's w vector.
            label = w_input[:, :, layer + 1]
            net = convolutional(inputs=net, output_channels=reversed_channel[layer], filter_size=2, stride=2, padding='SAME', conv_type=up, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            if noise_input_f:
                net = noise_input(inputs=net, scope=layer)
            net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope=layer)
            net = activation(net)
        net = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope='conv_logits')
        # Dead in the output path — overwritten just below (see docstring note).
        logits = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope='resnet_logits', is_training=is_train, spectral=spectral, init=init, regularizer=regularizer, noise_input_f=noise_input_f, activation=activation, normalization=normalization, cond_label=label)
        logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope='logits')
        output = sigmoid(logits)
    print()
    return output, msg_layers
def generator_resnet_style_modulation(w_input, image_channels, layers, spectral, activation, reuse, is_train, normalization, init='xavier', noise_input_f=False, regularizer=None, cond_label=None, attention=None, up='upscale', name='generator'):
    """StyleGAN-flavoured generator: modulated residual blocks per w vector.

    `w_input[:, :, k]` selects the style vector for stage k. Starts from a
    7x7x256 map, then `layers` of (modulated ResBlock -> optional attention ->
    upsampling conv + optional noise). No normalization is applied after the
    up-convs (unlike generator_resnet); modulation happens inside
    residual_block_mod.

    NOTE(review): `cond_label` is accepted but unused. The in-loop
    `label = w_input[:, :, layer + 1]` is overwritten at the top of the next
    iteration and the up-conv path does not consume it — it only feeds the
    commented-out lines below; effectively dead.

    Returns:
        output: sigmoid image tensor, NHWC with `image_channels` channels.
    """
    channels = [32, 64, 128, 256, 512, 1024]
    reversed_channel = list(reversed(channels[:layers]))
    i_pixel = 7
    if display:
        print('GENERATOR INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention H/W: ', attention)
        print()
    with tf.variable_scope(name, reuse=reuse):
        w_input_block = w_input[:, :, 0]
        label = w_input[:, :, 0]
        # Dense.
        net = dense(inputs=w_input_block, out_dim=1024, spectral=spectral, init=init, regularizer=regularizer, scope=1)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_1')
        net = activation(net)
        # Dense.
        net = dense(inputs=net, out_dim=256 * i_pixel * i_pixel, spectral=spectral, init=init, regularizer=regularizer, scope=2)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_2')
        net = activation(net)
        # Reshape to the initial i_pixel x i_pixel feature map.
        net = tf.reshape(tensor=net, shape=(-1, i_pixel, i_pixel, 256), name='reshape')
        for layer in range(layers):
            label = w_input[:, :, layer]
            # Modulated ResBlock driven by this stage's style vector.
            net = residual_block_mod(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, spectral=spectral, init=init, regularizer=regularizer, noise_input_f=noise_input_f, activation=activation, normalization=normalization, cond_label=label)
            # Attention layer, inserted once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
            # Up.
            label = w_input[:, :, layer + 1]
            net = convolutional(inputs=net, output_channels=reversed_channel[layer], filter_size=2, stride=2, padding='SAME', conv_type=up, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
            if noise_input_f:
                net = noise_input(inputs=net, scope=layer)
            net = activation(net)
        # net = residual_block_mod(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer+1, is_training=is_train, spectral=spectral, init=init, regularizer=regularizer, noise_input_f=noise_input_f, activation=activation, normalization=normalization, cond_label=label)
        # logits = conv_mod(inputs=net, label=label, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', scope=layer+1, init=init, regularizer=regularizer, spectral=spectral)
        # Final image projection and sigmoid to image range.
        logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope='logits')
        output = sigmoid(logits)
    print()
    return output