Esempio n. 1
0
def discriminator(images, layers, spectral, activation, reuse, normalization=None):
	"""Plain convolutional discriminator.

	Down-samples `images` through `layers` stride-2 convolutions, flattens,
	and maps through two dense layers to a single logit.

	Returns:
		(output, logits): sigmoid probability and raw logit tensors.
	"""
	channels = [32, 64, 128, 256, 512, 1024]
	x = images

	if display:
		print('Discriminator Information.')
		print('Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)
		print()

	with tf.variable_scope('discriminator', reuse=reuse):
		# With 'SAME' padding, each stride-2 conv halves the spatial size.
		for idx, out_channels in enumerate(channels[:layers], start=1):
			x = convolutional(inputs=x, output_channels=out_channels, filter_size=5, stride=2, padding='SAME', conv_type='convolutional', spectral=spectral, scope=idx)
			if normalization is not None:
				x = normalization(inputs=x, training=True)
			x = activation(x)

		# Collapse spatial dims before the dense head.
		x = tf.layers.flatten(inputs=x)

		# Hidden dense layer.
		x = dense(inputs=x, out_dim=channels[-1], spectral=spectral, scope=1)
		if normalization is not None:
			x = normalization(inputs=x, training=True)
		x = activation(x)

		# Single-logit output head.
		logits = dense(inputs=x, out_dim=1, spectral=spectral, scope=2)
		output = sigmoid(logits)

	print()
	return output, logits
Esempio n. 2
0
def mapping_network(z_input,
                    z_dim,
                    layers,
                    spectral,
                    activation,
                    normalization,
                    init='xavier',
                    regularizer=None):
    """Map latent z to an intermediate latent w.

    Applies `layers` normalize-then-dense steps, each projecting back to
    `z_dim`. Note: `activation` is accepted for signature consistency but is
    not applied anywhere in this block.

    Returns:
        The final tensor w (same width as `z_dim`).
    """
    if display:
        print('MAPPING NETWORK INFORMATION:')
        print('Layers:      ', layers)
        print('Normalization: ', normalization)
        print('Activation:    ', activation)
        print()

    with tf.variable_scope('mapping_network'):
        net = z_input
        for idx in range(layers):
            # Normalize first, then project; both scopes reuse the loop index.
            normalized = normalization(inputs=net,
                                       training=True,
                                       c=None,
                                       spectral=None,
                                       scope=idx)
            net = dense(inputs=normalized,
                        out_dim=z_dim,
                        spectral=spectral,
                        init=init,
                        regularizer=regularizer,
                        scope=idx)

    return net
Esempio n. 3
0
def discriminator_encoder(enconding, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, name='dis_encoding'):
	"""Dense discriminator over an encoding vector.

	Runs `layers` stages of residual-dense block + down-projection, then a
	final single-logit dense head.

	Note: the first parameter keeps its original (misspelled) name
	`enconding` so keyword callers are not broken.

	Returns:
		(output, logits_net): sigmoid probability and raw logit.
	"""
	net = enconding
	channels = [150, 100, 50, 25, 12]
	if display:
		print('DISCRIMINATOR-ENCODER INFORMATION:')
		print('Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)
		print()

	with tf.variable_scope(name, reuse=reuse):
		for layer in range(layers):

			# Residual dense block at the current width.
			net = residual_block_dense(inputs=net, scope=layer, is_training=True, normalization=normalization, spectral=spectral, activation=activation, init=init, regularizer=regularizer, display=True)

			# Dense projection down to this stage's width.
			net = dense(inputs=net, out_dim=channels[layer], spectral=spectral, init=init, regularizer=regularizer, scope=layer)
			if normalization is not None: net = normalization(inputs=net, training=True)
			net = activation(net)

		# Final logit. `scope=layers` equals the old `layer + 1` after the
		# loop for layers >= 1, and avoids a NameError on the leftover loop
		# variable when layers == 0.
		logits_net = dense(inputs=net, out_dim=1, spectral=spectral, init=init, regularizer=regularizer, scope=layers)
		output = sigmoid(logits_net)

	print()
	return output, logits_net
Esempio n. 4
0
def discriminator_resnet_incr(images, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', label=None, label_t='cat', infoGAN=False, c_dim=None, name='discriminator'):
	"""ResNet discriminator with a full-resolution stem conv and increasing channels.

	Args:
		images: input image tensor, NHWC.
		layers: number of resblock + down-sampling stages.
		spectral: whether conv/dense layers use spectral normalization.
		activation: activation callable applied after each (normalized) conv.
		reuse: variable-scope reuse flag.
		init: weight initializer name.
		regularizer: optional weight regularizer.
		normalization: optional normalization callable; skipped when None.
		attention: feature-map height at which to insert a self-attention block.
		down: conv_type used for the down-sampling convolutions.
		label, label_t, infoGAN, c_dim: accepted but unused in this block.
		name: variable scope name.

	Returns:
		(output, logits): sigmoid probability and raw single logit.
	"""
	net = images
	channels = [32, 64, 128, 256, 512, 1024]
	if display:
		print('DISCRIMINATOR INFORMATION:')
		print('Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)
		print('Attention:  ', attention)
		print()

	with tf.variable_scope(name, reuse=reuse):

		# Stem convolution at full resolution (scope 0).
		layer = 0
		net = convolutional(inputs=net, output_channels=channels[layer], filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope=layer)

		for layer in range(layers):
			# ResBlock.
			net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=True, normalization=normalization, use_bias=True, spectral=spectral, init=init, regularizer=regularizer, activation=activation)
			
			# Attention layer. Scope is the constant `layers`, matching the
			# other discriminators in this file.
			if attention is not None and net.shape.as_list()[1]==attention:
				net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
			
			# Down. The last stage jumps to channels[-2] instead of the next
			# entry in the channel list.
			layer_channel = layer+1
			if layer == layers - 1:
				layer_channel = -2
			net = convolutional(inputs=net, output_channels=channels[layer_channel], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer+1)
			if normalization is not None: net = normalization(inputs=net, training=True)
			net = activation(net)
			
		# Flatten.
		net = tf.layers.flatten(inputs=net)

		# Dense hidden head.
		net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=2)
		if normalization is not None: net = normalization(inputs=net, training=True)
		net = activation(net)

		# Dense single-logit head.
		logits = dense(inputs=net, out_dim=1, spectral=spectral, init=init, regularizer=regularizer, scope=3)
		output = sigmoid(logits)

	print()
	return output, logits
Esempio n. 5
0
def generator_resnet_cond(z_input, c_input, image_channels, layers, spectral, activation, reuse, is_train, normalization, up='upscale'):
	"""Conditional ResNet generator.

	Concatenates `z_input` with the condition `c_input`, projects through two
	dense layers, reshapes to 7x7x256, and up-samples `layers` times with
	residual blocks; the condition also feeds the per-stage conditional
	normalization.

	Returns:
		output: sigmoid-activated generated image tensor.
	"""
	# Fixed: the channel list was assigned twice with identical values.
	channels = [32, 64, 128, 256, 512, 1024]
	reversed_channel = list(reversed(channels[:layers]))

	if display:
		print('Generator Information.')
		print('Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)

	with tf.variable_scope('generator', reuse=reuse):
		# Doesn't work ReLU, tried.
		# Z Input Shape = (None, 100)
		# C Input Shape = (None, 20)
		net = tf.concat([z_input, c_input], axis=1)

		# Dense stem.
		net = dense(inputs=net, out_dim=1024, spectral=spectral, scope=1)
		net = normalization(inputs=net, training=is_train)
		net = activation(net)

		# Dense projection to the initial feature-map size.
		net = dense(inputs=net, out_dim=256*7*7, spectral=spectral, scope=2)
		net = normalization(inputs=net, training=is_train)
		net = activation(net)

		# Reshape to a 7x7 spatial map.
		net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

		for layer in range(layers):
			# ResBlock conditioned on c_input.
			net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, spectral=spectral,
								 activation=activation, normalization=normalization, c_input=c_input)

			# Up-sampling conv + conditional normalization.
			net = convolutional(inputs=net, output_channels=reversed_channel[layer], filter_size=2, stride=2, padding='SAME', conv_type=up, spectral=spectral, scope=layer)
			net = normalization(inputs=net, training=is_train, c=c_input, spectral=spectral)
			net = activation(net)

		logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, scope='logits')
		output = sigmoid(logits)

	print()
	return output
Esempio n. 6
0
def generator(z_input, image_channels, layers, spectral, activation, reuse, is_train, normalization):
	"""Transpose-convolution generator.

	Projects `z_input` through two dense layers, reshapes to a 7x7x256 map,
	then up-samples `layers` times (transpose conv plus a refinement conv on
	all but the last stage), ending in a final transpose conv to
	`image_channels`.

	Returns:
		output: sigmoid-activated generated image tensor.
	"""
	channels = [32, 64, 128, 256, 512, 1024]
	reversed_channel = list(reversed(channels[:layers]))

	if display:
		print('Generator Information.')
		print('Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)

	with tf.variable_scope('generator', reuse=reuse):
		# Doesn't work ReLU, tried.

		# Dense stem.
		net = dense(inputs=z_input, out_dim=1024, spectral=spectral, scope=1)
		net = normalization(inputs=net, training=is_train)
		net = activation(net)

		# Dense projection to the initial feature-map size.
		net = dense(inputs=net, out_dim=256*7*7, spectral=spectral, scope=2)
		net = normalization(inputs=net, training=is_train)
		net = activation(net)

		# Reshape to a 7x7 spatial map.
		net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

		for layer in range(layers):
			# Up-sampling transpose convolution.
			net = convolutional(inputs=net, output_channels=reversed_channel[layer], filter_size=2, stride=2, padding='SAME', conv_type='transpose', spectral=spectral, scope=2*(layer+1)-1)
			net = normalization(inputs=net, training=is_train)
			net = activation(net)

			# Refinement conv for every stage but the last (it would index
			# reversed_channel out of range). `layers - 1` replaces the
			# equivalent but unidiomatic `len(range(layers)) - 1`.
			if layer != layers - 1:
				net = convolutional(inputs=net, output_channels=reversed_channel[layer+1], filter_size=5, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, scope=2*(layer+1))
				net = normalization(inputs=net, training=is_train)
				net = activation(net)

		# Final up-sampling conv to image channels.
		logits = convolutional(inputs=net, output_channels=image_channels, filter_size=2, stride=2, padding='SAME', conv_type='transpose', spectral=spectral, scope='logits')
		output = sigmoid(logits)

	print()
	return output
Esempio n. 7
0
def discriminator_resnet_mask_class(images, layers, spectral, activation, reuse, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', label=None, name='discriminator'):
	"""ResNet discriminator whose logit is the class log-probability selected by `label`.

	Args:
		images: input image tensor, NHWC.
		layers: number of resblock + down-sampling stages.
		spectral: whether conv/dense layers use spectral normalization.
		activation: activation callable.
		reuse: variable-scope reuse flag.
		init, regularizer: weight initializer / optional regularizer.
		normalization: optional normalization callable; skipped when None.
		attention: feature-map height at which to insert self-attention.
		down: conv_type used for down-sampling convolutions.
		label: class-label tensor. Despite the None default it is required —
			its shape is read immediately below, so passing None raises.
		name: variable scope name.

	Returns:
		(output, logits, feature_space)
	"""
	net = images
	# channels = [32, 64, 128, 256, 512, 1024, 2048]
	channels = [32, 64, 128, 256, 512, 1024]

	if display:
		print('DISCRIMINATOR INFORMATION:', name)
		print('Total  Channels: ', channels)
		print('Chosen Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)
		print('Attention:  ', attention)
		print()

	with tf.variable_scope(name, reuse=reuse):

		# Discriminator with conditional projection.
		# NOTE(review): batch_size and embedding_size are computed but never
		# used in this block.
		batch_size, label_dim = label.shape.as_list()
		embedding_size = channels[-1]

		for layer in range(layers):
			# ResBlock.
			net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=True, normalization=normalization, use_bias=True, 
								 spectral=spectral, init=init, regularizer=regularizer, activation=activation)
			# Attention layer. 
			if attention is not None and net.shape.as_list()[1]==attention:
				net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
			
			# Down.
			net = convolutional(inputs=net, output_channels=channels[layer], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
			if normalization is not None: net = normalization(inputs=net, training=True)
			net = activation(net)

		# Feature space extraction
		feature_space = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=1)
		feature_space = tf.layers.flatten(inputs=feature_space)
			
		# Flatten.
		net = tf.layers.flatten(inputs=net)

		net = dense(inputs=net, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=1)
		net = activation(net)

		# Dense Classes
		class_logits = dense(inputs=net, out_dim=label_dim, spectral=spectral, init=init, regularizer=regularizer, scope=3)
		class_logits = tf.nn.log_softmax(class_logits)
		# One encoding for label input
		# Project log-probabilities onto the label (presumably one-hot — see
		# the original comment above; confirm against callers): the selected
		# class log-probability becomes the adversarial logit.
		logits = class_logits*label
		logits = tf.reduce_sum(logits, axis=-1)
		output = sigmoid(logits)

		
	print()
	return output, logits, feature_space
Esempio n. 8
0
def discriminator_resnet_class2(images, layers, spectral, activation, reuse, l_dim, init='xavier', regularizer=None, normalization=None, attention=None, down='downscale', name='discriminator'):
	"""ResNet discriminator with an auxiliary class head.

	Runs `layers + 1` resblock + down-sampling stages, then splits into a
	real/fake single-logit head and an `l_dim`-way class-logit head that
	share a dense feature embedding.

	Returns:
		(output, logits, feature_space, class_logits)
	"""
	channels = [32, 64, 128, 256, 512, 1024]
	h = images

	# One extra down-sampling stage compared to the base configuration.
	layers = layers + 1

	if display:
		print('DISCRIMINATOR INFORMATION:', name)
		print('Total  Channels: ', channels)
		print('Chosen Channels: ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation: ', activation)
		print('Attention:  ', attention)
		print()

	with tf.variable_scope(name, reuse=reuse):

		for step in range(layers):
			# Residual block at constant resolution.
			h = residual_block(inputs=h, filter_size=3, stride=1, padding='SAME', scope=step, is_training=True, normalization=normalization, use_bias=True,
							   spectral=spectral, init=init, regularizer=regularizer, activation=activation)

			# Self-attention once the map reaches the requested height.
			if attention is not None and h.shape.as_list()[1] == attention:
				h = attention_block(h, spectral=True, init=init, regularizer=regularizer, scope=layers)

			# Strided down-sampling convolution.
			h = convolutional(inputs=h, output_channels=channels[step], filter_size=4, stride=2, padding='SAME', conv_type=down, spectral=spectral, init=init, regularizer=regularizer, scope=step)
			if normalization is not None:
				h = normalization(inputs=h, training=True)
			h = activation(h)

		# Collapse spatial dims.
		h = tf.layers.flatten(inputs=h)

		# Shared feature embedding.
		feature_space = dense(inputs=h, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=2)
		h = activation(feature_space)

		# Real/fake head.
		logits = dense(inputs=h, out_dim=1, spectral=spectral, init=init, regularizer=regularizer, scope=3)
		output = sigmoid(logits)

		# Class head on a further dense projection.
		h = dense(inputs=h, out_dim=channels[-1], spectral=spectral, init=init, regularizer=regularizer, scope=4)
		h = activation(h)
		class_logits = dense(inputs=h, out_dim=l_dim, spectral=spectral, init=init, regularizer=regularizer, scope=5)

	print()
	return output, logits, feature_space, class_logits
Esempio n. 9
0
def generator_decoder_resnet(z_input, image_channels, layers, spectral, activation, reuse, is_train, normalization, attention=None, up='upscale'):
	"""ResNet decoder mapping a latent vector to image mean and log-variance.

	Projects `z_input` through two dense layers, reshapes to 7x7x256, and
	up-samples `layers` times with residual blocks. Two separate output convs
	produce the (sigmoid) mean image and the raw log-variance map.

	Returns:
		(mean_xi_z, logs2_xi_z)
	"""
	channels = [32, 64, 128, 256, 512, 1024]
	upsample_channels = list(reversed(channels[:layers]))

	if display:
		print('GENERATOR-DECODER INFORMATION:')
		print('Channels:      ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation:    ', activation)
		print('Attention H/W: ', attention)
		print()

	with tf.variable_scope('generator_decoder', reuse=reuse):
		# Dense stem.
		net = dense(inputs=z_input, out_dim=1024, spectral=spectral, scope=1)
		net = normalization(inputs=net, training=is_train)
		net = activation(net)

		# Project to the initial 7x7x256 feature map.
		net = dense(inputs=net, out_dim=256*7*7, spectral=spectral, scope=2)
		net = normalization(inputs=net, training=is_train)
		net = activation(net)
		net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

		for stage in range(layers):
			# Residual block at the current resolution.
			net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=stage, is_training=is_train, spectral=spectral, activation=activation, normalization=normalization)

			# Self-attention once the map reaches the requested height.
			if attention is not None and net.shape.as_list()[1] == attention:
				net = attention_block(net, spectral=True, scope=layers)

			# Up-sampling convolution.
			net = convolutional(inputs=net, output_channels=upsample_channels[stage], filter_size=2, stride=2, padding='SAME', conv_type=up, spectral=spectral, scope=stage)
			net = normalization(inputs=net, training=is_train)
			net = activation(net)

		# Output heads: mean image (through sigmoid) and log-variance.
		logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, scope='mean_xi_z')
		mean_xi_z = sigmoid(logits)
		logs2_xi_z = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, scope='logs2_xi_z')

	print()
	return mean_xi_z, logs2_xi_z
Esempio n. 10
0
def generator_resnet_cond(z_input,
                          c_input,
                          image_channels,
                          layers,
                          spectral,
                          activation,
                          reuse,
                          is_train,
                          normalization,
                          up='upscale'):
    """Conditional ResNet generator.

    Concatenates `z_input` with the condition `c_input`, projects through two
    dense layers, reshapes to 7x7x256, and up-samples `layers` times with
    residual blocks; the condition also feeds the per-stage conditional
    normalization.

    Returns:
        output: sigmoid-activated generated image tensor.
    """
    # Fixed: the channel list was assigned twice with identical values.
    channels = [32, 64, 128, 256, 512, 1024]
    reversed_channel = list(reversed(channels[:layers]))

    if display:
        print('Generator Information.')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)

    with tf.variable_scope('generator', reuse=reuse):
        # Doesn't work ReLU, tried.
        # Z Input Shape = (None, 100)
        # C Input Shape = (None, 20)
        net = tf.concat([z_input, c_input], axis=1)

        # Dense stem.
        net = dense(inputs=net, out_dim=1024, spectral=spectral, scope=1)
        net = normalization(inputs=net, training=is_train)
        net = activation(net)

        # Dense projection to the initial feature-map size.
        net = dense(inputs=net,
                    out_dim=256 * 7 * 7,
                    spectral=spectral,
                    scope=2)
        net = normalization(inputs=net, training=is_train)
        net = activation(net)

        # Reshape to a 7x7 spatial map.
        net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

        for layer in range(layers):
            # ResBlock conditioned on c_input.
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=is_train,
                                 spectral=spectral,
                                 activation=activation,
                                 normalization=normalization,
                                 c_input=c_input)

            # Up-sampling conv + conditional normalization.
            net = convolutional(inputs=net,
                                output_channels=reversed_channel[layer],
                                filter_size=2,
                                stride=2,
                                padding='SAME',
                                conv_type=up,
                                spectral=spectral,
                                scope=layer)
            net = normalization(inputs=net,
                                training=is_train,
                                c=c_input,
                                spectral=spectral)
            net = activation(net)

        logits = convolutional(inputs=net,
                               output_channels=image_channels,
                               filter_size=3,
                               stride=1,
                               padding='SAME',
                               conv_type='convolutional',
                               spectral=spectral,
                               scope='logits')
        output = sigmoid(logits)

    print()
    return output
Esempio n. 11
0
def encoder_resnet(images,
                   z_dim,
                   layers,
                   spectral,
                   activation,
                   reuse,
                   init='xavier',
                   regularizer=None,
                   normalization=None,
                   attention=None,
                   down='downscale',
                   name='encoder'):
    """ResNet encoder mapping images to a `z_dim` latent vector.

    Args:
        images: input image tensor, NHWC.
        z_dim: width of the output latent.
        layers: base stage count; the loop below runs `layers + 1` stages.
        spectral: whether conv/dense layers use spectral normalization.
        activation: activation callable.
        reuse: variable-scope reuse flag.
        init, regularizer: weight initializer / optional regularizer.
        normalization: optional normalization callable; skipped when None.
        attention: feature-map height at which to insert self-attention.
        down: conv_type used for down-sampling convolutions.
        name: variable scope name.

    Returns:
        w_latent: dense projection of the encoded features to `z_dim`.
    """
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('ENCODER INFORMATION:', name)
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention:  ', attention)
        print()

    with tf.variable_scope(name, reuse=reuse):

        # NOTE(review): this loop runs layers + 1 times and indexes
        # channels[layer], so it requires layers < len(channels) — confirm
        # against callers.
        for layer in range(layers + 1):
            # ResBlock.
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=True,
                                 normalization=normalization,
                                 use_bias=True,
                                 spectral=spectral,
                                 init=init,
                                 regularizer=regularizer,
                                 activation=activation)

            # Attention layer (scope is the constant `layers`, matching the
            # other networks in this file).
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net,
                                      spectral=True,
                                      init=init,
                                      regularizer=regularizer,
                                      scope=layers)

            # Down.
            net = convolutional(inputs=net,
                                output_channels=channels[layer],
                                filter_size=4,
                                stride=2,
                                padding='SAME',
                                conv_type=down,
                                spectral=spectral,
                                init=init,
                                regularizer=regularizer,
                                scope=layer)
            if normalization is not None:
                net = normalization(inputs=net, training=True, scope=layer)
            net = activation(net)

        # Flatten.
        net = tf.layers.flatten(inputs=net)

        # Dense hidden head.
        net = dense(inputs=net,
                    out_dim=channels[-1],
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=1)
        if normalization is not None:
            net = normalization(inputs=net, training=True)
        net = activation(net)

        # Dense projection to the latent width.
        w_latent = dense(inputs=net,
                         out_dim=z_dim,
                         spectral=spectral,
                         init=init,
                         regularizer=regularizer,
                         scope=2)

    print()
    return w_latent
Esempio n. 12
0
def encoder_resnet_incr(images,
                        z_dim,
                        layers,
                        spectral,
                        activation,
                        reuse,
                        is_train,
                        init='xavier',
                        regularizer=None,
                        normalization=None,
                        attention=None,
                        stack_layers=False,
                        concat_img=False,
                        down='downscale',
                        name='encoder'):
    """ResNet encoder with stem conv, optional image pyramids, and layer stacking.

    Args:
        images: input image tensor, NHWC.
        z_dim: width of the output latent.
        layers: number of resblock + down-sampling stages.
        spectral: whether conv/dense layers use spectral normalization.
        activation: activation callable.
        reuse: variable-scope reuse flag.
        is_train: training flag forwarded to resblocks/normalization.
        init, regularizer: weight initializer / optional regularizer.
        normalization: optional normalization callable; skipped when None.
        attention: feature-map height at which to insert self-attention.
        stack_layers: when True, also return the per-stage feature maps.
        concat_img: when True, concatenate a bilinearly down-sampled copy of
            the input image onto the features at each stage after the first.
        down: conv_type used for down-sampling convolutions.
        name: variable scope name.

    Returns:
        w_latent, or (w_latent, out_stack_layers) when stack_layers is True.
    """
    out_stack_layers = list()
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('ENCODER INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention:  ', attention)
        print()

    _, height, width, _ = images.shape.as_list()
    with tf.variable_scope(name, reuse=reuse):

        # Stem convolution at full resolution (scope 0).
        layer = 0
        net = convolutional(inputs=net,
                            output_channels=channels[layer],
                            filter_size=3,
                            stride=1,
                            padding='SAME',
                            conv_type='convolutional',
                            spectral=spectral,
                            init=init,
                            regularizer=regularizer,
                            scope=layer)

        for layer in range(layers):
            # ResBlock.
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=is_train,
                                 normalization=normalization,
                                 use_bias=True,
                                 spectral=spectral,
                                 init=init,
                                 regularizer=regularizer,
                                 activation=activation)

            # Image pyramid: append a down-scaled copy of the raw input to the
            # current features (skipped at full resolution, layer == 0).
            if concat_img and layer != 0:
                down_sample = tf.image.resize_images(
                    images=images,
                    size=(int(height / (2**layer)), int(width / (2**layer))),
                    method=tf.image.ResizeMethod.BILINEAR,
                    align_corners=False)
                print('down_sample', down_sample.shape)
                print('net', net.shape)
                net = tf.concat([net, down_sample], axis=-1)
                print('net', net.shape)

            # Attention layer.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net,
                                      spectral=True,
                                      init=init,
                                      regularizer=regularizer,
                                      scope=layers)

            if stack_layers:
                print('Adding layer output to stack layer output.')
                out_stack_layers.append(net)

            # Down. The last stage jumps to channels[-2] instead of the next
            # entry in the channel list.
            layer_channel = layer + 1
            if layer == layers - 1:
                layer_channel = -2
            net = convolutional(inputs=net,
                                output_channels=channels[layer_channel],
                                filter_size=4,
                                stride=2,
                                padding='SAME',
                                conv_type=down,
                                spectral=spectral,
                                init=init,
                                regularizer=regularizer,
                                scope=layer + 1)
            if normalization is not None:
                net = normalization(inputs=net, training=is_train)
            net = activation(net)

        if stack_layers:
            print('Adding layer output to stack layer output.')
            out_stack_layers.append(net)

        # Final pyramid concat after the loop (`layer` here is the leftover
        # loop variable, layers - 1, so this is skipped only when layers == 1).
        if concat_img and layer != 0:
            down_sample = tf.image.resize_images(
                images=images,
                size=(int(height / (2**(layer + 1))),
                      int(width / (2**(layer + 1)))),
                method=tf.image.ResizeMethod.BILINEAR,
                align_corners=False)
            print('down_sample', down_sample.shape)
            print('net', net.shape)
            net = tf.concat([net, down_sample], axis=-1)
            print('net', net.shape)

        # Flatten.
        net = tf.layers.flatten(inputs=net)

        # shape = int(np.product(net.shape.as_list()[1:3])/2)
        # # # Dense.
        # net = dense(inputs=net, out_dim=shape, spectral=spectral, init=init, regularizer=regularizer, scope=1)
        # if normalization is not None: net = normalization(inputs=net, training=True)
        # net = activation(net)

        # Dense hidden head.
        net = dense(inputs=net,
                    out_dim=channels[-1],
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=2)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        net = activation(net)

        # Dense projection to the latent width.
        w_latent = dense(inputs=net,
                         out_dim=z_dim,
                         spectral=spectral,
                         init=init,
                         regularizer=regularizer,
                         scope=3)

    print()
    if stack_layers:
        return w_latent, out_stack_layers
    return w_latent
Esempio n. 13
0
def generator_resnet(z_input,
                     image_channels,
                     layers,
                     spectral,
                     activation,
                     reuse,
                     is_train,
                     normalization,
                     init='xavier',
                     noise_input_f=False,
                     regularizer=None,
                     cond_label=None,
                     attention=None,
                     up='upscale',
                     bigGAN=False):
    """ResNet generator with optional BigGAN-style hierarchical z injection.

    `z_input` feeds two dense stem layers, is reshaped to a 7x7 feature map,
    and is up-sampled `layers` times through residual blocks. Conditioning
    for each normalization site comes from `z_input` itself (or a per-site
    chunk of it when `bigGAN` is True), optionally combined with
    `cond_label`; a `cond_label` whose name contains 'training_gate'
    replaces the conditioning entirely.

    Returns:
        output: sigmoid-activated generated image tensor.
    """
    channels = [32, 64, 128, 256, 512, 1024]
    reversed_channel = list(reversed(channels[:layers]))

    # Question here: combine z dims for upscale and the conv after, or make them independent.
    if bigGAN:
        # z is consumed as blocks + 1 chunks: indices 0 (stem input),
        # 1 and 2 (the two dense-layer conditionings), and 3 + layer for each
        # up-sampling stage.
        z_dim = z_input.shape.as_list()[-1]
        blocks = 2 + layers
        block_dims = math.floor(z_dim / blocks)
        remainder = z_dim - block_dims * blocks
        if remainder == 0:
            # Fixed: the original emitted blocks + 1 chunks of block_dims in
            # this branch, summing to z_dim + block_dims, which makes
            # tf.split fail. Re-split so the blocks + 1 chunks sum to z_dim.
            block_dims = math.floor(z_dim / (blocks + 1))
            z_sets = [block_dims] * blocks + [z_dim - block_dims * blocks]
        else:
            z_sets = [block_dims] * blocks + [remainder]
        z_splits = tf.split(z_input, num_or_size_splits=z_sets, axis=-1)

    if display:
        print('GENERATOR INFORMATION:')
        print('Channels:      ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation:    ', activation)
        print('Attention H/W: ', attention)
        print()

    with tf.variable_scope('generator', reuse=reuse):
        # Conditioning for the first dense layer.
        if bigGAN:
            z_input_block = z_splits[0]
            label = z_splits[1]
        else:
            z_input_block = z_input
            label = z_input
        if cond_label is not None:
            if 'training_gate' in cond_label.name:
                label = cond_label
            else:
                label = tf.concat([cond_label, label], axis=-1)

        # Dense stem.
        net = dense(inputs=z_input_block,
                    out_dim=1024,
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=1)
        net = normalization(inputs=net,
                            training=is_train,
                            c=label,
                            spectral=spectral,
                            scope='dense_1')
        net = activation(net)

        # Conditioning for the second dense layer.
        if bigGAN: label = z_splits[2]
        else: label = z_input
        if cond_label is not None:
            if 'training_gate' in cond_label.name:
                label = cond_label
            else:
                label = tf.concat([cond_label, label], axis=-1)

        # Dense projection to the initial feature-map size.
        net = dense(inputs=net,
                    out_dim=256 * 7 * 7,
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=2)
        net = normalization(inputs=net,
                            training=is_train,
                            c=label,
                            spectral=spectral,
                            scope='dense_2')
        net = activation(net)

        # Reshape to a 7x7 spatial map.
        net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

        for layer in range(layers):

            # Conditioning for this up-sampling stage.
            if bigGAN: label = z_splits[3 + layer]
            else: label = z_input
            if cond_label is not None:
                if 'training_gate' in cond_label.name:
                    label = cond_label
                else:
                    label = tf.concat([cond_label, label], axis=-1)

            # ResBlock.
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=is_train,
                                 spectral=spectral,
                                 init=init,
                                 regularizer=regularizer,
                                 noise_input_f=noise_input_f,
                                 activation=activation,
                                 normalization=normalization,
                                 cond_label=label)

            # Attention layer.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net,
                                      spectral=True,
                                      init=init,
                                      regularizer=regularizer,
                                      scope=layer)

            # Up-sampling convolution, optional per-stage noise, then
            # conditional normalization.
            net = convolutional(inputs=net,
                                output_channels=reversed_channel[layer],
                                filter_size=2,
                                stride=2,
                                padding='SAME',
                                conv_type=up,
                                spectral=spectral,
                                init=init,
                                regularizer=regularizer,
                                scope=layer)
            if noise_input_f:
                net = noise_input(inputs=net, scope=layer)
            net = normalization(inputs=net,
                                training=is_train,
                                c=label,
                                spectral=spectral,
                                scope=layer)
            net = activation(net)

        logits = convolutional(inputs=net,
                               output_channels=image_channels,
                               filter_size=3,
                               stride=1,
                               padding='SAME',
                               conv_type='convolutional',
                               spectral=spectral,
                               init=init,
                               regularizer=regularizer,
                               scope='logits')
        output = sigmoid(logits)

    print()
    return output
Esempio n. 14
0
def encoder_resnet_instnorm(images,
                            latent_dim,
                            layers,
                            spectral,
                            activation,
                            reuse,
                            is_train,
                            init='xavier',
                            regularizer=None,
                            normalization=instance_norm,
                            attention=None,
                            down='downscale',
                            name='encoder'):
    """Style encoder built from residual down-sampling blocks.

    Style projections (`style_extract`) are taken at every stage — after the
    stem convolution, inside each residual block, after each down-sampling
    convolution, and from a final dense head — and summed into a single
    tensor that is returned.

    Args:
        images: Input image batch (NHWC tensor).
        latent_dim: Dimensionality of each extracted style vector.
        layers: Number of residual + down-sampling stages.
        spectral: Whether conv/dense layers use spectral normalization.
        activation: Activation applied after each (optional) normalization.
        reuse: Forwarded to `tf.variable_scope` for variable sharing.
        is_train: Training flag forwarded to the normalization layers.
        init: Weight initializer name.
        regularizer: Optional weight regularizer.
        normalization: Normalization callable (instance norm by default);
            pass None to disable.
        attention: Feature-map height/width at which to insert a
            self-attention block, or None to disable.
        down: Convolution type used for the stride-2 down-sampling convs.
        name: Variable scope name.

    Returns:
        The summed style tensor. NOTE(review): the `+=` accumulation assumes
        every `style_extract` / dense output has the same (batch, latent_dim)
        shape — confirm against `style_extract`'s implementation.
    """
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('ENCODER INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention:  ', attention)
        print()

    # height/width are unpacked but not used below — kept for parity with
    # sibling encoders in this file.
    _, height, width, _ = images.shape.as_list()
    with tf.variable_scope(name, reuse=reuse):

        # Stem: stride-1 conv before the residual stages.
        layer = 0
        net = convolutional(inputs=net,
                            output_channels=channels[layer],
                            filter_size=3,
                            stride=1,
                            padding='SAME',
                            conv_type='convolutional',
                            spectral=spectral,
                            init=init,
                            regularizer=regularizer,
                            scope=layer)
        # Style extraction from the stem features.
        styles = style_extract(inputs=net,
                               latent_dim=latent_dim,
                               spectral=spectral,
                               init=init,
                               regularizer=regularizer,
                               scope=layer)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        net = activation(net)

        for layer in range(layers):
            # ResBlock that also emits a per-stage style projection.
            net, style = residual_block(inputs=net,
                                        filter_size=3,
                                        stride=1,
                                        padding='SAME',
                                        scope=layer,
                                        style_extract_f=True,
                                        latent_dim=latent_dim,
                                        is_training=is_train,
                                        normalization=normalization,
                                        use_bias=True,
                                        spectral=spectral,
                                        init=init,
                                        regularizer=regularizer,
                                        activation=activation)
            styles += style

            # Attention layer, inserted once the spatial size matches.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net,
                                      spectral=True,
                                      init=init,
                                      regularizer=regularizer,
                                      scope=layers)

            # Down-sample (stride 2). The last stage jumps to channels[-2].
            layer_channel = layer + 1
            if layer == layers - 1:
                layer_channel = -2
            net = convolutional(inputs=net,
                                output_channels=channels[layer_channel],
                                filter_size=4,
                                stride=2,
                                padding='SAME',
                                conv_type=down,
                                spectral=spectral,
                                init=init,
                                regularizer=regularizer,
                                scope=layer + 1)
            # Style extraction from the down-sampled features.
            style = style_extract(inputs=net,
                                  latent_dim=latent_dim,
                                  spectral=spectral,
                                  init=init,
                                  regularizer=regularizer,
                                  scope=layer + 1)
            styles += style
            if normalization is not None:
                net = normalization(inputs=net, training=is_train)
            net = activation(net)

        # Flatten.
        net = tf.layers.flatten(inputs=net)

        # Dense.
        net = dense(inputs=net,
                    out_dim=channels[-1],
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=2)
        net = activation(net)

        # Final dense style head, added into the accumulated styles.
        style = dense(inputs=net,
                      out_dim=latent_dim,
                      spectral=spectral,
                      init=init,
                      regularizer=regularizer,
                      scope=3)
        styles += style

    print()
    return styles
Esempio n. 15
0
def generator_resnet_style(z_input, image_channels, layers, spectral, activation, reuse, is_train,
                           normalization, init='xavier', noise_input_f=False, regularizer=None,
                           cond_label=None, attention=None, up='upscale'):
    """ResNet generator conditioned on per-layer slices of `z_input`.

    Each stage (two dense layers, then `layers` ResBlock + up-sampling
    stages) is conditioned via the normalization layer on `z_input[:, :, k]`,
    where k advances with depth. The final 3x3 convolution produces
    `image_channels` logits squashed through a sigmoid.
    """
    channels = [32, 64, 128, 256, 512, 1024]
    reversed_channel = list(reversed(channels[:layers]))

    if display:
        print('GENERATOR INFORMATION:')
        print('Channels:      ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation:    ', activation)
        print('Attention H/W: ', attention)
        print()

    with tf.variable_scope('generator', reuse=reuse):

        # Slice 0 seeds both the dense input and its conditioning label.
        z_seed = z_input[:, :, 0]
        label = z_input[:, :, 0]

        # Dense projection -> conditioned normalization -> activation.
        net = dense(inputs=z_seed, out_dim=1024, spectral=spectral, init=init,
                    regularizer=regularizer, scope=1)
        net = normalization(inputs=net, training=is_train, c=label, spectral=spectral,
                            scope='dense_1')
        net = activation(net)

        # Second dense projection sized for a 7x7x256 spatial map.
        net = dense(inputs=net, out_dim=256 * 7 * 7, spectral=spectral, init=init,
                    regularizer=regularizer, scope=2)
        net = normalization(inputs=net, training=is_train, c=label, spectral=spectral,
                            scope='dense_2')
        net = activation(net)

        # Reshape into the starting feature map.
        net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

        for layer in range(layers):

            # ResBlock conditioned on this layer's latent slice.
            label = z_input[:, :, layer]
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME',
                                 scope=layer, is_training=is_train, spectral=spectral,
                                 init=init, regularizer=regularizer,
                                 noise_input_f=noise_input_f, activation=activation,
                                 normalization=normalization, cond_label=label)
            print('Z input:', layer)

            # Self-attention once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, init=init,
                                      regularizer=regularizer, scope=layer)

            # Up-sampling conv conditioned on the next latent slice.
            label = z_input[:, :, layer + 1]
            net = convolutional(inputs=net, output_channels=reversed_channel[layer],
                                filter_size=2, stride=2, padding='SAME', conv_type=up,
                                spectral=spectral, init=init, regularizer=regularizer,
                                scope=layer)
            if noise_input_f:
                net = noise_input(inputs=net, scope=layer)
            net = normalization(inputs=net, training=is_train, c=label, spectral=spectral,
                                scope=layer)
            net = activation(net)
            print('Z input:', layer + 1)

        # Output head.
        logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3,
                               stride=1, padding='SAME', conv_type='convolutional',
                               spectral=spectral, init=init, regularizer=regularizer,
                               scope='logits')
        output = sigmoid(logits)

    print()
    return output
Esempio n. 16
0
def encoder_resnet(images,
                   z_dim,
                   layers,
                   spectral,
                   activation,
                   reuse,
                   normalization=None,
                   is_train=None,
                   attention=None,
                   down='downscale'):
    """VAE-style encoder: ResNet down-sampling stack producing the Gaussian
    posterior parameters of z given the input images.

    Args:
        images: Input image batch (NHWC tensor).
        z_dim: Dimensionality of the latent code z.
        layers: Number of residual + down-sampling stages.
        spectral: Whether conv/dense layers use spectral normalization.
        activation: Activation applied after each (optional) normalization.
        reuse: Forwarded to `tf.variable_scope` for variable sharing.
        normalization: Optional normalization callable; None disables it.
        is_train: Training flag forwarded to the normalization layers.
        attention: Feature-map height/width at which to insert a
            self-attention block, or None to disable.
        down: Convolution type used for the stride-2 down-sampling convs.

    Returns:
        Tuple `(mean_z_xi, logs2_z_xi)`: mean and log-variance tensors of
        shape (batch, z_dim).
    """
    net = images
    # NOTE: an earlier revision assigned `channels` twice; the first list
    # ([32, ...]) was dead code and has been removed.
    channels = [64, 128, 256, 512, 1024]

    if display:
        print('ENCODER INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention:  ', attention)
        print()

    with tf.variable_scope('encoder', reuse=reuse):
        for layer in range(layers):
            # ResBlock at the current resolution.
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=is_train,
                                 normalization=normalization,
                                 use_bias=True,
                                 spectral=spectral,
                                 activation=activation)
            # Attention layer, inserted once the spatial size matches.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, scope=layers)

            # Down-sample (stride 2).
            net = convolutional(inputs=net,
                                output_channels=channels[layer],
                                filter_size=4,
                                stride=2,
                                padding='SAME',
                                conv_type=down,
                                spectral=spectral,
                                scope=layer)
            if normalization is not None:
                net = normalization(inputs=net, training=is_train)
            net = activation(net)

        # Flatten.
        net = tf.layers.flatten(inputs=net)

        # Dense.
        net = dense(inputs=net,
                    out_dim=channels[-1],
                    spectral=spectral,
                    scope=1)
        if normalization is not None:
            net = normalization(inputs=net, training=is_train)
        net = activation(net)

        # Posterior heads: mean and log-variance of z given x.
        mean_z_xi = dense(inputs=net,
                          out_dim=z_dim,
                          spectral=spectral,
                          scope='mean_z_xi')
        logs2_z_xi = dense(inputs=net,
                           out_dim=z_dim,
                           spectral=spectral,
                           scope='logs2_z_xi')

    print()
    return mean_z_xi, logs2_z_xi
Esempio n. 17
0
def discriminator_resnet(images,
                         layers,
                         spectral,
                         activation,
                         reuse,
                         init='xavier',
                         regularizer=None,
                         normalization=None,
                         attention=None,
                         down='downscale',
                         label=None,
                         label_t='cat',
                         infoGAN=False,
                         c_dim=None):
    """ResNet discriminator with optional projection conditioning and
    optional infoGAN latent-code heads.

    When `label` is given, conditioning follows the projection-discriminator
    scheme: a label embedding is inner-producted with the penultimate
    features and added to the logits. With `label_t == 'cat'` the embedding
    is a lookup on the argmax of a one-hot label; otherwise a small two-layer
    network maps the label to the embedding.

    Args:
        images: Input image batch (NHWC tensor).
        layers: Number of residual + down-sampling stages.
        spectral: Whether conv/dense layers use spectral normalization.
        activation: Activation applied after each (optional) normalization.
        reuse: Forwarded to `tf.variable_scope` for variable sharing.
        init: Weight initializer name.
        regularizer: Optional weight regularizer.
        normalization: Optional normalization callable; None disables it.
        attention: Feature-map height/width at which to insert a
            self-attention block, or None to disable.
        down: Convolution type used for the stride-2 down-sampling convs.
        label: Optional conditioning label tensor of shape (batch, label_dim).
        label_t: 'cat' for categorical embedding lookup, anything else for
            the dense-network label path.
        infoGAN: If True, also return mean/log-variance heads for the
            infoGAN latent code c.
        c_dim: Dimensionality of the infoGAN code (required when infoGAN).

    Returns:
        (output, logits), plus (mean_c_x, logs2_c_x) when `infoGAN` is True.
    """
    net = images
    channels = [32, 64, 128, 256, 512, 1024]
    if display:
        print('DISCRIMINATOR INFORMATION:')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)
        print('Attention:  ', attention)
        print()

    with tf.variable_scope('discriminator', reuse=reuse):

        # Discriminator with conditional projection.
        if label is not None:
            batch_size, label_dim = label.shape.as_list()
            embedding_size = channels[-1]
            # Categorical Embedding.
            # NOTE(review): debug print left in place intentionally to keep
            # behavior identical — consider removing.
            print(label_t)
            if label_t == 'cat':
                # emb = embedding(shape=(label_dim, embedding_size), init=init, regularizer=regularizer, power_iterations=1)
                emb = embedding(shape=(label_dim, embedding_size),
                                init=init,
                                power_iterations=1)
                # Assumes `label` is one-hot: argmax recovers the class index.
                index = tf.argmax(label, axis=-1)
                label_emb = tf.nn.embedding_lookup(emb, index)
            # Linear conditioning, using NN to produce embedding.
            else:
                inter_dim = int((label_dim + net.shape.as_list()[-1]) / 2)
                net_label = dense(inputs=label,
                                  out_dim=inter_dim,
                                  spectral=spectral,
                                  init='xavier',
                                  regularizer=None,
                                  scope='label_nn_1')
                if normalization is not None:
                    net_label = normalization(inputs=net_label, training=True)
                net_label = activation(net_label)
                label_emb = dense(inputs=net_label,
                                  out_dim=embedding_size,
                                  spectral=spectral,
                                  init='xavier',
                                  regularizer=None,
                                  scope='label_nn_2')

        for layer in range(layers):
            # ResBlock.
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=True,
                                 normalization=normalization,
                                 use_bias=True,
                                 spectral=spectral,
                                 init=init,
                                 regularizer=regularizer,
                                 activation=activation)
            # Attention layer, inserted once the spatial size matches.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net,
                                      spectral=True,
                                      init=init,
                                      regularizer=regularizer,
                                      scope=layers)

            # Down-sample (stride 2).
            net = convolutional(inputs=net,
                                output_channels=channels[layer],
                                filter_size=4,
                                stride=2,
                                padding='SAME',
                                conv_type=down,
                                spectral=spectral,
                                init=init,
                                regularizer=regularizer,
                                scope=layer)
            if normalization is not None:
                net = normalization(inputs=net, training=True)
            net = activation(net)

        # Flatten.
        net = tf.layers.flatten(inputs=net)

        # Dense.
        net = dense(inputs=net,
                    out_dim=channels[-1],
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=1)
        if normalization is not None:
            net = normalization(inputs=net, training=True)
        # NOTE(review): activation deliberately(?) skipped before the
        # projection head — confirm this is intentional.
        # net = activation(net)

        # Dense
        logits = dense(inputs=net,
                       out_dim=1,
                       spectral=spectral,
                       init=init,
                       regularizer=regularizer,
                       scope=2)
        if label is not None:
            # Projection conditioning: add <features, label embedding> to logits.
            inner_prod = tf.reduce_sum(net * label_emb, axis=-1, keepdims=True)
            output = sigmoid(logits + inner_prod)
        else:
            output = sigmoid(logits)

        if infoGAN:
            # infoGAN heads: Gaussian parameters of the code c given x.
            mean_c_x = dense(inputs=net,
                             out_dim=c_dim,
                             spectral=spectral,
                             init=init,
                             regularizer=regularizer,
                             scope=3)
            logs2_c_x = dense(inputs=net,
                              out_dim=c_dim,
                              spectral=spectral,
                              init=init,
                              regularizer=regularizer,
                              scope=4)
            return output, logits, mean_c_x, logs2_c_x

    print()
    return output, logits
Esempio n. 18
0
def generator_resnet_style(w_input,
                           image_channels,
                           layers,
                           spectral,
                           activation,
                           reuse,
                           is_train,
                           normalization,
                           init='xavier',
                           noise_input_f=False,
                           regularizer=None,
                           cond_label=None,
                           attention=None,
                           stack_layers=False,
                           up='upscale',
                           name='generator'):
    """Style-conditioned ResNet generator driven by per-layer slices of a
    mapped latent `w_input`.

    Each stage is conditioned (via the normalization layer) on
    `w_input[:, :, k]` with k advancing by depth. Optionally collects the
    intermediate feature maps of every stage (`stack_layers`).

    Args:
        w_input: Mapped latent tensor. NOTE(review): indexed as
            `w_input[:, :, layer + 1]` up to `layer == layers - 1`, so the
            last axis must have at least `layers + 1` slices — confirm at
            the call site.
        image_channels: Number of channels of the output image.
        layers: Number of ResBlock + up-sampling stages.
        spectral: Whether conv/dense layers use spectral normalization.
        activation: Activation applied after each normalization.
        reuse: Forwarded to `tf.variable_scope` for variable sharing.
        is_train: Training flag forwarded to the normalization layers.
        normalization: Conditioned normalization callable (takes `c=`).
        init: Weight initializer name.
        noise_input_f: If True, inject per-layer noise after up-sampling.
        regularizer: Optional weight regularizer.
        cond_label: Unused in this body — kept for signature parity with
            sibling generators.
        attention: Feature-map height/width at which to insert a
            self-attention block, or None to disable.
        stack_layers: If True, also return the list of intermediate maps.
        up: Convolution type used for the stride-2 up-sampling convs.
        name: Variable scope name.

    Returns:
        `output`, or `(output, out_stack_layers)` when `stack_layers`.
    """

    out_stack_layers = list()
    channels = [32, 64, 128, 256, 512, 1024]
    # Initial spatial resolution (i_pixel x i_pixel).
    i_pixel = 7

    reversed_channel = list(reversed(channels[:layers]))
    if display:
        print('GENERATOR INFORMATION:', name)
        print('Total  Channels:      ', channels)
        print('Chosen Channels:      ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation:    ', activation)
        print('Attention H/W: ', attention)
        print()

    with tf.variable_scope(name, reuse=reuse):

        w_input_block = w_input[:, :, 0]

        # Dense projection conditioned on slice 0.
        label = w_input[:, :, 0]
        net = dense(inputs=w_input_block,
                    out_dim=1024,
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=1)
        net = normalization(inputs=net,
                            training=is_train,
                            c=label,
                            spectral=spectral,
                            scope='dense_1')
        net = activation(net)

        # Second dense projection sized for the starting feature map.
        net = dense(inputs=net,
                    out_dim=256 * i_pixel * i_pixel,
                    spectral=spectral,
                    init=init,
                    regularizer=regularizer,
                    scope=2)
        net = normalization(inputs=net,
                            training=is_train,
                            c=label,
                            spectral=spectral,
                            scope='dense_2')
        net = activation(net)

        # Reshape into the starting i_pixel x i_pixel x 256 feature map.
        # net = tf.reshape(tensor=net, shape=(-1, i_pixel, i_pixel, 1024), name='reshape')
        net = tf.reshape(tensor=net,
                         shape=(-1, i_pixel, i_pixel, 256),
                         name='reshape')

        # Loop for convolutional layers.
        for layer in range(layers):
            # ResBlock conditioned on this layer's latent slice.
            label = w_input[:, :, layer]
            net = residual_block(inputs=net,
                                 filter_size=3,
                                 stride=1,
                                 padding='SAME',
                                 scope=layer,
                                 is_training=is_train,
                                 spectral=spectral,
                                 init=init,
                                 regularizer=regularizer,
                                 noise_input_f=noise_input_f,
                                 activation=activation,
                                 normalization=normalization,
                                 cond_label=label)

            # Attention layer, inserted once the spatial size matches.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net,
                                      spectral=True,
                                      init=init,
                                      regularizer=regularizer,
                                      scope=layers)

            if stack_layers:
                print('Adding layer output to stack layer output.')
                out_stack_layers.append(net)

            # Convolutional up-sampling conditioned on the next latent slice.
            label = w_input[:, :, layer + 1]
            net = convolutional(inputs=net,
                                output_channels=reversed_channel[layer],
                                filter_size=2,
                                stride=2,
                                padding='SAME',
                                conv_type=up,
                                spectral=spectral,
                                init=init,
                                regularizer=regularizer,
                                scope=layer)
            if noise_input_f: net = noise_input(inputs=net, scope=layer)
            net = normalization(inputs=net,
                                training=is_train,
                                c=label,
                                spectral=spectral,
                                scope=layer)
            net = activation(net)

        # Output head. `label` here still holds the last loop slice.
        if stack_layers:
            print('Adding layer output to stack layer output.')
            out_stack_layers.append(net)
        logits = convolutional(inputs=net,
                               output_channels=image_channels,
                               filter_size=3,
                               stride=1,
                               padding='SAME',
                               conv_type='convolutional',
                               spectral=spectral,
                               init=init,
                               regularizer=regularizer,
                               scope='logits')
        logits = normalization(inputs=logits,
                               training=is_train,
                               c=label,
                               spectral=spectral,
                               scope='logits_norm')
        output = sigmoid(logits)

    print()
    if stack_layers:
        return output, out_stack_layers
    return output
Esempio n. 19
0
def generator_decoder_resnet(z_input, image_channels, layers, spectral, activation, reuse,
                             is_train, normalization, attention=None, up='upscale'):
    """Decode a latent vector into per-pixel Gaussian output parameters.

    Two dense layers project `z_input` to a 7x7x256 feature map, which
    `layers` ResBlock + up-sampling stages grow to full resolution. Two
    independent 3x3 convolutions then produce the mean (sigmoid-squashed)
    and the log-variance maps.
    """
    channels = [32, 64, 128, 256, 512, 1024]
    up_channels = list(reversed(channels[:layers]))

    if display:
        print('GENERATOR-DECODER INFORMATION:')
        print('Channels:      ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation:    ', activation)
        print('Attention H/W: ', attention)
        print()

    with tf.variable_scope('generator_decoder', reuse=reuse):
        # Dense projection into the network width.
        net = dense(inputs=z_input, out_dim=1024, spectral=spectral, scope=1)
        net = activation(normalization(inputs=net, training=is_train))

        # Second dense projection sized for a 7x7x256 spatial map.
        net = dense(inputs=net, out_dim=256 * 7 * 7, spectral=spectral, scope=2)
        net = activation(normalization(inputs=net, training=is_train))

        # Reshape into the starting feature map.
        net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

        for layer in range(layers):
            # ResBlock at the current resolution.
            net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME',
                                 scope=layer, is_training=is_train, spectral=spectral,
                                 activation=activation, normalization=normalization)

            # Self-attention once the spatial size matches `attention`.
            if attention is not None and net.shape.as_list()[1] == attention:
                net = attention_block(net, spectral=True, scope=layers)

            # Stride-2 up-sampling convolution.
            net = convolutional(inputs=net, output_channels=up_channels[layer],
                                filter_size=2, stride=2, padding='SAME', conv_type=up,
                                spectral=spectral, scope=layer)
            net = activation(normalization(inputs=net, training=is_train))

        # Mean head (sigmoid-squashed logits).
        logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3,
                               stride=1, padding='SAME', conv_type='convolutional',
                               spectral=spectral, scope='mean_xi_z')
        mean_xi_z = sigmoid(logits)

        # Log-variance head.
        logs2_xi_z = convolutional(inputs=net, output_channels=image_channels, filter_size=3,
                                   stride=1, padding='SAME', conv_type='convolutional',
                                   spectral=spectral, scope='logs2_xi_z')

    print()
    return mean_xi_z, logs2_xi_z
Esempio n. 20
0
def generator_resnet(z_input, image_channels, layers, spectral, activation, reuse, is_train, normalization, init='xavier', regularizer=None, cond_label=None, attention=None, up='upscale', bigGAN=False):
	"""ResNet generator with optional BigGAN-style hierarchical latent splits.

	When `bigGAN` is True, `z_input` is split along its last axis into one
	chunk per conditioning site: the initial dense input, the two dense
	normalizations, and one per up-sampling stage (layers + 3 chunks total).
	Otherwise the full `z_input` conditions every stage. `cond_label`, when
	given, is concatenated onto each conditioning vector.

	Args:
		z_input: Latent tensor of shape (batch, z_dim).
		image_channels: Number of channels of the output image.
		layers: Number of ResBlock + up-sampling stages.
		spectral: Whether conv/dense layers use spectral normalization.
		activation: Activation applied after each normalization.
		reuse: Forwarded to `tf.variable_scope` for variable sharing.
		is_train: Training flag forwarded to the normalization layers.
		normalization: Conditioned normalization callable (takes `c=`).
		init: Weight initializer name.
		regularizer: Optional weight regularizer.
		cond_label: Optional extra conditioning concatenated to each label.
		attention: Feature-map height/width for self-attention, or None.
		up: Convolution type used for the stride-2 up-sampling convs.
		bigGAN: Enable the hierarchical latent-splitting scheme.

	Returns:
		The generated image tensor after a sigmoid.
	"""
	channels = [32, 64, 128, 256, 512, 1024]
	reversed_channel = list(reversed(channels[:layers]))

	if bigGAN:
		z_dim = z_input.shape.as_list()[-1]
		# Conditioning sites: z_splits[0] (dense input), z_splits[1] and
		# z_splits[2] (dense normalizations), z_splits[3 + layer] per stage,
		# i.e. layers + 3 splits in total.
		# BUG FIX: the original used blocks = 2 + layers and, when z_dim
		# divided evenly, built blocks + 1 equal splits whose sizes summed to
		# z_dim + block_dims, making tf.split raise. We now always build
		# exactly layers + 3 splits that sum to z_dim, with the last split
		# absorbing any remainder.
		blocks = 3 + layers
		block_dims = z_dim // blocks
		z_sets = [block_dims] * blocks
		z_sets[-1] += z_dim - block_dims * blocks
		z_splits = tf.split(z_input, num_or_size_splits=z_sets, axis=-1)

	if display:
		print('GENERATOR INFORMATION:')
		print('Channels:      ', channels[:layers])
		print('Normalization: ', normalization)
		print('Activation:    ', activation)
		print('Attention H/W: ', attention)
		print()

	with tf.variable_scope('generator', reuse=reuse):
		if bigGAN: 
			z_input_block = z_splits[0]
			label = z_splits[1]
		else:
			z_input_block = z_input
			label = z_input
		if cond_label is not None: label = tf.concat([cond_label, label], axis=-1)

		# Dense projection with conditioned normalization.
		net = dense(inputs=z_input_block, out_dim=1024, spectral=spectral, init=init, regularizer=regularizer, scope=1)			
		net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_1')
		net = activation(net)

		if bigGAN: label = z_splits[2]
		else: label = z_input
		if cond_label is not None: label = tf.concat([cond_label, label], axis=-1)

		# Second dense projection sized for a 7x7x256 spatial map.
		net = dense(inputs=net, out_dim=256*7*7, spectral=spectral, init=init, regularizer=regularizer, scope=2)				
		net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope='dense_2')
		net = activation(net)
		
		# Reshape into the starting feature map.
		net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

		for layer in range(layers):

			if bigGAN: label = z_splits[3+layer] 
			else: label = z_input
			if cond_label is not None: label = tf.concat([cond_label, label], axis=-1)

			# ResBlock conditioned on this stage's latent chunk.
			net = residual_block(inputs=net, filter_size=3, stride=1, padding='SAME', scope=layer, is_training=is_train, spectral=spectral, init=init, regularizer=regularizer, 
								 activation=activation, normalization=normalization, cond_label=label)
			
			# Self-attention once the spatial size matches `attention`.
			if attention is not None and net.shape.as_list()[1]==attention:
				net = attention_block(net, spectral=True, init=init, regularizer=regularizer, scope=layers)
			
			# Stride-2 up-sampling convolution with conditioned normalization.
			net = convolutional(inputs=net, output_channels=reversed_channel[layer], filter_size=2, stride=2, padding='SAME', conv_type=up, spectral=spectral, init=init, regularizer=regularizer, scope=layer)
			net = normalization(inputs=net, training=is_train, c=label, spectral=spectral, scope=layer)
			net = activation(net)
			
		# Output head.
		logits = convolutional(inputs=net, output_channels=image_channels, filter_size=3, stride=1, padding='SAME', conv_type='convolutional', spectral=spectral, init=init, regularizer=regularizer, scope='logits')
		output = sigmoid(logits)
		
	print()
	return output
Esempio n. 21
0
def generator(z_input, image_channels, layers, spectral, activation, reuse,
              is_train, normalization):
    """Build a DCGAN-style generator that upsamples a latent vector into an image.

    The latent ``z_input`` is projected through two dense layers to a 7x7x256
    tensor, then upsampled by ``layers`` transpose-convolution stages (each
    doubling spatial resolution and followed by an optional 5x5 refinement
    conv), and finally mapped to ``image_channels`` output channels through a
    last stride-2 transpose conv with a sigmoid activation.

    Args:
        z_input: Latent noise tensor, shape (batch, z_dim).
        image_channels: Number of channels in the generated image.
        layers: Number of upsampling stages; selects the first ``layers``
            entries of the channel schedule [32, 64, ..., 1024], used in
            reverse (widest first).
        spectral: Whether to apply spectral normalization in dense/conv layers.
        activation: Callable applied after each normalized layer.
        reuse: Passed to ``tf.variable_scope`` to share generator variables.
        is_train: Training flag forwarded to the normalization layers.
        normalization: Callable ``normalization(inputs, training)`` (e.g.
            batch norm) applied after each dense/conv layer.

    Returns:
        The generated image tensor (sigmoid output, values in (0, 1)).
    """
    channels = [32, 64, 128, 256, 512, 1024]
    # Upsampling goes from narrow spatial / wide channels to the opposite,
    # so the channel schedule is consumed in reverse.
    reversed_channel = list(reversed(channels[:layers]))

    if display:
        print('Generator Information.')
        print('Channels: ', channels[:layers])
        print('Normalization: ', normalization)
        print('Activation: ', activation)

    with tf.variable_scope('generator', reuse=reuse):
        # Doesn't work ReLU, tried.

        # Dense projection of the latent vector.
        net = dense(inputs=z_input, out_dim=1024, spectral=spectral, scope=1)
        net = normalization(inputs=net, training=is_train)
        net = activation(net)

        # Dense expansion to the flattened 7x7x256 seed feature map.
        net = dense(inputs=net,
                    out_dim=256 * 7 * 7,
                    spectral=spectral,
                    scope=2)
        net = normalization(inputs=net, training=is_train)
        net = activation(net)

        # Reshape to the initial spatial grid (batch, 7, 7, 256).
        net = tf.reshape(tensor=net, shape=(-1, 7, 7, 256), name='reshape')

        for layer in range(layers):
            # Transpose conv: doubles spatial resolution (stride 2).
            net = convolutional(inputs=net,
                                output_channels=reversed_channel[layer],
                                filter_size=2,
                                stride=2,
                                padding='SAME',
                                conv_type='transpose',
                                spectral=spectral,
                                scope=2 * (layer + 1) - 1)
            net = normalization(inputs=net, training=is_train)
            net = activation(net)

            # Refinement conv at the same resolution, already narrowing to
            # the next stage's channel count. Skipped on the last stage,
            # where reversed_channel[layer + 1] would be out of range.
            if layer != layers - 1:
                net = convolutional(inputs=net,
                                    output_channels=reversed_channel[layer +
                                                                     1],
                                    filter_size=5,
                                    stride=1,
                                    padding='SAME',
                                    conv_type='convolutional',
                                    spectral=spectral,
                                    scope=2 * (layer + 1))
                net = normalization(inputs=net, training=is_train)
                net = activation(net)

        # Final stride-2 transpose conv to the image channel count.
        logits = convolutional(inputs=net,
                               output_channels=image_channels,
                               filter_size=2,
                               stride=2,
                               padding='SAME',
                               conv_type='transpose',
                               spectral=spectral,
                               scope='logits')
        output = sigmoid(logits)

    print()
    return output