Example #1
        def __call__(self, x, reuse=False):
            with tf.variable_scope(self.name) as scope:

                if reuse:
                    scope.reuse_variables()

                M, N = x.get_shape().as_list()[-2:]
                x = scattering.Scattering(M=M, N=N, J=2)(x)
                x = tf.contrib.layers.batch_norm(x,
                                                 data_format=FLAGS.data_format,
                                                 fused=True,
                                                 scope="scat_bn")
                x = layers.conv2d_block("CONV2D",
                                        x,
                                        64,
                                        1,
                                        1,
                                        p="SAME",
                                        data_format=FLAGS.data_format,
                                        bias=True,
                                        bn=False,
                                        activation_fn=tf.nn.relu)

                target_shape = (-1, 64 * 7 * 7)
                x = layers.reshape(x, target_shape)
                x = layers.linear(x, 512, name="dense1")
                x = tf.nn.relu(x)
                x = layers.linear(x, 10, name="dense2")

                return x
Example #2
def get_model(X, batch_size, image_dimension):

    input_shape = (batch_size, 3, image_dimension, image_dimension)
    all_parameters = []

    #############################################
    # a first convolution with 32 (3, 3) filters
    output, output_test, params, output_shape = convolutional(
        X, X, input_shape, 32, (3, 3))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # a second convolution with 32 (5, 5) filters
    output, output_test, params, output_shape = convolutional(
        output, output_test, output_shape, 32, (5, 5))
    all_parameters += params

    # maxpool with size=(2, 2)
    output, output_test, params, output_shape = maxpool(
        output, output_test, output_shape, (2, 2))

    # relu activation
    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # MLP first layer

    output = output.flatten(2)
    output_test = output_test.flatten(2)

    output, output_test, params, output_shape = linear(
        output, output_test,
        (output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]),
        500)
    all_parameters += params

    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'relu')

    #############################################
    # MLP second layer

    output, output_test, params, output_shape = linear(output, output_test,
                                                       output_shape, 1)
    all_parameters += params

    output, output_test, params, output_shape = activation(
        output, output_test, output_shape, 'sigmoid')

    #
    return output, output_test, all_parameters
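The get_model builders above (and the one in Example #3) thread a tuple of (train output, test output, new parameters, output shape) through every layer constructor; the output.flatten(2) calls suggest Theano symbolic tensors. A rough sketch of what a linear helper with that contract could look like, assuming Theano and a naive Gaussian initialisation (the original helper may differ):

import numpy as np
import theano.tensor as T
from theano import shared

def linear(output, output_test, input_shape, n_out):
    # Dense layer in the (train, test, params, shape) convention used above.
    n_in = input_shape[1]
    W = shared(0.01 * np.random.randn(n_in, n_out).astype('float32'), name='W')
    b = shared(np.zeros(n_out, dtype='float32'), name='b')
    return (T.dot(output, W) + b,
            T.dot(output_test, W) + b,
            [W, b],
            (input_shape[0], n_out))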
Example #3
def get_model(X, batch_size, image_dimension):

	input_shape = (batch_size, 3, image_dimension, image_dimension)
	all_parameters = []

	#############################################
	# a first convolution with 32 (3, 3) filters
	output, output_test, params, output_shape = convolutional(X, X, input_shape, 32, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# a second convolution with 32 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 32, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')
	
	#############################################
	# a third convolution with 32 (3, 3) filters
	output, output_test, params, output_shape = convolutional(output, output_test, output_shape, 32, (3, 3))
	all_parameters += params

	# maxpool with size=(2, 2)
	output, output_test, params, output_shape = maxpool(output, output_test, output_shape, (2, 2))

	# relu activation
	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# MLP first layer

	output = output.flatten(2)
	output_test = output_test.flatten(2)
	
	output, output_test, params, output_shape = linear(output, output_test, (output_shape[0], output_shape[1]*output_shape[2]*output_shape[3]), 500)
	all_parameters += params

	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'relu')

	#############################################
	# MLP second layer

	output, output_test, params, output_shape = linear(output, output_test, output_shape, 1)
	all_parameters += params

	output, output_test, params, output_shape = activation(output, output_test, output_shape, 'sigmoid')

	#
	return output, output_test, all_parameters
Example #4
 def __init__(self, layer_nums=0, activation='relu', dropout=False):
     self.layers = []
     if layer_nums == 0:
         return
     for i in range(len(layer_nums) - 2):
         self.add(layers.linear(layer_nums[i], layer_nums[i + 1]))
         self.add(self.str_to_layer(activation)())
         if dropout:
             self.add(layers.dropout())
     self.add(layers.linear(layer_nums[-2], layer_nums[-1]))
     self.add(layers.softmax())
Example #5
 def forward(self, x, weights=None):
     if weights is None:
         x = F.relu(self.fc1(x))
         x = F.relu(self.fc2(x))
         x = self.fc3(x)
         return x
     else:
         x = F.relu(linear(x, weights['fc1.weight'], weights['fc1.bias']))
         x = F.relu(linear(x, weights['fc2.weight'], weights['fc2.bias']))
         x = linear(x, weights['fc3.weight'], weights['fc3.bias'])
         return x
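Example #5 switches between module layers and a functional linear call that takes the weight and bias tensors explicitly (the usual MAML-style pattern). PyTorch provides exactly this as torch.nn.functional.linear; a minimal sketch, assuming a parameter dict keyed the same way as in the snippet above:

import torch
import torch.nn.functional as F

# Hypothetical parameters, keyed like the snippet above ('fc1.weight', ...).
weights = {
    'fc1.weight': torch.randn(64, 32), 'fc1.bias': torch.zeros(64),
    'fc2.weight': torch.randn(10, 64), 'fc2.bias': torch.zeros(10),
}

x = torch.randn(8, 32)  # a batch of 8 inputs with 32 features
h = F.relu(F.linear(x, weights['fc1.weight'], weights['fc1.bias']))
out = F.linear(h, weights['fc2.weight'], weights['fc2.bias'])
print(out.shape)  # torch.Size([8, 10])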
Example #6
 def __init__(self, in_channels, num_features, out_channels, activation_fn,
              use_batchnorm, dropout):
     super(type(self), self).__init__()
     fcs = []
     num_features = [in_channels] + num_features
     for idx, n_feat in enumerate(num_features[:-1]):
         in_c = n_feat
         out_c = num_features[idx + 1]
         fcs.append(layers.linear(in_c, out_c,
                                  activation_fn=activation_fn,
                                  use_batchnorm=use_batchnorm))
         fcs.append(nn.Dropout(dropout))
     fcs.append(layers.linear(num_features[-1], out_channels,
                              activation_fn=None, use_batchnorm=False))
     self.dense = nn.Sequential(*fcs)
Example #7
def feedforward_network(state, out_size=128):
    initializer = tf.truncated_normal_initializer(0, 0.1)
    activation_fn = tf.nn.relu

    l1 = linear(state, 64, activation_fn=activation_fn, name='l1')
    l2 = linear(l1, 64, activation_fn=activation_fn, name='l2')

    embedding = linear(l2, out_size, activation_fn=activation_fn, name='l3')

    # Returns the network output (the embedding)
    return embedding
Example #8
    def setUpArch(self, nnInput, numChannels, numCLayers, filterHeights,
                  leakSlope):

        #Create a cnn, gaining the last layer of the cnn as a return
        cnnOut = self.cnn(nnInput, numChannels, numCLayers, filterHeights,
                          leakSlope)

        #Parameters for the fully connected layer
        fcWordDim = (len(filterHeights) * numChannels[-1] + len(self.aminos))\
                            * cnnOut.get_shape()[1]

        fcInput = tf.reshape(tf.concat(axis=2, values=[cnnOut, self.X]),
                             [-1, fcWordDim])

        #fcInput = tf.concat(axis=1,values=[fcInput, self.energy])

        #If there is a fully connected hidden layer, create one and then create
        #the logits (the values before the softmax function). Otherwise, just
        #create the logits
        if self.numHiddenNodes != 0:
            fcOut = layers.linear([fcInput],
                                  self.numHiddenNodes,
                                  True,
                                  1.0,
                                  scope="fc")
            sx_inputs = tf.nn.relu(fcOut)
            sx_inputs = tf.nn.dropout(sx_inputs, rate=1 - self.keep_prob[-1])
            self.logits = layers.linear([sx_inputs],
                                        len(self.allTurnCombs),
                                        True,
                                        1.0,
                                        scope="softmax")
        else:
            self.logits = layers.linear([fcInput],
                                        len(self.allTurnCombs),
                                        True,
                                        1.0,
                                        scope="softmax")

        #energyLogits = layers.linear([self.energy],len(self.allTurnCombs),
        #        True, 1.0, scope="energy")

        #The loss function is cross softmax entropy with the logits created
        #above and the labels passed in above
        xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=self.y, logits=self.logits)
        #xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y,logits=energyLogits)
        self.loss = tf.reduce_mean(xentropy, name="loss")

        #Create an optimizer to minimize the loss function
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(
            self.loss)
Example #9
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 32
        inp = gaussnoise(inp, std=0.05)
        conv1 = conv2d(inp, 96, kernel=3, strides=1, name=dname + 'conv1')
        conv1 = lrelu(conv1, 0.2)

        conv1b = conv2d(conv1, 96, kernel=3, strides=2, name=dname + 'conv1b')
        conv1b = batchnorm(conv1b, is_training=is_train, name=dname + 'bn1b')
        conv1b = lrelu(conv1b, 0.2)
        conv1b = tf.nn.dropout(conv1b, keep_prob)
        # 16
        conv2 = conv2d(conv1b, 192, kernel=3, strides=1, name=dname + 'conv2')
        conv2 = batchnorm(conv2, is_training=is_train, name=dname + 'bn2')
        conv2 = lrelu(conv2, 0.2)

        conv2b = conv2d(conv2, 192, kernel=3, strides=2, name=dname + 'conv2b')
        conv2b = batchnorm(conv2b, is_training=is_train, name=dname + 'bn2b')
        conv2b = lrelu(conv2b, 0.2)
        conv2b = tf.nn.dropout(conv2b, keep_prob)
        # 8
        conv3 = conv2d(conv2b, 256, kernel=3, strides=1, name=dname + 'conv3')
        conv3 = batchnorm(conv3, is_training=is_train, name=dname + 'bn3')
        conv3 = lrelu(conv3, 0.2)

        conv3b = conv2d(conv3, 256, kernel=1, strides=1, name=dname + 'conv3b')
        conv3b = batchnorm(conv3b, is_training=is_train, name=dname + 'bn3b')
        conv3b = lrelu(conv3b, 0.2)

        conv4 = conv2d(conv3b, 512, kernel=1, strides=1, name=dname + 'conv4')
        conv4 = batchnorm(conv4, is_training=is_train, name=dname + 'bn4')
        conv4 = lrelu(conv4, 0.2)

        flat = flatten(conv4)
        # Classifier
        clspred = linear(flat, n_classes, name=dname + 'cpred')
        # Decoder
        g2 = conv2d(conv4, nout=256, kernel=3, name=dname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=dname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=dname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g3b = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3b')
        g3b = batchnorm(g3b,
                        is_training=tf.constant(True),
                        name=dname + 'bn3bg')
        g3b = lrelu(g3b, 0.2)

        g4 = nnupsampling(g3b, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=dname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=dname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=3, kernel=3, name=dname + 'deconv4b')
        g4b = tf.nn.tanh(g4b)
        return clspred, g4b
Example #10
    def _build(self, num_classifiers, learning_rate):
        # inputs
        self.X = tf.placeholder(tf.float32, [None, 28, 28])
        self.y = tf.placeholder(tf.int32, [None])
        one_hot_y = tf.one_hot(self.y, 10)

        networks = [layers.feedforward(self.X) for _ in range(num_classifiers)]
        self.individual_loss = [
            layers.loss(net, one_hot_y) for net in networks
        ]
        self.individual_accuracy = [
            layers.accuracy(net, one_hot_y) for net in networks
        ]

        logits = layers.linear(tf.concat(networks, axis=-1), 10, bias=False)
        l2_distance = tf.add_n([
            tf.norm(networks[0] - networks[1]),
            tf.norm(networks[1] - networks[2]),
            tf.norm(networks[2] - networks[0])
        ])

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=one_hot_y)
        self.loss = tf.reduce_mean(cross_entropy) - 1e-5 * l2_distance
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        self.train_op = optimizer.minimize(self.loss)

        correct_prediction = tf.equal(tf.argmax(logits, axis=1),
                                      tf.argmax(one_hot_y, axis=1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.prediction = tf.argmax(logits, axis=1)
Example #11
def generator(n_samples, noise=None, dim=64):
    with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):
        if noise is None:
            noise = tf.random_normal([n_samples, 128])

        x = linear('input', 128, 8 * 4 * 4 * dim, noise)
        x = tf.reshape(x, [-1, 8 * dim, 4, 4])
        x = batch_norm('bn1', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c2', 8 * dim, 4 * dim, 5, x)
        x = batch_norm('bn2', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c3', 4 * dim, 2 * dim, 5, x)
        x = batch_norm('bn3', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c4', 2 * dim, dim, 5, x)
        x = batch_norm('bn4', x)
        x = tf.nn.relu(x)

        x = conv2d_transpose('c5', dim, 3, 5, x)
        x = tf.tanh(x)

        return tf.reshape(x, [-1, 3 * dim * dim])
Example #12
    def __call__(self, x, reuse=False, output_name=None):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Initial dense multiplication
            x = layers.linear(x, "G_FC1", 512 * 8 * 8)

            batch_size = tf.shape(x)[0]
            if FLAGS.data_format == "NHWC":
                target_shape = (batch_size, 8, 8, 512)
            elif FLAGS.data_format == "NCHW":
                target_shape = (batch_size, 512, 8, 8)

            x = layers.reshape(x, target_shape)
            x = tf.contrib.layers.batch_norm(x, fused=True, data_format=FLAGS.data_format)
            x = layers.lrelu(x)

            x = layers.G_conv2d_block(x, "G_conv2D1", 256, 3, data_format=FLAGS.data_format, bn=True)
            x = layers.upsampleNN(x, "G_up1", 2, data_format=FLAGS.data_format)

            x = layers.G_conv2d_block(x, "G_conv2D2", 128, 3, data_format=FLAGS.data_format, bn=True)
            x = layers.upsampleNN(x, "G_up2", 2, data_format=FLAGS.data_format)

            x = layers.G_conv2d_block(x, "G_conv2D3", 64, 3, data_format=FLAGS.data_format, bn=True)
            x = layers.upsampleNN(x, "G_up3", 2, data_format=FLAGS.data_format)

            # Last conv
            x = layers.conv2d(x, "G_conv2D4", 64, FLAGS.channels, 3, 1, "SAME", data_format=FLAGS.data_format)

            x = tf.nn.tanh(x, name=output_name)

            return x
Example #13
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            for idx, (f, k, s, p) in enumerate(zip(self.list_filters, self.list_kernel_size, self.list_strides, self.list_padding)):
                if idx == 0:
                    bn = False
                else:
                    bn = True
                name = "conv2D_%s" % idx
                x = layers.conv2d_block(name, x, f, k, s, p=p, stddev=0.02,
                                        data_format=self.data_format, bias=True, bn=bn, activation_fn=layers.lrelu)

            target_shape = (self.batch_size, -1)
            x = layers.reshape(x, target_shape)

            # # Add MBD
            # x_mbd = layers.mini_batch_disc(x, num_kernels=100, dim_per_kernel=5)
            # # Concat
            # x = tf.concat([x, x_mbd], axis=1)

            x = layers.linear(x, 1)

            return x
Example #14
    def generator(self, z, image_Y, config):
        with tf.variable_scope("generator"):
            h0 = linear(z,
                        100,
                        config.image_size * config.image_size,
                        name="g_h0_lin")
            h0 = tf.reshape(h0, [-1, config.image_size, config.image_size, 1])
            h0 = tf.nn.relu(batch_norm(h0, name="g_bn0"))

            h1 = tf.concat([image_Y, h0], 3)
            h1 = layers.conv(h1, 2, 128, name="g_h1_conv")
            h1 = tf.nn.relu(batch_norm(h1, name="g_bn1"))

            h2 = tf.concat([image_Y, h1], 3)
            h2 = layers.conv(h2, 129, 64, name="g_h2_conv")
            h2 = tf.nn.relu(batch_norm(h2, name="g_bn2"))

            h3 = tf.concat([image_Y, h2], 3)
            h3 = layers.conv(h3, 65, 64, name="g_h3_conv")
            h3 = tf.nn.relu(batch_norm(h3, name="g_bn3"))

            h4 = tf.concat([image_Y, h3], 3)
            h4 = layers.conv(h4, 65, 64, name="g_h4_conv")
            h4 = tf.nn.relu(batch_norm(h4, name="g_bn4"))

            h5 = tf.concat([image_Y, h4], 3)
            h5 = layers.conv(h5, 65, 32, name="g_h5_conv")
            h5 = tf.nn.relu(batch_norm(h5, name="g_bn5"))

            h6 = tf.concat([image_Y, h5], 3)
            h6 = layers.conv(h6, 33, 2, name="g_h6_conv")
            return tf.nn.tanh(h6)
Example #15
def deepmind_CNN(state, output_size=128):
    initializer = tf.truncated_normal_initializer(0, 0.1)
    activation_fn = tf.nn.relu

    state = tf.transpose(state, [0, 2, 3, 1])

    l1 = conv2d(state,
                32, [8, 8], [4, 4],
                initializer,
                activation_fn,
                'NHWC',
                name='l1')
    l2 = conv2d(l1,
                64, [4, 4], [2, 2],
                initializer,
                activation_fn,
                'NHWC',
                name='l2')
    l3 = conv2d(l2,
                64, [3, 3], [1, 1],
                initializer,
                activation_fn,
                'NHWC',
                name='l3')

    # Flatten all but the batch dimension (on Python 3, `reduce` must be
    # imported from functools).
    shape = l3.get_shape().as_list()
    l3_flat = tf.reshape(l3, [-1, reduce(lambda x, y: x * y, shape[1:])])

    embedding = linear(l3_flat,
                       output_size,
                       activation_fn=activation_fn,
                       name='l4')

    # Returns the network output (the embedding)
    return embedding
Example #16
 def proba_distribution_from_latent(cls,
                                    size,
                                    pi_latent_vector,
                                    vf_latent_vector,
                                    init_scale=1.0,
                                    init_bias=0.0):
     pdparam = linear(pi_latent_vector,
                      'pi',
                      size,
                      init_scale=init_scale,
                      init_bias=init_bias)
     q_values = linear(vf_latent_vector,
                       'q',
                       size,
                       init_scale=init_scale,
                       init_bias=init_bias)
     return cls(pdparam), pdparam, q_values
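Examples #16, #21 and #22 call linear(input, scope, size, init_scale=..., init_bias=...), where the second argument names a variable scope (the Stable-Baselines convention). A minimal TF 1.x sketch of a helper with that signature, shown only to illustrate the call sites, not the library's actual implementation:

import tensorflow as tf  # TF 1.x graph-mode API

def linear(input_tensor, scope, n_hidden, init_scale=1.0, init_bias=0.0):
    # One fully connected layer: y = x W + b, with variables under `scope`.
    with tf.variable_scope(scope):
        n_input = input_tensor.get_shape()[1].value
        weight = tf.get_variable("w", [n_input, n_hidden],
                                 initializer=tf.orthogonal_initializer(init_scale))
        bias = tf.get_variable("b", [n_hidden],
                               initializer=tf.constant_initializer(init_bias))
        return tf.matmul(input_tensor, weight) + bias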
Example #17
def discriminator(inp, reuse=False):
    with tf.variable_scope('Encoder', reuse=reuse):
        # 64
        inp = gaussnoise(inp, std=0.05)
        conv1 = conv2d(inp, 128, kernel=3, strides=2, name=dname + 'conv1')
        conv1 = lrelu(conv1, 0.2)
        # 32
        conv2 = tf.nn.dropout(conv1, keep_prob)
        conv2 = conv2d(conv2, 256, kernel=3, strides=2, name=dname + 'conv2')
        conv2 = batchnorm(conv2, is_training=is_train, name=dname + 'bn2')
        conv2 = lrelu(conv2, 0.2)
        # 16
        conv3 = tf.nn.dropout(conv2, keep_prob)
        conv3 = conv2d(conv3, 512, kernel=3, strides=2, name=dname + 'conv3')
        conv3 = batchnorm(conv3, is_training=is_train, name=dname + 'bn3')
        conv3 = lrelu(conv3, 0.2)
        # 8
        conv3b = conv2d(conv3, 512, kernel=3, strides=1, name=dname + 'conv3b')
        conv3b = batchnorm(conv3b, is_training=is_train, name=dname + 'bn3b')
        conv3b = lrelu(conv3b, 0.2)

        conv4 = tf.nn.dropout(conv3b, keep_prob)
        conv4 = conv2d(conv4, 1024, kernel=3, strides=2, name=dname + 'conv4')
        conv4 = batchnorm(conv4, is_training=is_train, name=dname + 'bn4')
        conv4 = lrelu(conv4, 0.2)
        # 4

        flat = flatten(conv4)
        # Classifier
        clspred = linear(flat, n_classes, name=dname + 'cpred')
        # Decoder
        g1 = conv2d(conv4, nout=512, kernel=3, name=dname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=dname + 'bn1g')
        g1 = lrelu(g1, 0.2)

        g2 = nnupsampling(g1, [8, 8])
        g2 = conv2d(g2, nout=256, kernel=3, name=dname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=dname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=dname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=dname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g4 = nnupsampling(g3, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=dname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=dname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g5 = nnupsampling(g4, [64, 64])
        g5 = conv2d(g5, nout=32, kernel=3, name=dname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=dname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=3, kernel=3, name=dname + 'deconv5b')
        g5b = tf.nn.tanh(g5b)
        return clspred, g5b
Example #18
        def __call__(self, x, reuse=False):
            with tf.variable_scope(self.name) as scope:

                if reuse:
                    scope.reuse_variables()

                M, N = x.get_shape().as_list()[-2:]
                x = scattering.Scattering(M=M, N=N, J=2)(x)
                x = tf.contrib.layers.batch_norm(x, data_format=FLAGS.data_format, fused=True, scope="scat_bn")
                x = layers.conv2d_block("CONV2D", x, 64, 1, 1, p="SAME", data_format=FLAGS.data_format, bias=True, bn=False, activation_fn=tf.nn.relu)

                target_shape = (-1, 64 * 7 * 7)
                x = layers.reshape(x, target_shape)
                x = layers.linear(x, 512, name="dense1")
                x = tf.nn.relu(x)
                x = layers.linear(x, 10, name="dense2")

                return x
Example #19
    def cnn(self, nnInput, numChannels, numCLayers, filterHeights, leakSlope):
        layerIn = nnInput
        imgH = layerIn.get_shape()[1].value
        #Create the first layer of the convolutional neural network
        with tf.variable_scope("cnn_layer{0}".format(0)):
            imgW = layerIn.get_shape()[2].value
            cnnFilterIn = tf.reshape(layerIn, [-1, imgH, imgW, 1])
            layerOut = layers.multiChannelCnn(cnnFilterIn, imgH, imgW, \
                                        filterHeights, self.batchSize,\
                                        numChannels[0], leakSlope)
            if self.batchNorm:
                layerOut = tf.layers.batch_normalization(layerOut,
                                                         training=True)
            layerIn = layerOut

            #Apply dropout to the outputted layer, using the keep_prob hyperparameter
            layerIn = tf.nn.dropout(layerIn, rate=1 - self.keep_prob[0])

        #Create the rest of the layers in the network
        for i in range(1, numCLayers):

            #Add the highway, to pass in the previous output to the output of
            #the current layer. Measure the amount that should be passed with
            #the variable u, a series of values created by the output of a
            #linear layer
            inputDim = layerIn.get_shape()[2].value
            layerInB = tf.reshape(layerIn, [-1, inputDim])
            with tf.variable_scope("cnn_gates"):
                if i > 1:
                    tf.get_variable_scope().reuse_variables()
                u = layers.linear([layerInB],
                                  inputDim,
                                  True,
                                  1.0,
                                  scope="gate")
                u = tf.sigmoid(u)
            input_shape = layerIn.get_shape().as_list()

            #Create another convolutional layer, taking the output of the last
            #as the input for the current. Then, apply the highway, adding
            #together some of the current layer with some of the previous
            with tf.variable_scope("cnn_layer{0}".format(i)):
                imgW = layerIn.get_shape()[2].value
                cnnLayerIn = tf.reshape(layerIn, [-1, imgH, imgW, 1])
                layerOut = layers.multiChannelCnn(cnnLayerIn, imgH, imgW,\
                                filterHeights, self.batchSize, numChannels[i],leakSlope)
                layerOutputB = tf.reshape(layerOut, [-1, inputDim])
                layerOut = u * layerInB + (1 - u) * layerOutputB
                input_shape[0] = -1
                layerOut = tf.reshape(layerOut, input_shape)
                layerIn = layerOut
                layerIn = tf.nn.dropout(layerIn, rate=1 - self.keep_prob[i])
        cnnOut = layerIn

        #Return the final layer
        return cnnOut
Example #20
    def __init__(self, num_classes, num_attributes, feature_net_kwargs,
                 visual_mlp_kwargs, semantic_mlp_kwargs):
        super(type(self), self).__init__()
        self.features = ResNet(**feature_net_kwargs)
        del self.features.dropout
        del self.features.fc2
        self.features = nn.DataParallel(self.features)
        assert visual_mlp_kwargs['out_channels'] == semantic_mlp_kwargs['out_channels']

        self.visual_mlp = layers.linear(
            feature_net_kwargs['num_features'][-1],
            visual_mlp_kwargs['out_channels'],
            activation_fn=visual_mlp_kwargs['activation_fn'],
            use_batchnorm=visual_mlp_kwargs['use_batchnorm'])

        semantic_mlp_layers = []
        prev_num_channels = num_attributes
        for i in range(semantic_mlp_kwargs['num_layers']):
            num_channels = round(
                (i + 1) * (visual_mlp_kwargs['out_channels'] - num_attributes)
                / semantic_mlp_kwargs['num_layers']) + num_attributes
            semantic_mlp_layers.append(layers.linear(
                prev_num_channels,
                num_channels,
                activation_fn=(semantic_mlp_kwargs['activation_fn']
                               if i < semantic_mlp_kwargs['num_layers'] - 1
                               else semantic_mlp_kwargs['last_activation_fn']),
                use_batchnorm=semantic_mlp_kwargs['use_batchnorm']))
            prev_num_channels = num_channels

        self.semantic_mlp = nn.Sequential(*semantic_mlp_layers)
        self.classifier = ArcLinear(visual_mlp_kwargs['out_channels'], num_classes)
Example #21
 def make_critics(self,
                  obs,
                  action=None,
                  reuse=True,
                  scope="values_fn",
                  create_vf=True,
                  create_qf=True,
                  feature_extraction="cnn",
                  net_arch=[64, 64],
                  act_fun=tf.nn.relu,
                  layer_norm=True):
     value_fn, qf1, qf2 = None, None, None
     with tf.variable_scope(scope, reuse=reuse):
         if feature_extraction == "cnn":
             critics_h = nature_cnn(obs)
         else:
             critics_h = tf.layers.flatten(obs)
         if create_vf:
             with tf.variable_scope('vf', reuse=reuse):
                 vf_h = mlp_extractor(critics_h,
                                      net_arch,
                                      act_fun,
                                      layer_norm=layer_norm)
                 value_fn = linear(vf_h, "vf", 1)
         if create_qf:
             if action is None:
                 qf_h = critics_h
             else:
                 qf_h = tf.concat([critics_h, action], axis=-1)
             with tf.variable_scope('qf1', reuse=reuse):
                 qf1_h = mlp_extractor(qf_h,
                                       net_arch,
                                       act_fun,
                                       layer_norm=layer_norm)
                 qf1 = linear(qf1_h, "qf1", 1)
             with tf.variable_scope('qf2', reuse=reuse):
                 qf2_h = mlp_extractor(qf_h,
                                       net_arch,
                                       act_fun,
                                       layer_norm=layer_norm)
                 qf2 = linear(qf2_h, "qf2", 1)
     return qf1, qf2, value_fn
Example #22
 def proba_distribution_from_latent(cls,
                                    size,
                                    pi_latent_vector,
                                    vf_latent_vector,
                                    init_scale=1.0,
                                    init_bias=0.0):
     mean = linear(pi_latent_vector,
                   'pi',
                   size,
                   init_scale=init_scale,
                   init_bias=init_bias)
     logstd = tf.get_variable(name='pi/logstd',
                              shape=[1, size],
                              initializer=tf.zeros_initializer())
     pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
     q_values = linear(vf_latent_vector,
                       'q',
                       size,
                       init_scale=init_scale,
                       init_bias=init_bias)
     return cls(pdparam), mean, q_values
Example #23
    def discriminator(self, image, reuse=False, config=None):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            h0 = layers.lrelu(layers.conv(image, 3, 64, name='d_h0_conv'))
            h1 = layers.lrelu(
                batch_norm(layers.conv(h0, 64, 128, name='d_h1_conv'),
                           name='d_bn1'))
            h2 = layers.lrelu(
                batch_norm(layers.conv(h1, 128, 256, name='d_h2_conv'),
                           name='d_bn2'))
            h3 = layers.lrelu(
                batch_norm(layers.conv(h2, 256, 512, name='d_h3_conv'),
                           name='d_bn3'))

            h4 = linear(tf.reshape(h3, [config.batch_size, -1]),
                        524288,
                        64,
                        name="d_h4_lin")
            h5 = linear(h4, 64, 1, name="d_h5_lin")
            return h5
Example #24
def generator(inp_z, inp_y, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        inp = tf.concat([inp_z, inp_y], 1)
        sz = 4
        g1 = linear(inp, 512 * sz * sz, name=gname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=gname + 'bn1g')
        g1 = lrelu(g1, 0.2)
        g1_reshaped = tf.reshape(g1, [-1, 512, sz, sz])
        print('genreshape: ' + str(g1_reshaped.get_shape().as_list()))

        g2 = nnupsampling(g1_reshaped, [8, 8])
        g2 = conv2d(g2, nout=512, kernel=3, name=gname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=gname + 'bn2g')
        g2 = lrelu(g2, 0.2)

        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=256, kernel=3, name=gname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=gname + 'bn3g')
        g3 = lrelu(g3, 0.2)

        g4 = nnupsampling(g3, [32, 32])
        g4 = conv2d(g4, nout=128, kernel=3, name=gname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=gname + 'bn4g')
        g4 = lrelu(g4, 0.2)

        g4b = conv2d(g4, nout=128, kernel=3, name=gname + 'deconv4b')
        g4b = batchnorm(g4b,
                        is_training=tf.constant(True),
                        name=gname + 'bn4bg')
        g4b = lrelu(g4b, 0.2)

        g5 = nnupsampling(g4b, [64, 64])
        g5 = conv2d(g5, nout=64, kernel=3, name=gname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=gname + 'bn5g')
        g5 = lrelu(g5, 0.2)

        g5b = conv2d(g5, nout=64, kernel=3, name=gname + 'deconv5b')
        g5b = batchnorm(g5b,
                        is_training=tf.constant(True),
                        name=gname + 'bn5bg')
        g5b = lrelu(g5b, 0.2)

        g6 = nnupsampling(g5b, [128, 128])
        g6 = conv2d(g6, nout=32, kernel=3, name=gname + 'deconv6')
        g6 = batchnorm(g6, is_training=tf.constant(True), name=gname + 'bn6g')
        g6 = lrelu(g6, 0.2)

        g6b = conv2d(g6, nout=3, kernel=3, name=gname + 'deconv6b')
        g6b = tf.nn.tanh(g6b)
        g6b_64 = pool(g6b, fsize=3, strides=2, op='avg')
        return g6b_64, g6b
Example #25
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            #################
            # Generator
            #################

            # Initial dense multiplication
            x = layers.linear(x, 512 * 8 * 8)

            # Reshape to image format
            if FLAGS.data_format == "NCHW":
                target_shape = (-1, 512, 8, 8)
            else:
                target_shape = (-1, 8, 8, 512)

            x = layers.reshape(x, target_shape)
            x = tf.contrib.layers.batch_norm(x, fused=True)
            x = tf.nn.elu(x)

            # Conv2D + Phase shift blocks
            x = layers.conv2d_block(x,
                                    "G16_conv2D_1",
                                    256,
                                    3,
                                    1,
                                    data_format=FLAGS.data_format)
            x = layers.conv2d_block(x,
                                    "G16_conv2D_2",
                                    256,
                                    3,
                                    1,
                                    data_format=FLAGS.data_format)
            x = layers.phase_shift(x,
                                   upsampling_factor=2,
                                   name="PS_G16",
                                   data_format=FLAGS.data_format)
            x = layers.conv2d_block(x,
                                    "G16_conv2D_3",
                                    FLAGS.channels,
                                    3,
                                    1,
                                    bn=False,
                                    activation_fn=None,
                                    data_format=FLAGS.data_format)
            x = tf.nn.tanh(x, name="x_G16")

            return x
Example #26
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Initial dense multiplication
            x = layers.linear(x,
                              self.filters * self.start_dim * self.start_dim,
                              bias=True)

            # Reshape to image format
            if self.data_format == "NCHW":
                target_shape = (self.batch_size, self.filters, self.start_dim,
                                self.start_dim)
            else:
                target_shape = (self.batch_size, self.start_dim,
                                self.start_dim, self.filters)

            x = layers.reshape(x, target_shape)
            x = tf.contrib.layers.batch_norm(x, fused=True)
            x = layers.lrelu(x)

            # # Upsampling2D + conv blocks
            for idx, (f, k, s, p) in enumerate(
                    zip(self.list_filters, self.list_kernel_size,
                        self.list_strides, self.list_padding)):
                name = "upsample2D_%s" % idx
                if idx == len(self.list_filters) - 1:
                    bn = False
                    bias = False
                    activation_fn = None
                else:
                    bias = True
                    bn = True
                    activation_fn = layers.lrelu
                x = layers.upsample2d_block(name,
                                            x,
                                            f,
                                            k,
                                            s,
                                            p,
                                            data_format=self.data_format,
                                            bias=bias,
                                            bn=bn,
                                            activation_fn=activation_fn)

            x = tf.nn.tanh(x, name="X_G")

            return x
Example #27
 def __init__(self, A_hat, in_channels, num_features, out_channels,
              activation_fn, use_batchnorm=False, fm_mult=1.0, last_fc=False):
     super(type(self), self).__init__()
     num_features = [int(x * fm_mult) for x in num_features]
     self.A_hat = A_hat
     self.A_hat.requires_grad = False
     num_features = [in_channels] + num_features
     layer_list = []
     for prev_num_feature, num_feature in zip(num_features[:-1], num_features[1:]):
         layer_list.append(GraphConv(self.A_hat, prev_num_feature, num_feature,
                                     activation_fn, use_batchnorm=use_batchnorm,
                                     bias=True))
     if not last_fc:
         layer_list.append(GraphConv(self.A_hat, num_features[-1], out_channels,
                                     activation_fn=None,
                                     use_batchnorm=use_batchnorm, bias=True))
     else:
         layer_list.append(layers.linear(num_features[-1], out_channels,
                                         activation_fn=None,
                                         use_batchnorm=use_batchnorm, bias=True))
     self.layers = nn.Sequential(*layer_list)
Example #28
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            x = layers.conv2d_block(x,
                                    "D64_conv2D_1",
                                    32,
                                    3,
                                    2,
                                    data_format=FLAGS.data_format,
                                    bn=False)
            x = layers.conv2d_block(x,
                                    "D64_conv2D_2",
                                    64,
                                    3,
                                    2,
                                    data_format=FLAGS.data_format)
            x = layers.conv2d_block(x,
                                    "D64_conv2D_3",
                                    128,
                                    3,
                                    2,
                                    data_format=FLAGS.data_format)
            x = layers.conv2d_block(x,
                                    "D64_conv2D_4",
                                    256,
                                    3,
                                    2,
                                    data_format=FLAGS.data_format)

            x_shape = x.get_shape().as_list()
            flat_dim = 1
            for d in x_shape[1:]:
                flat_dim *= d

            target_shape = (-1, flat_dim)
            x = layers.reshape(x, target_shape)

            x_mbd = layers.mini_batch_disc(x,
                                           num_kernels=100,
                                           dim_per_kernel=5,
                                           name="mbd64")
            x = tf.concat([x, x_mbd], axis=1)

            x = layers.linear(x, 1)

            return x
Example #29
    def discriminator(self, image, reuse=False):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            h0 = leaky_relu(conv2d(image, self.df_dim, name='d_h0_conv'))
            h1 = leaky_relu(
                self.d_bn1(conv2d(h0, self.df_dim * 2, name='d_h1_conv')))
            h2 = leaky_relu(
                self.d_bn2(conv2d(h1, self.df_dim * 4, name='d_h2_conv')))
            h3 = leaky_relu(
                self.d_bn3(conv2d(h2, self.df_dim * 8, name='d_h3_conv')))
            h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin')

            return h4
Example #30
    def __init__(self, conv_dim, num_classes):
        super(Discriminator, self).__init__()
        self.conv_dim = conv_dim

        self.res_1 = ResidualBlock_D(3, conv_dim)
        self.res_2 = ResidualBlock_D(conv_dim, conv_dim * 2)
        self.attn = SelfAttn(conv_dim * 2)
        self.res_3 = ResidualBlock_D(conv_dim * 2, conv_dim * 4)
        self.res_4 = ResidualBlock_D(conv_dim * 4, conv_dim * 8)
        self.res_5 = ResidualBlock_D(conv_dim * 8, conv_dim * 16)
        self.lrelu = lrelu(inplace=True)
        self.linear = spectral_norm(linear(conv_dim * 16, 1))
        self.embed = embedding(num_classes, conv_dim * 16)

        self.apply(init_weights)
Example #31
 def forward(self, x, weights=None, prefix=''):
     '''
     Runs the net forward; if weights are None it uses 'self' layers,
     otherwise keeps the structure and uses 'weights' instead.
     '''
     if weights is None:
         x = self.features(x)
         x = self.fa(x)
     else:
         for i in range(self.num_layers):
             x = linear(x, weights[prefix + 'fc' + str(i) + '.weight'],
                        weights[prefix + 'fc' + str(i) + '.bias'])
             if i < self.num_layers - 1: x = relu(x)
         x = self.fa(x)
     return x
Example #32
 def build_raw_branch(self, resnet_kwargs):
     branch_layers = []
     blocks = self.trunk.build_blocks(resnet_kwargs['block'],
                                      resnet_kwargs['num_features'][-2],
                                      resnet_kwargs['num_features'][-1],
                                      resnet_kwargs['strides'][-1],
                                      resnet_kwargs['num_blocks'][-1])
     branch_layers.append(blocks)
     shape = (resnet_kwargs['input_shape'][0] // prod(resnet_kwargs['strides']),
              resnet_kwargs['input_shape'][1] // prod(resnet_kwargs['strides']))
     if resnet_kwargs['use_maxpool']:
         shape = (shape[0] // 2, shape[1] // 2)
     if resnet_kwargs['use_avgpool']:
         avgpool = nn.AvgPool2d([*shape], 1)
         branch_layers.append(avgpool)
         shape = 1 * 1
     if resnet_kwargs['feature_layer_dim'] is not None:
         fc1 = nn.Sequential(
             layers.Flatten(),
             layers.linear(resnet_kwargs['num_features'][-1] * shape,
                           resnet_kwargs['feature_layer_dim'],
                           activation_fn=None, pre_activation=False,
                           use_batchnorm=resnet_kwargs['use_batchnorm']))
         branch_layers.append(fc1)
     return nn.Sequential(*branch_layers)
Example #33
def SimpleNet1(x,
               input_shape,
               neurons=1024,
               n_classes=10,
               non_linearity='relu',
               create_summaries=True):
    h = x
    h, output_shape = l.flatten(input_shape, h)
    h, output_shape = l.linear(output_shape, neurons, h, name='linear1')
    if create_summaries:
        utils.variable_summaries(h, name='linear-comb-hidden-layer')

    h = l.non_linearity(h, name=non_linearity)
    if create_summaries:
        utils.variable_summaries(h, name='activation-hidden-layer')
        sparsity = tf.nn.zero_fraction(h,
                                       name='activation-hidden-layer-sparsity')
        tf.summary.scalar(sparsity.op.name, sparsity)

    logits, output_shape = l.linear(output_shape, n_classes, h, name='output')
    if create_summaries:
        utils.variable_summaries(logits, name='unscaled-logits-output-layer')

    return logits
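SimpleNet1 expects l.linear(input_shape, n_out, x, name=...) to return both the activations and the new shape so the shape can be threaded into the next layer. A minimal TF 1.x sketch of a helper with that contract (signature inferred from the call sites; initialisers are an assumption):

import tensorflow as tf  # TF 1.x

def linear(input_shape, n_out, x, name="linear"):
    # Dense layer that also reports its output shape, as SimpleNet1 assumes.
    n_in = input_shape[-1]
    with tf.variable_scope(name):
        w = tf.get_variable("w", [n_in, n_out],
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable("b", [n_out], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b, (input_shape[0], n_out)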
Example #34
    def __call__(self, x, reuse=False, output_name=None):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Initial dense multiplication
            x = layers.linear(x, "G_FC1", self.nb_filters * 8 * 8)

            batch_size = tf.shape(x)[0]
            if FLAGS.data_format == "NHWC":
                target_shape = (batch_size, 8, 8, self.nb_filters)
            elif FLAGS.data_format == "NCHW":
                target_shape = (batch_size, self.nb_filters, 8, 8)

            x = layers.reshape(x, target_shape)
            # x = tf.contrib.layers.batch_norm(x, fused=True, data_format=FLAGS.data_format)
            x = tf.nn.elu(x)

            x = layers.dec_conv2d_block(x, "G_conv2D1", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "G_up1", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "G_conv2D2", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "G_up2", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "G_conv2D3", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "G_up3", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "G_conv2D4", self.nb_filters, 3, data_format=FLAGS.data_format)

            # Last conv
            x = layers.conv2d(x, "G_conv2D5", self.nb_filters, FLAGS.channels, 3, 1, "SAME", data_format=FLAGS.data_format)

            x = tf.nn.tanh(x, name=output_name)

            return x
Example #35
    def __call__(self, x, reuse=False, output_name=None):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            ##################
            # Encoding part
            ##################

            # First conv
            x = layers.conv2d(x, "D_conv2D1", FLAGS.channels, self.nb_filters, 3, 1, "SAME", data_format=FLAGS.data_format)
            x = tf.nn.elu(x)

            # Conv blocks
            x = layers.enc_conv2d_block(x, "D_enc_conv2D2", self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format)
            x = layers.enc_conv2d_block(x, "D_enc_conv2D3", 2 * self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format)
            x = layers.enc_conv2d_block(x, "D_enc_conv2D4", 3 * self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format)
            x = layers.enc_conv2d_block(x, "D_enc_conv2D5", 4 * self.nb_filters, 3, activation_fn=tf.nn.elu, data_format=FLAGS.data_format, downsampling=False)

            # Flatten
            batch_size = tf.shape(x)[0]
            other_dims = x.get_shape().as_list()[1:]
            prod_dim = 1
            for d in other_dims:
                prod_dim *= d
            x = layers.reshape(x, (batch_size, prod_dim))

            # Linear
            x = layers.linear(x, "D_FC1", self.h_dim, activation_fn=None)

            ##################
            # Decoding part
            ##################

            x = layers.linear(x, "D_FC2", self.nb_filters * 8 * 8)

            batch_size = tf.shape(x)[0]
            if FLAGS.data_format == "NHWC":
                target_shape = (batch_size, 8, 8, self.nb_filters)
            elif FLAGS.data_format == "NCHW":
                target_shape = (batch_size, self.nb_filters, 8, 8)

            x = layers.reshape(x, target_shape)
            # x = tf.contrib.layers.batch_norm(x, fused=True, data_format=FLAGS.data_format)
            x = tf.nn.elu(x)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D1", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "D_up1", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D2", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "D_up2", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D3", self.nb_filters, 3, data_format=FLAGS.data_format)
            x = layers.upsampleNN(x, "D_up3", 2, data_format=FLAGS.data_format)

            x = layers.dec_conv2d_block(x, "D_dec_conv2D4", self.nb_filters, 3, data_format=FLAGS.data_format)

            # Last conv
            x = layers.conv2d(x, "D_dec_conv2D5", self.nb_filters, FLAGS.channels, 3, 1, "SAME", data_format=FLAGS.data_format)
            x = tf.nn.tanh(x, name=output_name)

            return x
Example #36
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Store all layers in a dict
            d = collections.OrderedDict()

            # Initial dense multiplication
            x = layers.linear(x, self.filters * self.start_dim * self.start_dim)

            # Reshape to image format
            if self.data_format == "NCHW":
                target_shape = (self.batch_size, self.filters, self.start_dim, self.start_dim)
            else:
                target_shape = (self.batch_size, self.start_dim, self.start_dim, self.filters)

            x = layers.reshape(x, target_shape)
            x = tf.contrib.layers.batch_norm(x, fused=True)
            x = tf.nn.relu(x)

            # # Conv2D + Phase shift blocks
            # x = layers.conv2d_block("conv2D_1_1", x, 512, 3, 1, p="SAME", stddev=0.02,
            #                         data_format=self.data_format, bias=False, bn=True, activation_fn=layers.lrelu)
            # x = layers.conv2d_block("conv2D_1_2", x, 512, 3, 1, p="SAME", stddev=0.02,
            #                         data_format=self.data_format, bias=False, bn=False, activation_fn=layers.lrelu)
            # x = layers.phase_shift(x, upsampling_factor=2, name="PS1")

            # x = layers.conv2d_block("conv2D_2_1", x, 256, 3, 1, p="SAME", stddev=0.02,
            #                         data_format=self.data_format, bias=False, bn=False, activation_fn=layers.lrelu)
            # x = layers.conv2d_block("conv2D_2_2", x, 256, 3, 1, p="SAME", stddev=0.02,
            #                         data_format=self.data_format, bias=False, bn=False, activation_fn=layers.lrelu)
            # x = layers.phase_shift(x, upsampling_factor=2, name="PS2")

            # x = layers.conv2d_block("conv2D_3", x, 1, 1, 1, p="SAME", stddev=0.02,
            #                         data_format=self.data_format, bn=False)

            # # Upsampling2D + conv blocks
            # for idx, (f, k, s, p) in enumerate(zip(self.list_filters, self.list_kernel_size, self.list_strides, self.list_padding)):
            #     name = "upsample2D_%s" % idx
            #     if idx == len(self.list_filters) - 1:
            #         bn = False
            #     else:
            #         bn = True
            #     x = layers.upsample2d_block(name, x, f, k, s, p, data_format=self.data_format, bn=bn, activation_fn=layers.lrelu)

            # Transposed conv blocks
            for idx, (f, k, s, p) in enumerate(zip(self.list_filters, self.list_kernel_size, self.list_strides, self.list_padding)):
                img_size = self.start_dim * (2 ** (idx + 1))
                if self.data_format == "NCHW":
                    output_shape = (self.batch_size, f, img_size, img_size)
                else:
                    output_shape = (self.batch_size, img_size, img_size, f)
                name = "deconv2D_%s" % idx
                if idx == len(self.list_filters) - 1:
                    bn = False
                else:
                    bn = True
                x = layers.deconv2d_block(name, x, output_shape, k, s, p, data_format=self.data_format, bn=bn)

            x = tf.nn.tanh(x, name="X_G")

            return x