Example #1
    def _GAN_generator(self, z, z_dim, reuse=False):
        # Model Parameters
        with tf.variable_scope("generator", reuse=reuse):
            g_w1 = weight_xavier_init(shape=[z_dim, 1024], n_inputs=z_dim, n_outputs=1024, variable_name='g_w1')
            g_b1 = bias_variable(shape=[1024], variable_name='g_b1')
            g1 = tf.matmul(z, g_w1) + g_b1
            g1 = tf.nn.relu(g1)

            # Second fully connected layer: project up to a 7 x 7 x 128 feature map
            g_w2 = weight_xavier_init(shape=[1024, 128 * 7 * 7], n_inputs=1024, n_outputs=128 * 7 * 7,
                                      variable_name='g_w2')
            g_b2 = bias_variable(shape=[128 * 7 * 7], variable_name='g_b2')
            g2 = tf.matmul(g1, g_w2) + g_b2
            g2 = tf.nn.relu(g2)
            g2 = tf.reshape(g2, shape=[-1, 7, 7, 128])
            # First transposed convolution: 128 -> 64 feature maps
            g_w3 = weight_xavier_init(shape=[5, 5, 64, 128], n_inputs=128, n_outputs=64, variable_name='g_w3')
            g_b3 = bias_variable(shape=[64], variable_name='g_b3')
            g3 = deconvolution_2d(g2, g_w3)
            g3 = g3 + g_b3
            g3 = tf.nn.relu(g3)

            # Second transposed convolution: 64 -> 32 feature maps
            g_w4 = weight_xavier_init(shape=[5, 5, 32, 64], n_inputs=64, n_outputs=32, variable_name='g_w4')
            g_b4 = bias_variable(shape=[32], variable_name='g_b4')
            g4 = deconvolution_2d(g3, g_w4)
            g4 = g4 + g_b4
            g4 = tf.nn.relu(g4)

            # Final 1 x 1 convolution down to a single output channel
            g_w5 = weight_xavier_init(shape=[1, 1, 32, 1], n_inputs=32, n_outputs=1, variable_name='g_w5')
            g_b5 = bias_variable(shape=[1], variable_name='g_b5')
            g5 = convolution_2d(g4, g_w5)
            g5 = g5 + g_b5
            out = tf.nn.sigmoid(g5)
        return out
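The examples on this page call helper functions that are defined elsewhere in the project: weight_xavier_init, bias_variable, convolution_2d and deconvolution_2d. The sketch below gives plausible TensorFlow 1.x implementations inferred only from how they are called above; the exact signatures, the stride defaults and the 0.1 bias constant are assumptions, not the project's actual code.

import tensorflow as tf  # TensorFlow 1.x API


def weight_xavier_init(shape, n_inputs, n_outputs, variable_name):
    # Xavier/Glorot uniform initialisation scaled by fan-in and fan-out,
    # created via get_variable so variable_scope reuse works as in Example #1.
    limit = (6.0 / (n_inputs + n_outputs)) ** 0.5
    return tf.get_variable(variable_name, shape=shape,
                           initializer=tf.random_uniform_initializer(-limit, limit))


def bias_variable(shape, variable_name):
    # Small constant bias (the 0.1 value is an assumption).
    return tf.get_variable(variable_name, shape=shape,
                           initializer=tf.constant_initializer(0.1))


def convolution_2d(x, w, stride=1):
    # Plain 2-D convolution with SAME padding; the strided discriminator
    # examples pass stride=2 explicitly.
    return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')


def deconvolution_2d(x, w, stride=2):
    # Transposed convolution; the filter layout is [h, w, out_channels, in_channels]
    # as required by tf.nn.conv2d_transpose. The stride-2 default is assumed.
    x_shape = tf.shape(x)
    out_channels = w.get_shape().as_list()[2]
    output_shape = tf.stack([x_shape[0], x_shape[1] * stride,
                             x_shape[2] * stride, out_channels])
    return tf.nn.conv2d_transpose(x, w, output_shape=output_shape,
                                  strides=[1, stride, stride, 1], padding='SAME')

With that stride-2 default, the 7 x 7 x 128 map in Example #1 is upsampled to 14 x 14 and then 28 x 28 before the final 1 x 1 convolution, i.e. an MNIST-sized single-channel output.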
Example #2
    def _GAN_discriminator(self, X, image_width, image_height, image_channel=1, reuse=False):
        # Model Parameters
        # CNN model
        with tf.variable_scope("discriminator", reuse=reuse):
            X1 = tf.reshape(X, shape=[-1, image_width, image_height, image_channel])
            # First convolutional and pool layers
            # This finds 32 different 5 x 5 pixel features
            d_w1 = tf.get_variable('d_w1', shape=[5, 5, 1, 32],
                                   initializer=tf.truncated_normal_initializer(stddev=0.02))
            d_b1 = tf.get_variable('d_b1', [32], initializer=tf.constant_initializer(0))
            d1 = convolution_2d(X1, d_w1)
            d1 = d1 + d_b1
            d1 = tf.nn.relu(d1)
            d1 = average_pool_2x2(d1)

            # Second convolutional and pool layers
            # This finds 64 different 5 x 5 pixel features
            d_w2 = tf.get_variable('d_w2', shape=[5, 5, 32, 64],
                                   initializer=tf.truncated_normal_initializer(stddev=0.02))
            d_b2 = tf.get_variable('d_b2', [64], initializer=tf.constant_initializer(0))
            d2 = convolution_2d(d1, d_w2)
            d2 = d2 + d_b2
            d2 = tf.nn.relu(d2)
            d2 = average_pool_2x2(d2)

            # First fully connected layer
            d_w3 = tf.get_variable('d_w3', [7 * 7 * 64, 1024],
                                   initializer=tf.truncated_normal_initializer(stddev=0.02))
            d_b3 = tf.get_variable('d_b3', [1024], initializer=tf.constant_initializer(0))
            d3 = tf.reshape(d2, [-1, 7 * 7 * 64])
            d3 = tf.matmul(d3, d_w3)
            d3 = d3 + d_b3
            d3 = tf.nn.relu(d3)

            # Second fully connected layer
            d_w4 = tf.get_variable('d_w4', [1024, 1], initializer=tf.truncated_normal_initializer(stddev=0.02))
            d_b4 = tf.get_variable('d_b4', [1], initializer=tf.constant_initializer(0))
            out_logit = tf.matmul(d3, d_w4) + d_b4
            out = tf.nn.sigmoid(out_logit)
            return out, out_logit
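Example #2 additionally uses average_pool_2x2, which is also not shown. A minimal sketch, assuming a standard 2 x 2 average pooling with stride 2; two such pools take a 28 x 28 input down to the 7 * 7 * 64 volume flattened above.

def average_pool_2x2(x):
    # 2x2 average pooling with stride 2, halving height and width.
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')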
Example #3
    def _GAN_discriminator(self, X, reuse=True):
        # Model Parameters
        # CNN model
        with tf.variable_scope("discriminator", reuse=reuse):
            X1 = tf.reshape(X, shape=[self.batch_size, self.image_width, self.image_height, self.channels])
            # First convolutional layer (stride 2): 64 different 5 x 5 features
            d_w1 = weight_xavier_init(shape=[5, 5, 1, 64], n_inputs=1, n_outputs=64, variable_name='d_w1')
            d_b1 = bias_variable(shape=[64], variable_name='d_b1')
            d1 = convolution_2d(X1, d_w1, 2)
            d1 = d1 + d_b1
            d1 = leaky_relu(d1)

            # Second convolutional layer (stride 2): 128 different 5 x 5 features
            d_w2 = weight_xavier_init(shape=[5, 5, 64, 128], n_inputs=64, n_outputs=128, variable_name='d_w2')
            d_b2 = bias_variable(shape=[128], variable_name='d_b2')
            d2 = convolution_2d(d1, d_w2, 2)
            d2 = d2 + d_b2
            d2 = leaky_relu(d2)

            # First fully connected layer
            d_w3 = weight_xavier_init(shape=[7 * 7 * 128, 1024], n_inputs=7 * 7 * 128, n_outputs=1024,
                                      variable_name='d_w3')
            d_b3 = bias_variable(shape=[1024], variable_name='d_b3')
            d3 = tf.reshape(d2, [-1, 7 * 7 * 128])
            d3 = tf.matmul(d3, d_w3)
            d3 = d3 + d_b3
            d3 = leaky_relu(d3)

            # Second fully connected layer
            d_w4 = weight_xavier_init(shape=[1024, 1], n_inputs=1024, n_outputs=1,
                                      variable_name='d_w4')
            d_b4 = bias_variable(shape=[1], variable_name='d_b4')
            out_logit = tf.matmul(d3, d_w4) + d_b4
            return out_logit
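Example #3 switches the discriminator activations to leaky_relu, again not defined in the snippet. A minimal sketch, with the commonly used slope of 0.2 as an assumption; newer TensorFlow 1.x releases provide tf.nn.leaky_relu with the same behaviour.

def leaky_relu(x, alpha=0.2):
    # Leaky ReLU: pass positive values through, scale negatives by alpha.
    return tf.maximum(alpha * x, x)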
Example #4
    def _GAN_generator(self, z, z_dim):
        # Model Parameters
        g_w1 = tf.get_variable('g_w1', [z_dim, 1024], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.02))
        g_b1 = tf.get_variable('g_b1', [1024], initializer=tf.truncated_normal_initializer(stddev=0.02))
        g1 = tf.matmul(z, g_w1) + g_b1
        # g1 = tf.contrib.layers.batch_norm(g1, center=True, scale=True, is_training=self.phase, scope='bn1')
        g1 = tf.contrib.layers.batch_norm(g1, epsilon=1e-5, is_training=self.phase, scope='bn1')
        g1 = tf.nn.relu(g1)

        # Second fully connected layer: project up to a 7 x 7 x 64 feature map
        g_w2 = tf.get_variable('g_w2', [1024, 64 * 7 * 7], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.02))
        g_b2 = tf.get_variable('g_b2', [64 * 7 * 7], initializer=tf.truncated_normal_initializer(stddev=0.02))
        g2 = tf.matmul(g1, g_w2) + g_b2
        # g2 = tf.contrib.layers.batch_norm(g2, center=True, scale=True, is_training=self.phase, scope='bn2')
        g2 = tf.contrib.layers.batch_norm(g2, epsilon=1e-5, is_training=self.phase, scope='bn2')
        g2 = tf.nn.relu(g2)
        g2 = tf.reshape(g2, shape=[-1, 7, 7, 64])
        # First transposed convolution: 64 -> 32 feature maps
        g_w3 = tf.get_variable('g_w3', [5, 5, 32, 64], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.02))
        g_b3 = tf.get_variable('g_b3', [32], initializer=tf.truncated_normal_initializer(stddev=0.02))
        g3 = deconvolution_2d(g2, g_w3)
        g3 = g3 + g_b3
        # g3 = tf.contrib.layers.batch_norm(g3, center=True, scale=True, is_training=self.phase, scope='bn3')
        g3 = tf.contrib.layers.batch_norm(g3, epsilon=1e-5, is_training=self.phase, scope='bn3')
        g3 = tf.nn.relu(g3)

        # Second transposed convolution: 32 -> 16 feature maps
        g_w4 = tf.get_variable('g_w4', [5, 5, 16, 32], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.02))
        g_b4 = tf.get_variable('g_b4', [16], initializer=tf.truncated_normal_initializer(stddev=0.02))
        g4 = deconvolution_2d(g3, g_w4)
        g4 = g4 + g_b4
        g4 = tf.contrib.layers.batch_norm(g4, epsilon=1e-5, is_training=self.phase, scope='bn4')
        g4 = tf.nn.relu(g4)

        # Final 1 x 1 convolution down to a single output channel (tanh output)
        g_w5 = tf.get_variable('g_w5', [1, 1, 16, 1], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.02))
        g_b5 = tf.get_variable('g_b5', [1], initializer=tf.truncated_normal_initializer(stddev=0.02))
        g5 = convolution_2d(g4, g_w5)
        g5 = g5 + g_b5
        g5 = tf.nn.tanh(g5)

        return g5
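Example #4 is the only generator here that applies batch normalisation, via tf.contrib.layers.batch_norm with is_training=self.phase. The standalone sketch below, with assumed placeholder and tensor names, shows how such a phase flag is typically wired up, including the UPDATE_OPS dependency that the moving averages need; it is illustrative, not the project's training code.

import tensorflow as tf

phase = tf.placeholder(tf.bool, name='phase')           # True while training
x = tf.placeholder(tf.float32, [None, 100], name='x')   # dummy input

h = tf.layers.dense(x, 256)
h = tf.contrib.layers.batch_norm(h, epsilon=1e-5, is_training=phase, scope='bn_demo')
h = tf.nn.relu(h)
loss = tf.reduce_mean(tf.square(h))

# batch_norm registers its moving-average updates in UPDATE_OPS; the train
# step has to depend on them or the inference-time statistics never change.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)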
Example #5
    def _GAN_generator(self, z, z_dim, reuse=False):
        # Model Parameters
        with tf.variable_scope("generator", reuse=reuse):
            # Layer1
            g_w1 = weight_xavier_init(shape=[z_dim, 1024],
                                      n_inputs=z_dim,
                                      n_outputs=1024,
                                      variable_name='g_w1')
            g_b1 = bias_variable(shape=[1024], variable_name='g_b1')
            g1 = tf.matmul(z, g_w1) + g_b1
            g1 = tf.nn.relu(g1)

            # Layer2
            g_w2 = weight_xavier_init(shape=[1024, 512 * 6 * 6],
                                      n_inputs=1024,
                                      n_outputs=512 * 6 * 6,
                                      variable_name='g_w2')
            g_b2 = bias_variable(shape=[512 * 6 * 6], variable_name='g_b2')
            g2 = tf.matmul(g1, g_w2) + g_b2
            g2 = tf.nn.relu(g2)
            g2 = tf.reshape(g2, shape=[-1, 6, 6, 512])
            # Layer3
            g_w3 = weight_xavier_init(shape=[5, 5, 256, 512],
                                      n_inputs=512,
                                      n_outputs=256,
                                      variable_name='g_w3')
            g_b3 = bias_variable(shape=[256], variable_name='g_b3')
            g3 = deconvolution_2d(g2, g_w3, 2)
            g3 = g3 + g_b3
            g3 = tf.nn.relu(g3)
            # Layer4
            g_w4 = weight_xavier_init(shape=[5, 5, 128, 256],
                                      n_inputs=256,
                                      n_outputs=128,
                                      variable_name='g_w4')
            g_b4 = bias_variable(shape=[128], variable_name='g_b4')
            g4 = deconvolution_2d(g3, g_w4, 2)
            g4 = g4 + g_b4
            g4 = tf.nn.relu(g4)
            # Layer5
            g_w5 = weight_xavier_init(shape=[5, 5, 64, 128],
                                      n_inputs=128,
                                      n_outputs=64,
                                      variable_name='g_w5')
            g_b5 = bias_variable(shape=[64], variable_name='g_b5')
            g5 = deconvolution_2d(g4, g_w5, 2)
            g5 = g5 + g_b5
            g5 = tf.nn.relu(g5)
            # Layer6
            g_w6 = weight_xavier_init(shape=[5, 5, 32, 64],
                                      n_inputs=64,
                                      n_outputs=32,
                                      variable_name='g_w6')
            g_b6 = bias_variable(shape=[32], variable_name='g_b6')
            g6 = deconvolution_2d(g5, g_w6, 2)
            g6 = g6 + g_b6
            g6 = tf.nn.relu(g6)

            # Final 1 x 1 convolution down to self.channels output channels
            g_w7 = weight_xavier_init(shape=[1, 1, 32, self.channels],
                                      n_inputs=32,
                                      n_outputs=self.channels,
                                      variable_name='g_w7')
            g_b7 = bias_variable(shape=[self.channels], variable_name='g_b7')
            g7 = convolution_2d(g6, g_w7)
            g7 = g7 + g_b7
            out = tf.nn.sigmoid(g7)
        return out
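A note on the spatial sizes in Example #5: starting from the 6 x 6 x 512 reshape, each of the four stride-2 transposed convolutions doubles the resolution (6 -> 12 -> 24 -> 48 -> 96), so the sigmoid output is 96 x 96 x self.channels. This matches the discriminator in Example #6, whose four stride-2 convolutions bring a 96 x 96 input back down to the 6 * 6 * 512 volume it flattens.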
Example #6
    def _GAN_discriminator(self, X, reuse=False):
        # Model Parameters
        # CNN model
        with tf.variable_scope("discriminator", reuse=reuse):
            X1 = tf.reshape(
                X,
                shape=[-1, self.image_width, self.image_height, self.channels])
            # First convolutional layer (stride 2): 64 different 5 x 5 features
            d_w1 = weight_xavier_init(shape=[5, 5, self.channels, 64],
                                      n_inputs=self.channels,
                                      n_outputs=64,
                                      variable_name='d_w1')
            d_b1 = bias_variable(shape=[64], variable_name='d_b1')
            d1 = convolution_2d(X1, d_w1, stride=2)
            d1 = d1 + d_b1
            d1 = leaky_relu(d1)

            # Second convolutional layer (stride 2): 128 different 5 x 5 features
            d_w2 = weight_xavier_init(shape=[5, 5, 64, 128],
                                      n_inputs=64,
                                      n_outputs=128,
                                      variable_name='d_w2')
            d_b2 = bias_variable(shape=[128], variable_name='d_b2')
            d2 = convolution_2d(d1, d_w2, stride=2)
            d2 = d2 + d_b2
            d2 = leaky_relu(d2)
            # Third convolutional layer (stride 2): 256 features
            d_w2_2 = weight_xavier_init(shape=[5, 5, 128, 256],
                                        n_inputs=128,
                                        n_outputs=256,
                                        variable_name='d_w2_2')
            d_b2_2 = bias_variable(shape=[256], variable_name='d_b2_2')
            d2_2 = convolution_2d(d2, d_w2_2, stride=2)
            d2_2 = d2_2 + d_b2_2
            d2_2 = leaky_relu(d2_2)
            # Fourth convolutional layer (stride 2): 512 features
            d_w2_3 = weight_xavier_init(shape=[5, 5, 256, 512],
                                        n_inputs=256,
                                        n_outputs=512,
                                        variable_name='d_w2_3')
            d_b2_3 = bias_variable(shape=[512], variable_name='d_b2_3')
            d2_3 = convolution_2d(d2_2, d_w2_3, stride=2)
            d2_3 = d2_3 + d_b2_3
            d2_3 = leaky_relu(d2_3)
            # First fully connected layer
            d_w3 = weight_xavier_init(shape=[6 * 6 * 512, 1024],
                                      n_inputs=6 * 6 * 512,
                                      n_outputs=1024,
                                      variable_name='d_w3')
            d_b3 = bias_variable(shape=[1024], variable_name='d_b3')
            d3 = tf.reshape(d2_3, [-1, 6 * 6 * 512])
            d3 = tf.matmul(d3, d_w3)
            d3 = d3 + d_b3
            d3 = leaky_relu(d3)

            # Second fully connected layer
            d_w4 = weight_xavier_init(shape=[1024, self.channels],
                                      n_inputs=1024,
                                      n_outputs=self.channels,
                                      variable_name='d_w4')
            d_b4 = bias_variable(shape=[self.channels], variable_name='d_b4')
            out_logit = tf.matmul(d3, d_w4) + d_b4
            return out_logit
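The discriminators above return the raw out_logit (Example #2 also returns the sigmoid output) rather than applying a loss internally. The method below is a hypothetical helper, not part of the original class, sketching how the generator and discriminator of Examples #5 and #6 are commonly combined into the standard GAN losses with tf.nn.sigmoid_cross_entropy_with_logits; the method name, placeholders and optimiser settings are illustrative assumptions.

    def _GAN_build_losses(self, z_dim):
        # Hypothetical wiring of generator and discriminator into GAN losses.
        z = tf.placeholder(tf.float32, [None, z_dim], name='z')
        X = tf.placeholder(tf.float32,
                           [None, self.image_width, self.image_height, self.channels],
                           name='X')

        G = self._GAN_generator(z, z_dim, reuse=False)
        D_real_logit = self._GAN_discriminator(X, reuse=False)
        D_fake_logit = self._GAN_discriminator(G, reuse=True)  # shared weights

        d_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=D_real_logit, labels=tf.ones_like(D_real_logit)) +
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=D_fake_logit, labels=tf.zeros_like(D_fake_logit)))
        g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=D_fake_logit, labels=tf.ones_like(D_fake_logit)))

        # Each optimiser updates only the variables of its own scope.
        d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
        g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
        d_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)
        g_opt = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)
        return d_opt, g_opt, d_loss, g_loss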