def _generator(self, z, reuse=False):
        with tf.variable_scope('G', reuse=reuse):
            net = z
            net = slim.fully_connected(net,
                                       4 * 4 * 1024,
                                       activation_fn=tf.nn.relu)
            net = tf.reshape(net, [-1, 4, 4, 1024])
            filter_num = 512
            input_size = 4
            stride = 2
            with slim.arg_scope([slim.conv2d_transpose],
                                kernel_size=[5, 5],
                                stride=stride,
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                normalizer_fn=self.norm_fn,
                                normalizer_params=self.bn_params):
                # Upsample 2x per layer until reaching the target resolution.
                while input_size < (self.shape[0] // stride):
                    net = slim.conv2d_transpose(net, filter_num)
                    expected_shape(
                        net,
                        [input_size * stride, input_size * stride, filter_num])
                    filter_num = filter_num // 2
                    input_size = input_size * stride

                net = slim.conv2d_transpose(net,
                                            self.shape[2],
                                            activation_fn=tf.nn.tanh,
                                            normalizer_fn=None)
                expected_shape(net,
                               [self.shape[0], self.shape[1], self.shape[2]])

                return net
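Every example in this listing calls an expected_shape helper that is not shown here. A minimal sketch of what it plausibly does, asserting a tensor's static shape with the batch dimension excluded, is given below as an assumption; the actual helper in the source repo may differ.

# Assumed sketch of the expected_shape helper used throughout this listing
# (not the repo's actual code; it may differ in detail).
def expected_shape(tensor, expected):
    # Compare the static shape, batch dimension dropped.
    actual = tensor.get_shape().as_list()[1:]
    assert actual == expected, 'shape mismatch: {} vs {}'.format(actual, expected)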
Example #2
    def _good_critic(self, X, reuse=False):
        with tf.variable_scope('critic', reuse=reuse):
            nf = 64
            net = slim.conv2d(X, nf, [3, 3], activation_fn=None)  # 64x64x64
            net = self._residual_block(net,
                                       2 * nf,
                                       resample='down',
                                       name='res_block1')  # 32x32x128
            net = self._residual_block(net,
                                       4 * nf,
                                       resample='down',
                                       name='res_block2')  # 16x16x256
            net = self._residual_block(net,
                                       8 * nf,
                                       resample='down',
                                       name='res_block3')  # 8x8x512
            net = self._residual_block(net,
                                       8 * nf,
                                       resample='down',
                                       name='res_block4')  # 4x4x512
            expected_shape(net, [4, 4, 512])
            net = slim.flatten(net)
            net = slim.fully_connected(net, 1, activation_fn=None)

            return net
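_good_critic above and _good_generator (Example #5) depend on a _residual_block helper that is not included in this listing. The following is a hypothetical pre-activation residual block consistent with how it is called ('up' resamples by nearest-neighbor upsampling, 'down' by average pooling, with a 1x1 projection on the shortcut); the repo's actual block, including any normalization inside it, may differ.

    # Hypothetical _residual_block consistent with the calls above; the real
    # helper may differ (e.g. it may apply batch/layer norm around the convs).
    def _residual_block(self, X, nf, resample, name):
        def _up(t):  # nearest-neighbor 2x upsampling
            h, w = t.get_shape().as_list()[1:3]
            return tf.image.resize_nearest_neighbor(t, [2 * h, 2 * w])

        def _down(t):  # 2x average pooling
            return slim.avg_pool2d(t, [2, 2], stride=2, padding='SAME')

        with tf.variable_scope(name):
            net = tf.nn.relu(X)
            if resample == 'up':
                net = _up(net)
            net = slim.conv2d(net, nf, [3, 3], activation_fn=tf.nn.relu)
            net = slim.conv2d(net, nf, [3, 3], activation_fn=None)
            if resample == 'down':
                net = _down(net)

            # Shortcut: resample the input and match channels with a 1x1 conv.
            shortcut = _up(X) if resample == 'up' else _down(X)
            shortcut = slim.conv2d(shortcut, nf, [1, 1], activation_fn=None)
            return net + shortcut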
Example #3
    def _discriminator(self, X, reuse=False):
        with tf.variable_scope('D', reuse=reuse):
            net = X
            width = self.shape[0]
            filter_num = 64
            stride = 2
            num_conv_layers = 4
            with slim.arg_scope([slim.conv2d],
                                kernel_size=[5, 5],
                                stride=stride,
                                padding='SAME',
                                activation_fn=ops.lrelu,
                                normalizer_fn=self.norm_fn,
                                normalizer_params=self.bn_params):
                for layer_num in range(1, num_conv_layers + 1):
                    if layer_num == 1:
                        net = slim.conv2d(net, filter_num, normalizer_fn=None)
                    else:
                        net = slim.conv2d(net,
                                          filter_num,
                                          normalizer_fn=self.norm_fn)

                    # With padding='SAME', output_dim = ceil(input_dim / stride).
                    # See https://www.tensorflow.org/api_guides/python/nn#Convolution -- Ishaan
                    output_dim = math.ceil(width / stride)
                    expected_shape(net, [output_dim, output_dim, filter_num])
                    width = width // 2
                    filter_num = filter_num * 2

            net = slim.flatten(net)
            logits = slim.fully_connected(net, 1, activation_fn=None)
            prob = tf.sigmoid(logits)

            return prob, logits
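This discriminator returns both prob and logits; the losses are normally built from the logits for numerical stability. A minimal sketch of the standard wiring (TF 1.x), where D_real_logits and D_fake_logits are assumed names for the two discriminator passes:

# Sketch only; D_real_logits / D_fake_logits are assumed names.
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_real_logits, labels=tf.ones_like(D_real_logits)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_fake_logits, labels=tf.zeros_like(D_fake_logits)))
D_loss = D_loss_real + D_loss_fake
# Non-saturating generator loss: push fake logits toward the 'real' label.
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=D_fake_logits, labels=tf.ones_like(D_fake_logits)))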
Example #4
    def _discriminator(self, X, reuse=False):
        with tf.variable_scope('D', reuse=reuse):
            net = X

            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                                kernel_size=[4, 4],
                                stride=2,
                                padding='SAME',
                                activation_fn=ops.lrelu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.bn_params):
                # encoder
                net = slim.conv2d(net, 64, normalizer_fn=None)  # 32x32
                net = slim.conv2d(net, 128)  # 16x16
                net = slim.conv2d(net, 256)  # 8x8
                latent = net
                expected_shape(latent, [8, 8, 256])
                # decoder
                net = slim.conv2d_transpose(net, 128)  # 16x16
                net = slim.conv2d_transpose(net, 64)  # 32x32
                x_recon = slim.conv2d_transpose(net,
                                                3,
                                                activation_fn=None,
                                                normalizer_fn=None)
                expected_shape(x_recon, [64, 64, 3])

            energy = tf.sqrt(
                tf.reduce_sum(tf.square(X - x_recon),
                              axis=[1, 2, 3]))  # l2-norm error
            energy = tf.reduce_mean(energy)

            return latent, energy
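Here the discriminator is an autoencoder whose reconstruction error serves as an energy, as in EBGAN. A sketch of the usual margin loss built on top of it, where energy_real, energy_fake, and the margin m are assumed names/hyperparameters:

# Sketch only; energy_real / energy_fake / m are assumptions.
D_loss = energy_real + tf.maximum(0., m - energy_fake)
G_loss = energy_fake  # optionally plus a pulling-away term (see pt_regularizer, Example #9)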
Example #5
    def _good_generator(self, z, reuse=False):
        with tf.variable_scope('generator', reuse=reuse):
            nf = 64
            net = slim.fully_connected(z, 4 * 4 * 8 * nf,
                                       activation_fn=None)  # 4x4x512
            net = tf.reshape(net, [-1, 4, 4, 8 * nf])
            net = self._residual_block(net,
                                       8 * nf,
                                       resample='up',
                                       name='res_block1')  # 8x8x512
            net = self._residual_block(net,
                                       4 * nf,
                                       resample='up',
                                       name='res_block2')  # 16x16x256
            net = self._residual_block(net,
                                       2 * nf,
                                       resample='up',
                                       name='res_block3')  # 32x32x128
            net = self._residual_block(net,
                                       1 * nf,
                                       resample='up',
                                       name='res_block4')  # 64x64x64
            expected_shape(net, [64, 64, 64])
            net = slim.batch_norm(net,
                                  activation_fn=tf.nn.relu,
                                  **self.bn_params)
            net = slim.conv2d(net,
                              3,
                              kernel_size=[3, 3],
                              activation_fn=tf.nn.tanh)
            expected_shape(net, [64, 64, 3])

            return net
Example #6
    def _generator(self, z, reuse=False):
        with tf.variable_scope('G', reuse=reuse):
            net = z
            net = slim.fully_connected(net,
                                       4 * 4 * 256,
                                       activation_fn=tf.nn.relu,
                                       normalizer_fn=slim.batch_norm,
                                       normalizer_params=self.bn_params)
            net = tf.reshape(net, [-1, 4, 4, 256])

            with slim.arg_scope([slim.conv2d_transpose],
                                kernel_size=[3, 3],
                                padding='SAME',
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.bn_params):

                net = slim.conv2d_transpose(net, 256, stride=2)
                net = slim.conv2d_transpose(net, 256, stride=1)
                expected_shape(net, [8, 8, 256])
                net = slim.conv2d_transpose(net, 256, stride=2)
                net = slim.conv2d_transpose(net, 256, stride=1)
                expected_shape(net, [16, 16, 256])
                net = slim.conv2d_transpose(net, 128, stride=2)
                expected_shape(net, [32, 32, 128])
                net = slim.conv2d_transpose(net, 64, stride=2)
                expected_shape(net, [64, 64, 64])
                net = slim.conv2d_transpose(net,
                                            3,
                                            stride=1,
                                            activation_fn=tf.nn.tanh,
                                            normalizer_fn=None)
                expected_shape(net, [64, 64, 3])

                return net
Example #7
    def _generator(self, z, reuse=False):
        with tf.variable_scope('generator', reuse=reuse):
            net = z
            net = slim.fully_connected(net,
                                       4 * 4 * 1024,
                                       activation_fn=tf.nn.relu)
            net = tf.reshape(net, [-1, 4, 4, 1024])

            with slim.arg_scope([slim.conv2d_transpose],
                                kernel_size=[5, 5],
                                stride=2,
                                activation_fn=tf.nn.relu):
                net = slim.conv2d_transpose(net, 512)
                expected_shape(net, [8, 8, 512])
                net = slim.conv2d_transpose(net, 256)
                expected_shape(net, [16, 16, 256])
                net = slim.conv2d_transpose(net, 128)
                expected_shape(net, [32, 32, 128])
                net = slim.conv2d_transpose(net, 256)
                expected_shape(net, [64, 64, 256])
                net = slim.conv2d_transpose(net,
                                            1,
                                            activation_fn=tf.nn.tanh,
                                            normalizer_fn=None)
                expected_shape(net, [128, 128, 1])

                return net
Example #8
    def _dcgan_critic(self, X, reuse=False):
        '''
        K-Lipschitz function.
        WGAN-GP does not use batch norm in the critic.
        '''
        with tf.variable_scope('critic', reuse=reuse):
            net = X

            with slim.arg_scope([slim.conv2d],
                                kernel_size=[5, 5],
                                stride=2,
                                padding='SAME',
                                activation_fn=ops.lrelu):
                net = slim.conv2d(net, 32)
                expected_shape(net, [64, 64, 32])
                net = slim.conv2d(net, 64)
                expected_shape(net, [32, 32, 64])
                net = slim.conv2d(net, 128)
                expected_shape(net, [16, 16, 128])
                net = slim.conv2d(net, 256)
                expected_shape(net, [8, 8, 256])
                net = slim.conv2d(net, 512)
                expected_shape(net, [4, 4, 512])

            net = slim.flatten(net)
            net = slim.fully_connected(net, 1, activation_fn=None)

            return net
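Because this critic is meant for WGAN-GP, the Lipschitz constraint comes from a gradient penalty on random interpolates rather than from batch norm or weight clipping. A minimal sketch (TF 1.x), where X_real, X_fake, C_real, C_fake, and lambda_gp are assumed names:

# Sketch only; the names below are assumptions.
eps = tf.random_uniform([tf.shape(X_real)[0], 1, 1, 1], 0., 1.)
X_hat = eps * X_real + (1. - eps) * X_fake  # random interpolates
C_hat = self._dcgan_critic(X_hat, reuse=True)
grads = tf.gradients(C_hat, X_hat)[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gp = tf.reduce_mean(tf.square(slopes - 1.))  # penalize gradient norms away from 1
C_loss = tf.reduce_mean(C_fake) - tf.reduce_mean(C_real) + lambda_gp * gp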
Example #9
    def pt_regularizer(self, lf):
        '''Pulling-away term (PT) regularizer from the EBGAN paper.'''
        eps = 1e-8  # epsilon for numerical stability
        lf = slim.flatten(lf)
        # l2_norm = tf.sqrt(tf.reduce_sum(tf.square(lf), axis=1, keep_dims=True))
        l2_norm = tf.norm(lf, axis=1, keep_dims=True)
        expected_shape(l2_norm, [1])
        unit_lf = lf / (l2_norm + eps)
        # squared cosine similarity: [N, h_dim] x [h_dim, N] = [N, N]
        cos_sim = tf.square(tf.matmul(unit_lf, unit_lf, transpose_b=True))
        N = tf.cast(tf.shape(lf)[0], tf.float32)  # batch size
        pt_loss = (tf.reduce_sum(cos_sim) - N) / (N * (N - 1))
        return pt_loss
     
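In EBGAN-PT this term is added to the generator loss with a small weight, discouraging generated samples from collapsing onto correlated latent codes. A sketch, where latent_fake, energy_fake, and pt_weight are assumed names:

# Sketch only; the names below are assumptions.
G_loss = energy_fake + pt_weight * self.pt_regularizer(latent_fake)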
Example #10
    def _discriminator(self, X, reuse=False):
        with tf.variable_scope('D', reuse=reuse):
            net = X

            with slim.arg_scope([slim.conv2d],
                                kernel_size=[5, 5],
                                stride=2,
                                padding='SAME',
                                activation_fn=ops.lrelu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.bn_params):

                net = slim.conv2d(net, 64, normalizer_fn=None)
                expected_shape(net, [32, 32, 64])
                net = slim.conv2d(net, 128)
                expected_shape(net, [16, 16, 128])
                net = slim.conv2d(net, 256)
                expected_shape(net, [8, 8, 256])
                net = slim.conv2d(net, 512)
                expected_shape(net, [4, 4, 512])

            net = slim.flatten(net)
            d_value = slim.fully_connected(net, 1, activation_fn=None)

            return d_value
Example #11
    def _discriminator(self, X, reuse=False):
        with tf.variable_scope('D', reuse=reuse):
            net = X

            with slim.arg_scope([slim.conv2d],
                                kernel_size=[5, 5],
                                stride=2,
                                padding='SAME',
                                activation_fn=ops.lrelu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.bn_params):
                net = slim.conv2d(net, 128, normalizer_fn=None)
                net = slim.conv2d(net, 256)
                net = slim.conv2d(net, 512)
                net = slim.conv2d(net, 1024)
                expected_shape(net, [4, 4, 1024])

            net = slim.flatten(net)
            logits = slim.fully_connected(net, 1, activation_fn=None)

            return logits  # potential
Example #12
    def _discriminator(self, X, reuse=False):
        with tf.variable_scope('discriminator', reuse=reuse):
            net = X

            with slim.arg_scope([slim.conv2d],
                                kernel_size=[5, 5],
                                stride=2,
                                activation_fn=ops.lrelu):
                net = slim.conv2d(net, 32)
                expected_shape(net, [64, 64, 32])
                net = slim.conv2d(net, 64)
                expected_shape(net, [32, 32, 64])
                net = slim.conv2d(net, 128)
                expected_shape(net, [16, 16, 128])
                net = slim.conv2d(net, 256)
                expected_shape(net, [8, 8, 256])
                net = slim.conv2d(net, 512)
                expected_shape(net, [4, 4, 512])

            net = slim.flatten(net)
            logits = slim.fully_connected(net, 1, activation_fn=None)
            prob = tf.nn.sigmoid(logits)

            return prob, logits
Example #13
    def _critic(self, X, reuse=False):
        ''' K-Lipschitz function '''
        with tf.variable_scope('critic', reuse=reuse):
            net = X

            with slim.arg_scope([slim.conv2d],
                                kernel_size=[5, 5],
                                stride=2,
                                activation_fn=ops.lrelu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.bn_params):
                net = slim.conv2d(net, 64, normalizer_fn=None)
                expected_shape(net, [64, 64, 64])
                net = slim.conv2d(net, 128)
                expected_shape(net, [32, 32, 128])
                net = slim.conv2d(net, 256)
                expected_shape(net, [16, 16, 256])
                net = slim.conv2d(net, 512)
                expected_shape(net, [8, 8, 512])

            net = slim.flatten(net)
            net = slim.fully_connected(net, 1, activation_fn=None)

            return net
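This critic matches the original WGAN setup, where the Lipschitz constraint is enforced by clipping the critic weights after every update (0.01 is the paper's default clip value). A sketch (TF 1.x), where C_real and C_fake are assumed names:

# Sketch only; C_real / C_fake are assumed names.
C_loss = tf.reduce_mean(C_fake) - tf.reduce_mean(C_real)
G_loss = -tf.reduce_mean(C_fake)
clip_C = [w.assign(tf.clip_by_value(w, -0.01, 0.01))
          for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')]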
Example #14
    def _generator(self, z, reuse=False):
        with tf.variable_scope('generator', reuse=reuse):
            net = z
            net = slim.flatten(net)
            net = slim.fully_connected(net, 8*8*1024, activation_fn=tf.nn.relu)
            net = tf.reshape(net, [-1, 8, 8, 1024])

            with slim.arg_scope([slim.conv2d_transpose],
                                kernel_size=[5, 5],
                                stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.bn_params):
                net = slim.conv2d_transpose(net, 512)
                expected_shape(net, [16, 16, 512])
                net = slim.conv2d_transpose(net, 256)
                expected_shape(net, [32, 32, 256])
                net = slim.conv2d_transpose(net, 128)
                expected_shape(net, [64, 64, 128])
                net = slim.conv2d_transpose(net, 3, activation_fn=tf.nn.tanh, normalizer_fn=None)
                expected_shape(net, [128, 128, 3])

                return net