Example #1
    def discriminator(self, x, y=None, share_params=False, reuse=False, name=""):
        with tf.variable_scope("discriminator-%s" % name, reuse=reuse):
            if y is not None:
                # Condition on the label: flatten, append y, then project
                # back to image shape for the conv stack below.
                x = tf.layers.flatten(x)
                x = tf.concat([x, y], axis=1)

                x = t.dense(x, self.height * self.width * self.channel,
                            name='disc-' + name + '-dense-0-y')
                x = tf.reshape(x, self.image_shape)

            # A strided conv2d is used in place of max_pool2d for speed;
            # the CoGAN paper itself uses max_pool2d.

            x = t.conv2d(x, f=self.df_dim, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-0')
            x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-0')
            # x = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME',
            #                             name='disc-' + name + '-max_pool2d-0')

            x = t.conv2d(x, f=self.df_dim * 2, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-1')
            x = t.batch_norm(x, is_train=False, name='disc-bn-0')
            x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-1')
            # x = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME',
            #                             name='disc-' + name + '-max_pool2d-1')

            x = tf.layers.flatten(x)

        x = t.dense(x, self.fc_unit, reuse=share_params, name='disc-dense-0')
        x = t.batch_norm(x, reuse=share_params, name='disc-bn-1')
        x = t.prelu(x, reuse=share_params, name='disc-prelu-2')

        x = t.dense(x, 1, reuse=share_params, name='disc-dense-1')

        return x
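
These examples lean on a local helper module `t` (a TensorFlow utility file that is not shown here). As a rough sketch, the `t.prelu` used throughout presumably looks something like the following, assuming a learnable per-channel slope and a `reuse` flag for weight sharing; the actual implementation may differ:

import tensorflow as tf

def prelu(x, reuse=False, name='prelu'):
    # PReLU: f(x) = max(x, alpha * x) with a learnable per-channel alpha.
    with tf.variable_scope(name, reuse=reuse):
        alpha = tf.get_variable('alpha',
                                shape=[x.get_shape().as_list()[-1]],
                                initializer=tf.constant_initializer(0.2))
        return tf.maximum(x, alpha * x)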
Example #2
    def generator(self, z, y=None, share_params=False, reuse=False, training=True, name=""):
        if y is not None:
            # Conditional variant: concatenate the label onto the noise vector.
            x = tf.concat([z, y], axis=1)
        else:
            x = z

        x = tf.layers.flatten(x)

        x = tf.layers.dense(x, self.fc_unit, reuse=share_params, name='gen-dense-0')
        x = t.prelu(x, reuse=share_params, name='gen-prelu-0')

        x = tf.layers.dense(x, self.gf_dim * 8 * 7 * 7, reuse=share_params, name='gen-dense-1')
        x = t.batch_norm(x, reuse=share_params, is_train=training, name='gen-bn-0')
        x = t.prelu(x, reuse=share_params, name='gen-prelu-1')

        x = tf.reshape(x, (self.batch_size, 7, 7, self.gf_dim * 8))

        # x = deconv2d(x, f=self.gf_dim * 16, k=4, s=1, reuse=share_params, name='gen-deconv2d-0')
        # x = batch_norm(x, reuse=share_params, training=training, name="gen-bn-0")
        # x = prelu(x, reuse=share_params, name='gen-prelu-1')

        x = t.deconv2d(x, f=self.gf_dim * 4, k=3, s=2, reuse=share_params, name='gen-deconv2d-1')
        x = t.batch_norm(x, reuse=share_params, is_train=training, name="gen-bn-1")
        x = t.prelu(x, reuse=share_params, name='gen-prelu-2')

        x = t.deconv2d(x, f=self.gf_dim * 2, k=3, s=2, reuse=share_params, name='gen-deconv2d-2')
        x = t.batch_norm(x, reuse=share_params, is_train=training, name="gen-bn-2")
        x = t.prelu(x, reuse=share_params, name='gen-prelu-3')

        with tf.variable_scope("generator-%s" % name, reuse=reuse):
            x = t.deconv2d(x, f=self.channel, k=6, s=1, reuse=False, name='gen-' + name + '-deconv2d-3')
            x = tf.nn.sigmoid(x, name='gen-' + name + '-sigmoid-0')

        return x
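
A hedged usage sketch of how the two coupled generators are presumably instantiated (the surrounding training code is not shown; `model`, `z`, and the domain names here are illustrative): the first call creates the shared dense/deconv weights, the second reuses them via share_params, while each domain keeps its own output layer under its "generator-<name>" scope.

g1 = model.generator(z, share_params=False, reuse=False, name='g1')  # creates shared weights
g2 = model.generator(z, share_params=True, reuse=False, name='g2')   # reuses shared weights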
Example #3
    def generator(self, x, reuse=None, is_train=True):
        """
        :param x: LR (Low Resolution) images, (-1, 96, 96, 3)
        :param reuse: scope re-usability
        :param is_train: is trainable, default True
        :return: SR (Super Resolution) images, (-1, 384, 384, 3)
        """

        with tf.variable_scope("generator", reuse=reuse):

            def residual_block(x, f, name="", _is_train=True):
                with tf.variable_scope(name):
                    shortcut = tf.identity(x, name='n64s1-shortcut')

                    x = t.conv2d(x, f, 3, 1, name="n64s1-1")
                    x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-1")
                    x = t.prelu(x, reuse=reuse, name='n64s1-prelu-1')
                    x = t.conv2d(x, f, 3, 1, name="n64s1-2")
                    x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-2")
                    x = tf.add(x, shortcut)

                    return x

            x = t.conv2d(x, self.gf_dim, 9, 1, name='n64s1-1')
            x = t.prelu(x, name='n64s1-prelu-1')

            skip_conn = tf.identity(x, name='skip_connection')

            # B = 16 residual blocks
            for i in range(1, 17):
                x = residual_block(x,
                                   self.gf_dim,
                                   name='b-residual_block_%d' % i,
                                   _is_train=is_train)

            x = t.conv2d(x, self.gf_dim, 3, 1, name='n64s1-3')
            x = t.batch_norm(x, is_train=is_train, name='n64s1-bn-3')

            x = tf.add(x, skip_conn)

            # sub-pixel conv2d blocks
            for i in range(1, 3):
                x = t.conv2d(x,
                             self.gf_dim * 4,
                             3,
                             1,
                             name='n256s1-%d' % (i + 2))
                x = t.sub_pixel_conv2d(x, f=None, s=2)
                x = t.prelu(x, name='n256s1-prelu-%d' % i)

            x = t.conv2d(x, self.channel, 9, 1,
                         name='n3s1')  # (-1, 384, 384, 3)
            x = tf.nn.tanh(x)
            return x
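
The t.sub_pixel_conv2d call above performs the pixel-shuffle upscaling from the SRGAN paper. A minimal sketch, assuming it wraps tf.depth_to_space: the preceding conv produces gf_dim * 4 channels, and depth_to_space with block size 2 rearranges an (N, H, W, 4*C) tensor into (N, 2*H, 2*W, C). Two such blocks give the overall 4x upscale from 96x96 to 384x384.

import tensorflow as tf

def sub_pixel_conv2d(x, s=2):
    # Rearranges depth into space: (N, H, W, C*s*s) -> (N, H*s, W*s, C).
    return tf.depth_to_space(x, block_size=s)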
Example #4
    def generator(self, z, y=None, share_params=False, reuse=False, name=""):
        x = t.dense(z, self.fc_g_unit, reuse=share_params, name='gen-fc-1')
        x = t.batch_norm(x, reuse=share_params, name='gen-bn-1')
        x = t.prelu(x, reuse=share_params, name='gen-prelu-1')

        x = t.dense(x,
                    self.gf_dim * 8 * 7 * 7,
                    reuse=share_params,
                    name='gen-fc-2')
        x = t.batch_norm(x, reuse=share_params, name='gen-bn-2')
        x = t.prelu(x, reuse=share_params, name='gen-prelu-2')

        x = tf.reshape(x, (-1, 7, 7, self.gf_dim * 8))

        for i in range(1, 3):
            x = t.deconv2d(x,
                           f=self.gf_dim * 4 // i,
                           k=3,
                           s=2,
                           reuse=share_params,
                           name='gen-deconv2d-%d' % i)
            x = t.batch_norm(x, reuse=share_params, name="gen-bn-%d" % (i + 2))
            x = t.prelu(x, reuse=share_params, name='gen-prelu-%d' % (i + 2))
        """
        x = z  # tf.concat([z, y], axis=1)

        loop = 5
        for i in range(1, loop):
            x = t.dense(x, self.fc_g_unit, reuse=share_params, name='gen-fc-%d' % i)
            x = t.batch_norm(x, reuse=share_params, name='gen-bn-%d' % i)
            x = t.prelu(x, reuse=share_params, name='gen-prelu-%d' % i)
        """

        with tf.variable_scope("generator-%s" % name, reuse=reuse):
            x = t.deconv2d(x,
                           f=self.channel,
                           k=6,
                           s=1,
                           reuse=False,
                           name='gen-' + name + '-deconv2d-3')
            x = tf.nn.sigmoid(x, name='gen-' + name + '-sigmoid-1')
            """
            x = t.dense(x, self.n_input, reuse=False, name='gen-' + name + '-fc-%d' % loop)
            x = tf.nn.sigmoid(x)
            """

        return x
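
For reference, a quick shape walk-through of the deconv stack above (with 'SAME' padding, a stride-s transposed conv multiplies height and width by s):

h = w = 7
for s in (2, 2, 1):      # gen-deconv2d-1, gen-deconv2d-2, output deconv
    h, w = h * s, w * s
print(h, w)              # 28 28, i.e. MNIST-sized output images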
Example #5
    def discriminator(self,
                      x,
                      y=None,
                      share_params=False,
                      reuse=False,
                      name=""):
        with tf.variable_scope("discriminator-%s" % name, reuse=reuse):
            x = tf.reshape(x, (-1, self.height, self.width, self.channel))

            x = t.conv2d(x,
                         f=self.df_dim * 1,
                         k=5,
                         s=2,
                         reuse=False,
                         name='disc-' + name + '-conv2d-1')
            x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-1')
            # x = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME',
            #                             name='disc-' + name + '-max_pool2d-1')

            x = t.conv2d(x,
                         f=self.df_dim * 2,
                         k=5,
                         s=2,
                         reuse=False,
                         name='disc-' + name + '-conv2d-2')
            x = t.batch_norm(x, reuse=reuse, name='disc-' + name + '-bn-1')
            x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-2')
            # x = tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='SAME',
            #                             name='disc-' + name + '-max_pool2d-2')

            x = tf.layers.flatten(x)

        x = t.dense(x, self.fc_d_unit, reuse=share_params, name='disc-fc-1')
        x = t.batch_norm(x, reuse=share_params, name='disc-bn-2')
        x = t.prelu(x, reuse=share_params, name='disc-prelu-3')

        x = t.dense(x, 1, reuse=share_params, name='disc-fc-2')
        return x
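
A hedged sketch of how these coupled discriminators are presumably wired in CoGAN training (`model`, `x1`, `x2`, `g1`, `g2` are illustrative names): the conv stacks are domain-specific (scoped by `name`), the top dense layers are shared through share_params, and reuse=True routes the fake batches through the already-created variables.

d_real_1 = model.discriminator(x1, share_params=False, reuse=False, name='d1')
d_real_2 = model.discriminator(x2, share_params=True, reuse=False, name='d2')
d_fake_1 = model.discriminator(g1, share_params=True, reuse=True, name='d1')
d_fake_2 = model.discriminator(g2, share_params=True, reuse=True, name='d2')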