Example #1
    def discriminator(self, x, reuse=None):
        """
        :param x: images
        :param reuse: re-usable
        :return: logits
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            x = t.conv2d(x, self.df_dim * 1, 4, 2, name='disc-conv2d-1')
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = t.conv2d(x, self.df_dim * 2, 4, 2, name='disc-conv2d-2')
            x = t.batch_norm(x, name='disc-bn-1')
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = t.conv2d(x, self.df_dim * 4, 4, 2, name='disc-conv2d-3')
            x = t.batch_norm(x, name='disc-bn-2')
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = t.conv2d(x, self.df_dim * 8, 4, 2, name='disc-conv2d-4')
            x = t.batch_norm(x, name='disc-bn-3')
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = tf.layers.flatten(x)

            x = t.dense(x, self.fc_unit, name='disc-fc-1')
            x = t.batch_norm(x, name='disc-bn-4')
            x = tf.nn.leaky_relu(x, alpha=0.1)

            x = t.dense(x, 1 + self.n_cont + self.n_cat, name='disc-fc-2')
            prob, cont, cat = x[:, 0], x[:, 1:1 + self.n_cont], x[:, 1 + self.n_cont:]  # logits

            prob = tf.nn.sigmoid(prob)  # probability
            cat = tf.nn.softmax(cat)    # categories

            return prob, cont, cat
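
The single dense head above packs three outputs into one tensor, and the slicing then splits them apart. A worked example of the split, assuming illustrative sizes n_cont = 2 and n_cat = 10 (values not taken from the source):

    # dense output has 1 + n_cont + n_cat = 13 units per sample
    # x[:, 0]    -> real/fake logit   -> sigmoid -> prob, shape (-1,)
    # x[:, 1:3]  -> continuous codes  -> raw     -> cont, shape (-1, 2)
    # x[:, 3:13] -> category logits   -> softmax -> cat,  shape (-1, 10)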
Example #2
    def discriminator(self, x, reuse=None):
        """
        # Following a D Network, CiFar-like-hood, referred in the paper
        :param x: images
        :param y: labels
        :param reuse: re-usable
        :return: classification, probability (fake or real), network
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            x = t.conv2d(x, self.df_dim, 3, 2, name='disc-conv2d-1')
            x = tf.nn.leaky_relu(x, alpha=0.2)
            x = tf.layers.dropout(x, 0.5, name='disc-dropout2d-1')

            for i in range(5):
                x = t.conv2d(x,
                             self.df_dim * (2**(i + 1)),
                             k=3,
                             s=(i % 2 + 1),
                             name='disc-conv2d-%d' % (i + 2))
                x = t.batch_norm(x, reuse=reuse, name="disc-bn-%d" % (i + 1))
                x = tf.nn.leaky_relu(x, alpha=0.2)
                x = tf.layers.dropout(x,
                                      0.5,
                                      name='disc-dropout2d-%d' % (i + 2))

            net = tf.layers.flatten(x)

            cat = t.dense(net, self.n_classes, name='disc-fc-cat')
            disc = t.dense(net, 1, name='disc-fc-disc')

            return cat, disc, net
Example #3
    def discriminator(self, x, y=None, reuse=None, is_train=True):
        """
        :param x: images
        :param y: labels
        :param reuse: re-usable
        :param is_train: en/disable batch_norm, default True
        :return: logits
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            if y is not None:
                raise NotImplementedError("[-] Not Implemented Yet...")

            x = t.conv2d(x, f=self.gf_dim * 1, name="disc-conv2d-0")
            x = tf.nn.leaky_relu(x)

            for i in range(1, 4):
                x = t.conv2d(x, f=self.gf_dim * (2 ** i), name="disc-conv2d-%d" % i)
                x = t.batch_norm(x, is_train=is_train)
                x = tf.nn.leaky_relu(x)

            feature_match = x   # (-1, 8, 8, 512)

            x = tf.layers.flatten(x)

            x = t.dense(x, 1, name='disc-fc-0')

            return feature_match, x
Example #4
    def discriminator(self, x, y=None, share_params=False, reuse=False, name=""):
        with tf.variable_scope("discriminator-%s" % name, reuse=reuse):
            if y is not None:
                x = tf.layers.flatten(x)

                x = tf.concat([x, y], axis=1)

                x = t.dense(x, self.height * self.width * self.channel,
                            name='disc-' + name + '-dense-0-y')
                x = tf.reshape(x, self.image_shape)

            # Using conv2d pooling instead of max_pool2d because of the speed.
            # In the CoGAN paper, max_pool2d is used.

            x = t.conv2d(x, f=self.df_dim, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-0')
            x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-0')
            # x = tf.nn.max_pool(x, ksize=2, strides=2, padding='SAME', name='disc' + name + '-max_pool2d-0')

            x = t.conv2d(x, f=self.df_dim * 2, k=5, s=2, reuse=False, name='disc-' + name + '-conv2d-1')
            x = t.batch_norm(x, is_train=False, name='disc-bn-0')
            x = t.prelu(x, reuse=False, name='disc-' + name + '-prelu-1')
            # x = tf.nn.max_pool(x, ksize=2, strides=2, padding='SAME', name='disc' + name + '-max_pool2d-1')

            x = tf.layers.flatten(x)

        x = t.dense(x, self.fc_unit, reuse=share_params, name='disc-dense-0')
        x = t.batch_norm(x, is_train=share_params, name='disc-bn-1')
        x = t.prelu(x, reuse=share_params, name='disc-prelu-2')

        x = t.dense(x, 1, reuse=share_params, name='disc-dense-1')

        return x
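
Note the split between the per-domain conv stack (inside the named scope, always built fresh) and the final dense layers (outside it, shared through reuse=share_params). A minimal usage sketch under that reading; the tensor names here are illustrative, not from the source:

    # hypothetical wiring of two domain discriminators (CoGAN-style)
    d_a = self.discriminator(x_a, share_params=False, reuse=False, name='A')  # creates disc-dense-*
    d_b = self.discriminator(x_b, share_params=True, reuse=False, name='B')   # re-uses disc-dense-*
    # conv/prelu weights live in 'discriminator-A' / 'discriminator-B' and stay private,
    # while 'disc-dense-0/1' and 'disc-bn-1' are shared between the two branches.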
Example #5
    def generator(self, z, reuse=None):
        """
        :param z: embeddings
        :param reuse: re-usable
        :return: logits
        """
        with tf.variable_scope("generator", reuse=reuse):
            repeat = int(np.log2(self.height)) - 2

            x = t.dense(z, self.z_dim * 8 * 8, name='gen-fc-1')
            x = tf.nn.elu(x)

            x = tf.reshape(x, [-1, 8, 8, self.z_dim])

            # shortcut = tf.identity(x, name='shortcut')

            for i in range(1, repeat + 1):
                x = t.conv2d(x, f=self.gf_dim, name="gen-conv2d-%d" % (i * 2 - 1))
                x = tf.nn.elu(x)
                x = t.conv2d(x, f=self.gf_dim, name="gen-conv2d-%d" % (i * 2))
                x = tf.nn.elu(x)

                # if i < 3:
                #     x = tf.add(x, shortcut)

                if i < repeat:
                    x = t.up_sampling(x, tf.image.ResizeMethod.NEAREST_NEIGHBOR)  # NN up-sampling

            x = t.conv2d(x, f=self.channel, name='gen-conv2d-%d' % (2 * repeat + 1))
            x = tf.nn.tanh(x)
            return x
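
The repeat count is what ties this generator to the target resolution: the feature map starts at 8x8 and is doubled after every conv pair except the last. A quick, runnable check of that arithmetic (height = 64 is an illustrative value):

    import numpy as np

    height = 64                         # illustrative target resolution
    repeat = int(np.log2(height)) - 2   # 4 conv pairs for 64x64
    side = 8                            # spatial size after the initial reshape
    side *= 2 ** (repeat - 1)           # up-sampled after all but the last pair
    assert side == height               # 8 -> 16 -> 32 -> 64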
Example #6
    def discriminator(self, x, reuse=None):
        """
        # Following a network architecture referred in the paper
        :param x: Input images (-1, 384, 384, 3)
        :param reuse: re-usability
        :return: HR (High Resolution) or SR (Super Resolution) images
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            x = t.conv2d(x, self.df_dim, 3, 1, name='n64s1-1')
            x = tf.nn.leaky_relu(x)

            strides = [2, 1]
            filters = [1, 2, 2, 4, 4, 8, 8]  # multipliers on self.df_dim

            for i, f in enumerate(filters):
                x = t.conv2d(x,
                             f=self.df_dim * f,
                             k=3,
                             s=strides[i % 2],
                             name='n%ds%d-%d' % (self.df_dim * f, strides[i % 2], i + 1))
                x = t.batch_norm(x, name='n%d-bn-%d' % (self.df_dim * f, i + 1))
                x = tf.nn.leaky_relu(x)

            x = tf.layers.flatten(x)  # (-1, 24 * 24 * self.df_dim * 8) for 384x384 inputs

            x = t.dense(x, 1024, name='disc-fc-1')
            x = tf.nn.leaky_relu(x)

            x = t.dense(x, 1, name='disc-fc-2')
            # x = tf.nn.sigmoid(x)
            return x
Example #7
    def generator(self, z, reuse=None):
        """
        :param z: embeddings
        :param reuse: re-usable
        :return: logits
        """
        with tf.variable_scope("generator", reuse=reuse):
            repeat = int(np.log2(self.height)) - 2

            x = t.dense(z, self.z_dim * 8 * 8, name='g-fc-1')
            x = tf.nn.elu(x)

            x = tf.reshape(x, [-1, 8, 8, self.z_dim])

            for i in range(1, repeat + 1):
                x = t.conv2d(x, f=self.gf_dim, name="g-conv-%d" % (i * 2 - 1))
                x = tf.nn.elu(x)
                x = t.conv2d(x, f=self.gf_dim, name="g-conv-%d" % (i * 2))
                x = tf.nn.elu(x)

                if i < repeat:
                    x = t.resize_nn(x,
                                    x.get_shape().as_list()[1] *
                                    2)  # NN up-sampling

            x = t.conv2d(x, f=self.channel)

            return x
Example #8
    def non_local_block(x, f, sub_sampling=False, name="nonlocal"):
        """ non-local block, https://arxiv.org/pdf/1711.07971.pdf """
        with tf.variable_scope("non_local_block-%s" % name):
            batch, h, w = tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2]
            n_channel = x.get_shape().as_list()[-1]

            with tf.name_scope("theta"):
                theta = t.conv2d(x, f=f, k=1, s=1, name="theta")
                theta = tf.reshape(theta, (batch, -1, f))  # (N, HW, f)

            with tf.name_scope("phi"):
                phi = t.conv2d(x, f=f, k=1, s=1, name="phi")
                if sub_sampling:  # the paper sub-samples phi & g only
                    phi = tf.layers.max_pooling2d(phi, pool_size=2, strides=2, name="max_pool-phi")
                phi = tf.reshape(phi, (batch, -1, f))      # (N, HW', f)

            with tf.name_scope("g"):
                g = t.conv2d(x, f=f, k=1, s=1, name="g")
                if sub_sampling:
                    g = tf.layers.max_pooling2d(g, pool_size=2, strides=2, name="max_pool-g")
                g = tf.reshape(g, (batch, -1, f))          # (N, HW', f)

            with tf.name_scope("self-attention"):
                theta_phi = tf.matmul(theta, phi, transpose_b=True)  # (N, HW, HW')
                theta_phi = tf.nn.softmax(theta_phi)                 # attention map
                theta_phi_g = tf.matmul(theta_phi, g)                # (N, HW, f)

            theta_phi_g = tf.reshape(theta_phi_g, (batch, h, w, f))
            # project back to the input channel count so the residual add is valid
            theta_phi_g = t.conv2d(theta_phi_g, f=n_channel, k=1, s=1, name="theta_phi_g")
            return x + theta_phi_g
Example #9
    def discriminator(self, x, reuse=None, name=""):
        """
        :param x: 128x128x3 images
        :param reuse: re-usability
        :param name: name

        :return: logits, prob
        """
        with tf.variable_scope('discriminator-%s' % name, reuse=reuse):

            def residual_block(x, f, name=''):  # a strided conv block (no skip), despite the name
                x = t.conv2d(x, f=f, k=4, s=2, name='disc-conv2d-%s' % name)
                x = t.instance_norm(x, name='disc-ins_norm-%s' % name)
                x = tf.nn.leaky_relu(x, alpha=0.2)
                return x

            x = t.conv2d(x, f=self.df_dim, name='disc-conv2d-0')
            x = tf.nn.leaky_relu(x, alpha=0.2)

            x = residual_block(x, f=self.df_dim * 2, name='1')
            x = residual_block(x, f=self.df_dim * 4, name='2')
            x = residual_block(x, f=self.df_dim * 8, name='3')
            # for 256x256x3 images
            # x = residual_block(x, f=self.df_dim * 8, name='4')
            # x = residual_block(x, f=self.df_dim * 8, name='5')

            logits = t.conv2d(x, f=1, name='disc-conv2d-last')
            # prob = tf.nn.sigmoid(logits)

            return logits
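
Since the last layer is a 1-filter conv rather than a dense layer, the returned logits form a spatial map of per-patch real/fake scores (PatchGAN-style). A minimal sketch of the loss such a map is usually trained with; the helper below is illustrative, not the author's training code:

    def patch_gan_loss(d_real, d_fake):
        # per-patch sigmoid cross-entropy, averaged over all patches and the batch
        real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(d_real), logits=d_real))
        fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_fake), logits=d_fake))
        return real_loss + fake_loss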
Example #10
    def discriminator(self, x, reuse=None):
        """
        # Following a D Network, CiFar-like-hood, referred in the paper
        :param x: image, shape=(-1, 28, 28, 1)
        :param reuse: re-usable
        :return: logits, networks
        """
        with tf.variable_scope("discriminator", reuse=reuse):
            x = t.conv2d(x, self.df_dim, k=3, s=2, name='d-conv-0')
            x = tf.nn.leaky_relu(x)
            x = tf.layers.dropout(x, 0.5, name='d-dropout-0')

            for i in range(1, 2 * 2 + 1):
                f = self.df_dim * (i + 1)
                x = t.conv2d(x, f=f, k=3, s=(i % 2 + 1), name='d-conv-%d' % i)
                x = t.batch_norm(x)
                x = tf.nn.leaky_relu(x)
                x = tf.layers.dropout(x, 0.5, name='d-dropout-%d' % i)

            x = tf.layers.flatten(x)

            x = t.dense(x, self.fc_unit * 2, name='d-fc-1')
            net = tf.nn.leaky_relu(x)

            x = t.dense(net, 1, name='d-fc-2')  # logits

            return x, net
Example #11
    def encoder(self, x, reuse=None):
        """
        :param x: Input images (32x32x3 or 64x64x3)
        :param reuse: variable scope re-use
        :return: embeddings
        """
        with tf.variable_scope('encoder', reuse=reuse):
            repeat = int(np.log2(self.height)) - 2

            x = t.conv2d(x, f=self.df_dim, name="enc-conv2d-1")
            x = tf.nn.elu(x)

            for i in range(1, repeat + 1):
                f = self.df_dim * i

                x = t.conv2d(x, f, 3, 1, name="enc-conv2d-%d" % (i * 2))
                x = tf.nn.elu(x)
                x = t.conv2d(x, f, 3, 1, name="enc-conv2d-%d" % (i * 2 + 1))
                x = tf.nn.elu(x)

                if i < repeat:
                    # Any of max-pool, avg-pool, or conv-pool works here; conv-pooling
                    # (a strided conv) is used since it is reportedly the fastest of the three.
                    x = t.conv2d(x, f, 3, 2, name='enc-conv2d-pool-%d' % i)  # conv pooling
                    x = tf.nn.elu(x)

            x = t.flatten(x)

            x = t.dense(x, self.z_dim, name='enc-fc-1')  # normally, (-1, 128)
            return x
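
For reference, the two pooling alternatives mentioned in the comment map onto stock TF ops, while the conv-pool used above is just a strided conv (a sketch, assuming 2x down-sampling):

    # max-pool:  x = tf.layers.max_pooling2d(x, pool_size=2, strides=2)
    # avg-pool:  x = tf.layers.average_pooling2d(x, pool_size=2, strides=2)
    # conv-pool: x = t.conv2d(x, f, 3, 2, ...)  # the strided conv used above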
Example #12
 def residual_channel_attention_network(self, x, f, kernel_size, reduction,
                                        use_bn, scale):
     with tf.variable_scope("Residual_Channel_Attention_Network"):
         x = self.image_processing(x, sign=-1, name='pre-processing')
         # 1. head
         head = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-head")
         # 2. body
         x = head
         for i in range(self.n_res_groups):
             x = self.residual_group(x,
                                     f,
                                     kernel_size,
                                     reduction,
                                     use_bn,
                                     name=str(i))
         body = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-body")
         body += head  # tf.math.add(body, head)
         # 3. tail
         x = self.up_scaling(body, f, scale, name='up-scaling')
         tail = tfu.conv2d(x,
                           f=self.n_channel,
                           k=kernel_size,
                           name="conv2d-tail")  # (-1, 384, 384, 3)
         x = self.image_processing(tail, sign=1, name='post-processing')
         return x
Example #13
    def generator(self, x, reuse=None):
        with tf.variable_scope('generator', reuse=reuse):

            def residual_block(x, f, name=""):
                with tf.variable_scope(name, reuse=reuse):
                    skip_connection = tf.identity(x,
                                                  name='gen-skip_connection-1')

                    x = t.conv2d(x, f, 3, 1, name='gen-conv2d-1')
                    x = t.instance_norm(x, reuse=reuse, name='gen-inst_norm-1')
                    x = tf.nn.relu(x)
                    x = t.conv2d(x, f, 3, 1, name='gen-conv2d-2')
                    x = t.instance_norm(x, reuse=reuse, name='gen-inst_norm-2')

                    return skip_connection + x

            shortcut = tf.identity(x, name='shortcut-init')

            x = t.conv2d(x, self.gf_dim * 1, 7, 1, name='gen-conv2d-1')
            x = t.instance_norm(x,
                                affine=False,
                                reuse=reuse,
                                name='gen-inst_norm-1')
            x = tf.nn.relu(x)

            for i in range(1, 3):
                x = t.conv2d(x,
                             self.gf_dim * (2**i),
                             3,
                             2,
                             name='gen-conv2d-%d' % (i + 1))
                x = t.instance_norm(x,
                                    affine=False,
                                    reuse=reuse,
                                    name='gen-inst_norm-%d' % (i + 1))
                x = tf.nn.relu(x)

            # 9 Residual Blocks
            for i in range(9):
                x = residual_block(x,
                                   self.gf_dim * 4,
                                   name='gen-residual_block-%d' % (i + 1))

            for i in range(1, 3):
                x = t.deconv2d(x,
                               self.gf_dim * (2**i),
                               3,
                               2,
                               name='gen-deconv2d-%d' % i)
                x = t.instance_norm(x,
                                    affine=False,
                                    reuse=reuse,
                                    name='gen-inst_norm-%d' % (i + 3))
                x = tf.nn.relu(x)

            x = t.conv2d(x, self.channel, 7, 1, name='gen-conv2d-4')
            x = tf.nn.tanh(x)
            return shortcut + x
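
This is the ResNet-style translator used in CycleGAN-like models (the CycleGAN paper uses 9 residual blocks for 256x256 images and 6 for 128x128). Two such generators are typically tied together with a cycle-consistency penalty; a minimal sketch of that loss, with a hypothetical helper name:

    def cycle_loss(real, reconstructed):
        # L1 penalty between an input and its round-trip translation,
        # e.g. real_a vs. g_ba(g_ab(real_a))
        return tf.reduce_mean(tf.abs(real - reconstructed))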
Example #14
 def SRDenseNet(self, x, f, kernel_size, reduction, use_bn, scale):
     with tf.variable_scope("SRDenseNet"):
         feat = [tfu.conv2d(x, f=f, k=kernel_size)]
         for i in range(8):
             feat.append(self.dense_block(feat[-1]))
         bottleneck = tfu.conv2d(tf.concat(feat, -1), 256, 1)
         sr = self.up_scaling(bottleneck, f, scale, name='up-scaling')
         sr = tfu.conv2d(sr, 3, 3)
         return sr
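
self.dense_block is not shown in this listing. A minimal DenseNet-style sketch of what it plausibly does; the growth rate, layer count, and names below are assumptions, not the author's code:

    # hypothetical dense_block; every layer sees the concat of all previous outputs
    def dense_block(self, x, growth_rate=16, n_layers=8, name="dense_block"):
        with tf.variable_scope(None, default_name=name):  # auto-uniquified scope
            feat = [x]
            for i in range(n_layers):
                y = tfu.conv2d(tf.concat(feat, axis=-1), f=growth_rate, k=3,
                               name="conv2d-%d" % i)
                feat.append(tf.nn.relu(y))
            return tf.concat(feat, axis=-1)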
Example #15
    def generator(self, z, pg=1, pg_t=False, reuse=None):
        def nf(n):
            return min(1024 // (2**n), self.z_dim)

        def block(x, fs, name="0"):
            x = resize_nn(x, x.get_shape()[1] * 2)
            x = t.conv2d(x,
                         fs,
                         k=3,
                         s=1,
                         name='gen_n_%s_conv2d-%d' % (name, x.get_shape()[1]))
            x = tf.nn.leaky_relu(x)
            x = pixel_norm(x)
            return x

        with tf.variable_scope("gen", reuse=reuse):
            x = tf.reshape(z, [-1, 1, 1, nf(1)])
            x = t.conv2d(x, nf(1), k=4, s=1, name='gen_n_1_conv2d')
            x = tf.nn.leaky_relu(x)
            x = pixel_norm(x)

            x = tf.reshape(x, [-1, 4, 4, nf(1)])
            x = t.conv2d(x, nf(1), k=3, s=1, name='gen_n_2_conv2d')
            x = tf.nn.leaky_relu(x)
            x = pixel_norm(x)

            x_out = None
            for i in range(pg - 1):
                if i == pg - 2 and pg_t:
                    x_out = t.conv2d(x,
                                     3,
                                     k=1,
                                     s=1,
                                     name='gen_out_conv2d-%d' %
                                     x.get_shape()[1])  # to RGB images
                    x_out = resize_nn(x_out,
                                      x_out.get_shape()[1] * 2)  # up-sampling

                x = block(x, nf(i + 1), name="1")
                x = block(x, nf(i + 1), name="2")

            x = t.conv2d(x,
                         3,
                         k=1,
                         s=1,
                         name='gen_out_conv2d-%d' %
                         x.get_shape()[1])  # to RGB images

            if pg == 1:
                return x

            if pg_t:
                x = (1. - self.alpha_trans) * x_out + self.alpha_trans * x

            return x
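
During a progressive-growing transition (pg_t), the output blends the up-sampled RGB image from the previous stage (x_out) with the new stage's RGB image (x); self.alpha_trans ramps from 0 to 1 so the new block fades in smoothly. A sketch of such a schedule, assuming a hypothetical step counter:

    def fade_in_alpha(global_step, fade_in_steps):
        # ramps 0 -> 1 over fade_in_steps training steps, then saturates at 1
        return tf.minimum(1., tf.cast(global_step, tf.float32) / float(fade_in_steps))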
Example #16
    def generator(self, x, reuse=None, is_train=True):
        """
        :param x: LR (Low Resolution) images, (-1, 96, 96, 3)
        :param reuse: scope re-usability
        :param is_train: is trainable, default True
        :return: SR (Super Resolution) images, (-1, 384, 384, 3)
        """

        with tf.variable_scope("generator", reuse=reuse):

            def residual_block(x, f, name="", _is_train=True):
                with tf.variable_scope(name):
                    shortcut = tf.identity(x, name='n64s1-shortcut')

                    x = t.conv2d(x, f, 3, 1, name="n64s1-1")
                    x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-1")
                    x = t.prelu(x, reuse=reuse, name='n64s1-prelu-1')
                    x = t.conv2d(x, f, 3, 1, name="n64s1-2")
                    x = t.batch_norm(x, is_train=_is_train, name="n64s1-bn-2")
                    x = tf.add(x, shortcut)

                    return x

            x = t.conv2d(x, self.gf_dim, 9, 1, name='n64s1-1')
            x = t.prelu(x, name='n64s1-prelu-1')

            skip_conn = tf.identity(x, name='skip_connection')

            # B = 16 residual blocks
            for i in range(1, 17):
                x = residual_block(x,
                                   self.gf_dim,
                                   name='b-residual_block_%d' % i,
                                   _is_train=is_train)

            x = t.conv2d(x, self.gf_dim, 3, 1, name='n64s1-3')
            x = t.batch_norm(x, is_train=is_train, name='n64s1-bn-3')

            x = tf.add(x, skip_conn)

            # sub-pixel conv2d blocks
            for i in range(1, 3):
                x = t.conv2d(x,
                             self.gf_dim * 4,
                             3,
                             1,
                             name='n256s1-%d' % (i + 2))
                x = t.sub_pixel_conv2d(x, f=None, s=2)
                x = t.prelu(x, name='n256s1-prelu-%d' % i)

            x = t.conv2d(x, self.channel, 9, 1,
                         name='n3s1')  # (-1, 384, 384, 3)
            x = tf.nn.tanh(x)
            return x
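
Each sub-pixel block widens the features to gf_dim * 4 channels and then rearranges them into a 2x larger map, so the two blocks give the overall 4x super-resolution factor. The shape walk-through, assuming gf_dim = 64:

    # (-1,  96,  96, 64) --conv f=256--> (-1,  96,  96, 256) --shuffle s=2--> (-1, 192, 192, 64)
    # (-1, 192, 192, 64) --conv f=256--> (-1, 192, 192, 256) --shuffle s=2--> (-1, 384, 384, 64)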
Example #17
def residual_block(x, f, name="0"):
    with tf.variable_scope("residual_block-" + name):
        scope_name = "residual_block-" + name

        x = t.conv2d(x, f=f, k=3, s=1)
        x = t.instance_norm(x, affine=True, name=scope_name + '_0')
        x = tf.nn.relu(x)

        x = t.conv2d(x, f=f, k=3, s=1)
        x = t.instance_norm(x, affine=True, name=scope_name + '_1')

        return x
Example #20
    def residual_channel_attention_block(self, x, f, kernel_size, reduction, use_bn, name):
        with tf.variable_scope("RCAB-%s" % name):
            skip_conn = tf.identity(x, name='identity')

            x = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-1")
            x = tf.layers.BatchNormalization(epsilon=self._eps, name="bn-1")(x) if use_bn else x
            x = self.act(x)

            x = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-2")
            x = tf.layers.BatchNormalization(epsilon=self._eps, name="bn-2")(x) if use_bn else x

            x = self.channel_attention(x, f, reduction, name="RCAB-%s" % name)
            return x + skip_conn  # residual connection
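
The block above delegates to self.channel_attention, which is not shown in this listing. A minimal RCAN-style sketch (global average pooling followed by a bottleneck gate); the implementation and names are assumptions, not the author's code:

    # hypothetical channel_attention, squeeze-and-excitation style
    def channel_attention(self, x, f, reduction, name):
        with tf.variable_scope("CA-%s" % name):
            skip_conn = x
            x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)            # (N, 1, 1, f)
            x = tfu.conv2d(x, f=f // reduction, k=1, name="conv2d-down")
            x = tf.nn.relu(x)
            x = tfu.conv2d(x, f=f, k=1, name="conv2d-up")
            x = tf.nn.sigmoid(x)                                         # per-channel gate
            return skip_conn * x                                         # channel-wise re-scaling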
Example #21
    def generator(self, x, reuse=None, name=""):
        """ The form of Auto-Encoder
        :param x: 128x128x3 images
        :param reuse: re-usability
        :param name: name

        :return: logits, prob
        """
        with tf.variable_scope('generator-%s' % name, reuse=reuse):

            def d(x, f, name=''):
                x = t.conv2d(x, f=f, k=3, s=2, name='gen-d-conv2d-%s' % name)
                x = t.instance_norm(x, name='gen-d-ins_norm-%s' % name)
                x = tf.nn.relu(x)
                return x

            def R(x, f, name=''):
                x = t.conv2d(x, f=f, k=3, s=1, name='gen-R-conv2d-%s-0' % name)
                x = t.conv2d(x, f=f, k=3, s=1, name='gen-R-conv2d-%s-1' % name)
                x = t.instance_norm(x, name='gen-R-ins_norm-%s' % name)
                x = tf.nn.relu(x)
                return x

            def u(x, f, name=''):
                x = t.deconv2d(x,
                               f=f,
                               k=3,
                               s=2,
                               name='gen-u-deconv2d-%s' % name)
                x = t.instance_norm(x, name='gen-u-ins_norm-%s' % name)
                x = tf.nn.relu(x)
                return x

            x = t.conv2d(x, f=self.gf_dim, k=7, s=1, name='gen-conv2d-0')

            x = d(x, self.gf_dim * 2, name='1')
            x = d(x, self.gf_dim * 4, name='2')

            for i in range(1, 7):
                x = R(x, self.gf_dim * 4, name=str(i))

            x = u(x, self.gf_dim * 4, name='1')
            x = u(x, self.gf_dim * 2, name='2')

            logits = t.conv2d(x, f=3, k=7, s=1, name='gen-conv2d-1')
            prob = tf.nn.tanh(logits)

            return prob
Example #22
    def discriminator(self, x, reuse=None):
        with tf.variable_scope('discriminator', reuse=reuse):
            x = tf.reshape(x, (-1, self.height, self.width, self.channel))

            x = t.conv2d(x, self.df_dim, 5, 2, name='disc-conv2d-1')
            x = tf.nn.leaky_relu(x)

            for i in range(1, 3):
                x = t.conv2d(x, self.df_dim, 5, 2, name='disc-conv2d-%d' % (i + 1))
                x = t.batch_norm(x, reuse=reuse, name='disc-bn-%d' % i)
                x = tf.nn.leaky_relu(x)

            x = t.flatten(x)

            x = t.dense(x, 1, name='disc-fc-1')
            return x
Example #23
    def generator(self, x, y, z, scale=32, reuse=None, do_rate=0.5):
        """
        :param x: images to fake
        :param y: classes
        :param z: noise
        :param scale: image size
        :param reuse: variable re-use
        :param do_rate: dropout rate
        :return: logits
        """

        assert (scale % 8 == 0)  # 32, 16, 8

        with tf.variable_scope('generator_{0}'.format(scale), reuse=reuse):
            if scale == 8:
                h = tf.concat([z, y], axis=1)

                h = t.dense(h, self.g_fc_unit, name='gen-fc-1')
                h = tf.nn.relu(h)
                h = tf.layers.dropout(h, do_rate, name='gen-dropout-1')

                h = t.dense(h, self.g_fc_unit, name='gen-fc-2')
                h = tf.nn.relu(h)
                h = tf.layers.dropout(h, do_rate, name='gen-dropout-2')

                h = t.dense(h, self.channel * 8 * 8, name='gen-fc-3')

                h = tf.reshape(h, [-1, 8, 8, self.channel])
            else:
                y = t.dense(y, scale * scale, name='gen-fc-y')

                y = tf.reshape(y, [-1, scale, scale, 1])
                z = tf.reshape(z, [-1, scale, scale, 1])

                h = tf.concat([z, y, x], axis=3)  # concat along the channel axis

                h = t.conv2d(h, self.gf_dim * 1, 5, 1, name='gen-conv2d-1')
                h = tf.nn.relu(h)

                h = t.conv2d(h, self.gf_dim * 1, 5, 1, name='gen-conv2d-2')
                h = tf.nn.relu(h)

                h = t.conv2d(h, self.channel, 5, 1, name='gen-conv2d-3')

            h = tf.nn.tanh(h)

            return h
Example #24
 def conv_mean_pool(self, x, f, reuse=None, name=""):
     with tf.variable_scope(name, reuse=reuse):
         x = t.conv2d(x, f, 3, 1)
         x = tf.add_n([
             x[:, ::2, ::2, :], x[:, 1::2, ::2, :], x[:, ::2, 1::2, :],
             x[:, 1::2, 1::2, :]
         ]) / 4.
         return x
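
The add_n over the four 2x2 phase slices is exactly a 2x2 mean pool with stride 2 (for even spatial sizes), just expressed with slicing. The equivalent stock op, for reference:

    # x = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')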
Example #25
    def discriminator(self, x, reuse=None):
        with tf.variable_scope("discriminator", reuse=reuse):
            x = t.conv2d(x, self.df_dim, 5, 2,
                         name='disc-coord-conv2d-1')  # needs to be replaced with CoordConv
            x = tf.nn.leaky_relu(x)

            for i in range(1, 3):
                x = t.conv2d(x,
                             self.df_dim * (2**i),
                             5,
                             2,
                             name='disc-conv2d-%d' % (i + 1))
                x = tf.nn.leaky_relu(x)

            x = t.conv2d(x, self.channel, 5, 2, name='disc-conv2d-4')
            x = tf.nn.sigmoid(x)
            return x
Example #26
    def discriminator(self, x, reuse=None, is_train=True):
        with tf.variable_scope('discriminator', reuse=reuse):
            x = t.conv2d(x, self.df_dim, s=1, name='disc-conv2d-0')
            x = tf.nn.leaky_relu(x)

            for i in range(1, 3):
                x = t.conv2d(x,
                             self.df_dim * (2**i),
                             name='disc-conv2d-%d' % i)
                x = t.batch_norm(x, is_train=is_train)
                x = tf.nn.leaky_relu(x)

            x = tf.layers.flatten(x)

            logits = t.dense(x, 1, name='disc-fc-0')
            prob = tf.nn.sigmoid(logits)

            return prob, logits
Example #27
            def conv_in_relu(x, f, k, s, de=False, name=""):
                if not de:
                    x = t.conv2d(x, f=f, k=k, s=s)
                else:
                    x = t.deconv2d(x, f=f, k=k, s=s)

                x = t.instance_norm(x, name=name)
                x = tf.nn.relu(x)
                return x
Example #28
 def srcnn(self, x, f, kernel_size, reduction, use_bn, scale):
     with tf.variable_scope("Srcnn_Network"):
         # x = self.image_processing(x, sign=-1, name='pre-processing')
         x = self.up_scaling(x, f, scale, name='up-scaling')
         # 1. head
         head = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-first")
         # 2. body
         x = tfu.conv2d(head, f=f, k=kernel_size, name="conv2d-second")
         body = tfu.conv2d(x, f=f, k=kernel_size, name="conv2d-third")
         body += head  # tf.math.add(body, head)
         # 3. tail
         x = tfu.conv2d(body,
                        f=self.n_channel,
                        k=kernel_size,
                        name="conv2d-fourth")  # (-1, 384, 384, 3)
         # x = self.image_processing(x, sign=1, name='post-processing')
         return x
Example #30
    def up_scaling(self, x, f, scale_factor, name):
        """ up-scaling via sub-pixel (pixel-shuffle) convolution """
        with tf.variable_scope(name):
            if scale_factor == 3:
                x = tfu.conv2d(x, f * 9, k=1, name='conv2d-image_scaling-0')
                x = tfu.pixel_shuffle(x, 3)
            elif scale_factor & (scale_factor - 1) == 0:  # is it 2^n?
                log_scale_factor = int(np.log2(scale_factor))
                for i in range(log_scale_factor):
                    x = tfu.conv2d(x, f * 4, k=1, name='conv2d-image_scaling-%d' % i)
                    x = tfu.pixel_shuffle(x, 2)
            else:
                raise NotImplementedError("[-] Not supported scaling factor (%d)" % scale_factor)
            return x