Example #1
    def build_inference(self, input):
        if self.is_cgan:
            x, self.label = input
        else:
            x = input

        ndf = 32
        ksize = 4
        n_layer = 3
        depth = [ndf * 2, ndf * 4, ndf * 8]
        self.norm_mtd = ""

        print("Discriminator shape log:")

        x = L.conv2d(x,
                     ndf,
                     ksize,
                     2,
                     padding='SAME',
                     scope='conv1',
                     reuse=self.reuse,
                     activation_fn=None)
        x = ops.get_norm(x, "conv1/" + self.norm_mtd, self.training,
                         self.reuse)
        x = ops.LeakyReLU(x)
        print(x.get_shape())

        for i in range(n_layer):
            name = "conv%d" % (i + 2)
            x = L.conv2d(x,
                         depth[i],
                         ksize,
                         2,
                         padding='SAME',
                         scope=name,
                         reuse=self.reuse,
                         activation_fn=None)
            x = ops.get_norm(x, name + "/" + self.norm_mtd, self.training,
                             self.reuse)
            x = ops.LeakyReLU(x)

            #x = tf.nn.dropout(x, self.keep_prob)
            print(x.get_shape())

        x = L.conv2d(x,
                     depth[-1],
                     3,
                     1,
                     padding='VALID',
                     scope="conv_trunk",
                     reuse=self.reuse,
                     activation_fn=None)
        x = ops.get_norm(x, "conv_trunk/" + self.norm_mtd, self.training,
                         self.reuse)
        x = ops.LeakyReLU(x)
        print(x.get_shape())

        self.build_tail(x)

        return self.disc_out, self.cls_out
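Note: these examples all rely on a project-local ops module that is not
shown. A minimal sketch of what ops.get_norm plausibly does, assuming it
dispatches on the method name embedded in its scope string ("inst",
"contrib", or empty for no normalization); this is a reconstruction for
reference, not the project's actual ops.py:

    import tensorflow as tf
    import tensorflow.contrib.layers as L

    def get_norm(x, name, training, reuse):
        # name is "<scope>/<method>"; dispatch on the method suffix
        scope, _, mtd = name.rpartition("/")
        if mtd == "inst":
            return L.instance_norm(x, scope=scope, reuse=reuse)
        elif mtd == "contrib":
            return L.batch_norm(x, is_training=training,
                                scope=scope, reuse=reuse)
        return x  # empty method, as in Example #1: no normalization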
Example #2
    def residual_block(name, x, ndim, ks=3):
        shortcut = tf.identity(x)
        x = pad_reflect(x, ks)
        x = L.conv2d(x,
                     ndim,
                     ks,
                     padding='VALID',
                     scope=name + "/conv1",
                     reuse=self.reuse,
                     activation_fn=None)
        x = ops.get_norm(x, name + "/conv1/" + self.norm_mtd,
                         self.training, self.reuse)
        x = tf.nn.relu(x)
        # second convolution
        x = pad_reflect(x, ks)
        x = L.conv2d(x,
                     ndim,
                     ks,
                     padding='VALID',
                     scope=name + "/conv2",
                     reuse=self.reuse,
                     activation_fn=None)
        x = ops.get_norm(x, name + "/conv2/" + self.norm_mtd,
                         self.training, self.reuse)
        x = tf.add(x, shortcut, "residual_block_out")
        return x
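The reflect-pad-then-VALID pattern above keeps the spatial size fixed, so
the shortcut addition lines up: a k x k stride-1 VALID convolution shrinks
each side by k - 1, and pad_reflect (defined in Examples #6 and #7) adds
back (k - 1) // 2 pixels per edge. A quick shape check under those
assumptions:

    # 3x3 kernel, stride 1: pad one pixel per edge, the conv shrinks it back
    x = tf.placeholder(tf.float32, [None, 64, 64, 256])
    y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")  # -> 66x66
    y = L.conv2d(y, 256, 3, padding='VALID')                    # -> 64x64
    print(y.get_shape())  # (?, 64, 64, 256)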
Example #3
        def build_feat_image(x, ndim=3):
            for i in range(self.common_length):
                self.res_cnt += 1
                name = 'Res%d' % self.res_cnt
                x = residual_block(name, x, residual_dim)

            x = ops.get_norm(x,
                             name=self.norm_mtd,
                             training=self.training,
                             reuse=tf.AUTO_REUSE)

            for depth in output_side:
                self.conv_cnt += 1
                x = ops.deconv2d("deconv%d" % self.conv_cnt,
                                 x,
                                 depth,
                                 3,
                                 2,
                                 activation_fn=tf.nn.relu,
                                 normalizer_mode=self.norm_mtd,
                                 training=self.training,
                                 reuse=tf.AUTO_REUSE)

            x = ops.conv2d("deconv%d" % (self.conv_cnt + 1),
                           x,
                           ndim,
                           large_ksize,
                           1,
                           activation_fn=tf.nn.tanh,
                           normalizer_mode=None,
                           training=self.training,
                           reuse=tf.AUTO_REUSE)

            return x
Example #4
        def build_noise_feat(x, map_size=8):
            self.map_size = map_size
            self.map_depth = 128
            # assume 64x64 target
            x = ops.linear("fc1",
                           x, (self.map_size**2) * self.map_depth,
                           activation_fn=None,
                           normalizer_mode=None,
                           training=self.training,
                           reuse=tf.AUTO_REUSE)

            x = tf.reshape(
                x, shape=[-1, self.map_size, self.map_size, self.map_depth])

            x = ops.get_norm(x,
                             name="fc1/" + self.norm_mtd,
                             training=self.training,
                             reuse=tf.AUTO_REUSE)

            x = tf.nn.relu(x)

            # upsample twice, doubling map_size each time
            # (e.g. 16 -> 64 for a 64x64 target)
            map_size = self.map_size
            map_depth = self.map_depth
            for i in range(2):
                self.conv_cnt += 1
                map_size *= 2

                lx = tf.image.resize_images(x, [map_size, map_size])
                x = ops.deconv2d("conv%d" % self.conv_cnt,
                                 x,
                                 map_depth,
                                 3,
                                 2,
                                 activation_fn=tf.nn.relu,
                                 normalizer_mode=None,
                                 training=self.training,
                                 reuse=tf.AUTO_REUSE)
                x = x + lx
                x = ops.get_norm(x,
                                 name="conv%d/%s" %
                                 (self.conv_cnt, self.norm_mtd),
                                 training=self.training,
                                 reuse=tf.AUTO_REUSE)

            return x
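Each loop iteration above sums a bilinear resize of x with a stride-2
transposed convolution of x; both branches yield map_size x map_size maps
with map_depth channels, and the smooth resized branch damps the
checkerboard artifacts that deconvolutions alone tend to produce. A
resize-then-conv step, sketched below, is a common alternative with the
same output shape (shown for comparison, not taken from the source):

    # hypothetical resize-then-conv upsampling step
    x = tf.image.resize_images(x, [map_size, map_size])
    x = L.conv2d(x, map_depth, 3, 1, padding='SAME',
                 activation_fn=tf.nn.relu)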
Example #5
        def build_image_feat(x):
            self.conv_cnt = 1
            x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], mode="REFLECT")
            x = L.conv2d(x,
                         input_side[0], (large_ksize, large_ksize),
                         1,
                         padding='VALID',
                         scope='conv1',
                         activation_fn=None)
            x = ops.get_norm(x, "conv1/contrib", self.training, tf.AUTO_REUSE)
            x = tf.nn.relu(x)

            ### input must be x_real
            for ndf in input_side[1:]:
                self.conv_cnt += 1
                name = 'conv%d' % self.conv_cnt

                x = pad_reflect(x)
                x = L.conv2d(x,
                             ndf,
                             4,
                             2,
                             padding='VALID',
                             reuse=tf.AUTO_REUSE,
                             scope=name,
                             activation_fn=None)
                x = ops.get_norm(x, name + "/" + self.norm_mtd, self.training,
                                 tf.AUTO_REUSE)
                x = tf.nn.relu(x)

            # now it is 4x downsampled

            for i in range(self.common_length):
                self.res_cnt += 1
                name = 'Res%d' % self.res_cnt
                x = residual_block(name, x, residual_dim)

            return tf.identity(x, "image_feat")
Example #6
    def build_inference(self, input):
        def pad_reflect(x, ksize=3, kstride=1):
            p = (ksize - kstride) // 2
            return tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")

        def residual_block(name, x, ndim, ks=3):
            shortcut = tf.identity(x)
            x = pad_reflect(x, ks)
            x = L.conv2d(x,
                         ndim,
                         ks,
                         padding='VALID',
                         scope=name + "/conv1",
                         reuse=self.reuse,
                         activation_fn=None)
            x = ops.get_norm(x, name + "/conv1/" + self.norm_mtd,
                             self.training, self.reuse)
            x = tf.nn.relu(x)
            # second convolution
            x = pad_reflect(x, ks)
            x = L.conv2d(x,
                         ndim,
                         ks,
                         padding='VALID',
                         scope=name + "/conv2",
                         reuse=self.reuse,
                         activation_fn=None)
            x = ops.get_norm(x, name + "/conv2/" + self.norm_mtd,
                             self.training, self.reuse)
            x = tf.add(x, shortcut, "residual_block_out")
            return x

        # assume input to be mask, sketch, image
        # the channel should be 9 dims
        #if len(input) > 1:
        #  x = tf.concat(input, 3, "concat_input_gen")
        #else:
        #    x = input[0]
        x = input

        large_ksize = 7
        ks = 3
        p = (large_ksize - 1) // 2
        ndf = 32
        self.n_enlarge = 3
        self.norm_mtd = "inst"

        print("Deep generator shape log:")

        conv_cnt = 0
        res_cnt = 0

        x = pad_reflect(x, large_ksize, 1)
        conv_cnt += 1
        x = L.conv2d(x,
                     ndf,
                     large_ksize,
                     1,
                     padding='VALID',
                     scope='conv%d' % conv_cnt,
                     reuse=self.reuse,
                     activation_fn=None)
        x = ops.get_norm(x, "conv1/" + self.norm_mtd, self.training,
                         self.reuse)
        x = tf.nn.relu(x)
        print(x.get_shape())

        # convolution input side
        mul = 1
        for idx in range(self.n_enlarge):
            mul *= 2
            conv_cnt += 1
            name = 'conv%d' % conv_cnt

            x = pad_reflect(x)
            x = L.conv2d(x,
                         ndf * mul,
                         ks,
                         2,
                         padding='VALID',
                         reuse=self.reuse,
                         scope=name,
                         activation_fn=None)
            x = ops.get_norm(x, name + "/" + self.norm_mtd, self.training,
                             self.reuse)
            x = tf.nn.relu(x)
            print(x.get_shape())

        for idx in range(9):
            res_cnt += 1
            name = "Res%d" % res_cnt
            x = residual_block(name, x, ndf * 8)

        # convolution output side
        mul = 8
        for idx in range(self.n_enlarge):
            mul //= 2  # integer division: conv2d_transpose needs an int channel count
            name = "deconv%d" % (idx + 1)

            # first do reflection padding (hard code to kernel size 3)
            #x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT")

            # do subpixel convolution
            #x = L.conv2d(x, output_side[idx] * 4, (3, 3), padding='VALID', scope=name)
            # pixel shuffle
            #x = tf.depth_to_space(x, 2)

            x = L.conv2d_transpose(x,
                                   ndf * mul,
                                   ks,
                                   2,
                                   "SAME",
                                   activation_fn=None,
                                   scope=name,
                                   reuse=self.reuse)
            x = ops.get_norm(x, name + "/" + self.norm_mtd, self.training,
                             self.reuse)
            x = tf.nn.relu(x)
            print(x.get_shape())

        # output layer
        x = pad_reflect(x, large_ksize)
        conv_cnt += 1
        x = L.conv2d(x,
                     self.out_dim,
                     large_ksize,
                     padding='VALID',
                     scope="conv%d" % conv_cnt,
                     reuse=self.reuse,
                     activation_fn=None)
        #x = ops.get_norm(x, "conv%d"%conv_cnt+"/"+self.norm_mtd, self.training, self.reuse)
        x = tf.nn.tanh(x)
        #x = tf.nn.tanh(x) * 1.1
        #x = x - tf.nn.relu(x - 1) + tf.nn.relu(-x - 1)
        self.out = tf.identity(x, "GeneratorOutput")
        print(self.out.get_shape())
        return self.out
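A minimal driver for this generator; the class name and attribute wiring
are assumptions (the method itself only reads self.reuse, self.training,
and self.out_dim, and sets the rest):

    # hypothetical usage: the input is downsampled 2^3 times, run through
    # 9 residual blocks, then upsampled back to full resolution
    gen = DeepGenerator()                # class name assumed
    gen.reuse = False
    gen.training = tf.placeholder(tf.bool)
    gen.out_dim = 3
    images = tf.placeholder(tf.float32, [None, 256, 256, 9])
    fake = gen.build_inference(images)   # -> (?, 256, 256, 3), in [-1, 1]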
Example #7
    def build_inference(self, input):
        def pad_reflect(x, ksize=3, kstride=1):
            p = (ksize - kstride) // 2
            return tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")

        def residual_block(name, x, ndim, ks=3):
            shortcut = tf.identity(x)
            x = pad_reflect(x, ks)
            x = L.conv2d(x,
                         ndim,
                         ks,
                         padding='VALID',
                         scope=name + "/conv1",
                         reuse=self.reuse,
                         activation_fn=None)
            x = ops.get_norm(x, name + "/conv1/" + self.norm_mtd,
                             self.training, self.reuse)
            x = tf.nn.relu(x)
            # second convolution
            x = pad_reflect(x, ks)
            x = L.conv2d(x,
                         ndim,
                         ks,
                         padding='VALID',
                         scope=name + "/conv2",
                         reuse=self.reuse,
                         activation_fn=None)
            x = ops.get_norm(x, name + "/conv2/" + self.norm_mtd,
                             self.training, self.reuse)
            x = tf.add(x, shortcut, "residual_block_out")
            return x

        def linear(name, x, out_dim):
            with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
                w = tf.get_variable("kernel",
                                    shape=[x.get_shape()[-1], out_dim])
                return tf.matmul(x, w)

        def conv(name, x, out_dim, ks, kt):
            with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
                w = tf.get_variable("kernel",
                                    shape=[ks, ks,
                                           x.get_shape()[-1], out_dim])
                return tf.nn.conv2d(x, w, [1, kt, kt, 1], padding="SAME")

        x = input

        large_ksize = 7
        ks = 3
        p = (large_ksize - 1) // 2
        ndf = 32
        self.n_enlarge = 3
        self.norm_mtd = "contrib"

        print("Deep generator shape log:")

        conv_cnt = 0
        res_cnt = 0

        # noise set manually
        if self.side_noise is not None:
            self.side1 = tf.nn.relu(
                linear("fc_side_map", self.side_noise, 16 * 16 * ndf * 2))
            self.side1 = tf.reshape(self.side1, [-1, 16, 16, ndf * 2])
            self.side1 = L.conv2d(self.side1,
                                  ndf * 8,
                                  3,
                                  activation_fn=tf.nn.relu,
                                  scope="side_conv1",
                                  reuse=tf.AUTO_REUSE)

            tmp = self.reuse
            self.reuse = tf.AUTO_REUSE
            # add a residual
            sres_cnt = 0
            for i in range(3):
                sres_cnt += 1
                self.side1 = residual_block("side_residual%d" % sres_cnt,
                                            self.side1, ndf * 8)
            self.reuse = tmp
            #self.side2 = tf.nn.tanh(linear("fc_side_feat", self.side_noise, ndf * 8))
            #self.side1 = tf.reshape(self.side1, [-1, 16, 16, 1])
            #self.side2 = tf.reshape(self.side2, [-1, 1, 1, ndf * 8])

        x = pad_reflect(x, large_ksize, 1)
        conv_cnt += 1
        x = L.conv2d(x,
                     ndf,
                     large_ksize,
                     1,
                     padding='VALID',
                     scope='conv%d' % conv_cnt,
                     reuse=self.reuse,
                     activation_fn=None)
        x = ops.get_norm(x, "conv1/" + self.norm_mtd, self.training,
                         self.reuse)
        x = tf.nn.relu(x)
        print(x.get_shape())

        # convolution input side
        mul = 1
        for idx in range(self.n_enlarge):
            mul *= 2
            conv_cnt += 1
            name = 'conv%d' % conv_cnt

            x = pad_reflect(x)
            x = L.conv2d(x,
                         ndf * mul,
                         ks,
                         2,
                         padding='VALID',
                         activation_fn=None,
                         scope=name,
                         reuse=self.reuse)
            x = ops.get_norm(x, name + "/" + self.norm_mtd, self.training,
                             self.reuse)
            x = tf.nn.relu(x)
            print(x.get_shape())

        for idx in range(4):
            res_cnt += 1
            name = "Res%d" % res_cnt
            x = residual_block(name, x, ndf * 8)

        # Add noise (a gate)
        if self.side_noise is not None:
            #x = L.conv2d(x, ndf * 4, ks, 1, padding="SAME",
            #    activation_fn=tf.nn.relu,
            #    reuse=tf.AUTO_REUSE, scope="side_concat_conv1")
            #x = ops.get_norm(x, name+"/"+self.norm_mtd, self.training, tf.AUTO_REUSE)

            x = tf.concat([x, self.side1 * self.gate], axis=3)

            x = L.conv2d(x,
                         ndf * 8,
                         ks,
                         1,
                         padding="SAME",
                         activation_fn=tf.nn.relu,
                         reuse=tf.AUTO_REUSE,
                         scope="side_concat_conv2")

            x = ops.get_norm(x, "side_concat/" + self.norm_mtd, self.training,
                             tf.AUTO_REUSE)

        for idx in range(5):
            res_cnt += 1
            name = "Res%d" % res_cnt
            x = residual_block(name, x, ndf * 8)

        # convolution output side
        mul = 8
        for idx in range(self.n_enlarge):
            mul = int(mul / 2)
            name = "deconv%d" % (idx + 1)

            # first do reflection padding (hard code to kernel size 3)
            #x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT")

            # do subpixel convolution
            #x = L.conv2d(x, ndf * mul, (3, 3),
            #    padding='VALID',
            #    activation_fn=None,
            #    reuse=self.reuse, scope=name)
            # pixel shuffle
            #x = tf.depth_to_space(x, 2)

            x = L.conv2d_transpose(x,
                                   ndf * mul,
                                   ks,
                                   2,
                                   padding="SAME",
                                   activation_fn=None,
                                   scope=name,
                                   reuse=self.reuse)
            x = ops.get_norm(x, name + "/" + self.norm_mtd, self.training,
                             self.reuse)
            x = tf.nn.relu(x)
            print(x.get_shape())

        # output layer
        x = pad_reflect(x, large_ksize)
        conv_cnt += 1
        x = L.conv2d(x,
                     self.out_dim,
                     large_ksize,
                     padding='VALID',
                     scope="conv%d" % conv_cnt,
                     reuse=self.reuse,
                     activation_fn=None)

        x = tf.nn.tanh(x)
        self.out = tf.identity(x, "GeneratorOutput")
        print(self.out.get_shape())
        return self.out
Example #8
    def build_inference(self, input):
        def pad_reflect(x, ksize=3, kstride=1):
            p = (ksize - kstride) // 2
            return tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")

        # assume input to be mask, sketch, image
        # the channel should be 9 dims
        #if len(input) > 1:
        #  x = tf.concat(input, 3, "concat_input_gen")
        #else:
        #    x = input[0]
        x = input

        large_ksize = 9
        p = (large_ksize - 1) // 2
        input_side = [64, 128, 256]
        output_side = [128, 64]
        residual_dim = 256

        x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], mode="REFLECT")
        x = L.conv2d(x,
                     input_side[0], (large_ksize, large_ksize),
                     1,
                     padding='VALID',
                     scope='conv1',
                     activation_fn=None)
        x = ops.get_norm(x, "conv1/contrib", self.training, tf.AUTO_REUSE)
        x = tf.nn.relu(x)

        # convolution input side
        for idx in range(1, len(input_side), 1):
            name = "conv%d" % (idx + 1)
            # first do reflection padding (hard code to kernel size 3)
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT")
            x = L.conv2d(x,
                         input_side[idx], (4, 4),
                         2,
                         padding='VALID',
                         scope=name,
                         reuse=self.reuse,
                         activation_fn=None)
            ##x = L.conv2d(x, input_side[idx], (3, 3), padding='VALID')
            ##x = tf.nn.avg_pool(x, (2, 2), [0, 2, 2, 0], padding="VALID")
            # do not need gamma & beta
            x = ops.get_norm(x, name + "/contrib", self.training, self.reuse)
            x = tf.nn.relu(x)

        #base_connect = tf.identity(x, "base_connect")
        for idx in range(6):
            name = "Res%d" % (idx + 1)
            shortcut = tf.identity(x)

            # first convolution
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT")
            x = L.conv2d(x,
                         residual_dim, (3, 3),
                         padding='VALID',
                         scope=name + "/conv1",
                         activation_fn=None)
            x = ops.get_norm(x, name + "/conv1/contrib", self.training,
                             self.reuse)
            x = tf.nn.relu(x)
            # second convolution
            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT")
            x = L.conv2d(x,
                         residual_dim, (3, 3),
                         padding='VALID',
                         scope=name + "/conv2",
                         activation_fn=None)
            x = tf.add(x, shortcut, "residual_block_out")
            #x = ops.get_norm(x, name+"/conv2/contrib", self.training, self.reuse)
            #x = tf.nn.relu(x)
        #x = ops.get_norm(x + base_connect, name+"bridge/contrib", self.training, self.reuse)
        #x = tf.nn.relu(x)

        # convolution output side
        for idx in range(len(output_side)):
            name = "conv%d" % (idx + len(input_side) + 1)
            # first do reflection padding (hard code to kernel size 3)
            #x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT")

            # do subpixel convolution
            #x = L.conv2d(x, output_side[idx] * 4, (3, 3), padding='VALID', scope=name)
            # pixel shuffle
            #x = tf.depth_to_space(x, 2)

            x = L.conv2d_transpose(x,
                                   output_side[idx], (4, 4),
                                   2,
                                   "SAME",
                                   activation_fn=None,
                                   scope=name,
                                   reuse=self.reuse)
            #x = x[:, 1:-1, 1:-1, :]
            # do not need gamma & beta
            x = ops.get_norm(x, name + "/contrib", self.training, self.reuse)
            x = tf.nn.relu(x)

        # output layer
        x = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], mode="REFLECT")
        x = L.conv2d(x,
                     3, (large_ksize, large_ksize),
                     padding='VALID',
                     scope="conv%d" % (len(input_side) + len(output_side) + 1),
                     activation_fn=None)

        x = tf.nn.tanh(x) * 1.1
        self.out = x - tf.nn.relu(x - 1) + tf.nn.relu(-x - 1)

        return self.out
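The last two lines overdrive tanh by 10% and then fold the overshoot back
in: for any x, the expression x - relu(x - 1) + relu(-x - 1) equals
clip(x, -1, 1), so the output stays in [-1, 1] while tanh retains a usable
gradient near its saturation ends. A quick check of the identity:

    # the fold-back is equivalent to tf.clip_by_value(x, -1.0, 1.0)
    x = tf.constant([-1.5, -0.5, 0.5, 1.5])
    clamped = x - tf.nn.relu(x - 1) + tf.nn.relu(-x - 1)
    with tf.Session() as sess:
        print(sess.run(clamped))  # [-1.  -0.5  0.5  1. ]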
Example #9
    def build_inference(self, input):
        x = input
        conv_cnt = 0
        deconv_cnt = 0

        def conv(x, ndim, ks, kt):
            # auto-numbered convolution; the original mutated conv_cnt
            # without declaring it nonlocal, which raises UnboundLocalError
            nonlocal conv_cnt
            conv_cnt += 1
            return L.conv2d(x, ndim, ks, kt,
                padding='SAME',
                activation_fn=None,
                reuse=self.reuse, scope="conv%d" % conv_cnt)

        def deconv(x, ndim, ks, kt):
            # auto-numbered transposed convolution; stands in for the
            # undefined self.dc* layers of the original
            nonlocal deconv_cnt
            deconv_cnt += 1
            return L.conv2d_transpose(x, ndim, ks, kt,
                padding='SAME',
                activation_fn=None,
                reuse=self.reuse, scope="deconv%d" % deconv_cnt)

        def norm_relu(x, name):
            x = ops.get_norm(x, name + "/" + self.norm_mtd,
                             self.training, self.reuse)
            return tf.nn.relu(x)

        # encoder: alternating stride-1 / stride-2 convolutions; the
        # channel widths of e3..e8 are assumed (the original calls omitted
        # their arguments) and extend the doubling pattern of e0..e2
        e0 = norm_relu(conv(x, 32, 3, 1), "conv1")
        e1 = norm_relu(conv(e0, 64, 4, 2), "conv2")
        e2 = norm_relu(conv(e1, 64, 3, 1), "conv3")
        e3 = norm_relu(conv(e2, 128, 4, 2), "conv4")
        e4 = norm_relu(conv(e3, 128, 3, 1), "conv5")
        e5 = norm_relu(conv(e4, 256, 4, 2), "conv6")
        e6 = norm_relu(conv(e5, 256, 3, 1), "conv7")
        e7 = norm_relu(conv(e6, 512, 4, 2), "conv8")
        e8 = norm_relu(conv(e7, 512, 3, 1), "conv9")

        # decoder with U-Net skip connections; tf.concat replaces the
        # Chainer-style F.concat, and the original del statements are
        # dropped since they only discarded Python references
        d8 = norm_relu(conv(tf.concat([e7, e8], 3), 512, 3, 1), "conv10")
        d7 = norm_relu(deconv(d8, 256, 4, 2), "deconv1")
        d6 = norm_relu(conv(tf.concat([e6, d7], 3), 256, 3, 1), "conv11")
        d5 = norm_relu(deconv(d6, 128, 4, 2), "deconv2")
        d4 = norm_relu(conv(tf.concat([e4, d5], 3), 128, 3, 1), "conv12")
        d3 = norm_relu(deconv(d4, 64, 4, 2), "deconv3")
        d2 = norm_relu(conv(tf.concat([e2, d3], 3), 64, 3, 1), "conv13")
        d1 = norm_relu(deconv(d2, 32, 4, 2), "deconv4")
        # output layer: no norm or activation (out_dim assumed, as in
        # the other examples)
        d0 = conv(tf.concat([e0, d1], 3), self.out_dim, 3, 1)
        return d0