Example No. 1
def shape_layers(cin, cout, ngf, init_sz):
    # Project the latent vector to an (ngf * 2, init_sz, init_sz) feature map,
    # then upsample twice, normalizing after every stage.
    return [
        nn.Linear(cin, ngf * 2 * init_sz**2),
        Reshape(-1, ngf * 2, init_sz, init_sz),
        get_norm_layer()(ngf * 2),
        *UpsampleBlock(ngf * 2, ngf),
        get_norm_layer()(ngf),
        *UpsampleBlock(ngf, cout),
        get_norm_layer()(cout),
    ]
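Reshape is not a standard torch.nn module; below is a minimal sketch of one plausible definition (an assumption, not necessarily the helper this repository uses):

import torch.nn as nn

class Reshape(nn.Module):
    """Reshape the input tensor to the shape passed at construction time."""

    def __init__(self, *shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        # e.g. Reshape(-1, ngf * 2, init_sz, init_sz) turns the Linear output
        # back into a 4-D feature map
        return x.view(*self.shape)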
Example No. 2
    def __call__(self,
                 x,
                 dim=64,
                 n_downsamplings=5,
                 weight_decay=0.0,
                 norm_name='batch_norm',
                 training=True,
                 scope='UNetGenc'):
        MAX_DIM = 1024

        conv_ = functools.partial(
            conv, weights_regularizer=slim.l2_regularizer(weight_decay))
        norm = utils.get_norm_layer(norm_name,
                                    training,
                                    updates_collections=None)

        conv_norm_lrelu = functools.partial(conv_,
                                            normalizer_fn=norm,
                                            activation_fn=tf.nn.leaky_relu)

        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            z = x
            zs = []
            # stride-2 conv stack; keep each feature map for the decoder's skip connections
            for i in range(n_downsamplings):
                d = min(dim * 2**i, MAX_DIM)
                z = conv_norm_lrelu(z, d, 4, 2)
                zs.append(z)

        # variables and update operations
        self.variables = tf.global_variables(scope)
        self.trainable_variables = tf.trainable_variables(scope)
        self.reg_losses = tf.losses.get_regularization_losses(scope)

        return zs
Example No. 3
def define_D(
    input_nc,
    ndf,
    netD,
    n_layers_D=3,
    norm="batch",
    use_sigmoid=False,
    init_type="normal",
    init_gain=0.02,
    gpu_id="cuda:0",
):
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netD == "basic":
        net = NLayerDiscriminator(
            input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid
        )
    elif netD == "n_layers":
        net = NLayerDiscriminator(
            input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid
        )
    elif netD == "pixel":
        net = PixelDiscriminator(
            input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid
        )
    else:
        raise NotImplementedError(
            "Discriminator model name [%s] is not recognized" % netD
        )

    return init_net(net, init_type, init_gain, gpu_id)
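A hedged usage sketch for define_D, assuming NLayerDiscriminator, PixelDiscriminator and init_net come from the same codebase; the argument values are illustrative only:

# e.g. a 4-layer PatchGAN discriminator over concatenated input/target images
netD = define_D(
    input_nc=6,
    ndf=64,
    netD="n_layers",
    n_layers_D=4,
    norm="instance",
    use_sigmoid=False,
    gpu_id="cuda:0",
)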
Example No. 4
    def build_model(self):
        act = nn.ReLU(inplace=True)
        input_ch = self.opt.input_ch
        n_gf = self.opt.n_gf
        norm = get_norm_layer(self.opt.norm_type)
        output_ch = self.opt.output_ch
        pad = get_pad_layer(self.opt.padding_type)

        model = []
        model += [
            pad(3),
            nn.Conv2d(input_ch, n_gf, kernel_size=7, padding=0),
            norm(n_gf), act
        ]

        # stride-2 downsampling stages: channel count doubles at each stage
        for _ in range(self.opt.n_downsample):
            model += [
                nn.Conv2d(n_gf, 2 * n_gf, kernel_size=3, padding=1, stride=2),
                norm(2 * n_gf), act
            ]
            n_gf *= 2

        for _ in range(self.opt.n_residual):
            model += [ResidualBlock(n_gf, pad, norm, act)]

        # mirrored stride-2 upsampling stages: channel count halves at each stage
        for _ in range(self.opt.n_downsample):
            model += [
                nn.ConvTranspose2d(n_gf,
                                   n_gf // 2,
                                   kernel_size=3,
                                   padding=1,
                                   stride=2,
                                   output_padding=1),
                norm(n_gf // 2), act
            ]
            n_gf //= 2

        model += [
            pad(3),
            nn.Conv2d(n_gf, output_ch, kernel_size=7, padding=0),
            nn.Tanh()
        ]

        self.model = nn.Sequential(*model)
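Each stride-2 convolution in the downsampling loop is mirrored by a stride-2 transposed convolution with output_padding=1, so the generator preserves spatial resolution whenever the input size is divisible by 2**n_downsample. A hypothetical sanity check, run from inside the class after build_model() (assuming opt.input_ch == opt.output_ch == 3):

import torch

x = torch.randn(1, 3, 256, 256)   # 256 is divisible by 2**n_downsample for typical settings
y = self.model(x)
assert y.shape == x.shape          # halving convs are exactly undone by the transposed convs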
Example No. 5
    def __call__(self,
                 x,
                 n_atts,
                 dim=64,
                 fc_dim=1024,
                 n_downsamplings=5,
                 weight_decay=0.0,
                 norm_name='instance_norm',
                 training=True,
                 scope='ConvD'):
        MAX_DIM = 1024

        conv_ = functools.partial(
            conv, weights_regularizer=slim.l2_regularizer(weight_decay))
        fc_ = functools.partial(
            fc, weights_regularizer=slim.l2_regularizer(weight_decay))
        norm = utils.get_norm_layer(norm_name,
                                    training,
                                    updates_collections=None)

        conv_norm_lrelu = functools.partial(conv_,
                                            normalizer_fn=norm,
                                            activation_fn=tf.nn.leaky_relu)

        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            z = x
            for i in range(n_downsamplings):
                d = min(dim * 2**i, MAX_DIM)
                z = conv_norm_lrelu(z, d, 4, 2)
            z = slim.flatten(z)

            # real/fake (adversarial) head
            logit_gan = tf.nn.leaky_relu(fc_(z, fc_dim))
            logit_gan = fc_(logit_gan, 1)

            # attribute-classification head
            logit_att = tf.nn.leaky_relu(fc_(z, fc_dim))
            logit_att = fc_(logit_att, n_atts)

        # variables and update operations
        self.variables = tf.global_variables(scope)
        self.trainable_variables = tf.trainable_variables(scope)
        self.reg_losses = tf.losses.get_regularization_losses(scope)

        return logit_gan, logit_att
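A hedged sketch of how the two heads might feed the discriminator losses; the variable names (logit_gan_real, logit_gan_fake, logit_att_real, att_labels) are hypothetical placeholders, and the particular loss choice is an assumption, not taken from this repository:

# vanilla GAN loss on the real/fake head, per-attribute sigmoid cross-entropy on the attribute head
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(logit_gan_real), logits=logit_gan_real))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(logit_gan_fake), logits=logit_gan_fake))
d_loss_att = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=att_labels, logits=logit_att_real))
d_loss = d_loss_real + d_loss_fake + d_loss_att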
Example No. 6
    def __call__(self,
                 zs,
                 a,
                 dim=64,
                 n_upsamplings=5,
                 shortcut_layers=1,
                 inject_layers=1,
                 weight_decay=0.0,
                 norm_name='batch_norm',
                 training=True,
                 scope='UNetGdec'):
        MAX_DIM = 1024

        dconv_ = functools.partial(
            dconv, weights_regularizer=slim.l2_regularizer(weight_decay))
        norm = utils.get_norm_layer(norm_name,
                                    training,
                                    updates_collections=None)

        dconv_norm_relu = functools.partial(dconv_,
                                            normalizer_fn=norm,
                                            activation_fn=tf.nn.relu)

        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            a = tf.to_float(a)

            z = utils.tile_concat(zs[-1], a)
            for i in range(n_upsamplings - 1):
                d = min(dim * 2**(n_upsamplings - 1 - i), MAX_DIM)
                z = dconv_norm_relu(z, d, 4, 2)
                if shortcut_layers > i:
                    # U-Net shortcut: concatenate the matching encoder feature map
                    z = utils.tile_concat([z, zs[-2 - i]])
                if inject_layers > i:
                    # re-inject the attribute vector at this resolution
                    z = utils.tile_concat(z, a)
            x = tf.nn.tanh(dconv_(z, 3, 4, 2))

        # variables and update operations
        self.variables = tf.global_variables(scope)
        self.trainable_variables = tf.trainable_variables(scope)
        self.reg_losses = tf.losses.get_regularization_losses(scope)

        return x
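Examples No. 2 and No. 6 form an encoder/decoder pair: the encoder returns the list zs of multi-scale feature maps, and the decoder consumes them together with a target attribute vector. A hedged wiring sketch, using the default scope names as hypothetical class names and x_real / target_atts as placeholder tensors:

Genc = UNetGenc()   # wraps the encoder __call__ of example No. 2
Gdec = UNetGdec()   # wraps the decoder __call__ of example No. 6

zs = Genc(x_real, dim=64, n_downsamplings=5)
x_fake = Gdec(zs, target_atts, dim=64, n_upsamplings=5)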
Example No. 7
def define_G(
    input_nc,
    output_nc,
    ngf,
    norm="batch",
    use_dropout=False,
    init_type="normal",
    init_gain=0.02,
    gpu_id="cuda:0",
):
    norm_layer = get_norm_layer(norm_type=norm)

    # 9-block ResNet generator
    net = ResnetGenerator(
        input_nc,
        output_nc,
        ngf,
        norm_layer=norm_layer,
        use_dropout=use_dropout,
        n_blocks=9,
    )

    return init_net(net, init_type, init_gain, gpu_id)
Example No. 8
    def build_model(self):
        act = nn.LeakyReLU(0.2, inplace=True)
        input_channel = self.opt.input_ch + self.opt.output_ch
        n_df = self.opt.n_df
        norm = get_norm_layer(self.opt.norm_type)
        blocks = []
        blocks += [[
            nn.Conv2d(input_channel, n_df, kernel_size=4, padding=1, stride=2),
            act
        ]]
        blocks += [[
            nn.Conv2d(n_df, 2 * n_df, kernel_size=4, padding=1, stride=2),
            norm(2 * n_df), act
        ]]
        blocks += [[
            nn.Conv2d(2 * n_df, 4 * n_df, kernel_size=4, padding=1, stride=2),
            norm(4 * n_df), act
        ]]
        blocks += [[
            nn.Conv2d(4 * n_df, 8 * n_df, kernel_size=4, padding=1, stride=1),
            norm(8 * n_df), act
        ]]

        if self.opt.GAN_type != 'GAN':
            # non-vanilla GAN losses (e.g. LSGAN) act on raw patch logits
            blocks += [[
                nn.Conv2d(8 * n_df, 1, kernel_size=4, padding=1, stride=1)
            ]]

        else:
            # vanilla GAN: squash the patch logits with a Sigmoid
            blocks += [[
                nn.Conv2d(8 * n_df, 1, kernel_size=4, padding=1, stride=1),
                nn.Sigmoid()
            ]]

        self.n_blocks = len(blocks)
        for i in range(self.n_blocks):
            setattr(self, 'block_{}'.format(i), nn.Sequential(*blocks[i]))
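Storing the blocks as separate block_i submodules (rather than one nn.Sequential) makes it easy to expose intermediate activations, e.g. for a feature-matching loss. A hedged sketch of a matching forward method (an assumption about how the class is used, not taken from this repository):

    def forward(self, x):
        result = [x]
        for i in range(self.n_blocks):
            block = getattr(self, 'block_{}'.format(i))
            result.append(block(result[-1]))
        return result[1:]  # per-block features; the last entry is the patch prediction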
Example No. 9
from pathlib import Path
from torchsummary import summary

from dataset import ColorizationDataset
from dataset import get_split
from transforms import (DualCompose, Resize, ImageOnly, HorizontalFlip,
                        VerticalFlip, ColorizationNormalize)
import models
import utils

## Building Generator
img_size = 128
input_nc = 3
output_nc = 3
use_dropout = False
norm_layer = utils.get_norm_layer(norm_type='batch')
netG = models.LeakyResnetGenerator(input_nc,
                                   output_nc,
                                   ngf=32,
                                   norm_layer=norm_layer,
                                   use_dropout=use_dropout,
                                   n_blocks=9)
## Initialize generator weights
utils.init_net(netG, init_type='normal', init_gain=0.02)
summary(netG, input_size=(3, img_size, img_size))

## Building Discriminator
netD = models.Discriminator(input_nc=3, img_size=img_size)
# netD = models.Discriminator(input_nc=6, img_size=img_size)
utils.init_net(netD, init_type='normal', init_gain=0.02)
summary(netD, input_size=(3, img_size, img_size))
Example No. 10
    def __call__(self,
                 xa,
                 b,
                 n_downsamplings=5,
                 n_masks=4,
                 dim=64,
                 weight_decay=0.0,
                 norm_name='batch_norm',
                 training=True,
                 scope='PAGANG'):
        MAX_DIM = 1024
        n_att = b.shape[-1]

        conv_ = functools.partial(
            conv, weights_regularizer=slim.l2_regularizer(weight_decay))
        dconv_ = functools.partial(
            dconv, weights_regularizer=slim.l2_regularizer(weight_decay))
        norm = utils.get_norm_layer(norm_name,
                                    training,
                                    updates_collections=None)

        conv_norm_relu = functools.partial(conv_,
                                           normalizer_fn=norm,
                                           activation_fn=tf.nn.relu)
        dconv_norm_relu = functools.partial(dconv_,
                                            normalizer_fn=norm,
                                            activation_fn=tf.nn.relu)

        def Gm(x, dim):
            # multi-scale mask branch: predicts one mask channel per attribute
            m0 = x
            m0 = conv_norm_relu(m0, dim, 1, 1)

            m1 = x
            m1 = conv_norm_relu(m1, dim, 3, 1)

            m2 = x
            m2 = conv_norm_relu(m2, dim, 3, 1)
            m2 = conv_norm_relu(m2, dim, 3, 1)

            m3 = x
            m3 = conv_norm_relu(m3, dim, 3, 1)
            m3 = conv_norm_relu(m3, dim, 3, 1)
            m3 = conv_norm_relu(m3, dim, 3, 1)

            m = tf.concat([m0, m1, m2, m3], axis=-1)

            m = conv_norm_relu(m, dim * 2, 4, 2)
            m = dconv_(m, n_att, 4, 2)

            return m

        def Ge(x, dim):
            # upsampling editing branch used at intermediate resolutions
            e = x
            e = dconv_norm_relu(e, num_outputs=dim, kernel_size=3, stride=1)
            e = dconv_norm_relu(e, num_outputs=dim, kernel_size=3, stride=2)
            return e

        def Ge_0(x, dim):
            # final editing branch: outputs the 3-channel image in [-1, 1]
            e = x
            e = dconv_norm_relu(e, num_outputs=dim, kernel_size=3, stride=1)
            e = dconv_(e, num_outputs=3, kernel_size=3, stride=2)
            e = tf.nn.tanh(e)
            return e

        # ======================================
        # =              network               =
        # ======================================

        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            b = tf.to_float(b)

            # downsamplings
            fa = xa

            fas = [fa]  # fas = [xa, fa_1, fa_2, ..., fa_n_downsamplings]
            for i in range(n_downsamplings):
                d = min(dim * 2**i, MAX_DIM)
                fa = conv_norm_relu(fa, d, 4, 2)
                fas.append(fa)

            # upsamplings
            fb = utils.tile_concat(fa, b)

            # 1 ~ n_downsamplings-n_masks
            for i in range(n_downsamplings - n_masks):
                d = min(int(dim * 2**(n_downsamplings - 2 - i)), MAX_DIM)
                fb = dconv_norm_relu(fb, d, 4, 2)

            # n_downsamplings-n_masks+1 ~ n_downsamplings
            ms = []
            ms_multi = []
            m_multi = None
            for i in range(n_downsamplings - n_masks, n_downsamplings):
                d = min(int(dim * 2**(n_downsamplings - 2 - i)), MAX_DIM)
                if i < n_downsamplings - 1:
                    Ge_ = functools.partial(Ge, dim=d)
                    Gm_ = functools.partial(Gm, dim=d)
                else:
                    Ge_ = functools.partial(Ge_0, dim=d)
                    Gm_ = functools.partial(Gm, dim=d)
                fb, e, mask, m_multi = attention_editor(fas[-2 - i],
                                                        fb,
                                                        b,
                                                        Ge_,
                                                        Gm_,
                                                        m_multi_pre=m_multi)
                ms.append(mask)
                ms_multi.append(m_multi)

            x = fb

        # variables and update operations
        self.variables = tf.global_variables(scope)
        self.trainable_variables = tf.trainable_variables(scope)
        self.reg_losses = tf.losses.get_regularization_losses(scope)

        return x, e, ms, ms_multi
Example No. 11
    def __init__(self, opt):
        super(Generator, self).__init__()
        if opt.HD:
            # ResNet-style generator: downsample, stack residual blocks, then upsample
            act = nn.ReLU(inplace=True)
            input_ch = opt.input_ch
            n_gf = opt.n_gf
            norm = get_norm_layer(opt.norm_type)
            output_ch = opt.output_ch
            pad = get_pad_layer(opt.padding_type)

            model = []
            model += [
                pad(3),
                nn.Conv2d(input_ch, n_gf, kernel_size=7, padding=0),
                norm(n_gf), act
            ]

            for _ in range(opt.n_downsample):
                model += [
                    nn.Conv2d(n_gf,
                              2 * n_gf,
                              kernel_size=3,
                              padding=1,
                              stride=2),
                    norm(2 * n_gf), act
                ]
                n_gf *= 2

            for _ in range(opt.n_residual):
                model += [ResidualBlock(n_gf, pad, norm, act)]

            for _ in range(opt.n_downsample):
                model += [
                    nn.ConvTranspose2d(n_gf,
                                       n_gf // 2,
                                       kernel_size=3,
                                       padding=1,
                                       stride=2,
                                       output_padding=1),
                    norm(n_gf // 2), act
                ]
                n_gf //= 2

            model += [
                pad(3),
                nn.Conv2d(n_gf, output_ch, kernel_size=7, padding=0),
                nn.Tanh()
            ]
            self.model = nn.Sequential(*model)

        else:
            # U-Net generator: paired Down_block_i / Up_block_i modules built below
            act_down = nn.LeakyReLU(0.2, inplace=True)
            act_up = nn.ReLU(inplace=True)
            image_height = opt.image_height
            input_ch = opt.input_ch
            max_ch = opt.max_ch
            n_downsample = int(log2(image_height))
            n_gf = opt.n_gf
            norm = nn.BatchNorm2d
            output_ch = opt.output_ch

            idx_max_ch = int(log2(max_ch // n_gf))
            for i in range(n_downsample):
                if i == 0:
                    down_block = [
                        nn.Conv2d(input_ch,
                                  n_gf,
                                  kernel_size=4,
                                  padding=1,
                                  stride=2,
                                  bias=False)
                    ]
                    up_block = [
                        act_up,
                        nn.ConvTranspose2d(2 * n_gf,
                                           output_ch,
                                           kernel_size=4,
                                           padding=1,
                                           stride=2,
                                           bias=False),
                        nn.Tanh()
                    ]

                elif 1 <= i <= idx_max_ch:
                    down_block = [
                        act_down,
                        nn.Conv2d(n_gf,
                                  2 * n_gf,
                                  kernel_size=4,
                                  padding=1,
                                  stride=2,
                                  bias=False),
                        norm(2 * n_gf)
                    ]

                    up_block = [
                        act_up,
                        nn.ConvTranspose2d(4 * n_gf,
                                           n_gf,
                                           kernel_size=4,
                                           padding=1,
                                           stride=2,
                                           bias=False),
                        norm(n_gf)
                    ]

                elif idx_max_ch < i < n_downsample - 4:
                    down_block = [
                        act_down,
                        nn.Conv2d(n_gf,
                                  n_gf,
                                  kernel_size=4,
                                  padding=1,
                                  stride=2,
                                  bias=False),
                        norm(n_gf)
                    ]

                    up_block = [
                        act_up,
                        nn.ConvTranspose2d(2 * n_gf,
                                           n_gf,
                                           kernel_size=4,
                                           padding=1,
                                           stride=2,
                                           bias=False),
                        norm(n_gf)
                    ]

                elif n_downsample - 4 <= i < n_downsample - 1:
                    down_block = [
                        act_down,
                        nn.Conv2d(n_gf,
                                  n_gf,
                                  kernel_size=4,
                                  padding=1,
                                  stride=2,
                                  bias=False),
                        norm(n_gf)
                    ]

                    up_block = [
                        act_up,
                        nn.ConvTranspose2d(2 * n_gf,
                                           n_gf,
                                           kernel_size=4,
                                           padding=1,
                                           stride=2,
                                           bias=False),
                        norm(n_gf),
                        nn.Dropout2d(0.5, inplace=True)
                    ]

                else:
                    down_block = [
                        act_down,
                        nn.Conv2d(n_gf,
                                  n_gf,
                                  kernel_size=4,
                                  padding=1,
                                  stride=2,
                                  bias=False)
                    ]

                    up_block = [
                        act_up,
                        nn.ConvTranspose2d(n_gf,
                                           n_gf,
                                           kernel_size=4,
                                           padding=1,
                                           stride=2,
                                           bias=False),
                        norm(n_gf),
                        nn.Dropout2d(0.5, inplace=True)
                    ]

                self.add_module('Down_block_{}'.format(i),
                                nn.Sequential(*down_block))
                self.add_module('Up_block_{}'.format(i),
                                nn.Sequential(*up_block))
                # double the channel count after the first block, as long as it is below max_ch
                n_gf *= 2 if n_gf < max_ch and i != 0 else 1

            self.n_downsample = n_downsample
        self.HD = opt.HD
        print(self)
        print("the number of G parameters",
              sum(p.numel() for p in self.parameters() if p.requires_grad))