def __init__(self, in_ch, out_ch, downsampling='stride'):
    """Downsampling stage: a same-resolution conv followed by a downsampling conv.

    Both ConvBlocks use instance normalization; `downsampling` selects the
    sampling mode of the second block (default 'stride').
    """
    super().__init__()
    # Feature extraction at the input resolution.
    self.conv = ConvBlock(in_ch, out_ch, sampling='same', normalization='instance')
    # Spatial reduction; mode is caller-configurable.
    self.down = ConvBlock(out_ch, out_ch, sampling=downsampling, normalization='instance')
# --- Example 2 ---
 def __init__(self, input_dim, out_ch):
     """Input stage: two same-resolution, pixel-normalized ConvBlocks."""
     super().__init__()
     self.input_dim = input_dim
     # Project input channels up to out_ch.
     self.conv1 = ConvBlock(input_dim, out_ch, sampling='same', normalization='pixel')
     # Refine at the same resolution and width.
     self.conv2 = ConvBlock(out_ch, out_ch, sampling='same', normalization='pixel')
# --- Example 3 ---
    def __init__(self):
        """Three-stage SRCNN (9-1-5): patch extraction, non-linear mapping,
        reconstruction.

        The final block has no activation so the network regresses raw
        pixel values.
        """
        super(SRCNN, self).__init__()

        # The raw nn.Conv2d experiments that used to live here were broken:
        # one call had an invalid positional signature and another was an
        # empty nn.Conv2d() missing its trailing comma (a SyntaxError).
        # Restored the original ConvBlock-based 9-1-5 stack.
        self.layers = nn.Sequential(
            ConvBlock(3, 64, kernel_size=9, padding=4),
            ConvBlock(64, 32, kernel_size=1, padding=0),
            ConvBlock(32, 3, kernel_size=5, padding=2, activation=None))
 def __init__(self, in_ch, out_ch):
     """Final discriminator stage: minibatch-stddev, conv, 4x4 valid conv, LayerNorm."""
     super().__init__()
     self.mb_stddev = MiniBatchStddev()
     # +1 input channel for the stddev feature map appended by MiniBatchStddev.
     self.conv = ConvBlock(in_ch + 1, out_ch, sampling='same', normalization='instance')
     # 4x4 'valid' conv collapses spatial extent; no normalization here.
     self.down = ConvBlock(out_ch, out_ch, kernel_size=4, padding='valid', normalization=None)
     self.norm = torch.nn.LayerNorm(out_ch)
# --- Example 5 ---
 def __init__(self, in_ch, out_ch, upsampling='upsampling'):
     """Upsampling stage: resize conv plus two refinement convs (pixel norm)."""
     super().__init__()
     # Spatial upsampling; mode is caller-configurable.
     self.up = ConvBlock(in_ch, in_ch, sampling=upsampling, normalization='pixel')
     self.conv1 = ConvBlock(in_ch, in_ch, sampling='same', normalization='pixel')
     # Final conv changes the channel count.
     self.conv2 = ConvBlock(in_ch, out_ch, sampling='same', normalization='pixel')
# --- Example 6 ---
def discriminator(img_height, img_width):
    """Build a DCGAN-style discriminator.

    Rescales pixels to [-1, 1], applies four progressively wider 5x5
    ConvBlocks, then flattens to a single logit.
    """
    model = keras.Sequential()
    # Map input pixels from [0, 255] to [-1, 1].
    model.add(
        keras.layers.experimental.preprocessing.Rescaling(
            scale=1. / 127.5,
            offset=-1,
            input_shape=(img_height, img_width, 3)))
    # Conv trunk: widths double at each stage.
    for filters in (128, 256, 512, 1024):
        model.add(ConvBlock(filters, 5))
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
# --- Example 7 ---
    def __init__(self, num_classes, attention=True, normalize_attn=True, init='default'):
        """VGG-style backbone with optional linear attention heads.

        Args:
            num_classes: width of the final classification layer.
            attention: if True, build a projector plus three attention blocks
                and classify from the concatenated attended features (512*3).
            normalize_attn: forwarded to each LinearAttentionBlock.
            init: weight-init scheme name; any unrecognized value falls back
                to the default Kaiming/BatchNorm/Linear initialization below.
        """
        super(AttnVGG_before, self).__init__()
        self.attention = attention
        # conv blocks
        # NOTE(review): the third ConvBlock argument appears to be a layer
        # repeat count — confirm against ConvBlock's definition.
        self.conv_block1 = ConvBlock(3, 64, 2)
        self.conv_block2 = ConvBlock(64, 128, 2)
        self.conv_block3 = ConvBlock(128, 256, 3)
        self.conv_block4 = ConvBlock(256, 512, 3)
        self.conv_block5 = ConvBlock(512, 512, 3)
        #         self.conv_block6 = ConvBlock(512, 512, 2, pool=True)
        #         self.dense = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=int(im_size/32), padding=0, bias=True)
        # "Dense" layers implemented as convolutions (VGG fc-as-conv style).
        self.dense1 = nn.Conv2d(in_channels=512, out_channels=4096, kernel_size=7, padding=0, bias=True)
        self.dense2 = nn.Conv2d(in_channels=4096, out_channels=4096, kernel_size=1, padding=0, bias=True)
        self.dense3 = nn.Conv2d(in_channels=4096, out_channels=512, kernel_size=1, padding=0, bias=True)

        # Projectors & Compatibility functions
        if self.attention:
            # Projector lifts the 256-channel mid features to 512 for attention.
            self.projector = ProjectorBlock(256, 512)
            self.attn1 = LinearAttentionBlock(in_features=512, normalize_attn=normalize_attn)
            self.attn2 = LinearAttentionBlock(in_features=512, normalize_attn=normalize_attn)
            self.attn3 = LinearAttentionBlock(in_features=512, normalize_attn=normalize_attn)
        # final classification layer
        if self.attention:
            self.classify = nn.Linear(in_features=512 * 3, out_features=num_classes, bias=True)
        else:
            self.classify = nn.Linear(in_features=512, out_features=num_classes, bias=True)
        # initialize
        if init == 'kaimingNormal':
            weights_init_kaimingNormal(self)
        elif init == 'kaimingUniform':
            weights_init_kaimingUniform(self)
        elif init == 'xavierNormal':
            weights_init_xavierNormal(self)
        elif init == 'xavierUniform':
            weights_init_xavierUniform(self)
        else:
            print("Initializing Default weights")
            # Default scheme: Kaiming-normal convs, unit BatchNorm, small-std linears.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.Linear):
                    nn.init.normal_(m.weight, 0, 0.01)
                    nn.init.constant_(m.bias, 0)
# --- Example 8 ---
    def __init__(self, config):
        """Decoder: AdaIN entry norm, residual bottleneck blocks, then
        nearest-neighbor upsampling conv stages.

        The last upsampling stage drops both normalization and activation.
        """
        super(Decoder, self).__init__()

        channels = config.dec_channels
        kernel_size = config.dec_kernel_size
        stride = config.dec_stride

        # Style choices fixed for this decoder.
        res_norm = 'none' # no adain in res
        norm = 'none'
        pad_type = 'reflect'
        acti = 'lrelu'

        n_resblk = config.dec_resblks
        n_conv = config.dec_up_n
        bt_channel = config.dec_bt_channel # #channels at the bottleneck

        # adain before everything
        layers = list(get_norm_layer('adain', channels[0]))

        for _ in range(n_resblk):
            layers.append(BottleNeckResBlock(kernel_size, channels[0], bt_channel,
                                             channels[0], pad_type=pad_type,
                                             norm=res_norm, acti=acti))

        for step in range(n_conv):
            layers.append(Upsample(scale_factor=2, mode='nearest'))
            last = step == n_conv - 1
            layers.extend(ConvBlock(kernel_size, channels[step], channels[step + 1],
                                    stride=stride, pad_type=pad_type,
                                    norm='none' if last else norm,
                                    acti='none' if last else acti))

        self.model = nn.Sequential(*layers)
        self.channels = channels
# --- Example 9 ---
 def __init__(self, in_ch):
     """To-RGB layer: map features to a 3-channel image squashed by tanh."""
     super().__init__()
     # No normalization on the output conv; tanh keeps values in [-1, 1].
     self.conv = ConvBlock(in_ch, 3, sampling='same', normalization=None,
                           activation=torch.nn.Tanh())
# --- Example 10 ---
 def __init__(self, *channels):
     """Chain of batch-normalized ConvBlocks over consecutive channel pairs,
     finished by an UpModule over the last pair."""
     super().__init__()
     # Consecutive (in, out) pairs, excluding the final pair handled by UpModule.
     pairs = zip(channels[:-2], channels[1:-1])
     self.convs = torch.nn.Sequential(
         *(ConvBlock(cin, cout, normalization='batch') for cin, cout in pairs))
     self.up = UpModule(channels[-2], channels[-1])
# --- Example 11 ---
    def __init__(self, config):
        """Patch discriminator: shared conv trunk (cnn_f) and a small
        activation-first classification head (cnn_c)."""
        super(PatchDis, self).__init__()

        channels = config.disc_channels
        down_n = config.disc_down_n
        ks = config.disc_kernel_size
        stride = config.disc_stride
        pool_ks = config.disc_pool_size
        pool_stride = config.disc_pool_stride
        out_dim = config.num_classes

        # One output width per downsampling stage.
        assert down_n + 1 == len(channels)

        cnn_f = ConvLayers(kernel_size=ks, in_channels=channels[0],
                           out_channels=channels[0])

        # Each stage: two acti-first res blocks (the second widens), then a
        # padded average pool for downsampling.
        for stage in range(down_n):
            cnn_f += [
                ActiFirstResBlock(kernel_size=ks, in_channels=channels[stage],
                                  out_channels=channels[stage], stride=stride,
                                  acti='lrelu', norm='none'),
                ActiFirstResBlock(kernel_size=ks, in_channels=channels[stage],
                                  out_channels=channels[stage + 1], stride=stride,
                                  acti='lrelu', norm='none'),
                get_conv_pad(pool_ks, pool_stride),
                nn.AvgPool1d(kernel_size=pool_ks, stride=pool_stride),
            ]

        # Two final res blocks at the widest channel count.
        for _ in range(2):
            cnn_f += [ActiFirstResBlock(kernel_size=ks, in_channels=channels[-1],
                                        out_channels=channels[-1], stride=stride,
                                        acti='lrelu', norm='none')]

        cnn_c = ConvBlock(kernel_size=ks, in_channels=channels[-1],
                          out_channels=out_dim, stride=1, norm='none',
                          acti='lrelu', acti_first=True)

        self.cnn_f = nn.Sequential(*cnn_f)
        self.cnn_c = nn.Sequential(*cnn_c)
        self.device = config.device
# --- Example 12 ---
    def __init__(self, in_ch=3):
        """Encoder/decoder that maps an image back to `in_ch` channels.

        Five DownBlocks narrow the spatial extent while widening features;
        five UpBlocks decode, and a final tanh conv produces the output image.
        """
        super().__init__()
        # Encoder: five downsampling stages.
        self.down1 = DownBlock(in_ch, 64, 64)
        self.down2 = DownBlock(64, 128, 128)
        self.down3 = DownBlock(128, 256, 256, 256)
        self.down4 = DownBlock(256, 512, 512, 512)
        self.down5 = DownBlock(512, 512, 512, 512)

        # Decoder. NOTE(review): the summed in-channel counts (e.g. 512 + 256)
        # suggest skip-connection concatenation in forward() — confirm against
        # UpBlock's signature.
        self.up1 = UpBlock(512, 512, 512, 256)
        self.up2 = UpBlock(512 + 256, 512, 256)
        self.up3 = UpBlock(256 + 512, 512, 128)
        self.up4 = UpBlock(128 + 256, 256, 64)
        self.up5 = UpBlock(64 + 128, 128, 32)
        # Tanh output keeps pixel values in [-1, 1].
        self.last_conv = ConvBlock(32 + 64, in_ch, activation=torch.nn.Tanh())
# --- Example 13 ---
 def __init__(self,
              num_classes,
              normalize_attn=False,
              init='kaimingNormal'):
     """VGG backbone with two grid-attention blocks.

     Classification is done from the concatenation of the two attended
     feature maps and the pooled top features (512 + 512 + 256 inputs).
     Raises NotImplementedError for an unrecognized `init` name.
     """
     super(AttnVGG_grid, self).__init__()
     # conv blocks
     self.conv_block1 = ConvBlock(3, 64, 2)
     self.conv_block2 = ConvBlock(64, 128, 2)
     self.conv_block3 = ConvBlock(128, 256, 3)
     self.conv_block4 = ConvBlock(256, 512, 3)
     self.conv_block5 = ConvBlock(512, 512, 3)
     self.pool = nn.AvgPool2d(2, stride=1)
     # Grid attention over mid- and high-level feature maps.
     self.attn1 = GridAttentionBlock(256,
                                     512,
                                     256,
                                     4,
                                     normalize_attn=normalize_attn)
     self.attn2 = GridAttentionBlock(512,
                                     512,
                                     256,
                                     2,
                                     normalize_attn=normalize_attn)
     # final classification layer
     self.classify = nn.Linear(in_features=512 + 512 + 256,
                               out_features=num_classes,
                               bias=True)
     # initialize
     if init == 'kaimingNormal':
         weights_init_kaimingNormal(self)
     elif init == 'kaimingUniform':
         weights_init_kaimingUniform(self)
     elif init == 'xavierNormal':
         weights_init_xavierNormal(self)
     elif init == 'xavierUniform':
         weights_init_xavierUniform(self)
     else:
         raise NotImplementedError("Invalid type of initialization!")
# --- Example 14 ---
 def __init__(self,
              im_size,
              num_classes,
              attention=True,
              normalize_attn=True,
              init='xavierUniform',
              interpolate=False):
     """VGG backbone with optional attention applied after the dense layer.

     Args:
         im_size: input image side length; the dense conv kernel is
             im_size / 32, so im_size must be a multiple of 32.
         num_classes: width of the final classification layer.
         attention: if True, build projector + three attention blocks and
             classify from the concatenated attended features (512*3).
         normalize_attn: forwarded to each LinearAttentionBlock.
         init: weight-init scheme name; unrecognized values raise
             NotImplementedError.
         interpolate: if True, build a nearest-neighbor Interpolate module.
     """
     super(AttnVGG_after, self).__init__()
     if interpolate:
         self.interpolate = Interpolate(size=(im_size, im_size),
                                        mode='nearest')
     else:
         self.interpolate = None
     self.attention = attention
     # conv blocks
     self.conv_block1 = ConvBlock(3, 64, 2)
     self.conv_block2 = ConvBlock(64, 128, 2)
     self.conv_block3 = ConvBlock(128, 256, 3)
     self.conv_block4 = ConvBlock(256, 512, 3)
     self.conv_block5 = ConvBlock(512, 512, 3)
     self.conv_block6 = ConvBlock(512, 512, 2, pool=True)
     # Fully-connected layer expressed as a conv whose kernel spans the
     # whole (im_size / 32)-sized feature map.
     self.dense = nn.Conv2d(in_channels=512,
                            out_channels=512,
                            kernel_size=int(im_size / 32),
                            padding=0,
                            bias=True)
     # Projectors & Compatibility functions
     if self.attention:
         # Projector lifts the 256-channel mid features to 512 for attention.
         self.projector = ProjectorBlock(256, 512)
         self.attn1 = LinearAttentionBlock(in_features=512,
                                           normalize_attn=normalize_attn)
         self.attn2 = LinearAttentionBlock(in_features=512,
                                           normalize_attn=normalize_attn)
         self.attn3 = LinearAttentionBlock(in_features=512,
                                           normalize_attn=normalize_attn)
     # final classification layer
     if self.attention:
         self.classify = nn.Linear(in_features=512 * 3,
                                   out_features=num_classes,
                                   bias=True)
     else:
         self.classify = nn.Linear(in_features=512,
                                   out_features=num_classes,
                                   bias=True)
     # initialize
     if init == 'kaimingNormal':
         weights_init_kaimingNormal(self)
     elif init == 'kaimingUniform':
         weights_init_kaimingUniform(self)
     elif init == 'xavierNormal':
         weights_init_xavierNormal(self)
     elif init == 'xavierUniform':
         weights_init_xavierUniform(self)
     else:
         raise NotImplementedError("Invalid type of initialization!")
# --- Example 15 ---
    def __init__(self, config, dim):
        """Style encoder: a stack of strided ConvBlocks plus a global max-pool.

        `dim` ("3d" or anything else, treated as 2d) selects the input
        channel count of the first conv.
        """
        super(EncoderStyle, self).__init__()
        channels = config.enc_cl_channels
        # First-layer width depends on whether the style input is 3d or 2d.
        channels[0] = config.style_channel_3d if dim == "3d" else config.style_channel_2d

        kernel_size = config.enc_cl_kernel_size
        stride = config.enc_cl_stride

        self.global_pool = F.max_pool1d

        n_convs = config.enc_cl_down_n
        layers = []
        for level in range(n_convs):
            layers.extend(ConvBlock(kernel_size, channels[level], channels[level + 1],
                                    stride=stride, norm='none', acti='lrelu'))

        self.conv_model = nn.Sequential(*layers)
        self.channels = channels
# --- Example 16 ---
    def __init__(self, config):
        """Content encoder: strided downsampling ConvBlocks followed by
        instance-normalized residual blocks."""
        super(EncoderContent, self).__init__()
        channels = config.enc_co_channels
        kernel_size = config.enc_co_kernel_size
        stride = config.enc_co_stride

        n_convs = config.enc_co_down_n
        n_resblk = config.enc_co_resblks
        acti = 'lrelu'

        # The channel list must supply an output width for every conv stage.
        assert n_convs + 1 == len(channels)

        layers = []
        for level in range(n_convs):
            layers.extend(ConvBlock(kernel_size, channels[level], channels[level + 1],
                                    stride=stride, norm='in', acti=acti))

        # Residual refinement at the bottleneck width.
        for _ in range(n_resblk):
            layers.append(ResBlock(kernel_size, channels[-1], stride=1,
                                   pad_type='reflect', norm='in', acti=acti))

        self.conv_model = nn.Sequential(*layers)
        self.channels = channels
# --- Example 17 ---
import tensorflow as tf
import os
import sys
import pathlib

# Make the repository root importable so `blocks` resolves when this file is
# run directly as a script.
current_dir = pathlib.Path(__file__).resolve().parent
sys.path.append(os.path.join(current_dir, '../'))
from blocks import ConvBlock

# Smoke test: build a spectral-normalized, batch-normalized upsampling
# ConvBlock and run a dummy batch through it in both modes.
conv = ConvBlock(32,
                 kernel_size=(3, 3),
                 activation_='lrelu',
                 dilation_rate=(1, 1),
                 sampling='up',
                 normalization='batch',
                 spectral_norm=True)
# Dummy NHWC batch: 10 images of 32x32x3.
x = tf.random.normal(shape=(10, 32, 32, 3))

# Exercise both training and inference paths (batch norm behaves differently).
y = conv(x, training=True)
y_ = conv(x, training=False)
# --- Example 18 ---
    def __init__(self,
                 im_size,
                 num_classes,
                 attention=True,
                 normalize_attn=True,
                 init='xavierUniform',
                 _base_features=64,
                 dropout=0.0):
        """Configurable-width VGG backbone with attention and dropout.

        Args:
            im_size: input image side length; the dense conv kernel is
                im_size / 32, so im_size must be a multiple of 32.
            num_classes: width of the final classification layer.
            attention: if True, build projector + three attention blocks and
                classify from the concatenated attended features.
            normalize_attn: forwarded to each LinearAttentionBlock.
            init: weight-init scheme name; unrecognized values raise
                NotImplementedError.
            _base_features: channel count of the first stage; later stages
                scale as multiples of it.
            dropout: dropout probability forwarded to every ConvBlock.
        """
        super(AttnVGG_before, self).__init__()

        #self.base_features = 64
        self.base_features = _base_features

        self.dropout = nn.Dropout(p=dropout)

        self.attention = attention
        # conv blocks (widths are multiples of base_features; input is 1-channel)
        self.conv_block1 = ConvBlock(1, self.base_features, 2, dropout=dropout)
        self.conv_block2 = ConvBlock(self.base_features,
                                     self.base_features * 2,
                                     2,
                                     dropout=dropout)
        self.conv_block3 = ConvBlock(self.base_features * 2,
                                     self.base_features * 4,
                                     3,
                                     dropout=dropout)
        self.conv_block4 = ConvBlock(self.base_features * 4,
                                     self.base_features * 8,
                                     3,
                                     dropout=dropout)
        self.conv_block5 = ConvBlock(self.base_features * 8,
                                     self.base_features * 8,
                                     3,
                                     dropout=dropout)
        self.conv_block6 = ConvBlock(self.base_features * 8,
                                     self.base_features * 8,
                                     2,
                                     pool=True,
                                     dropout=dropout)
        # Fully-connected layer expressed as a conv spanning the whole
        # (im_size / 32)-sized feature map.
        self.dense = nn.Conv2d(in_channels=self.base_features * 8,
                               out_channels=self.base_features * 8,
                               kernel_size=int(im_size / 32),
                               padding=0,
                               bias=True)
        # Projectors & Compatibility functions
        if self.attention:
            self.projector = ProjectorBlock(self.base_features * 4,
                                            self.base_features * 8)
            self.attn1 = LinearAttentionBlock(in_features=self.base_features *
                                              8,
                                              normalize_attn=normalize_attn)
            self.attn2 = LinearAttentionBlock(in_features=self.base_features *
                                              8,
                                              normalize_attn=normalize_attn)
            self.attn3 = LinearAttentionBlock(in_features=self.base_features *
                                              8,
                                              normalize_attn=normalize_attn)
        # final classification layer

        if self.attention:
            # Three attended feature vectors are concatenated before classify.
            self.classify = nn.Linear(in_features=(self.base_features * 8) * 3,
                                      out_features=num_classes,
                                      bias=True)
        else:
            self.classify = nn.Linear(in_features=self.base_features * 8,
                                      out_features=num_classes,
                                      bias=True)
        # initialize
        if init == 'kaimingNormal':
            weights_init_kaimingNormal(self)
        elif init == 'kaimingUniform':
            weights_init_kaimingUniform(self)
        elif init == 'xavierNormal':
            weights_init_xavierNormal(self)
        elif init == 'xavierUniform':
            weights_init_xavierUniform(self)
        else:
            raise NotImplementedError("Invalid type of initialization!")
# --- Example 19 ---
    def __init__(self, in_ch):
        """Network-in-Network-style discriminator.

        An AlexNet-like strided 11x11 stem followed by three blocks of 1x1
        "mlpconv" layers with strided downsampling, ending in a 1x1 conv
        with a sigmoid that emits a per-location real/fake probability.
        """
        super().__init__()
        # Stem: large strided kernel, as in AlexNet's first layer.
        self.block1 = ConvBlock(in_ch,
                                96,
                                kernel_size=11,
                                sampling='stride',
                                normalization='batch',
                                stride=4,
                                padding='valid')
        # Two 1x1 convs then a strided 3x3 downsampling conv.
        self.block2 = torch.nn.Sequential(
            ConvBlock(96, 96, kernel_size=1, normalization='batch'),
            ConvBlock(96, 96, kernel_size=1, normalization='batch'),
            ConvBlock(96,
                      96,
                      kernel_size=3,
                      normalization='batch',
                      sampling='stride',
                      padding='valid'))

        # Same pattern, widening to 256 via a strided 5x5 conv.
        self.block3 = torch.nn.Sequential(
            ConvBlock(96, 96, kernel_size=1, normalization='batch'),
            ConvBlock(96, 96, kernel_size=1, normalization='batch'),
            ConvBlock(96,
                      256,
                      kernel_size=5,
                      normalization='batch',
                      sampling='stride',
                      padding='valid'))

        # Head: 1x1 convs up to 1024 channels, then a sigmoid output map.
        self.block4 = torch.nn.Sequential(
            ConvBlock(256, 512, kernel_size=1, normalization='batch'),
            ConvBlock(512, 1024, kernel_size=1, normalization='batch'),
            ConvBlock(1024, 1024, kernel_size=1, normalization='batch'),
            ConvBlock(1024, 1, kernel_size=1, activation=torch.nn.Sigmoid()))
 def __init__(self, out_ch):
     """From-RGB layer: lift a 3-channel image into out_ch feature maps."""
     super().__init__()
     self.conv = ConvBlock(3, out_ch, sampling='same', normalization='instance')
# --- Example 21 ---
 def __init__(self, in_ch, out_ch):
     """Thin wrapper holding a single default-configured ConvBlock."""
     super().__init__()
     self.conv = ConvBlock(in_ch, out_ch)
# --- Example 22 ---
 def __init__(self, in_ch, out_ch):
     """Single ConvBlock configured to upsample its input."""
     super().__init__()
     self.conv = ConvBlock(in_ch, out_ch, sampling='upsampling')