Exemplo n.º 1
0
    def __init__(self, in_channel, depth, stride):
        """Bottleneck residual block: 1x1 reduce, strided 3x3, 1x1 expand.

        Args:
            in_channel: number of input channels.
            depth: base width; the block outputs ``self.expansion * depth`` channels.
            stride: stride of the middle 3x3 convolution.
        """
        super(bottleneck_ORIG, self).__init__()
        in_planes = in_channel
        planes = depth
        # Note: norm layers are InstanceNorm2d, not BatchNorm, despite the bn* names.
        self.conv1 = Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = InstanceNorm2d(planes)
        self.conv2 = Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
        self.bn2 = InstanceNorm2d(planes)
        self.conv3 = Conv2d(planes,
                            self.expansion * planes,
                            kernel_size=1,
                            bias=False)
        self.bn3 = InstanceNorm2d(self.expansion * planes)

        # Projection shortcut only when spatial size or channel count changes;
        # an empty Sequential otherwise acts as identity.
        self.shortcut = Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = Sequential(
                Conv2d(in_planes,
                       self.expansion * planes,
                       kernel_size=1,
                       stride=stride,
                       bias=False), InstanceNorm2d(self.expansion * planes))
Exemplo n.º 2
0
    def __init__(self,
                 in_channels,
                 channels,
                 stride=1,
                 downsample=False,
                 upsample=False):
        """Pre-norm bottleneck with optional down-/up-sampling shortcut convs.

        Args:
            in_channels: input channel count.
            channels: base width; the main path outputs ``channels * self.expansion``.
            stride: stride of the middle ConvBlock.
            downsample: if True, add a strided 1x1 conv shortcut.
            upsample: if True, add an upsampling ConvBlock shortcut.
        """
        super().__init__()

        # Optional shortcut projections; None when the feature map is unchanged.
        self.down_conv = Conv2d(in_channels,
                                channels * self.expansion,
                                kernel_size=1,
                                stride=stride) if downsample else None
        self.up_conv = ConvBlock(in_channels,
                                 channels * self.expansion,
                                 kernel_size=1,
                                 stride=1,
                                 upsample=upsample) if upsample else None

        # Main path: norm + ReLU precede each conv (pre-activation ordering).
        self.conv_block = Sequential(
            InstanceNorm2d(in_channels), ReLU(),
            Conv2d(in_channels, channels, kernel_size=1, stride=1),
            InstanceNorm2d(channels), ReLU(),
            ConvBlock(channels,
                      channels,
                      kernel_size=3,
                      stride=stride,
                      upsample=upsample), InstanceNorm2d(channels), ReLU(),
            Conv2d(channels,
                   channels * self.expansion,
                   kernel_size=1,
                   stride=1))
Exemplo n.º 3
0
 def __init__(self, in_channels = 4, ngf = 32, n_layers = 5):
     """Sketch generator: conv encoder, residual transformer stack, 3-stage decoder.

     Args:
         in_channels: channels of the input image.
         ngf: base feature width of the first encoder layer.
         n_layers: number of mySBlock units in the transformer stack.
     """
     super(SketchGenerator, self).__init__()

     # Encoder: 9x9 stem then two stride-2 custom convs (ngf -> 2*ngf -> 4*ngf).
     encoder = []
     encoder.append(Conv2d(out_channels=ngf, kernel_size=9, padding=4, in_channels=in_channels))
     encoder.append(ReLU())
     encoder.append(mySConv(ngf*2, 2, ngf))
     encoder.append(mySConv(ngf*4, 2, ngf*2))

     # The +1 / +2 channel offsets below presumably account for extra maps
     # concatenated in forward() — TODO confirm against the forward pass.
     transformer = []
     for n in range(n_layers):
         transformer.append(mySBlock(ngf*4+1))

     # Decoder: two 2x transposed convs, then a 9x9 conv to 3 channels + Tanh.
     decoder1 = []
     decoder2 = []
     decoder3 = []
     decoder1.append(ConvTranspose2d(out_channels=ngf*2, kernel_size=4, stride=2, padding=0, in_channels=ngf*4+2))
     decoder1.append(InstanceNorm2d(num_features=ngf*2))
     decoder1.append(ReLU())
     decoder2.append(ConvTranspose2d(out_channels=ngf, kernel_size=4, stride=2, padding=0, in_channels=ngf*2+1))
     decoder2.append(InstanceNorm2d(num_features=ngf))
     decoder2.append(ReLU())
     decoder3.append(Conv2d(out_channels=3, kernel_size=9, padding=1, in_channels=ngf+1))
     decoder3.append(Tanh())

     self.encoder = nn.Sequential(*encoder)
     self.transformer = nn.Sequential(*transformer)
     self.decoder1 = nn.Sequential(*decoder1)
     self.decoder2 = nn.Sequential(*decoder2)
     self.decoder3 = nn.Sequential(*decoder3)
Exemplo n.º 4
0
 def __init__(self, num_layers, mode='ir', input_nc=3):
     """ResNet-style encoder whose last feature map is projected into W+ space.

     Args:
         num_layers: backbone depth; one of 50, 100, 152.
         mode: 'ir' for plain IR bottlenecks, 'ir_se' for the squeeze-excite variant.
         input_nc: number of input image channels.
     """
     super(BackboneEncoderUsingLastLayerIntoWPlus, self).__init__()
     print('Using BackboneEncoderUsingLastLayerIntoWPlus')
     assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
     assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
     blocks = get_blocks(num_layers)
     if mode == 'ir':
         unit_module = bottleneck_IR
     elif mode == 'ir_se':
         unit_module = bottleneck_IR_SE
     self.input_layer = Sequential(Conv2d(input_nc, 64, (3, 3), 1, 1, bias=False),
                                   InstanceNorm2d(64),
                                   PReLU(64))
     # Pool the 512-channel feature map to 7x7, flatten, and project to a 512 vector.
     self.output_layer_2 = Sequential(InstanceNorm2d(512),
                                      torch.nn.AdaptiveAvgPool2d((7, 7)),
                                      Flatten(),
                                      Linear(512 * 7 * 7, 512))
     # Expand the single 512 vector into 18 style vectors (W+).
     self.linear = FullyConnectedLayer(512, 512 * 18)
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(unit_module(bottleneck.in_channel,
                                        bottleneck.depth,
                                        bottleneck.stride))
     self.body = Sequential(*modules)
Exemplo n.º 5
0
 def __init__(self, in_channel, depth, stride):
     """IR-SE bottleneck: separable-conv residual path with squeeze-excite attention.

     Args:
         in_channel: input channel count.
         depth: output channel count.
         stride: stride of the last depthwise conv (the shortcut matches it).
     """
     super(bottleneck_IR_SE, self).__init__()
     # Shortcut: kernel-1 MaxPool (stride only) when channels already match,
     # otherwise a strided 1x1 projection conv.
     if in_channel == depth:
         self.shortcut_layer = MaxPool2d(1, stride)
     else:
         self.shortcut_layer = Sequential(
             Conv2d(in_channel, depth, (1, 1), stride, bias=False), )
     # Residual path: two pointwise(1x1)+depthwise(3x3, groups=depth) pairs,
     # with an SEModule before the final PReLU/InstanceNorm.
     self.res_layer = Sequential(
         Conv2d(in_channel,
                depth,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False),
         Conv2d(depth,
                depth,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
                groups=depth), PReLU(depth), InstanceNorm2d(depth),
         Conv2d(depth,
                depth,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False),
         Conv2d(depth,
                depth,
                kernel_size=3,
                stride=stride,
                padding=1,
                bias=False,
                groups=depth), SEModule(depth, 16), PReLU(depth),
         InstanceNorm2d(depth))
Exemplo n.º 6
0
    def __init__(self):
        """Build the decoder pipeline: three conv/norm/ReLU stages with two 2x
        nearest-neighbor upsamples, ending in a 3-channel output conv.

        Layers are registered under the string names "0".."14", matching the
        original sequential numbering.
        """
        super(InverseNet, self).__init__()
        stages = [
            Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            InstanceNorm2d(128, affine=True),
            ReLU(),
            nn.Upsample(scale_factor=2, mode="nearest"),
            Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            InstanceNorm2d(128, affine=True),
            ReLU(),
            Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            InstanceNorm2d(64, affine=True),
            ReLU(),
            nn.Upsample(scale_factor=2, mode="nearest"),
            Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            InstanceNorm2d(64, affine=True),
            ReLU(),
            Conv2d(64, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        ]
        self.slice = torch.nn.Sequential()
        for index, stage in enumerate(stages):
            self.slice.add_module(str(index), stage)

        self.init_weights()
Exemplo n.º 7
0
 def __init__(self, inplanes, planes, stride=1, downsample=None):
   """ResNet BasicBlock variant using InstanceNorm in place of BatchNorm.

   Args:
       inplanes: input channel count.
       planes: channel count of both 3x3 convs.
       stride: stride of the first conv.
       downsample: optional module applied to the shortcut (supplied by caller).
   """
   super(BasicBlockIn, self).__init__()
   self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
   self.bn1 = InstanceNorm2d(planes, eps=1e-05, momentum=0.1, affine=True)
   self.relu = nn.ReLU(inplace=True)
   self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
   self.bn2 = InstanceNorm2d(planes, eps=1e-05, momentum=0.1, affine=True)
   self.downsample = downsample
   self.stride = stride
Exemplo n.º 8
0
    def __init__(self, in_channels=4, ngf=32, n_layers=5):
        """
        Sketch generator.
        :param in_channels: number of input channels (4 when called here)
        :param ngf: feature count of the generator's first layer (32 when called here)
        :param n_layers: number of layers in the transformer stack (6 when called here)
        """
        super(SketchGenerator, self).__init__()

        # Encoder: 9x9 stem, ReLU, then two stride-2 custom convs.
        encoder = []
        encoder.append(
            Conv2d(in_channels=in_channels,
                   out_channels=ngf,
                   kernel_size=9,
                   padding=4))
        encoder.append(ReLU())
        encoder.append(mySConv(ngf * 2, 2, ngf))
        encoder.append(mySConv(ngf * 4, 2, ngf * 2))

        transformer = []
        for n in range(n_layers):  # n_layers: number of transformer layers
            transformer.append(mySBlock(ngf * 4 + 1))  # TODO: why the extra +1 channel?

        decoder1 = []
        decoder2 = []
        decoder3 = []
        decoder1.append(
            ConvTranspose2d(out_channels=ngf * 2,
                            kernel_size=4,
                            stride=2,
                            padding=0,
                            in_channels=ngf * 4 + 2))  # why +2 channels here?
        decoder1.append(InstanceNorm2d(num_features=ngf * 2))
        decoder1.append(ReLU())

        decoder2.append(
            ConvTranspose2d(out_channels=ngf,
                            kernel_size=4,
                            stride=2,
                            padding=0,
                            in_channels=ngf * 2 + 1))  # why +1 channel here?
        decoder2.append(InstanceNorm2d(num_features=ngf))
        decoder2.append(ReLU())

        decoder3.append(
            Conv2d(out_channels=3,
                   kernel_size=9,
                   padding=1,
                   in_channels=ngf + 1))
        decoder3.append(Tanh())

        self.encoder = nn.Sequential(*encoder)
        self.transformer = nn.Sequential(*transformer)
        self.decoder1 = nn.Sequential(*decoder1)
        self.decoder2 = nn.Sequential(*decoder2)
        self.decoder3 = nn.Sequential(*decoder3)
Exemplo n.º 9
0
 def __init__(self, in_channels, out_channels):
     """First residual block: two 3x3 convs on the main path, 1x1 projection bypass."""
     super(FirstResBlock, self).__init__()
     main_path = [
         Conv2d(in_channels, out_channels, 3, 1, padding=1),
         InstanceNorm2d(out_channels, affine=True),
         nn.ReLU(),
         Conv2d(out_channels, out_channels, 3, 1, padding=1),
     ]
     self.model = nn.Sequential(*main_path)
     # Bypass projects the input to the new channel count and normalizes it.
     bypass_path = [
         Conv2d(in_channels, out_channels, 1, 1, padding=0),
         InstanceNorm2d(out_channels, affine=True),
     ]
     self.bypass = nn.Sequential(*bypass_path)
Exemplo n.º 10
0
 def __init__(self, in_channels, out_channels):
     """Pre-norm conv block: InstanceNorm -> ReLU -> 3x3 conv -> non-affine InstanceNorm."""
     super(ConvBlock, self).__init__()
     self.out_channels = out_channels
     self.model = nn.Sequential(
         InstanceNorm2d(in_channels, affine=True),
         nn.ReLU(),
         Conv2d(in_channels, out_channels, 3, 1, padding=1),
         # normalize to (0, 1) Gaussian: last norm deliberately has no affine params
         InstanceNorm2d(out_channels, affine=False),
     )
Exemplo n.º 11
0
 def __init__(self, in_channels, out_channels, cat_channels):
     """Decoder residual block: 2x upsample, then pre-norm conv path plus 1x1 bypass.

     Args:
         in_channels: channels entering the block (halved by the upsample).
         out_channels: channels produced by both paths.
         cat_channels: channels of the encoder skip features concatenated after upsampling.
     """
     super(DecodingResBlock, self).__init__()
     # cat_channels: number of channels of the features from encoding path
     self.up = UpSampleBlock(in_channels, int(in_channels / 2))
     n_main = int(in_channels / 2) + cat_channels
     self.main = nn.Sequential(
         InstanceNorm2d(n_main, affine=True), nn.ReLU(),
         Conv2d(n_main, out_channels, 3, 1, padding=1),
         InstanceNorm2d(out_channels, affine=True), nn.ReLU(),
         Conv2d(out_channels, out_channels, 3, 1, padding=1))
     self.bypass = nn.Sequential(
         Conv2d(n_main, out_channels, 1, 1, padding=0),
         InstanceNorm2d(out_channels, affine=True))
    def __init__(self):
        """OCR backbone: 11 conv layers with norm/pooling plus a 1-channel attention head.

        conv11 maps 512 features to 94 output classes; conv_attenton produces
        the attention map over the 512-channel feature.
        """
        super(OCRModel, self).__init__()
        self.conv1 = Conv2d(1, 32, (3, 3), padding=1, bias=False)
        self.conv2 = Conv2d(32, 32, (3, 3), padding=1, bias=False)
        self.conv3 = Conv2d(32, 64, (3, 3), padding=1, bias=False)
        self.conv4 = Conv2d(64, 64, (3, 3), padding=1, bias=False)
        self.conv5 = Conv2d(64, 128, (3, 3), padding=1, bias=False)
        self.conv6 = Conv2d(128, 128, (3, 3), padding=1, bias=False)
        self.conv7 = Conv2d(128, 256, (3, 3), padding=1, bias=False)
        self.conv8 = Conv2d(256, 256, (3, 3), padding=1, bias=False)
        # Asymmetric kernels collapse height while keeping width context.
        self.conv9 = Conv2d(256, 512, (2, 3), padding=(0, 1), bias=False)
        self.conv10 = Conv2d(512, 512, (1, 5), padding=(0, 2), bias=False)
        self.conv11 = Conv2d(512, 94, (1, 1), padding=(0, 0))

        self.conv_attenton = Conv2d(512, 1, (1, 1), padding=0)

        # Despite the `batch*` names these are InstanceNorm layers.
        self.batch1 = InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
        self.batch2 = InstanceNorm2d(32, eps=1e-05, momentum=0.1, affine=True)
        self.batch3 = InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.batch5 = InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
        self.batch7 = InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.batch8 = InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.batch9 = InstanceNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.batch10 = InstanceNorm2d(512,
                                      eps=1e-05,
                                      momentum=0.1,
                                      affine=True)
        self.drop1 = Dropout2d(p=0.2, inplace=False)
        self.leaky = LeakyReLU(negative_slope=0.01, inplace=False)
        # max2 pools height only (2, 1), preserving horizontal resolution for text.
        self.max1 = MaxPool2d((2, 2), stride=None)
        self.max2 = MaxPool2d((2, 1), stride=(2, 1))
Exemplo n.º 13
0
 def __init__(self, in_channels, out_channels):
     """Encoder residual block: pre-norm stride-2 conv path + avg-pooled 1x1 bypass.

     Both paths halve the spatial resolution (stride-2 conv vs AvgPool2d(2)).
     """
     super(EncodingResBlock, self).__init__()
     conv1 = Conv2d(in_channels, out_channels, 3, 2, padding=1)
     conv2 = Conv2d(out_channels, out_channels, 3, 1, padding=1)
     bypass_conv = Conv2d(in_channels, out_channels, 1, 1, padding=0)
     self.model = nn.Sequential(
         InstanceNorm2d(in_channels, affine=True),
         nn.ReLU(),
         conv1,
         InstanceNorm2d(out_channels, affine=True),
         nn.ReLU(),
         conv2,
     )
     self.bypass = nn.Sequential(bypass_conv, nn.AvgPool2d(2),
                                 InstanceNorm2d(out_channels, affine=True))
Exemplo n.º 14
0
 def __init__(self) -> None:
     """Style-transfer net: 3 downsampling convs, 5 residual blocks, 3 upsampling layers.

     The ``L[...]`` literal type parameters only record channel/kernel/stride
     values for the type checker; they have no runtime effect.
     """
     super(TransformerNet, self).__init__()
     # Initial convolution layers
     self.conv1: ConvLayer[L[3], L[32], L[9],
                           L[1]] = ConvLayer(3, 32, kernel_size=9, stride=1)
     self.in1: InstanceNorm2d[L[32]] = InstanceNorm2d(32, affine=True)
     self.conv2: ConvLayer[L[32], L[64], L[3],
                           L[2]] = ConvLayer(32,
                                             64,
                                             kernel_size=3,
                                             stride=2)
     self.in2: InstanceNorm2d[L[64]] = InstanceNorm2d(64, affine=True)
     self.conv3: ConvLayer[L[64], L[128], L[3],
                           L[2]] = ConvLayer(64,
                                             128,
                                             kernel_size=3,
                                             stride=2)
     self.in3: InstanceNorm2d[L[128]] = InstanceNorm2d(128, affine=True)
     # Residual layers
     self.res1: ResidualBlock[L[128]] = ResidualBlock(128)
     self.res2: ResidualBlock[L[128]] = ResidualBlock(128)
     self.res3: ResidualBlock[L[128]] = ResidualBlock(128)
     self.res4: ResidualBlock[L[128]] = ResidualBlock(128)
     self.res5: ResidualBlock[L[128]] = ResidualBlock(128)
     # Upsampling Layers
     self.deconv1: UpsampleConvLayer[L[128], L[64], L[3], L[1],
                                     L[2]] = UpsampleConvLayer(
                                         128,
                                         64,
                                         kernel_size=3,
                                         stride=1,
                                         upsample=2)
     self.in4: InstanceNorm2d[L[64]] = InstanceNorm2d(64, affine=True)
     self.deconv2: UpsampleConvLayer[L[64], L[32], L[3], L[1],
                                     L[2]] = UpsampleConvLayer(
                                         64,
                                         32,
                                         kernel_size=3,
                                         stride=1,
                                         upsample=2)
     self.in5: InstanceNorm2d[L[32]] = InstanceNorm2d(32, affine=True)
     self.deconv3: ConvLayer[L[32], L[3], L[9],
                             L[1]] = ConvLayer(32,
                                               3,
                                               kernel_size=9,
                                               stride=1)
     # Non-linearities
     self.relu = ReLU()
Exemplo n.º 15
0
    def __init__(self, num_layers, mode='ir', input_nc=3):
        """Feature-pyramid encoder producing 18 style vectors at 3 granularities.

        Args:
            num_layers: backbone depth; one of 50, 100, 152.
            mode: 'ir' for plain IR bottlenecks, 'ir_se' for the squeeze-excite variant.
            input_nc: number of input image channels.
        """
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(input_nc, 64, (3, 3), 1, 1, bias=False),
                                      InstanceNorm2d(64),
                                      PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)

        # 18 style heads: indices [0, coarse_ind) are coarse, [coarse_ind,
        # middle_ind) are medium, and the rest are fine, each reading from a
        # progressively higher-resolution feature (32 / 64 / 128).
        self.styles = nn.ModuleList()
        self.style_count = 18
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 32)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 64)
            else:
                style = GradualStyleBlock(512, 512, 128)
            self.styles.append(style)
        # Lateral 1x1 convs lift lower-level features to 512 channels for merging.
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
Exemplo n.º 16
0
 def __init__(self, num_filter=128):
     """Transformer unit: custom stride-1 conv, then a plain 3x3 conv with norm and ReLU."""
     super(mySBlock, self).__init__()
     self.myconv = mySConv(num_filter=num_filter, stride=1, in_channels=num_filter)
     self.conv = Conv2d(in_channels=num_filter, out_channels=num_filter, kernel_size=3, padding=1)
     self.bn = InstanceNorm2d(num_features=num_filter)
     self.relu = ReLU()
Exemplo n.º 17
0
    def __init__(self,
                 in_channels,
                 out_channels=64,
                 kernel_size=3,
                 padding=1,
                 useNorm='BN',
                 up=False):
        """Deconvolution block: transposed conv (or upsample+conv) followed by a norm.

        Args:
            in_channels: input channel count.
            out_channels: output channel count.
            kernel_size: kernel size of the (de)convolution.
            padding: padding of the transposed convolution.
            useNorm: 'IN' -> InstanceNorm2d, 'BN' -> BatchNorm2d, anything else -> Identity.
            up: False -> stride-2 ConvTranspose2d; True -> 2x UpsampleConLayer.
        """
        super(RDCBBlock, self).__init__()

        self.relu = ReLU()
        # Idiomatic truthiness test (was `if up == False:`).
        if not up:
            self.dconv = ConvTranspose2d(out_channels=out_channels,
                                         kernel_size=kernel_size,
                                         stride=2,
                                         padding=padding,
                                         in_channels=in_channels)
        else:
            self.dconv = UpsampleConLayer(out_channels=out_channels,
                                          kernel_size=kernel_size,
                                          stride=1,
                                          in_channels=in_channels,
                                          upsample=2)

        if useNorm == 'IN':
            self.bn = InstanceNorm2d(num_features=out_channels, affine=True)
        elif useNorm == 'BN':
            self.bn = BatchNorm2d(num_features=out_channels)
        else:
            self.bn = Identity()
Exemplo n.º 18
0
    def __init__(self, block, input_nc, num_blocks):
        """Feature Pyramid Network over a 4-stage ResNet-style backbone.

        Args:
            block: residual block class used by ``self._make_layer``.
            input_nc: number of input image channels.
            num_blocks: per-stage block counts (list of 4 ints).
        """
        super(FPN, self).__init__()
        self.in_planes = 64

        # Stem: 7x7 stride-2 conv; note the norm is InstanceNorm despite `bn1`.
        self.conv1 = Conv2d(input_nc,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
        self.bn1 = InstanceNorm2d(64)

        # Bottom-up layers
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)

        # Top layer
        self.toplayer = Conv2d(2048, 512, kernel_size=1, stride=1,
                               padding=0)  # Reduce channels

        # Smooth layers
        self.smooth1 = Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.smooth2 = Conv2d(512, 512, kernel_size=3, stride=1, padding=1)

        # Lateral layers
        self.latlayer1 = Conv2d(1024, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = Conv2d(512, 512, kernel_size=1, stride=1, padding=0)
Exemplo n.º 19
0
def conv_dw_in(inp, oup, stride, dilation=1):
  """Depthwise-separable conv: depthwise 3x3 + pointwise 1x1, InstanceNorm, LeakyReLU.

  Args:
      inp: input channel count (also the depthwise group count).
      oup: output channel count of the pointwise conv.
      stride: stride of the depthwise conv.
      dilation: dilation of the depthwise conv; padding grows with it to keep
          "same"-style geometry for the 3x3 kernel.
  """
  pad = 1 + (dilation > 0) * (dilation - 1)
  depthwise = nn.Conv2d(inp, inp, 3, stride, pad, dilation=dilation, groups=inp, bias=False)
  pointwise = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)
  return nn.Sequential(
    depthwise,
    pointwise,
    InstanceNorm2d(oup, eps=1e-05, momentum=0.1),
    nn.LeakyReLU(inplace=True, negative_slope=0.01),
  )
Exemplo n.º 20
0
    def __init__(self,
                 in_,
                 out_,
                 stride=(1, 1),
                 k=3,
                 activation=ReLU,
                 dropout=0.4):
        """Two same-padded convs plus a strided 3x3 conv, with norm and mixed dropout.

        Args:
            in_: input channel count.
            out_: output channel count.
            stride: stride of the third conv (spatial reduction step).
            k: kernel size of the first two convs (padded with k // 2 to keep size).
            activation: activation class, instantiated once and shared.
            dropout: MixDropout base probability (the 2d variant uses half of it).
        """
        super(ConvBlock, self).__init__()

        self.activation = activation()
        self.conv1 = Conv2d(in_channels=in_,
                            out_channels=out_,
                            kernel_size=k,
                            padding=k // 2)
        self.conv2 = Conv2d(in_channels=out_,
                            out_channels=out_,
                            kernel_size=k,
                            padding=k // 2)
        self.conv3 = Conv2d(out_,
                            out_,
                            kernel_size=(3, 3),
                            padding=(1, 1),
                            stride=stride)
        # NOTE(review): Keras-style eps/momentum values; momentum has no effect
        # with track_running_stats=False — confirm this is intentional.
        self.norm_layer = InstanceNorm2d(out_,
                                         eps=0.001,
                                         momentum=0.99,
                                         track_running_stats=False)
        self.dropout = MixDropout(dropout_proba=dropout,
                                  dropout2d_proba=dropout / 2)
Exemplo n.º 21
0
    def __init__(self, in_channels, ndf=64, n_layers=3):
        """Patch-style convolutional discriminator ending in a 1-channel score map.

        Args:
            in_channels: channels of the input image.
            ndf: base filter count; doubled each stride-2 layer, capped at 8x.
            n_layers: number of intermediate stride-2 conv layers.
        """
        super(Discriminator, self).__init__()

        modelList = []
        kernel_size = 4
        padding = int(np.ceil((kernel_size - 1) / 2))
        # Stem: stride-2 conv + LeakyReLU, no normalization on the first layer.
        modelList.append(
            Conv2d(out_channels=ndf,
                   kernel_size=kernel_size,
                   stride=2,
                   padding=padding,
                   in_channels=in_channels))
        modelList.append(LeakyReLU(0.2))

        # Downsampling trunk: channel count doubles per layer, capped at ndf * 8.
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            modelList.append(
                Conv2d(out_channels=ndf * nf_mult,
                       kernel_size=kernel_size,
                       stride=2,
                       padding=padding,
                       in_channels=ndf * nf_mult_prev))
            modelList.append(
                InstanceNorm2d(num_features=ndf * nf_mult, affine=True))
            modelList.append(LeakyReLU(0.2))

        # Final stride-1 layer then a 1-channel output conv (patch scores).
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        modelList.append(
            Conv2d(out_channels=ndf * nf_mult,
                   kernel_size=kernel_size,
                   stride=1,
                   padding=padding,
                   in_channels=ndf * nf_mult_prev))
        modelList.append(
            InstanceNorm2d(num_features=ndf * nf_mult, affine=True))
        modelList.append(LeakyReLU(0.2))
        modelList.append(
            Conv2d(out_channels=1,
                   kernel_size=kernel_size,
                   stride=1,
                   padding=padding,
                   in_channels=ndf * nf_mult))

        self.model = nn.Sequential(*modelList)
Exemplo n.º 22
0
    def __init__(self, in_ch, out_ch):
        """Upsampling stage: 2x nearest upsample, asymmetric pad, 4x4 conv, ReLU, norm."""
        super(Upward, self).__init__()

        stages = [
            UpsamplingNearest2d(scale_factor=2),
            # pad (left=1, right=2, top=1, bottom=2) so the 4x4 stride-1 conv
            # preserves the upsampled spatial size
            ZeroPad2d((1, 2, 1, 2)),
            Conv2d(in_ch, out_ch, kernel_size=4, stride=1),
            ReLU(inplace=True),
            InstanceNorm2d(out_ch),
        ]
        self.depool = Sequential(*stages)
Exemplo n.º 23
0
 def __init__(self, channels: Channels) -> None:
     """Residual block: two 3x3 ConvLayers with affine InstanceNorm and ReLU.

     ``Channels`` is a literal-typed channel count used only by the type
     checker; the block preserves the channel count (stride 1 throughout).
     """
     super(ResidualBlock, self).__init__()
     self.conv1: ConvLayer[Channels, Channels, L[3],
                           L[1]] = ConvLayer(channels,
                                             channels,
                                             kernel_size=3,
                                             stride=1)
     self.in1: InstanceNorm2d[Channels] = InstanceNorm2d(channels,
                                                         affine=True)
     self.conv2: ConvLayer[Channels, Channels, L[3],
                           L[1]] = ConvLayer(channels,
                                             channels,
                                             kernel_size=3,
                                             stride=1)
     self.in2: InstanceNorm2d[Channels] = InstanceNorm2d(channels,
                                                         affine=True)
     self.relu = ReLU()
Exemplo n.º 24
0
 def __init__(self, channels_in, channels_out, acti):
     """3x3 reflect-padded conv -> InstanceNorm -> supplied activation.

     Args:
         channels_in: input channel count.
         channels_out: output channel count.
         acti: activation module instance appended after the norm.
     """
     super(Conv33, self).__init__()
     self.conv_33 = Sequential(
         Conv2d(channels_in,
                channels_out,
                3,
                padding=1,
                padding_mode='reflect'), InstanceNorm2d(channels_out), acti)
     # (removed a dead trailing `pass` statement)
Exemplo n.º 25
0
 def __init__(self,
              num_features,
              alpha_in=0.5,
              alpha_out=0.5,
              eps=1e-5,
              momentum=0.1,
              affine=True):
     """InstanceNorm over a high/low-frequency channel split (octave-style pair).

     The channel total is split into ``int(num_features * (1 - alpha_out))``
     high-frequency channels and the low-frequency remainder, each normalized
     by its own InstanceNorm2d. ``alpha_in`` is accepted for signature
     symmetry but is unused in this constructor.
     """
     super(_InstanceNorm2d, self).__init__()
     hf_ch = int(num_features * (1 - alpha_out))
     lf_ch = num_features - hf_ch
     self.inh = InstanceNorm2d(hf_ch,
                               eps=eps,
                               momentum=momentum,
                               affine=affine)
     self.inl = InstanceNorm2d(lf_ch,
                               eps=eps,
                               momentum=momentum,
                               affine=affine)
Exemplo n.º 26
0
    def __init__(self, in_channels, ndf=32, n_layers=3, multilayer=False, IN=False):
        """Patch-style discriminator with two output heads (shallow and deep).

        Args:
            in_channels: channels of the input image.
            ndf: base filter count; doubled per layer, capped at 4x.
            n_layers: number of stride-2 trunk layers.
            multilayer: stored flag, presumably selects which head(s) forward returns.
            IN: use InstanceNorm2d instead of BatchNorm2d.
        """
        super(Discriminator, self).__init__()
        
        modelList = []    
        outlist1 = []
        outlist2 = []
        kernel_size = 4
        padding = int(np.ceil((kernel_size - 1)/2))
        # NOTE(review): trunk convs hard-code padding=2, which equals the
        # computed `padding` for kernel_size=4 — confirm if kernel_size changes.
        modelList.append(Conv2d(out_channels=ndf, kernel_size=kernel_size, stride=2,
                              padding=2, in_channels=in_channels))
        modelList.append(LeakyReLU(0.2))

        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 4)
            modelList.append(Conv2d(out_channels=ndf * nf_mult, kernel_size=kernel_size, stride=2,
                                  padding=2, in_channels=ndf * nf_mult_prev))
            if IN:
                modelList.append(InstanceNorm2d(num_features=ndf * nf_mult))
            else:
                modelList.append(BatchNorm2d(num_features=ndf * nf_mult, track_running_stats=True))
            modelList.append(LeakyReLU(0.2))

        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 4)
        # Head 1: single stride-1 conv to a 1-channel patch score map.
        outlist1.append(Conv2d(out_channels=1, kernel_size=kernel_size, stride=1,
                              padding=padding, in_channels=ndf * nf_mult_prev))
        
        # Head 2: one extra conv/norm/activation stage before the 1-channel output.
        outlist2.append(Conv2d(out_channels=ndf * nf_mult, kernel_size=kernel_size, stride=1,
                              padding=padding, in_channels=ndf * nf_mult_prev))
        if IN:
            outlist2.append(InstanceNorm2d(num_features=ndf * nf_mult))
        else:
            outlist2.append(BatchNorm2d(num_features=ndf * nf_mult, track_running_stats=True))
        outlist2.append(LeakyReLU(0.2))
        outlist2.append(Conv2d(out_channels=1, kernel_size=kernel_size, stride=1,
                              padding=padding, in_channels=ndf * nf_mult))
        self.model = nn.Sequential(*modelList)
        self.out1 = nn.Sequential(*outlist1)
        self.out2 = nn.Sequential(*outlist2)
        self.multilayer = multilayer
Exemplo n.º 27
0
    def __init__(self, in_ch, out_ch, stride=2, normalise=True):
        """Downsampling stage: asymmetric zero-pad, 4x4 conv, LeakyReLU, optional norm."""
        super(ConvBlock, self).__init__()

        stages = [
            ZeroPad2d((1, 2, 1, 2)),
            Conv2d(in_ch, out_ch, kernel_size=4, stride=stride),
            LeakyReLU(negative_slope=0.2, inplace=True),
        ]
        self.pool = Sequential(*stages)

        if normalise:
            # registered under a stable name so it can be located/stripped later
            self.pool.add_module('instance_norm', InstanceNorm2d(out_ch))
Exemplo n.º 28
0
def DownsampleBlock(in_channels: int, initialization_method='he') -> Module:
    """Halve spatial size and double channels.

    Builds: weight-initialized 4x4 stride-2 conv (no bias) -> affine
    InstanceNorm -> in-place ReLU.

    Args:
        in_channels: input channel count; output has twice as many.
        initialization_method: name passed to create_init_function.
    """
    init = create_init_function(initialization_method)
    out_channels = in_channels * 2
    conv = init(
        Conv2d(in_channels,
               out_channels,
               kernel_size=4,
               stride=2,
               padding=1,
               bias=False))
    return Sequential(conv, InstanceNorm2d(out_channels, affine=True),
                      ReLU(inplace=True))
Exemplo n.º 29
0
    def __init__(self, input_nc=3):
        """FPN-based style encoder: per-level map2style heads, index embedding, GRU.

        Args:
            input_nc: number of input image channels for the FPN backbone.
        """
        super(GradualStyleEncoder2, self).__init__()

        self.fpn = FPN101(input_nc)

        # map2style heads reduce each pyramid level spatially:
        # head 1 applies two stride-2 convs, head 2 one, head 3 five.
        self.map2style1 = Sequential(
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
        )

        self.map2style2 = Sequential(
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
        )

        self.map2style3 = Sequential(
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
            Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
            LeakyReLU(inplace=True),
            InstanceNorm2d(512),
        )

        # Embed 18 style indices (64-dim) and fuse with the 512-dim feature.
        self.embed = Embedding(18, 64)
        self.expand_layer = Sequential(
            nn.Linear(512+64, 512),
            nn.LeakyReLU(inplace=True),
        )

        # Bidirectional 2-layer GRU over the style sequence (hence 1024 -> 512 below).
        self.rnn = GRU(512, 512, num_layers=2, batch_first=True, bidirectional=True)

        self.last = Sequential(
            Linear(1024, 512),
            LeakyReLU(inplace=True),
            Linear(512, 512)
        )
Exemplo n.º 30
0
 def __init__(self, n_class=33, ns=0.2, dp=0.1, seg_len=128):
     """Patch discriminator over spectrogram-like input with a classification head.

     Args:
         n_class: number of classes for the auxiliary classifier head.
         ns: negative slope stored for LeakyReLU use elsewhere.
         dp: dropout probability shared by all Dropout2d layers.
         seg_len: input segment length; only 128 and 64 are supported.

     Raises:
         NotImplementedError: if seg_len is not 128 or 64.
     """
     super(PatchDiscriminator, self).__init__()
     self.ns = ns
     # Five stride-2 5x5 convs downsample, then a 1x1 conv compresses to 32 channels.
     self.conv1 = Conv2d(1, 64, kernel_size=5, stride=2)
     self.conv2 = Conv2d(64, 128, kernel_size=5, stride=2)
     self.conv3 = Conv2d(128, 256, kernel_size=5, stride=2)
     self.conv4 = Conv2d(256, 512, kernel_size=5, stride=2)
     self.conv5 = Conv2d(512, 512, kernel_size=5, stride=2)
     self.conv6 = Conv2d(512, 32, kernel_size=1)
     # Output kernels sized to consume the remaining feature map exactly,
     # which depends on the input segment length.
     if seg_len == 128:
         self.conv7 = Conv2d(32, 1, kernel_size=(17, 4))
         self.conv_classify = Conv2d(32, n_class, kernel_size=(17, 4))
     elif seg_len == 64:
         self.conv7 = Conv2d(32, 1, kernel_size=(17, 2))
         self.conv_classify = Conv2d(32, n_class, kernel_size=(17, 2))
     else:
         raise NotImplementedError(
             "Segement length {} is not supported!".format(seg_len))
     self.drop1 = Dropout2d(p=dp)
     self.drop2 = Dropout2d(p=dp)
     self.drop3 = Dropout2d(p=dp)
     self.drop4 = Dropout2d(p=dp)
     self.drop5 = Dropout2d(p=dp)
     self.drop6 = Dropout2d(p=dp)
     # One InstanceNorm per conv stage, sized from each conv's out_channels.
     self.ins_norm1 = InstanceNorm2d(self.conv1.out_channels)
     self.ins_norm2 = InstanceNorm2d(self.conv2.out_channels)
     self.ins_norm3 = InstanceNorm2d(self.conv3.out_channels)
     self.ins_norm4 = InstanceNorm2d(self.conv4.out_channels)
     self.ins_norm5 = InstanceNorm2d(self.conv5.out_channels)
     self.ins_norm6 = InstanceNorm2d(self.conv6.out_channels)