Example #1
    def __init__(self, in_nc, out_nc, nf=32, nb=8, gcval=32, upscale=4):
        super(RRDB_Net, self).__init__()
        n_upscale = int(math.log(upscale, 2))
        if upscale == 3:
            n_upscale = 1
        fea_conv = conv_layer(in_nc,
                              nf,
                              norm_type=NormType.Weight,
                              use_activ=False)
        rb_blocks = [RRDB(nf, gc=gcval) for _ in range(nb)]
        LR_conv = conv_layer(nf, nf, leaky=0.2)

        if upscale == 3:
            # keep this as a list so *upsampler below unpacks in both branches
            upsampler = [PixelShuffle_ICNR(nf, blur=True, leaky=0.02, scale=3)]
        else:
            upsampler = [
                PixelShuffle_ICNR(nf, blur=True, leaky=0.02)
                for _ in range(n_upscale)
            ]

        HR_conv0 = conv_layer(nf, nf, leaky=0.02, norm_type=NormType.Weight)
        HR_conv1 = conv_layer(nf,
                              out_nc,
                              leaky=0.02,
                              norm_type=NormType.Weight,
                              use_activ=False)

        self.model = sequential(
            fea_conv,
            ShortcutBlock(sequential(*rb_blocks, LR_conv)),
            *upsampler, HR_conv0, HR_conv1
        )
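A minimal usage sketch for this example; RRDB_Net is the enclosing class (its name comes from the super() call), and RRDB, ShortcutBlock, sequential, conv_layer, and PixelShuffle_ICNR are assumed to come from the surrounding fastai-style codebase, with forward presumably applying self.model:

import torch

# hypothetical: 4x super-resolution for 3-channel images
net = RRDB_Net(in_nc=3, out_nc=3, nf=32, nb=8, gcval=32, upscale=4)
with torch.no_grad():
    out = net(torch.randn(1, 3, 32, 32))  # expected shape: (1, 3, 128, 128)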
Example #2
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.print_shape = True
        self.decode = True

        self.encoder = nn.Sequential(
            conv_layer(3, 8),  # 8, 32, 32
            nn.AvgPool2d(2, ceil_mode=True),  # 8, 16, 16
            conv_layer(8, 8),  # 8, 16, 16
            nn.AvgPool2d(2, ceil_mode=True),  # 8, 8, 8 -> 512
            Flatten(),
            nn.Linear(8 * 8 * 8, 4))
        self.decoder = nn.Sequential(
            nn.Linear(4, 8 * 8 * 8),
            ResizeBatch(8, 8, 8),
            PixelShuffle_ICNR(8, 8),  # 8, 16, 16
            nn.ReLU(True),
            conv_layer(8, 8),  # 8, 16, 16
            PixelShuffle_ICNR(8, 8),  # 8, 32, 32
            conv_layer(8, 3))  # 3, 32, 32
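A brief usage sketch for the autoencoder above, assuming 32x32 RGB inputs as the shape comments indicate:

import torch

ae = Autoencoder()
x = torch.randn(16, 3, 32, 32)   # batch of 32x32 RGB images
z = ae.encoder(x)                # (16, 4) latent codes
x_hat = ae.decoder(z)            # (16, 3, 32, 32) reconstructions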
Example #3
    def _head_subnet(self,
                     n_classes,
                     n_anchors,
                     final_bias=0.,
                     n_conv=4,
                     chs=256):
        layers = [conv_layer(chs, chs, bias=True) for _ in range(n_conv)]
        layers += [conv2d(chs, n_classes * n_anchors, bias=True)]
        # RetinaNet-style init: zero the final weights and set the bias to
        # final_bias so initial predictions reflect a chosen class prior
        layers[-1].bias.data.zero_().add_(final_bias)
        layers[-1].weight.data.fill_(0)
        return nn.Sequential(*layers)
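This mirrors the RetinaNet head initialization: zeroed final weights plus a bias prior. A hedged usage sketch; the values are illustrative (sigmoid(-4.0) is roughly 0.018, i.e. a low initial foreground probability):

# hypothetical calls from the detector's __init__:
classifier = self._head_subnet(n_classes=21, n_anchors=9, final_bias=-4.0)
box_regressor = self._head_subnet(n_classes=4, n_anchors=9)  # 4 box coords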
Example #4
    def __init__(self, nc, gc=32):
        super().__init__()
        # gc: growth channel, i.e. intermediate channels
        self.conv1 = conv_layer(nc, gc, norm_type=NormType.Weight, leaky=0.02)
        self.conv2 = conv_layer(nc + gc,
                                gc,
                                norm_type=NormType.Weight,
                                leaky=0.02)
        self.conv3 = conv_layer(nc + 2 * gc,
                                gc,
                                norm_type=NormType.Weight,
                                leaky=0.02)
        self.conv4 = conv_layer(nc + 3 * gc,
                                gc,
                                norm_type=NormType.Weight,
                                leaky=0.02)
        # turn off activation?
        self.conv5 = conv_layer(nc + 4 * gc,
                                nc,
                                norm_type=NormType.Weight,
                                leaky=0.02,
                                use_activ=False)
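The constructor above wires an ESRGAN-style residual dense block: each conv sees the block input concatenated with all previous conv outputs. For reference, a plausible forward matching these layers; this is a sketch assuming torch is imported, and the 0.2 residual scaling follows the ESRGAN convention rather than anything shown in this example:

    def forward(self, x):
        # dense connectivity: each conv sees the input plus all prior outputs
        x1 = self.conv1(x)
        x2 = self.conv2(torch.cat((x, x1), 1))
        x3 = self.conv3(torch.cat((x, x1, x2), 1))
        x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        return x5 * 0.2 + x  # scaled residual connection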
Example #5
    def __init__(self,
                 encoder=None,
                 n_classes=2,
                 last_filters=32,
                 imsize=(256, 256),
                 y_range=None,
                 **kwargs):

        self.n_classes = n_classes

        layers = nn.ModuleList()

        # Encoder
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        layers.append(encoder)

        x = dummy_eval(encoder, imsize).detach()

        self.hc_hooks = []
        hc_c = []

        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
                                    conv_layer(ni * 2, ni)).eval()
        x = middle_conv(x)
        layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])

        # self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        # hc_c = [x.shape[1]]

        # Decoder
        n_filters = [64, 128, 256, 512]
        n = len(n_filters)
        is_deconv = True

        for i, idx in enumerate(sfs_idxs[:-1]):
            in_c = (n_filters[n - i - 1] + n_filters[n - i - 2]) // 2
            out_c = int(sfs_szs[idx][1])

            dec_block = DecoderBlock(in_c, out_c, self.sfs[i], is_deconv,
                                     True).eval()
            layers.append(dec_block)

            x = dec_block(x)

            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        ni = x.shape[1]

        layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))

        layers.append(Hcolumns(self.hc_hooks, hc_c))

        fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
        layers.append(fin_block)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
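A hedged construction sketch, assuming the class subclasses fastai v1's SequentialEx (which would explain why attributes are assigned before super().__init__ and why the final call passes *layers). The class name HypercolumnsUnet is a placeholder, since the snippet omits the class header; create_body is fastai v1's helper for cutting a pretrained backbone into an encoder:

from fastai.vision import models, create_body

# HypercolumnsUnet is a placeholder name -- the snippet omits the class header
encoder = create_body(models.resnet34, pretrained=True)
model = HypercolumnsUnet(encoder=encoder, n_classes=2, imsize=(256, 256))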
Example #6
File: critics.py Project: praduca/DeOldify
def _conv(ni: int, nf: int, ks: int = 3, stride: int = 1, **kwargs):
    return conv_layer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs)
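A short usage sketch; _conv_args is a module-level dict of shared conv_layer keyword defaults in critics.py whose contents are not shown in this excerpt, so the values below are illustrative:

# hypothetical: a strided conv for downsampling inside the critic
down = _conv(ni=64, nf=128, ks=4, stride=2)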