Example #1
    def __init__(self, in_nc, out_nc, nf=32, nb=8, gcval=32, upscale=4):
        super(RRDB_Net, self).__init__()
        n_upscale = int(math.log(upscale, 2))
        if upscale == 3:
            n_upscale = 1
        fea_conv = conv_layer(in_nc,
                              nf,
                              norm_type=NormType.Weight,
                              use_activ=False)
        rb_blocks = [RRDB(nf, gc=gcval) for _ in range(nb)]
        LR_conv = conv_layer(nf, nf, leaky=0.2)

        if upscale == 3:
            # a single 3x shuffle; keep it in a list so it unpacks below
            upsampler = [PixelShuffle_ICNR(nf, blur=True, leaky=0.02, scale=3)]
        else:
            upsampler = [
                PixelShuffle_ICNR(nf, blur=True, leaky=0.02)
                for _ in range(n_upscale)
            ]

        HR_conv0 = conv_layer(nf, nf, leaky=0.02, norm_type=NormType.Weight)
        HR_conv1 = conv_layer(nf,
                              out_nc,
                              leaky=0.02,
                              norm_type=NormType.Weight,
                              use_activ=False)

        self.model = sequential(
            fea_conv,
            ShortcutBlock(sequential(*rb_blocks, LR_conv)),
            *upsampler, HR_conv0, HR_conv1
        )
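Every example on this page leans on fastai's PixelShuffle_ICNR, which pairs a convolution with sub-pixel shuffling and ICNR weight init. A minimal plain-PyTorch sketch of the idea (the icnr_ helper below is illustrative, not fastai's exact implementation):

import torch
import torch.nn as nn

def icnr_(weight, scale=2):
    # Fill the conv weight so each group of scale**2 output filters is
    # identical; Conv2d + PixelShuffle then starts out close to
    # nearest-neighbour upsampling, which avoids checkerboard artifacts.
    out_c, in_c, kh, kw = weight.shape
    sub = nn.init.kaiming_normal_(
        weight.new_empty(out_c // scale ** 2, in_c, kh, kw))
    weight.data.copy_(sub.repeat_interleave(scale ** 2, dim=0))

nf, scale = 32, 2
shuf = nn.Sequential(nn.Conv2d(nf, nf * scale ** 2, 3, padding=1),
                     nn.PixelShuffle(scale))
icnr_(shuf[0].weight, scale)
print(shuf(torch.randn(1, nf, 16, 16)).shape)  # torch.Size([1, 32, 32, 32])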
Example #2
    def __init__(self,
                 up_in_c: int,
                 x_in_c: int,
                 nf: int = None,
                 blur: bool = False,
                 self_attention: bool = False,
                 padding: int = 1,
                 **kwargs):
        super().__init__()
        self.shuf = PixelShuffle_ICNR(up_in_c,
                                      up_in_c // 2,
                                      blur=blur,
                                      **kwargs)
        self.bn = nn.BatchNorm2d(x_in_c)
        ni = up_in_c // 2 + x_in_c
        nf = nf if nf is not None else max(up_in_c // 2, 32)
        self.conv1 = ConvLayer(ni,
                               nf,
                               norm_type=None,
                               padding=padding,
                               **kwargs)
        self.conv2 = ConvLayer(
            nf,
            nf,
            norm_type=None,
            padding=padding,
            xtra=SelfAttention(nf) if self_attention else None,
            **kwargs)
        self.relu = nn.ReLU(inplace=True)
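A U-Net block like this typically upsamples the decoder features with PixelShuffle_ICNR and concatenates them with a batch-normed encoder activation, which is why ni = up_in_c // 2 + x_in_c. A quick shape check with plain-PyTorch stand-ins (channel counts here are hypothetical):

import torch
import torch.nn as nn

up_in_c, x_in_c = 64, 32  # hypothetical channel counts
# stand-in for PixelShuffle_ICNR(up_in_c, up_in_c // 2): conv + shuffle
shuf = nn.Sequential(nn.Conv2d(up_in_c, (up_in_c // 2) * 4, 3, padding=1),
                     nn.PixelShuffle(2))
up = shuf(torch.randn(1, up_in_c, 8, 8))    # 1 x 32 x 16 x 16
skip = torch.randn(1, x_in_c, 16, 16)       # hooked encoder activation
cat = torch.cat([up, nn.BatchNorm2d(x_in_c)(skip)], dim=1)
print(cat.shape)  # torch.Size([1, 64, 16, 16]) -> fed to conv1/conv2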
Example #3
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.print_shape = True
        self.decode = True

        self.encoder = nn.Sequential(
            conv_layer(3, 8),  # 8, 32, 32
            nn.AvgPool2d(2, ceil_mode=True),  # 8, 16, 16
            conv_layer(8, 8),  # 8, 16, 16
            nn.AvgPool2d(2, ceil_mode=True),  # 8, 8, 8 -> 512
            Flatten(),
            nn.Linear(8 * 8 * 8, 4))
        self.decoder = nn.Sequential(
            nn.Linear(4, 8 * 8 * 8),
            ResizeBatch(8, 8, 8),
            PixelShuffle_ICNR(8, 8),  # 8, 16, 16
            nn.ReLU(True),
            conv_layer(8, 8),  # 8, 16, 16
            PixelShuffle_ICNR(8, 8),  # 8, 32, 32
            conv_layer(8, 3))  # 3, 32, 32
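To make the shape comments concrete, here is the same decoder path written with plain-PyTorch stand-ins (a sketch only; fastai's conv_layer and PixelShuffle_ICNR add activation and init details omitted here):

import torch
import torch.nn as nn

decoder = nn.Sequential(
    nn.Linear(4, 8 * 8 * 8),
    nn.Unflatten(1, (8, 8, 8)),          # like ResizeBatch(8, 8, 8)
    nn.Conv2d(8, 8 * 4, 3, padding=1),   # PixelShuffle_ICNR(8, 8) ...
    nn.PixelShuffle(2),                  # ... -> 8 x 16 x 16
    nn.ReLU(True),
    nn.Conv2d(8, 8, 3, padding=1),
    nn.Conv2d(8, 8 * 4, 3, padding=1),   # second PixelShuffle_ICNR(8, 8) ...
    nn.PixelShuffle(2),                  # ... -> 8 x 32 x 32
    nn.Conv2d(8, 3, 3, padding=1),       # -> 3 x 32 x 32
)
print(decoder(torch.randn(2, 4)).shape)  # torch.Size([2, 3, 32, 32])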
Example #4
    def __init__(self,
                 encoder=None,
                 n_classes=2,
                 last_filters=32,
                 imsize=(256, 256),
                 y_range=None,
                 **kwargs):

        self.n_classes = n_classes

        layers = nn.ModuleList()

        # Encoder
        sfs_szs = model_sizes(encoder, size=imsize)
        sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        layers.append(encoder)

        x = dummy_eval(encoder, imsize).detach()

        self.hc_hooks = []
        hc_c = []

        ni = sfs_szs[-1][1]
        middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
                                    conv_layer(ni * 2, ni)).eval()
        x = middle_conv(x)
        layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])

        # self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        # hc_c = [x.shape[1]]

        # Decoder
        n_filters = [64, 128, 256, 512]
        n = len(n_filters)
        is_deconv = True

        for i, idx in enumerate(sfs_idxs[:-1]):
            in_c = (n_filters[n - i - 1] + n_filters[n - i - 2]) // 2
            out_c = int(sfs_szs[idx][1])

            dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i], is_deconv,
                                    True).eval()
            layers.append(dec_bloc)

            x = dec_bloc(x)

            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

        ni = x.shape[1]

        layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))

        layers.append(Hcolumns(self.hc_hooks, hc_c))

        fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
        layers.append(fin_block)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
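The hc_hooks/Hcolumns pair implements hypercolumns: activations captured by the hooks are upsampled to the output resolution and concatenated along channels, which is why FinalBlock receives ni * (len(hc_c) + 1) inputs. A sketch of the core operation (shapes here are hypothetical; the real class reads activations from its hooks):

import torch
import torch.nn.functional as F

feats = [torch.randn(1, c, s, s) for c, s in [(256, 32), (128, 64), (64, 128)]]
hc = torch.cat([F.interpolate(f, size=(128, 128), mode='bilinear',
                              align_corners=False) for f in feats], dim=1)
print(hc.shape)  # torch.Size([1, 448, 128, 128])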
Example #5
    def __init__(self,
                 encoder,
                 n_classes,
                 img_size,
                 blur=False,
                 blur_final=True,
                 self_attention=False,
                 y_range=None,
                 bottle=False,
                 act_cls=defaults.activation,
                 init=nn.init.kaiming_normal_,
                 norm_type=None,
                 include_encoder=True,
                 include_middle_conv=True,
                 **kwargs):
        imsize = img_size
        sizes = model_sizes(encoder, size=imsize)
        sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
        # self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
        x = dummy_eval(encoder, imsize).detach()

        layers = []
        if include_encoder:
            layers.append(encoder)

        if include_middle_conv:
            ni = sizes[-1][1]
            middle_conv = (nn.Sequential(
                ConvLayer(ni,
                          ni * 2,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs),
                ConvLayer(ni * 2,
                          ni,
                          act_cls=act_cls,
                          norm_type=norm_type,
                          **kwargs))).eval()
            x = middle_conv(x)
            layers += [BatchNorm(ni), nn.ReLU(), middle_conv]

        for i, idx in enumerate(sz_chg_idxs):
            not_final = (i != len(sz_chg_idxs) - 1)
            up_in_c = int(x.shape[1])
            do_blur = blur and (not_final or blur_final)
            sa = self_attention and (i == len(sz_chg_idxs) - 3)
            noskip_unet_block = NoSkipUnetBlock(up_in_c,
                                                final_div=not_final,
                                                blur=do_blur,
                                                self_attention=sa,
                                                act_cls=act_cls,
                                                init=init,
                                                norm_type=norm_type,
                                                **kwargs).eval()
            layers.append(noskip_unet_block)
            x = noskip_unet_block(x)

        ni = x.shape[1]
        if imsize != sizes[0][-2:]:
            layers.append(
                PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))

        layers += [
            ConvLayer(ni,
                      n_classes,
                      ks=1,
                      act_cls=None,
                      norm_type=norm_type,
                      **kwargs)
        ]

        if include_middle_conv:
            apply_init(nn.Sequential(layers[3], layers[-2]), init)
            apply_init(nn.Sequential(layers[2]), init)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
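Like fastai's DynamicUnet, this constructor sizes its decoder by probing the encoder with a dummy batch (model_sizes / dummy_eval) instead of hard-coding shapes. The probing step in miniature (toy encoder, illustrative only):

import torch
import torch.nn as nn

encoder = nn.Sequential(
    nn.Conv2d(3, 16, 3, stride=2, padding=1),
    nn.Conv2d(16, 32, 3, stride=2, padding=1),
    nn.Conv2d(32, 64, 3, stride=2, padding=1),
)
x, sizes = torch.randn(1, 3, 64, 64), []
for layer in encoder:
    x = layer(x)
    sizes.append(tuple(x.shape))
print(sizes)  # [(1, 16, 32, 32), (1, 32, 16, 16), (1, 64, 8, 8)]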
Example #6
    def __init__(self,
                 scale,
                 n_resblocks,
                 n_feats,
                 res_scale,
                 n_colors_in=3,
                 n_colors_out=1):
        super().__init__()

        # hyper-params
        kernel_size = 3
        act = nn.ReLU(True)
        wn = torch.nn.utils.weight_norm  # weight-normalise a layer

        # mean, std = [0.0020], [0.0060]  # imagenet_stats
        # self.rgb_mean = torch.FloatTensor(mean).view([1, n_colors_in, 1, 1])
        # self.rgb_std = torch.FloatTensor(std).view([1, n_colors_in, 1, 1])

        # define head module
        head = []
        head.append(
            wn(
                nn.Conv2d(n_colors_in,
                          n_feats,
                          kernel_size,
                          padding=kernel_size // 2)))

        # define body module
        body = []
        for i in range(n_resblocks):
            body.append(
                Block(n_feats,
                      kernel_size,
                      act=act,
                      res_scale=res_scale,
                      wn=wn))

        # define tail module
        tail = []
        # convert from n_color_in to n_color_out
        #tail.append(wn(nn.Conv2d(n_feats, n_colors_out, kernel_size, padding=kernel_size//2)))

        tail.append(PixelShuffle_ICNR(n_feats, n_colors_out, scale, blur=True))

        skip = []
        skip.append(
            wn(
                nn.Conv2d(n_colors_in,
                          n_colors_out,
                          kernel_size,
                          padding=kernel_size // 2)))
        skip.append(
            PixelShuffle_ICNR(n_colors_in, n_colors_out, scale, blur=True))

        # make object members
        self.head = nn.Sequential(*head)
        self.body = nn.Sequential(*body)
        self.tail = nn.Sequential(*tail)
        self.skip = nn.Sequential(*skip)
        self.pad = nn.ReplicationPad2d((1, 0, 1, 0))
        self.blur = nn.AvgPool2d(2, stride=1)
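The snippet only shows __init__; WDSR-style nets typically add a shallow skip branch to a deep head/body/tail branch. A self-contained toy version (the forward here is an assumption in that spirit; Block and PixelShuffle_ICNR are reduced to plain ops):

import torch
import torch.nn as nn

class TinyWDSR(nn.Module):
    def __init__(self, scale=2, n_feats=16):
        super().__init__()
        self.head = nn.Conv2d(3, n_feats, 3, padding=1)
        self.body = nn.Sequential(nn.Conv2d(n_feats, n_feats, 3, padding=1),
                                  nn.ReLU(True))
        self.tail = nn.Sequential(nn.Conv2d(n_feats, scale ** 2, 3, padding=1),
                                  nn.PixelShuffle(scale))  # -> 1 channel
        self.skip = nn.Sequential(nn.Conv2d(3, scale ** 2, 3, padding=1),
                                  nn.PixelShuffle(scale))  # -> 1 channel

    def forward(self, x):
        # deep branch plus shallow skip branch, both upscaled
        return self.tail(self.body(self.head(x))) + self.skip(x)

print(TinyWDSR()(torch.randn(1, 3, 24, 24)).shape)  # torch.Size([1, 1, 48, 48])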
Example #7
    def __init__(self,
                 encoder=None,
                 n_classes=2,
                 last_filters=32,
                 imsize=(256, 256),
                 y_range=None,
                 **kwargs):

        self.n_classes = n_classes

        layers = nn.ModuleList()

        # Encoder
        sfs_idxs = [4, 3, 2, 1, 0]
        self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
        layers.append(encoder)

        x = dummy_eval(encoder, imsize).detach()

        self.hc_hooks = []
        hc_c = []

        # ni = sfs_szs[-1][1]
        # middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
        #                             conv_layer(ni * 2, ni)).eval()
        # x = middle_conv(x)
        # layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])

        # Decoder
        n_filters = [128, 256, 512, 1024, 2048]
        n = len(n_filters)
        is_deconv = True

        for i, idx in enumerate(sfs_idxs[:-1]):
            if i == 0:
                in_c, out_c = n_filters[n - i - 1], n_filters[n - i - 2]
            else:
                in_c, out_c = 2 * n_filters[n - i - 1], n_filters[n - i - 2]

            # the deepest decoder block (i == 3) keeps its resolution
            scale = i != 3

            dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i + 1], is_deconv,
                                    scale).eval()
            layers.append(dec_bloc)

            x = dec_bloc(x)

            self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
            hc_c.append(x.shape[1])

            # print(x.size())

        # last decoder
        in_c, out_c = n_filters[0] + 64, n_filters[0]
        dec_bloc = DecoderBlock(in_c, out_c, None, is_deconv, True).eval()
        layers.append(dec_bloc)

        x = dec_bloc(x)

        self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
        hc_c.append(x.shape[1])

        # print(x.size())

        ni = x.shape[1]

        layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))

        layers.append(Hcolumns(self.hc_hooks, hc_c))

        fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
        layers.append(fin_block)

        if y_range is not None:
            layers.append(SigmoidRange(*y_range))
        super().__init__(*layers)
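Both hypercolumn variants (#4 and #7) optionally clamp the output with SigmoidRange when y_range is given; the underlying mapping is one line:

import torch

# fastai's SigmoidRange squashes outputs into (lo, hi):
def sigmoid_range(x, lo, hi):
    return torch.sigmoid(x) * (hi - lo) + lo

print(sigmoid_range(torch.tensor([-5.0, 0.0, 5.0]), 0.0, 1.0))
# tensor([0.0067, 0.5000, 0.9933])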