Exemplo n.º 1
0
    def __init__(self, arg):
        """Build the PAN network with balanced-attention upsampling stages.

        Args:
            arg: config/namespace object; reads ``scale``, ``in_nc_pan``,
                ``out_nc_pan``, ``nf_pan``, ``unf_pan`` and ``nb_pan``.
        """
        super(PAN_Blanced_attention, self).__init__()
        self.scale = arg.scale
        channels_in = arg.in_nc_pan
        channels_out = arg.out_nc_pan
        feats = arg.nf_pan
        up_feats = arg.unf_pan
        num_blocks = arg.nb_pan

        # Shallow feature extraction.
        self.conv_first = nn.Conv2d(channels_in, feats, 3, 1, 1, bias=True)

        # Trunk: `num_blocks` SCPA blocks followed by a fusion conv.
        scpa_factory = functools.partial(SCPA, nf=feats, reduction=2)
        self.SCPA_trunk = arch_util.make_layer(scpa_factory, num_blocks)
        self.trunk_conv = nn.Conv2d(feats, feats, 3, 1, 1, bias=True)

        # First upsampling branch with balanced attention.
        self.upconv1 = nn.Conv2d(feats, up_feats, 3, 1, 1, bias=True)
        self.att1 = BlancedAttention(up_feats, reduction=8)
        self.HRconv1 = nn.Conv2d(up_feats, up_feats, 3, 1, 1, bias=True)

        # A second upsampling branch exists only for scale == 4.
        if self.scale == 4:
            self.upconv2 = nn.Conv2d(up_feats, up_feats, 3, 1, 1, bias=True)
            self.att2 = BlancedAttention(up_feats, reduction=8)
            self.HRconv2 = nn.Conv2d(up_feats, up_feats, 3, 1, 1, bias=True)

        self.conv_last = nn.Conv2d(up_feats, channels_out, 3, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
    def __init__(self, upscale=4):
        """Build CARN with balanced attention after each cascade fusion conv.

        Args:
            upscale: super-resolution factor (default 4).
        """
        super(CARN_blanced_attention, self).__init__()

        self.scale = upscale
        # Fixed settings (upstream CARN reads these from kwargs).
        use_multi_scale = True
        conv_group = 1

        # RGB mean shift in/out of the network (presumably the DIV2K mean).
        rgb_mean = (0.4488, 0.4371, 0.4040)
        self.sub_mean = ops.MeanShift(rgb_mean, sub=True)
        self.add_mean = ops.MeanShift(rgb_mean, sub=False)

        self.entry = nn.Conv2d(3, 64, 3, 1, 1)

        # Three cascading blocks; each 1x1 fusion is followed by attention.
        self.b1 = Block(64, 64)
        self.b2 = Block(64, 64)
        self.b3 = Block(64, 64)
        self.c1 = nn.Sequential(ops.BasicBlock(64 * 2, 64, 1, 1, 0),
                                BlancedAttention(64))
        self.c2 = nn.Sequential(ops.BasicBlock(64 * 3, 64, 1, 1, 0),
                                BlancedAttention(64))
        self.c3 = nn.Sequential(ops.BasicBlock(64 * 4, 64, 1, 1, 0),
                                BlancedAttention(64))

        self.upsample = ops.UpsampleBlock(64, scale=upscale,
                                          multi_scale=use_multi_scale,
                                          group=conv_group)
        self.exit = nn.Conv2d(64, 3, 3, 1, 1)
Exemplo n.º 3
0
    def __init__(self, in_channels, out_channels, group=1):
        """Cascading residual block capped with balanced attention.

        Args:
            in_channels: input feature channels.
            out_channels: output feature channels.
            group: kept for interface compatibility; no layer built here
                actually uses it.
        """
        super(Block_BlancedAttention, self).__init__()

        # Residual branches at 1x, 2x and 4x channel width.
        self.r1 = ops.ResidualBlock(in_channels, out_channels)
        self.r2 = ops.ResidualBlock(in_channels * 2, out_channels * 2)
        self.r3 = ops.ResidualBlock(in_channels * 4, out_channels * 4)
        # 1x1 block reducing 8x-wide features back to out_channels.
        self.g = ops.BasicBlock(in_channels * 8, out_channels, 1, 1, 0)
        self.ca = BlancedAttention(in_channels)
 def __init__(self, in_channels, distillation_rate=0.25):
     """Information multi-distillation module with balanced attention.

     Args:
         in_channels: number of input feature channels.
         distillation_rate: fraction of channels split off ("distilled")
             at each stage (default 0.25).
     """
     super(IMDModule_blancedattention, self).__init__()
     self.distilled_channels = int(in_channels * distillation_rate)
     self.remaining_channels = int(in_channels - self.distilled_channels)
     # Four 3x3 stages; c2..c4 consume only the remaining (non-distilled) part.
     self.c1 = conv_layer(in_channels, in_channels, 3)
     self.c2 = conv_layer(self.remaining_channels, in_channels, 3)
     self.c3 = conv_layer(self.remaining_channels, in_channels, 3)
     self.c4 = conv_layer(self.remaining_channels, self.distilled_channels, 3)
     self.act = activation('lrelu', neg_slope=0.05)
     # 1x1 fusion back to in_channels.
     self.c5 = conv_layer(in_channels, in_channels, 1)
     # Attention sized for 4x the distilled width.
     self.attention = BlancedAttention(self.distilled_channels * 4,
                                       reduction=16)
    def __init__(self, conv=common.default_conv, n_feats=64):
        """Multi-scale residual block (3x3 / 5x5 branches) with attention.

        Args:
            conv: convolution factory ``conv(in_ch, out_ch, kernel_size)``.
            n_feats: number of feature channels (default 64).
        """
        super(MSRB_blanced_attention, self).__init__()

        small_kernel, large_kernel = 3, 5

        # Two stages per branch; the second stage runs on doubled channels.
        self.conv_3_1 = conv(n_feats, n_feats, small_kernel)
        self.conv_3_2 = conv(n_feats * 2, n_feats * 2, small_kernel)
        self.conv_5_1 = conv(n_feats, n_feats, large_kernel)
        self.conv_5_2 = conv(n_feats * 2, n_feats * 2, large_kernel)
        # 1x1 conv reducing 4x-wide features back to n_feats.
        self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)
        self.relu = nn.ReLU(inplace=True)
        self.attention = BlancedAttention(n_feats)
    def __init__(self,
                 inp,
                 kernel_size=3,
                 bias=True,
                 bn=False,
                 act=nn.ReLU(True),
                 res_scale=1):
        """Residual block (conv-act-conv) followed by balanced attention.

        Args:
            inp: number of feature channels (input == output).
            kernel_size: conv kernel size (default 3).
            bias: whether the convs carry a bias term.
            bn: insert BatchNorm2d after each conv when True.
            act: activation placed after the first conv only.
            res_scale: residual scaling factor stored for the forward pass.

        NOTE(review): ``conv`` is not a parameter here — it must be a
        module-level factory elsewhere in this file; confirm it is in scope.
        """
        super(ResBlock_blanced_attention, self).__init__()

        layers = []
        for stage in range(2):
            layers.append(conv(inp, inp, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(inp))
            if stage == 0:
                layers.append(act)

        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale
        self.attention = BlancedAttention(inp)
    def __init__(self, args):
        """Build the AWSRN network with a balanced-attention module.

        Args:
            args: config/namespace; reads ``scale``, ``n_resblocks_awsrn``,
                ``n_feats_awsrn``, ``res_scale_awsrn``, ``n_awru_awsrn`` and
                ``block_feats_awsrn``.
        """
        super(AWSRN_blanced_attention, self).__init__()
        # hyper-params
        self.args = args
        scale = args.scale
        n_resblocks = args.n_resblocks_awsrn
        n_feats = args.n_feats_awsrn
        kernel_size = 3
        res_scale = args.res_scale_awsrn
        n_awru = args.n_awru_awsrn
        act = nn.ReLU(True)

        def wn(module):
            # Weight normalisation applied to every conv built below.
            return torch.nn.utils.weight_norm(module)

        # RGB mean as a (1, 3, 1, 1) tensor, broadcastable over NCHW input.
        # torch.autograd.Variable is a deprecated no-op wrapper since
        # PyTorch 0.4 — a plain tensor is equivalent.
        self.rgb_mean = torch.tensor([0.4488, 0.4371, 0.4040]).view(1, 3, 1, 1)

        # define head module: 3 -> n_feats
        head = [wn(nn.Conv2d(3, n_feats, 3, padding=3 // 2))]

        # define body module: n_resblocks LFB blocks
        body = [
            LFB(n_feats, kernel_size, args.block_feats_awsrn, n_awru,
                wn=wn, res_scale=res_scale, act=act)
            for _ in range(n_resblocks)
        ]

        # define tail module
        out_feats = scale * scale * 3
        tail = AWMS(args, scale, n_feats, kernel_size, wn)

        # global skip: conv straight from the input image, then pixel shuffle
        skip = [
            wn(nn.Conv2d(3, out_feats, 3, padding=3 // 2)),
            nn.PixelShuffle(scale),
        ]

        # make object members
        self.head = nn.Sequential(*head)
        self.body = nn.Sequential(*body)
        self.tail = tail
        self.skip = nn.Sequential(*skip)
        self.attention = BlancedAttention(args.block_feats_awsrn // 4, reduction=8)
    def __init__(self,
                 conv,
                 n_feat,
                 kernel_size,
                 reduction,
                 bias=True,
                 bn=False,
                 act=nn.ReLU(True),
                 res_scale=1):
        """Residual block whose body ends in a BlancedAttention module.

        Args:
            conv: convolution factory ``conv(in_ch, out_ch, k, bias=...)``.
            n_feat: number of feature channels.
            kernel_size: conv kernel size.
            reduction: channel-reduction ratio forwarded to BlancedAttention.
            bias: whether the convs carry a bias term.
            bn: insert BatchNorm2d after each conv when True.
            act: activation placed after the first conv only.
            res_scale: residual scaling factor stored for the forward pass.
        """
        super(RCAB_ori_blanced_attention, self).__init__()
        body_layers = []
        for idx in range(2):
            body_layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                body_layers.append(nn.BatchNorm2d(n_feat))
            if idx == 0:
                body_layers.append(act)
        # Attention is the last stage inside the residual body.
        body_layers.append(BlancedAttention(n_feat, reduction))
        self.body = nn.Sequential(*body_layers)
        self.res_scale = res_scale
Exemplo n.º 9
0
    def __init__(self, args, conv=default_conv):
        """EDSR backbone with a balanced-attention module appended.

        Args:
            args: config/namespace; reads ``n_resblocks_edsr``,
                ``n_feats_edsr``, ``scale``, ``rgb_range``, ``n_colors``
                and ``res_scale``.
            conv: convolution factory used throughout the network.
        """
        super(EDSR_blanced_attention, self).__init__()

        n_resblocks = args.n_resblocks_edsr  # e.g. 16
        n_feats = args.n_feats_edsr  # e.g. 64
        kernel_size = 3
        scale = args.scale  # e.g. 4
        act = nn.ReLU(True)
        # Pretrained-weight URL keyed by the current configuration.
        self.url = url['r{}f{}x{}'.format(n_resblocks, n_feats, scale)]
        self.sub_mean = common.MeanShift(args.rgb_range)  # normalise the input
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)  # restore the output

        # define head module: n_colors -> n_feats
        m_head = [conv(args.n_colors, n_feats, kernel_size)]

        # define body module: n_resblocks residual blocks + one fusion conv
        m_body = [
            common.ResBlock(conv, n_feats, kernel_size,
                            act=act, res_scale=args.res_scale)
            for _ in range(n_resblocks)
        ]
        m_body.append(conv(n_feats, n_feats, kernel_size))

        # define tail module: all upsampling happens here
        m_tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, args.n_colors, kernel_size)
        ]

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)
        self.attention = BlancedAttention(n_feats)
Exemplo n.º 10
0
    def __init__(self, args, conv=common.default_conv):
        """oisr-LF-s backbone with a balanced-attention module appended.

        Args:
            args: config/namespace; reads ``n_resblocks_oisr_LF_s``,
                ``n_feats_oisr_LF_s``, ``scale``, ``rgb_range``,
                ``n_colors`` and ``res_scale``.
            conv: convolution factory used throughout the network.
        """
        super(oisr_LF_s_blanced_attention, self).__init__()

        n_resblocks = args.n_resblocks_oisr_LF_s
        n_feats = args.n_feats_oisr_LF_s
        kernel_size = 3
        scale = args.scale
        act = nn.ReLU(True)
        self.sub_mean = common.MeanShift(args.rgb_range)  # normalise the input
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)  # restore the output

        # head module: n_colors -> n_feats
        m_head = [conv(args.n_colors, n_feats, kernel_size)]

        # body module: a stack of residual blocks + one fusion conv
        m_body = [
            common.ResBlock(conv, n_feats, kernel_size,
                            act=act, res_scale=args.res_scale)
            for _ in range(n_resblocks)
        ]
        m_body.append(conv(n_feats, n_feats, kernel_size))

        # tail module: upsample, then project back to n_colors
        m_tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, args.n_colors, kernel_size)
        ]

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

        self.attention = BlancedAttention(n_feats)
    def __init__(self, args, conv=common.default_conv):
        """LWSR backbone with a balanced-attention module.

        The original constructor spelled out the same ``ResBlock(...)`` /
        ``ResBlock2(...)`` call more than twenty times, differing only in the
        ReLU ``inplace`` flag; the two local factories below remove that
        duplication without changing any layer or the module-registration
        order (which fixes the state_dict key order).

        Args:
            args: config/namespace; reads ``n_feats_s_LWSR``, ``scale``,
                ``rgb_range`` and ``n_colors``.
            conv: convolution factory used throughout the network.
        """
        super(LWSR_blanced_attention, self).__init__()

        n_feats = args.n_feats_s_LWSR
        kernel_size = 3
        scale = args.scale

        def _rb(inplace):
            # Plain residual block; only the ReLU inplace flag varies.
            return ResBlock(conv, n_feats, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(inplace), res_scale=1)

        def _rb2(inplace):
            # ResBlock2 variant used at the head of stages 7-10.
            return ResBlock2(conv, n_feats, kernel_size, bias=True, bn=False,
                             act=nn.ReLU(inplace), res_scale=1)

        # RGB mean for DIV2K
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

        # define head module: n_colors -> n_feats
        modules_head = [conv(args.n_colors, n_feats, kernel_size)]

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

        self.head = nn.Sequential(*modules_head)

        # Stages 2-6: groups of residual blocks. The inplace flags below
        # reproduce the original construction exactly.
        self.feat2_1 = _rb(False)
        self.feat2_2 = _rb(False)
        self.feat2_3 = _rb(True)

        self.feat3_1 = _rb(False)
        self.feat3_2 = _rb(False)
        self.feat3_3 = _rb(True)

        self.feat4_1 = _rb(True)
        self.feat4_2 = _rb(False)
        self.feat4_3 = _rb(True)

        self.feat5_1 = _rb(True)
        self.feat5_2 = _rb(False)
        self.feat5_3 = _rb(True)

        self.feat6_1 = _rb(True)
        self.feat6_2 = _rb(True)

        # 1x1 conv reducing 6*n_feats channels back to n_feats.
        self.conv1 = conv(6 * n_feats, n_feats, 1, bias=False)

        # Stages 7-10: a ResBlock2 head followed by two plain blocks each.
        self.feat7_1 = _rb2(True)
        self.feat7_2 = _rb(False)
        self.conv7 = _rb(False)

        self.feat8_1 = _rb2(True)
        self.feat8_2 = _rb(False)
        self.conv8 = _rb(False)

        self.feat9_1 = _rb2(True)
        self.feat9_2 = _rb(False)
        self.conv9 = _rb(False)

        self.feat10_1 = _rb2(True)
        self.feat10_2 = _rb(False)
        self.conv10 = _rb(False)

        self.attention = BlancedAttention(n_feats)

        # define tail module: upsample, then project back to n_colors
        modules_tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, args.n_colors, kernel_size)
        ]

        self.tail = nn.Sequential(*modules_tail)