def __init__(self, in_c=3, out_c=3, scale=4, n_feats=128, n_l3=3, act=nn.LeakyReLU(0.2, True)):
    """Build the AFN: head conv, ``n_l3`` AFB_L3 stages, a fusion stage
    over the concatenated stage outputs, and an upsampling tail.

    Args:
        in_c: input image channels.
        out_c: output image channels.
        scale: upsampling factor for the tail.
        n_feats: feature width used throughout the body.
        n_l3: number of AFB_L3 stages.
        act: activation module shared by the stages and the tail.
    """
    super(AFN, self).__init__()
    self.head = conv_(in_c, n_feats)
    self.n = n_l3
    # One AFB_L3 per stage, all at the same feature width.
    self.AFBs = nn.ModuleList(
        [AFB_L3(channels=n_feats, n_l2=4, act=act) for _ in range(n_l3)])
    fused_c = n_feats * n_l3
    # GFF takes n_feats * n_l3 channels (presumably the concatenated stage
    # outputs — confirm against forward()) and reduces back to n_feats via 1x1.
    self.GFF = nn.Sequential(
        SELayer(fused_c),
        conv_(fused_c, n_feats, 1, padding=0, stride=1),
    )
    # Upsample to the target scale, then project to the output channels.
    self.tail = nn.Sequential(
        UpsampleBlock(scale, n_feats, kernel_size=3, stride=1, bias=True, act=act),
        conv_(n_feats, out_c),
    )
def __init__(self, in_c=3, out_c=3, scale=4, n0=128, nr=32, n_depths=6):
    """Build the DDBPN: feature-extraction head, alternating up/down
    projection units (DPU), and a reconstruction conv tail.

    Args:
        in_c: input image channels.
        out_c: output image channels.
        scale: projection/upsampling factor passed to each DPU.
        n0: width of the first head conv.
        nr: base projection width.
        n_depths: number of up-projection units (one fewer down units).
    """
    super(DDBPN, self).__init__()
    # Head: wide 3x3-style conv, then a 1x1 bottleneck down to nr.
    self.head = nn.Sequential(
        conv_(in_c, n0),
        nn.PReLU(n0),
        conv_(n0, nr, 1, 1, 0),
        nn.PReLU(nr),
    )
    self.depths = n_depths
    self.upmodules = nn.ModuleList()
    self.downmodules = nn.ModuleList()
    # Up-projection units: the input width grows by nr after every unit
    # except the first; the flag (idx != 0) marks non-initial units.
    width = nr
    for idx in range(n_depths):
        self.upmodules.append(DPU(width, nr, scale, True, idx != 0))
        if idx:
            width += nr
    # Down-projection units: one fewer than the up units; the input width
    # grows by nr after every unit.
    width = nr
    for idx in range(n_depths - 1):
        self.downmodules.append(DPU(width, nr, scale, False, idx != 0))
        width += nr
    # Tail consumes nr * n_depths channels (presumably the concatenated
    # up-unit outputs — confirm against forward()).
    self.tail = conv_(nr * n_depths, out_c)
def __init__(self, in_c=3, out_c=3, scale=4, n_feats=64, n_rg=10, n_rcab=20,
             kernel_size=3, stride=1, bias=True, bn=False, instance_norm=False,
             act=nn.ReLU(True)):
    """Build RCAN: head conv, ``n_rg`` residual groups, and an
    upsample-plus-reconstruction tail.

    Args:
        in_c / out_c: input / output image channels.
        scale: upsampling factor for the tail.
        n_feats: feature width of the body.
        n_rg: number of ResGroup modules.
        n_rcab: channel-attention blocks per group.
        kernel_size, stride, bias, bn, instance_norm, act: conv/norm/activation
            settings threaded through every sub-module.
    """
    super(RCAN, self).__init__()
    self.head = conv_(in_c, n_feats, kernel_size=kernel_size, stride=stride)
    groups = [
        ResGroup(n_rcab=n_rcab, n_feats=n_feats, kernel_size=kernel_size,
                 stride=stride, bias=bias, bn=bn,
                 instance_norm=instance_norm, act=act)
        for _ in range(n_rg)
    ]
    self.body = nn.Sequential(*groups)
    tail = [
        UpsampleBlock(scale=scale, n_feats=n_feats, kernel_size=kernel_size,
                      stride=stride, bias=bias, bn=bn, act=act)
    ]
    # Optional normalization before the final reconstruction conv.
    if instance_norm:
        tail.append(nn.InstanceNorm2d(n_feats))
    tail.append(conv_(n_feats, out_c, kernel_size=kernel_size,
                      stride=stride, bias=bias))
    self.tail = nn.Sequential(*tail)
def __init__(self, in_c=3, out_c=3, scale=4, n_feats=64, n_resblocks=16,
             kernel_size=3, stride=1, bias=True, bn=False, act=nn.ReLU(True),
             res_scale=1, last_act=None):
    """Build EDSR: head conv, ``n_resblocks`` residual blocks, and an
    upsample-plus-reconstruction tail with an optional final activation.

    Args:
        in_c / out_c: input / output image channels.
        scale: upsampling factor for the tail.
        n_feats: feature width of the body.
        n_resblocks: number of ResBlock modules.
        kernel_size, stride, bias, bn, act, res_scale: settings threaded
            through every sub-module.
        last_act: optional activation appended after the final conv. If an
            ``nn.Module`` is given, that module is used; any other non-None
            value falls back to ``nn.Sigmoid()``.
    """
    super(EDSR, self).__init__()
    self.head = conv_(in_c, n_feats, kernel_size=kernel_size, stride=stride)
    self.body = nn.Sequential(*[
        ResBlock(n_feats=n_feats, kernel_size=kernel_size, stride=stride,
                 bias=bias, bn=bn, act=act, res_scale=res_scale)
        for _ in range(n_resblocks)
    ])
    tail = [
        UpsampleBlock(scale=scale, n_feats=n_feats, kernel_size=kernel_size,
                      stride=stride, bias=bias, bn=bn, act=act),
        conv_(n_feats, out_c, kernel_size=kernel_size, stride=stride,
              bias=bias),
    ]
    if last_act is not None:
        # BUGFIX: the original appended a hard-coded nn.Sigmoid() whenever
        # last_act was not None, silently discarding the module the caller
        # passed. Honor a passed nn.Module; keep the Sigmoid fallback for
        # other truthy sentinel values (e.g. last_act=True) so existing
        # callers are unaffected.
        tail.append(last_act if isinstance(last_act, nn.Module) else nn.Sigmoid())
    self.tail = nn.Sequential(*tail)
def __init__(self, n_rcab=20, n_feats=64, reduction=16, kernel_size=3,
             stride=1, bias=True, bn=False, instance_norm=False,
             act=nn.ReLU(True)):
    """A residual group: ``n_rcab`` channel-attention blocks followed by a
    width-preserving conv.

    Args:
        n_rcab: number of ResChannelAttBlock modules in the group.
        n_feats: feature width (input and output).
        reduction: channel-attention bottleneck reduction factor.
        kernel_size, stride, bias, bn, instance_norm, act: conv/norm/activation
            settings forwarded to each block.
    """
    super(ResGroup, self).__init__()
    assert act is not None
    blocks = [
        ResChannelAttBlock(n_feats=n_feats, reduction=reduction,
                           kernel_size=kernel_size, stride=stride, bias=bias,
                           bn=bn, instance_norm=instance_norm, act=act)
        for _ in range(n_rcab)
    ]
    # Trailing conv keeps the group's output at n_feats channels.
    blocks.append(conv_(in_channels=n_feats, out_channels=n_feats,
                        kernel_size=kernel_size, stride=stride, bias=bias))
    self.op = nn.Sequential(*blocks)
def __init__(self, n_feats=64, reduction=16, kernel_size=3, stride=1,
             bias=True, bn=False, instance_norm=False, act=nn.ReLU(True)):
    """Channel-attention block: conv -> [norms] -> act -> conv -> [norms]
    -> channel attention, all at constant width ``n_feats``.

    Args:
        n_feats: feature width (input and output).
        reduction: bottleneck reduction for ChannelAttentation.
        kernel_size, stride, bias: conv settings.
        bn / instance_norm: optionally insert BatchNorm2d / InstanceNorm2d
            after each conv (both may be enabled).
        act: activation inserted only between the two convs.
    """
    super(ResChannelAttBlock, self).__init__()
    assert act is not None

    def fresh_norms():
        # New norm instances for each position so no parameters are shared.
        norms = []
        if bn:
            norms.append(nn.BatchNorm2d(n_feats))
        if instance_norm:
            norms.append(nn.InstanceNorm2d(n_feats))
        return norms

    ops = [conv_(n_feats, n_feats, kernel_size=kernel_size, stride=stride,
                 bias=bias)]
    ops += fresh_norms()
    ops.append(act)  # activation only after the first conv
    ops.append(conv_(n_feats, n_feats, kernel_size=kernel_size, stride=stride,
                     bias=bias))
    ops += fresh_norms()
    ops.append(ChannelAttentation(channels=n_feats, reduction=reduction))
    self.op = nn.Sequential(*ops)
def __init__(self, channels, n_blocks=2, act=nn.ReLU(True)):
    """A stack of ``n_blocks`` conv + activation pairs at constant width.

    Args:
        channels: feature width (input and output of every conv).
        n_blocks: number of conv/activation pairs.
        act: activation module; NOTE the same instance is reused after
            every conv (matches the original interface).
    """
    super(AFB_0, self).__init__()
    layers = []
    for _ in range(n_blocks):
        layers += [conv_(channels, channels), act]
    self.op = nn.Sequential(*layers)
def __init__(self, channels=64, reduction=16):
    """Channel gate: global average pool, 1x1 bottleneck down/up convs,
    and a sigmoid producing per-channel weights.

    Args:
        channels: number of input/output channels.
        reduction: bottleneck factor; the hidden width is
            ``channels // reduction``.
    """
    super(ChannelAttentation, self).__init__()
    hidden = channels // reduction
    self.op = nn.Sequential(
        nn.AdaptiveAvgPool2d(1),
        conv_(in_channels=channels, out_channels=hidden,
              kernel_size=1, stride=1, padding=0, bias=True),
        nn.ReLU(inplace=True),
        conv_(in_channels=hidden, out_channels=channels,
              kernel_size=1, stride=1, padding=0, bias=True),
        nn.Sigmoid(),
    )
def __init__(self, G0=64, G=32, C=6):
    """Residual dense block: ``C`` conv layers whose input width grows by
    ``G`` per layer, followed by a 1x1 fusion conv back to ``G0`` channels.

    Args:
        G0: block input/output width.
        G: growth (output width of each inner ConvLayer).
        C: number of inner ConvLayers.
    """
    super(ResDenseBlock, self).__init__()
    self.C = C
    # Layer i consumes G0 + i*G channels — the widths match dense
    # concatenation of the block input with all previous layer outputs.
    self.convs_ = nn.ModuleList(
        [ConvLayer(G0 + i * G, G) for i in range(C)])
    # 1x1 fusion back down to the block's input width.
    self.fusion = conv_(in_channels=G0 + C * G, out_channels=G0,
                        kernel_size=1, stride=1, padding=0, bias=True)
def __init__(self, in_c=3, out_c=3, scale=4, n_feats=64, D=20, G=32, C=6):
    """Build RDN: two shallow feature convs, ``D`` residual dense blocks,
    global feature fusion, and an upsampling tail.

    Args:
        in_c / out_c: input / output image channels.
        scale: upsampling factor for the tail.
        n_feats: feature width (G0 of each dense block).
        D: number of ResDenseBlock modules.
        G / C: growth rate and layer count per dense block.
    """
    super(RDN, self).__init__()
    # Shallow feature extraction.
    self.SFE1 = conv_(in_c, n_feats, 3, 1, bias=True)
    self.SFE2 = conv_(n_feats, n_feats, 3, 1, bias=True)
    self.D = D
    self.RDNs_ = nn.ModuleList(
        [ResDenseBlock(G0=n_feats, G=G, C=C) for _ in range(D)])
    # Global feature fusion: 1x1 conv over n_feats * D channels (presumably
    # the concatenated block outputs — confirm against forward()), then a
    # width-preserving conv.
    self.GFF = nn.Sequential(
        conv_(in_channels=n_feats * D, out_channels=n_feats,
              kernel_size=1, stride=1, padding=0, bias=True),
        conv_(n_feats, n_feats),
    )
    self.tail = nn.Sequential(
        UpsampleBlock(scale, n_feats, 3, 1, True, False),
        conv_(n_feats, out_c, kernel_size=3, stride=1, bias=True),
    )
def __init__(self, in_c, out_c):
    """A single 3x3 conv followed by an in-place ReLU.

    Args:
        in_c: input channels.
        out_c: output channels.
    """
    super(ConvLayer, self).__init__()
    self.conv_ = nn.Sequential(
        conv_(in_c, out_c, kernel_size=3, stride=1, bias=True),
        nn.ReLU(True),
    )