def __init__(self, conv, n_feats, kernel_size, bias=False, bn=False,
             act=nn.ReLU(False), res_scale=1, k_bits=32, ema_epoch=1, name=None):
    super(PAMS_ResBlock, self).__init__()
    self.k_bits = k_bits

    # Learnable PAMS activation quantizers for the block input, the
    # intermediate activation inside the body, and the body output.
    self.quant_act1 = pams_quant_act(self.k_bits, ema_epoch=ema_epoch)
    self.quant_act2 = pams_quant_act(self.k_bits, ema_epoch=ema_epoch)
    self.quant_act3 = pams_quant_act(self.k_bits, ema_epoch=ema_epoch)
    self.shortcut = common.ShortCut()

    # Body: quantized conv -> act -> activation quantizer -> quantized conv.
    m = []
    for i in range(2):
        m.append(conv(n_feats, n_feats, kernel_size, k_bits=self.k_bits, bias=bias))
        if i == 0:
            m.append(act)
            m.append(self.quant_act2)
    self.body = nn.Sequential(*m)
    self.res_scale = res_scale
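# A minimal sketch (not part of this excerpt) of how the quantizers created
# above are presumably wired in the forward pass: quantize the block input,
# run the quantized body, rescale, add the identity shortcut, and quantize
# the body output. The exact ordering is an assumption inferred from the
# modules created in __init__, not confirmed by this excerpt.
def forward(self, x):
    residual = self.quant_act1(x)                    # quantize block input
    body = self.body(residual).mul(self.res_scale)   # quantized convs (quant_act2 applied inside)
    res = self.quant_act3(body)                      # quantize body output
    res += self.shortcut(x)                          # identity shortcut
    return res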
def __init__(self, growRate0, growRate, nConvLayers, kSize=3, k_bits=32, name=None):
    super(PAMS_RDB, self).__init__()
    G0 = growRate0
    G = growRate
    C = nConvLayers
    self.k_bits = k_bits

    # Densely connected quantized conv layers; input channels grow by G per layer.
    convs = []
    for c in range(C):
        convs.append(PAMS_RDB_Conv_in(G0 + c * G, G, kSize, k_bits=self.k_bits))
    self.convs = nn.Sequential(*convs)

    self.act1 = pams_quant_act(self.k_bits)
    self.act2 = pams_quant_act(self.k_bits)

    # Local Feature Fusion: 1x1 quantized conv back to G0 channels.
    self.LFF = QuantConv2d(in_channels=G0 + C * G, out_channels=G0, kernel_size=1,
                           padding=0, k_bits=self.k_bits, stride=1, bias=True)
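# A minimal sketch (not part of this excerpt) of the residual dense block's
# presumed forward pass: dense convs, 1x1 local feature fusion, then a local
# residual connection. The placement of act1/act2 around the fusion is an
# assumption based on the quantizers created in __init__.
def forward(self, x):
    out = self.convs(x)        # densely connected quantized convs (channels grow to G0 + C*G)
    out = self.act1(out)       # quantize concatenated dense features
    out = self.LFF(out)        # 1x1 local feature fusion back to G0 channels
    return self.act2(out + x)  # local residual learning, then quantize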
def __init__(self, inChannels, growRate, kSize=3, k_bits=32):
    super(PAMS_RDB_Conv, self).__init__()
    Cin = inChannels
    G = growRate
    self.k_bits = k_bits

    # Quantized 3x3 conv producing G new feature maps, followed by ReLU.
    self.conv = nn.Sequential(*[
        quant_conv3x3(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1,
                      k_bits=self.k_bits, bias=True),
        nn.ReLU()
    ])
    self.act1 = pams_quant_act(self.k_bits)
    self.act2 = pams_quant_act(self.k_bits)
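# A minimal sketch (not part of this excerpt) of the dense layer's presumed
# forward pass: quantize the input, apply the quantized conv + ReLU, quantize
# the new features, and concatenate them with the input along the channel
# dimension (dense connectivity). Quantizer placement is an assumption.
def forward(self, x):
    out = self.conv(self.act1(x))   # quantized 3x3 conv + ReLU on quantized input
    out = self.act2(out)            # quantize the G new feature maps
    return torch.cat((x, out), 1)   # dense concatenation: Cin + G channels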
def __init__(self, args):
    super(PAMS_RDN, self).__init__()
    r = args.scale[0]
    G0 = args.G0
    kSize = args.RDNkSize

    # Number of RDB blocks (D), conv layers per block (C), growth rate (G).
    self.D, C, G = {
        'A': (20, 6, 32),
        'B': (16, 8, 64),
    }[args.RDNconfig]

    # Per-block bit-widths: broadcast a scalar k_bits to every RDB.
    if not isinstance(args.k_bits, list):
        self.k_bits = [args.k_bits for _ in range(self.D)]
    else:
        self.k_bits = args.k_bits

    # Shallow feature extraction net (full-precision convs).
    self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize - 1) // 2, stride=1)
    self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)

    # Residual dense blocks and dense feature fusion.
    self.RDBs = nn.ModuleList()
    for i in range(self.D):
        self.RDBs.append(
            PAMS_RDB(growRate0=G0, growRate=G, nConvLayers=C, k_bits=self.k_bits[i])
        )
    self.act = pams_quant_act(args.k_bits)

    # Global Feature Fusion over the concatenated outputs of all D blocks.
    self.GFF = nn.Sequential(*[
        QuantConv2d(self.D * G0, G0, 1, padding=0, stride=1,
                    k_bits=args.k_bits, bias=True),
        QuantConv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1,
                    k_bits=args.k_bits, bias=True)
    ])

    # Up-sampling net (full-precision convs + pixel shuffle).
    if r == 2 or r == 3:
        self.UPNet = nn.Sequential(*[
            nn.Conv2d(G0, G * r * r, kSize, padding=(kSize - 1) // 2, stride=1),
            nn.PixelShuffle(r),
            nn.Conv2d(G, args.n_colors, kSize, padding=(kSize - 1) // 2, stride=1)
        ])
    elif r == 4:
        self.UPNet = nn.Sequential(*[
            nn.Conv2d(G0, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
            nn.PixelShuffle(2),
            nn.Conv2d(G, G * 4, kSize, padding=(kSize - 1) // 2, stride=1),
            nn.PixelShuffle(2),
            nn.Conv2d(G, args.n_colors, kSize, padding=(kSize - 1) // 2, stride=1)
        ])
    else:
        raise ValueError("scale must be 2 or 3 or 4.")
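# A minimal sketch (not part of this excerpt) of the presumed RDN forward pass:
# shallow feature extraction, D residual dense blocks whose outputs are
# concatenated for global feature fusion, a global residual connection back to
# the first shallow features, and pixel-shuffle up-sampling. The placement of
# the global activation quantizer (self.act) is an assumption.
def forward(self, x):
    f_minus1 = self.SFENet1(x)      # first shallow features (global residual source)
    x = self.SFENet2(f_minus1)

    rdb_outs = []
    for rdb in self.RDBs:
        x = rdb(x)
        rdb_outs.append(x)          # collect every block output (G0 channels each)

    x = self.GFF(self.act(torch.cat(rdb_outs, 1)))  # global feature fusion over D*G0 channels
    x = x + f_minus1                # global residual learning
    return self.UPNet(x)            # up-sample to the target resolution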