def __init__(self, args, conv=common.default_conv):
    """RCAN variant whose upsampling tail is a UPASPP module."""
    super(RCAN, self).__init__()
    num_groups = args.n_resgroups
    num_blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    reduction = args.reduction
    scale = args.scale[0]
    relu = nn.ReLU(True)

    # DIV2K RGB statistics used for input normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    # Head: lift the image into feature space.
    head = [conv(args.n_colors, width, ksize)]

    # Body: residual channel-attention groups followed by one fusing conv.
    body = [
        ResidualGroup(
            conv, width, ksize, reduction,
            act=relu, res_scale=args.res_scale, n_resblocks=num_blocks)
        for _ in range(num_groups)
    ]
    body.append(conv(width, width, ksize))

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    # Upsampling is handled by the UPASPP module instead of a plain conv tail.
    self.up = UPASPP(conv, width, scale, reduction)
def __init__(self, args, conv=common.default_conv):
    """Cell-based (NAS-style) SR network assembled from a searched genotype.

    Fixes: resolves the genotype with getattr() instead of eval() on a
    config-supplied string (eval executes arbitrary code), and replaces the
    `i in [pos]` membership test with a plain equality comparison.
    """
    super(ArchNetwork, self).__init__()
    self._layers = args.layers
    # Look up the named genotype attribute; equivalent to the original
    # eval("genotypes.%s" % args.genotype) for attribute names, without eval.
    genotype = getattr(genotypes, args.genotype)
    stem_multiplier = 4
    C = args.init_channels
    self._upsampling_Pos = args.upsampling_Pos

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Stem: 3-channel image -> stem_multiplier * C feature maps.
    C_curr = stem_multiplier * C
    self.stem = nn.Sequential(
        nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
    )

    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    upsample_prev = False
    for i in range(self._layers):
        # Only the cell at the configured position performs upsampling.
        upsample = (i == self._upsampling_Pos)
        cell = Cell(genotype, C_prev_prev, C_prev, C_curr, upsample, upsample_prev)
        upsample_prev = upsample
        self.cells += [cell]
        C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr

    # Project the final cell output back to image channels.
    self.final_conv = conv(C_prev, args.n_colors, 3)
def __init__(self, args, conv=common.default_conv):
    """FADN: dynamic residual blocks between a conv head and upsampling tail.

    Cleanup: the original created `act = nn.ReLU(True)` but never used it
    (DyResBlock takes no activation argument); the dead local is removed.
    """
    super(FADN, self).__init__()
    self.n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: lift the image into feature space.
    m_head = [conv(args.n_colors, n_feats, kernel_size)]

    # Body: dynamic residual blocks plus one fusing conv.
    m_body = [DyResBlock(kernel_size, n_feats, n_feats)
              for _ in range(self.n_resblocks)]
    m_body.append(conv(n_feats, n_feats, kernel_size))

    # Tail: pixel-shuffle upsampling, then projection back to image channels.
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
def __init__(self, args, conv=common.default_conv):
    """EDSR with explicit DIV2K mean normalization.

    Bug fix: MeanShift was called as MeanShift(rgb_range, rgb_mean, -1) and
    (..., rgb_mean, 1), passing the sign in the rgb_std position. Every
    sibling model calls MeanShift(rgb_range, rgb_mean, rgb_std[, sign]),
    so the std tuple is now supplied and the sign lands in the correct slot.
    """
    super(EDSR, self).__init__()
    n_resblock = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)

    # DIV2K RGB statistics for normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    # Uses MeanShift's default sign for subtraction — confirm in common.MeanShift.
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    # Head: image -> feature space.
    modules_head = [conv(args.n_colors, n_feats, kernel_size)]

    # Body: residual blocks plus a fusing conv.
    modules_body = [
        common.ResBlock(
            conv, n_feats, kernel_size, act=act, res_scale=args.res_scale)
        for _ in range(n_resblock)]
    modules_body.append(conv(n_feats, n_feats, kernel_size))

    # Tail: upsample, then project back to image channels.
    modules_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*modules_head)
    self.body = nn.Sequential(*modules_body)
    self.tail = nn.Sequential(*modules_tail)
def __init__(self, args, conv=common.default_conv):
    """MAMNet: MAMB residual blocks with head/tail convs and scaled init."""
    super(MAMNet, self).__init__()
    blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    relu = nn.ReLU(True)

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: image -> features.
    head = [conv(args.n_colors, width, ksize)]

    # Body: MAMB blocks plus a fusing conv.
    body = [
        common.MAMB(conv, width, ksize, act=relu, res_scale=args.res_scale)
        for _ in range(blocks)
    ]
    body.append(conv(width, width, ksize))

    # Tail: upsample, then project back to image channels.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize),
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)

    # Scaled initialization of the body weights.
    common.initialize_weights(self.body, 0.1)
def __init__(self, args, conv=common.default_conv):
    """RCAN: residual channel-attention groups between a head and a conv tail."""
    super(RCAN, self).__init__()
    groups = args.n_resgroups
    blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    reduction = args.reduction
    scale = args.scale[0]
    relu = nn.ReLU(True)

    # This variant uses MeanShift's default statistics (no explicit RGB mean).
    self.sub_mean = common.MeanShift(args.rgb_range)

    # Head: image -> feature space.
    head = [conv(args.n_colors, width, ksize)]

    # Body: residual groups plus a fusing conv.
    body = [
        ResidualGroup(
            conv, width, ksize, reduction,
            act=relu, res_scale=args.res_scale, n_resblocks=blocks)
        for _ in range(groups)
    ]
    body.append(conv(width, width, ksize))

    # Tail: upsample, then project back to image channels.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize),
    ]

    self.add_mean = common.MeanShift(args.rgb_range, sign=1)
    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args):
    """RDN: shallow feature extraction, residual dense blocks, global fusion."""
    super(RDN, self).__init__()
    r = args.scale[0]  # kept for parity with the reference implementation
    G0 = args.G0
    kSize = args.RDNkSize

    # (number of RDB blocks, conv layers per RDB, growth channels).
    self.D, C, G = {
        'A': (20, 6, 32),
        'B': (16, 8, 64),
    }[args.RDNconfig]

    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

    pad = (kSize - 1) // 2
    # Shallow feature extraction.
    self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=pad, stride=1)
    self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=pad, stride=1)

    # Residual dense blocks.
    self.RDBs = nn.ModuleList(
        RDB(growRate0=G0, growRate=G, nConvLayers=C) for _ in range(self.D)
    )

    # Global feature fusion: 1x1 reduce over the concatenated RDB outputs.
    self.GFF = nn.Sequential(
        nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
        nn.Conv2d(G0, G0, kSize, padding=pad, stride=1),
    )

    # Output conv over G0 + 2 input channels (presumably two auxiliary
    # channels concatenated in forward() — confirm against the caller).
    self.weight_conv = nn.Conv2d(G0 + 2, 3, 3, 1, 1)
def __init__(self, args, conv=common.default_conv):
    """EDSR with a pretrained-weights URL keyed by its configuration."""
    super(EDSR, self).__init__()
    blocks = args.n_resblocks   # e.g. 16
    width = args.n_feats        # e.g. 64
    ksize = 3
    scale = args.scale[0]       # e.g. 4
    relu = nn.ReLU(True)
    # Pretrained-weight URL for this (blocks, width, scale) configuration.
    self.url = url['r{}f{}x{}'.format(blocks, width, scale)]

    # Normalize the input / de-normalize the output.
    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: n_colors -> width channels.
    head = [conv(args.n_colors, width, ksize)]

    # Body: `blocks` residual blocks, then a fusing conv (width -> width).
    body = [
        common.ResBlock(conv, width, ksize, act=relu, res_scale=args.res_scale)
        for _ in range(blocks)
    ]
    body.append(conv(width, width, ksize))

    # Tail: all spatial upsampling happens here, then projection to n_colors.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize),
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args):
    """CARN: cascading blocks with 1x1 fusion convs and multi-scale upsampling."""
    super(CARN, self).__init__()
    multi_scale = len(args.scale) > 1
    self.scale_idx = 0
    scale = args.scale[self.scale_idx]
    group = 1
    self.scale = args.scale

    # DIV2K RGB statistics for input normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

    self.entry = nn.Conv2d(3, 64, 3, 1, 1)

    # Cascading body: each c-conv fuses the growing concatenation via 1x1.
    self.b1 = Block(64, 64)
    self.b2 = Block(64, 64)
    self.b3 = Block(64, 64)
    self.c1 = ops.BasicBlock(64 * 2, 64, 1, 1, 0)
    self.c2 = ops.BasicBlock(64 * 3, 64, 1, 1, 0)
    self.c3 = ops.BasicBlock(64 * 4, 64, 1, 1, 0)

    self.upsample = ops.UpsampleBlock(
        64, scale=scale, multi_scale=multi_scale, group=group)
    self.exit = nn.Conv2d(64, 3, 3, 1, 1)
def __init__(self, args, conv=common.default_conv):
    """VDSR-style network with an explicit head and an upsampling tail.

    Fixes: the tail previously projected to a hard-coded 3 channels while
    every sibling model uses args.n_colors (identical behavior for RGB);
    the body's duplicated first append + range(n - 2) loop is collapsed
    into a single range(n - 1) loop producing the same module count.
    """
    super(VDSR, self).__init__()
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: image -> feature space.
    m_head = [conv(args.n_colors, n_feats, kernel_size)]

    def basic_block(in_channels, out_channels, act):
        # conv + activation, bias on, no batch-norm (VDSR convention).
        return common.BasicBlock(
            conv, in_channels, out_channels, kernel_size, bias=True, bn=False, act=act
        )

    # Body: n_resblocks - 1 conv+ReLU blocks (same count as the original's
    # one explicit append plus a range(n_resblocks - 2) loop).
    m_body = [basic_block(n_feats, n_feats, nn.ReLU(False))
              for _ in range(n_resblocks - 1)]

    # Tail: upsample, then project back to image channels.
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
def __init__(self, scale_list, model_path='weight/EDSR_weight.pt'):
    """Standalone EDSR with fixed hyper-parameters; loads weights on construction.

    NOTE(review): the originals named 5 and 3 `input_channel`/`output_channel`
    but pass them in common.conv's third-argument slot — presumably kernel
    sizes; confirm against common.conv's signature.
    """
    super(EDSR, self).__init__()
    scale = scale_list[0]
    head_ksize = 5   # third arg of common.conv for the head
    tail_ksize = 3   # third arg of common.conv for the tail
    n_blocks = 16
    width = 64
    rgb_range = 255
    res_scale = 0.1
    relu = nn.ReLU(True)

    # Head: 3-channel image -> `width` feature maps.
    self.head = nn.Sequential(common.conv(3, width, head_ksize))

    # Body: residual blocks; the fusing conv is registered under the index
    # string(n_blocks) so state-dict keys match the original layout.
    self.body = nn.Sequential(*[
        common.ResBlock(width, bias=True, act=relu, res_scale=res_scale)
        for _ in range(n_blocks)
    ])
    self.body.add_module(str(n_blocks), common.conv(width, width, 3))

    # Tail: upsample only when scale > 1.
    if scale > 1:
        self.tail = nn.Sequential(
            common.Upsampler(scale, width, act=False, choice=0),
            common.conv(width, 3, tail_ksize),
        )
    else:
        self.tail = nn.Sequential(common.conv(width, 3, tail_ksize))

    self.sub_mean = common.MeanShift(rgb_range, sign=-1)
    self.add_mean = common.MeanShift(rgb_range, sign=1)

    self.model_path = model_path
    self.load()
def __init__(self, args, nf=48):
    """RFDN: six RFDB blocks fused by a 1x1 conv, pixel-shuffle upsampling."""
    super(RFDN, self).__init__()
    scale = args.scale[0]
    in_nc = 3
    out_nc = 3
    num_modules = 6

    self.fea_conv = conv_layer(in_nc, nf, kernel_size=3)

    # Named B1..B6 (not a ModuleList) so state-dict keys stay stable.
    self.B1 = RFDB(in_channels=nf)
    self.B2 = RFDB(in_channels=nf)
    self.B3 = RFDB(in_channels=nf)
    self.B4 = RFDB(in_channels=nf)
    self.B5 = RFDB(in_channels=nf)
    self.B6 = RFDB(in_channels=nf)

    # Fuse the concatenated block outputs, then refine.
    self.c = conv_block(nf * num_modules, nf, kernel_size=1, act_type='lrelu')
    self.LR_conv = conv_layer(nf, nf, kernel_size=3)

    # Pixel-shuffle reconstruction.
    self.upsampler = pixelshuffle_block(nf, out_nc, upscale_factor=scale)
    self.scale_idx = 0

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)
def __init__(self, args, conv=common.default_conv):
    """EDSR variant whose residual blocks use a sigmoid activation."""
    super(EDSR, self).__init__()
    blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    # One shared nn.Sigmoid instance, passed to every residual block
    # (sigmoid replaces the usual ReLU in this experiment).
    sigmoid = nn.Sigmoid()

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: image -> feature space.
    head = [conv(args.n_colors, width, ksize)]

    # Body: residual blocks plus a fusing conv.
    body = [
        common.ResBlock(conv, width, ksize, act=sigmoid, res_scale=args.res_scale)
        for _ in range(blocks)
    ]
    body.append(conv(width, width, ksize))

    # Tail: upsample, then project back to image channels.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize),
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args, conv=common.default_conv):
    """MRIRSR: a single multi-RIR block as the body, standard head/tail."""
    super(MRIRSR, self).__init__()
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: image -> feature space.
    head = [conv(args.n_colors, width, ksize)]

    # Body: one nested residual-in-residual block, then a fusing conv.
    body = [
        MultiRIRBlock(width, ksize,
                      n_resblocks=args.n_resblocks,
                      res_scale=args.res_scale,
                      res_scale_factor=args.res_scale_factor,
                      n_rirblocks=args.n_rirblocks,
                      args=args, conv=conv),
        conv(width, width, ksize),
    ]

    # Tail: upsample, then project back to image channels.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize),
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args):
    """LatticeNet: lattice blocks, backward fusion module, pixel-shuffle tail."""
    super(LatticeNet, self).__init__()
    in_channels = args.n_colors
    out_channels = args.n_colors
    num_fea = 64  # fixed width for LatticeNet (intentionally not args.n_feats)
    upscale_factor = args.scale[0]
    num_LBs = args.num_LBs

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)
    self.num_LBs = num_LBs

    # Two-conv feature extraction.
    self.fea_conv = nn.Sequential(
        nn.Conv2d(in_channels, num_fea, 3, 1, 1),
        nn.Conv2d(num_fea, num_fea, 3, 1, 1),
    )

    # Lattice blocks.
    self.LBs = nn.ModuleList(LBlock(num_fea) for _ in range(num_LBs))

    # Backward fusion module.
    self.BFM = BFModule(num_fea)

    # Reconstruction: conv, channel expansion, pixel shuffle.
    self.upsample = nn.Sequential(
        nn.Conv2d(num_fea, num_fea, 3, 1, 1),
        nn.Conv2d(num_fea, out_channels * (upscale_factor ** 2), 3, 1, 1),
        nn.PixelShuffle(upscale_factor),
    )
def __init__(self, conv=common.default_conv):
    """EDCNN: encoder/decoder-shaped body of BaseBranch blocks, scale-1 tail."""
    super(EDCNN, self).__init__()
    n_feats = 64
    ksize = 3
    scale = 1        # no spatial upscaling requested from the Upsampler
    rgb_range = 255

    self.sub_mean = common.MeanShift(rgb_range)
    self.add_mean = common.MeanShift(rgb_range, sign=1)

    head = [conv(3, n_feats, ksize)]

    # Channel pyramid 64 -> 128 -> 256 -> 512 -> 256 -> 128 -> 64:
    # a BaseBranch at each level interleaved with channel-changing convs,
    # closed by a final BaseBranch and a fusing conv.
    widths = [64, 128, 256, 512, 256, 128, 64]
    body = []
    for cur, nxt in zip(widths[:-1], widths[1:]):
        body.append(common.BaseBranch(cur, ksize))
        body.append(conv(cur, nxt, ksize))
    body.append(common.BaseBranch(widths[-1], ksize))
    body.append(conv(n_feats, n_feats, ksize))

    tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, 3, ksize),
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, conv=common.default_conv):
    """rhcnn: hierarchical body with two separate 1x1 tail convs."""
    super(rhcnn, self).__init__()
    ksize = 3
    rgb_range = 255

    self.sub_mean = common.MeanShift(rgb_range)
    self.add_mean = common.MeanShift(rgb_range, sign=1)

    head = [conv(3, 128, ksize)]

    # Alternating rhcnn blocks and channel-changing convs
    # (128 -> 256 -> 512 -> 256 -> 128 -> 6, then a final rhcnn(6, 3)).
    body = [
        common.rhcnn(128, 64, ksize),
        conv(128, 256, ksize),
        common.rhcnn(256, 128, ksize),
        conv(256, 512, ksize),
        common.rhcnn(512, 256, ksize),
        conv(512, 256, ksize),
        common.rhcnn(256, 128, ksize),
        conv(256, 128, ksize),
        common.rhcnn(128, 64, ksize),
        conv(128, 6, ksize),
        common.rhcnn(6, 3, ksize),
    ]

    # NOTE(review): tail takes 6 input channels while the last body module is
    # declared rhcnn(6, 3, ...) — presumably forward() concatenates features,
    # or rhcnn's second argument is not its output width; confirm in forward().
    tail = [conv(6, 3, 1)]
    tail2 = [conv(3, 3, 1)]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
    self.tail2 = nn.Sequential(*tail2)
def __init__(self, args, conv=common.default_conv):
    """EDSR_ATT: EDSR backbone built from attention residual groups."""
    super(EDSR_ATT, self).__init__()
    blocks = args.n_resblocks
    groups = args.n_resgroups
    reduction = args.reduction
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    relu = nn.ReLU(True)
    self.url = ""  # no pretrained weights published for this configuration

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: image -> feature space.
    head = [conv(args.n_colors, width, ksize)]

    # Body: attention residual groups plus a fusing conv.
    body = [
        common.ResGroupAtt(
            conv, width, ksize, reduction,
            act=relu, res_scale=args.res_scale, n_resblocks=blocks)
        for _ in range(groups)
    ]
    body.append(conv(width, width, ksize))

    # Tail: upsample, then project back to image channels.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize),
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args, conv=common.default_conv):
    """RCAN variant built from MulConvGroup stages with a CRB refinement tail.

    Bug fix: the original selected `rgb_mean` only for 'DIV2K' and
    'DIVFlickr2K' and raised a NameError for any other args.data_train;
    an explicit DIV2K fallback is now provided.
    """
    super(RCAN, self).__init__()
    # One group is split off into `prebody`, hence the -1 here.
    n_resgroups = args.n_resgroups - 1
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    reduction = args.reduction
    scale = args.scale[0]
    act = nn.ReLU(True)

    # Per-dataset RGB means for input normalization.
    if args.data_train == 'DIV2K':
        print('Use DIV2K mean (0.4488, 0.4371, 0.4040)')
        rgb_mean = (0.4488, 0.4371, 0.4040)
    elif args.data_train == 'DIVFlickr2K':
        print('Use DIVFlickr2K mean (0.4690, 0.4490, 0.4036)')
        rgb_mean = (0.4690, 0.4490, 0.4036)
    else:
        # Fallback so rgb_mean is always defined (original raised NameError).
        rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    # Head: image -> feature space.
    modules_head = [conv(args.n_colors, n_feats, kernel_size)]

    # Pre-body: a single group applied before the main stack.
    modules_prebody = [
        MulConvGroup(
            conv, n_feats, kernel_size, reduction,
            act=act, res_scale=args.res_scale, n_resblocks=n_resblocks)
    ]

    # Body: remaining groups plus a fusing conv.
    modules_body = [
        MulConvGroup(
            conv, n_feats, kernel_size, reduction,
            act=act, res_scale=args.res_scale, n_resblocks=n_resblocks)
        for _ in range(n_resgroups)]
    modules_body.append(conv(n_feats, n_feats, kernel_size))

    # Upsampling, refinement, and reconstruction stages.
    modules_up = [common.Upsampler(conv, scale, n_feats, act=False)]
    modules_tail = [CRB(conv, n_feats, kernel_size, reduction)]
    modules_re = [conv(n_feats, args.n_colors, kernel_size)]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*modules_head)
    self.prebody = nn.Sequential(*modules_prebody)
    self.body = nn.Sequential(*modules_body)
    self.up = nn.Sequential(*modules_up)
    self.tail = nn.Sequential(*modules_tail)
    self.re = nn.Sequential(*modules_re)
def __init__(self, args, conv=common.default_conv):
    """EASR: RIR groups with progressive global feature fusion (GFF) units.

    Bug fix: `scale` was taken as `args.scale` (a list) and passed to
    common.Upsampler, which the sibling models feed an integer factor —
    this now uses `args.scale[0]` like every other model in this file.
    """
    super(EASR, self).__init__()
    self.n_resgroups = args.n_resgroups   # e.g. 8
    self.n_resblocks = args.n_resblocks   # e.g. 16
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]

    self.sub_mean = common.MeanShift(args.rgb_range)

    # Initial feature extraction and head conv.
    modules_IFE = [conv(args.n_colors, n_feats, kernel_size)]
    modules_head = [conv(n_feats, n_feats, kernel_size)]

    # Body: RIR groups, each paired with a GFF unit that fuses the outputs
    # of all groups seen so far (hence feat_in = i + 2).
    modules_body = nn.ModuleList()
    modules_GFF = nn.ModuleList()
    for i in range(self.n_resgroups):
        modules_body.append(
            RIRGroup(conv, n_feats, kernel_size, n_resblocks=self.n_resblocks))
        modules_GFF.append(GFF_unit(n_feats, feat_in=(i + 2)))
    modules_conv = [conv(n_feats, n_feats, kernel_size)]

    # Tail: upsample, then project back to image channels.
    modules_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)]

    self.add_mean = common.MeanShift(args.rgb_range, sign=1)
    self.IFE = nn.Sequential(*modules_IFE)
    self.head = nn.Sequential(*modules_head)
    self.body = modules_body
    self.GFF = modules_GFF
    self.conv = nn.Sequential(*modules_conv)
    self.tail = nn.Sequential(*modules_tail)
def __init__(self, args, conv=common.default_conv, use_skip=False):
    """CARN_M: cascading blocks with 1x1 fusion convs and an Upsampler tail."""
    super(CARN_M, self).__init__()
    factor = args.scale[0]
    in_ch = args.n_colors
    out_ch = args.n_colors
    width = 64

    self.use_skip = use_skip
    self.upscale_factor = factor

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Feature extraction.
    self.fea_in = nn.Conv2d(in_ch, width, 3, 1, 1)

    # Cascading body: each c-conv fuses the growing concatenation via 1x1.
    self.b1 = Block(width)
    self.c1 = nn.Sequential(nn.Conv2d(width * 2, width, 1, 1, 0), nn.ReLU(True))
    self.b2 = Block(width)
    self.c2 = nn.Sequential(nn.Conv2d(width * 3, width, 1, 1, 0), nn.ReLU(True))
    self.b3 = Block(width)
    self.c3 = nn.Sequential(nn.Conv2d(width * 4, width, 1, 1, 0), nn.ReLU(True))

    # Reconstruction.
    self.upsampler = common.Upsampler(conv, factor, width, act=False)
    self.last_conv = nn.Conv2d(width, out_ch, 3, 1, 1)
def __init__(self, args, conv=common.default_conv):
    """NEBRN: a stack of UpBlocks over pixel-unshuffled inputs, concat tail."""
    super(NEBRN, self).__init__()
    width = 64
    ksize = 3
    scale = args.scale[0]
    lrelu = nn.LeakyReLU(0.1, inplace=True)
    num_blocks = 10
    self.scale = scale

    # DIV2K RGB statistics for input normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    self.head = conv(3, width, ksize)
    # Each UpBlock consumes width * scale^2 channels (pixel-unshuffled input).
    self.blocks = nn.ModuleList([
        UpBlock(conv, width * scale * scale, width,
                kernel_size=ksize, act=lrelu, scale=scale,
                res_head_num=5, res_tail_num=5)
        for _ in range(num_blocks)
    ])
    self.pixelUnShuffle = SpaceToDepth(scale)
    # Tail fuses the concatenated outputs of all blocks back to RGB.
    self.tail = conv(width * num_blocks, 3, 3)
    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
def __init__(self, args, conv=common.default_conv):
    """MDSR: shared body with per-scale pre-process and upsample branches."""
    super(MDSR, self).__init__()
    blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    relu = nn.ReLU(True)
    self.scale_idx = 0
    self.url = url['r{}f{}'.format(blocks, width)]

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    head = [conv(args.n_colors, width, ksize)]

    # One 5x5 two-block pre-process branch per training scale.
    self.pre_process = nn.ModuleList([
        nn.Sequential(common.ResBlock(conv, width, 5, act=relu),
                      common.ResBlock(conv, width, 5, act=relu))
        for _ in args.scale
    ])

    # Shared body: residual blocks plus a fusing conv.
    body = [
        common.ResBlock(conv, width, ksize, act=relu)
        for _ in range(blocks)
    ]
    body.append(conv(width, width, ksize))

    # One upsampler per scale; selected at runtime via self.scale_idx.
    self.upsample = nn.ModuleList([
        common.Upsampler(conv, s, width, act=False) for s in args.scale
    ])

    tail = [conv(width, args.n_colors, ksize)]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args, conv=common.default_conv):
    """RFSN: RFSG groups, layer attention (LA), and residual refinement."""
    super(RFSN, self).__init__()
    groups = args.n_resgroups
    blocks = args.n_resblocks
    n_extra = 4  # number of trailing refinement ResBlocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]

    # DIV2K RGB statistics for input normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    head = [conv(args.n_colors, width, ksize)]

    body = [RFSG(n_feat=width, n_resblocks=blocks) for _ in range(groups)]

    refine = [ResBlock(n_feats=width) for _ in range(n_extra)]

    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize)
    ]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    # 'n_grou' (sic) matches LA's declared keyword elsewhere in the project.
    self.LA = LA(channels=width, n_grou=groups)
    self.con = conv(width, width, ksize)
    self.rs = nn.Sequential(*refine)
    self.tail = nn.Sequential(*tail)
def __init__(self, args, conv=common.default_conv):
    """VDSR: a plain stack of conv+ReLU blocks mapping n_colors -> n_colors."""
    super(VDSR, self).__init__()
    n_layers = args.n_resblocks
    width = args.n_feats
    ksize = 3
    self.url = url['r{}f{}'.format(n_layers, width)]

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    def basic_block(in_ch, out_ch, act):
        # conv (+ optional activation), bias on, no batch-norm.
        return common.BasicBlock(
            conv, in_ch, out_ch, ksize, bias=True, bn=False, act=act
        )

    # Body: entry block, n_layers - 2 middle blocks, activation-free exit.
    body = [basic_block(args.n_colors, width, nn.ReLU(True))]
    for _ in range(n_layers - 2):
        body.append(basic_block(width, width, nn.ReLU(True)))
    body.append(basic_block(width, args.n_colors, None))

    self.body = nn.Sequential(*body)
    self.progress = 0.0
def __init__(self, args):
    """IMDN: six information multi-distillation blocks fused by a 1x1 conv."""
    super(IMDN, self).__init__()
    nf = args.n_feats  # typically 64
    num_modules = 6
    upscale = args.scale[0]
    in_nc = args.n_colors
    out_nc = args.n_colors

    self.fea_conv = conv_layer(in_nc, nf, kernel_size=3)

    # Named IMDB1..IMDB6 (not a ModuleList) so state-dict keys stay stable.
    self.IMDB1 = IMDModule(in_channels=nf)
    self.IMDB2 = IMDModule(in_channels=nf)
    self.IMDB3 = IMDModule(in_channels=nf)
    self.IMDB4 = IMDModule(in_channels=nf)
    self.IMDB5 = IMDModule(in_channels=nf)
    self.IMDB6 = IMDModule(in_channels=nf)

    # Fuse concatenated block outputs, refine, then pixel-shuffle upsample.
    self.c = conv_block(nf * num_modules, nf, kernel_size=1, act_type='lrelu')
    self.LR_conv = conv_layer(nf, nf, kernel_size=3)
    self.upsampler = pixelshuffle_block(nf, out_nc, upscale_factor=upscale)

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)
def __init__(self, args, conv=common.default_conv):
    """SPSR: shared body with per-kind pre-process branches, one upsampler."""
    super(SPSR, self).__init__()
    blocks = args.n_resblocks
    width = args.n_feats
    print(blocks, width)
    ksize = 3
    relu = nn.ReLU(True)
    scale = args.scale[0]
    self.scale_idx = 0

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    head = [conv(args.n_colors, width, ksize)]

    # One 5x5 single-block pre-process branch per input kind.
    self.pre_process = nn.ModuleList([
        nn.Sequential(common.ResBlock(conv, width, 5, act=relu))
        for _ in range(args.kinds)
    ])

    # Shared body: residual blocks plus a fusing conv.
    body = [
        common.ResBlock(conv, width, ksize, act=relu)
        for _ in range(blocks)
    ]
    body.append(conv(width, width, ksize))

    # Single shared upsampler for all kinds.
    self.upsample = common.Upsampler(conv, scale, width, act=False)

    tail = [conv(width, args.n_colors, ksize)]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args, conv=common.default_conv):
    """WRANSR: residual attention groups of the configured `rabtype`."""
    super(WRANSR, self).__init__()
    groups = args.n_resgroups
    blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    relu = nn.ReLU(True)  # not consumed here; kept for parity with the original
    rabtype = args.rabtype

    # DIV2K RGB statistics for input normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    # Head: image -> feature space.
    head = [conv(args.n_colors, width, ksize)]

    # Body: attention groups (note: no trailing fusing conv in this model).
    body = [
        ResidualAttentionGroup(rabtype, conv, width, ksize, blocks)
        for _ in range(groups)
    ]

    # Tail: upsample, then project back to image channels.
    tail = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize)
    ]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)
def __init__(self, args, conv=common.default_conv):
    """SEAN: LFF + edge branches fused by a 1x1 conv, then the main Net."""
    super(SEAN, self).__init__()
    # The following locals are not consumed here (sub-modules read `args`
    # directly); kept to mirror the original constructor.
    n_resblock = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)

    # DIV2K RGB statistics for input normalization.
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    # Low-frequency feature branch.
    m_LFF = [LFF(args, n_feats=n_feats)]
    # Edge-extraction branch.
    m_Edge = [Edge_Net(args, n_feats=n_feats)]
    # 1x1 fusion over 6 input channels (presumably the two concatenated
    # branch outputs — confirm against forward()).
    m_Fushion = [conv(6, n_feats, kernel_size=1)]
    # Main reconstruction network.
    m_Net = [Net(args, n_feats=n_feats)]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.lff = nn.Sequential(*m_LFF)
    self.edge = nn.Sequential(*m_Edge)
    self.fushion = nn.Sequential(*m_Fushion)
    self.net = nn.Sequential(*m_Net)
def __init__(self, args):
    """BELCSCNet: a stack of ELCSC blocks with a configurable upsampling stage.

    Bug fix: the upscale-manner check used assignment (`if self.upscale =
    'espcn':`), which is a SyntaxError in Python; it is now a comparison.
    The 'deconv' branch is expected to follow this block.
    """
    super(BELCSCNet, self).__init__()
    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)
    self.default_conv = common.default_conv
    self.use_add = args.lcsc_use_add
    self.channels = args.channels
    self.rate_list = args.rate_list
    self.len_list = args.len_list
    self.kernel_size = args.kernel_size
    self.scale = args.scale[0]
    self.upscale = args.upscale_manner
    self.len = len(self.rate_list)

    # Entry conv: RGB -> working channels.
    self.init_conv = nn.Conv2d(3, self.channels, self.kernel_size,
                               padding=1, stride=1)

    # One ELCSC block per (rate, length) pair.
    self.ELCSC_blocks = nn.ModuleList()
    for i in range(self.len):
        self.ELCSC_blocks.append(
            ELCSC_Block(self.channels, self.rate_list[i],
                        self.len_list[i], self.kernel_size))

    assert self.upscale == 'espcn' or self.upscale == 'deconv', \
        "upscaling manner should be espcn or deconv"
    if self.upscale == 'espcn':
        self.up_part = nn.Sequential(
            common.Upsampler(self.default_conv, self.scale, self.channels, act=False),
            self.default_conv(self.channels, 3, self.kernel_size)
        )