def __init__(self, args, conv=common.default_conv):
    """Build the RR network: conv head, a ResBlock trunk with a CES
    attention module at its midpoint, and a single-conv tail."""
    super(RR, self).__init__()
    n_resblocks = 16  # args.n_resblocks
    n_feats = 64      # args.n_feats
    kernel_size = 3
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    # Attention module inserted between the two halves of the trunk.
    msa = CES(in_channels=n_feats, num=args.stages)  # blocks=args.blocks)

    head_modules = [conv(args.n_colors, n_feats, kernel_size)]

    body_modules = []
    for _ in range(n_resblocks // 2):
        body_modules.append(common.ResBlock(
            conv, n_feats, kernel_size, nn.PReLU(),
            res_scale=args.res_scale))
    body_modules.append(msa)
    body_modules.extend(
        common.ResBlock(conv, n_feats, kernel_size, nn.PReLU(),
                        res_scale=args.res_scale)
        for _ in range(n_resblocks // 2))
    body_modules.append(conv(n_feats, n_feats, kernel_size))

    tail_modules = [conv(n_feats, args.n_colors, kernel_size)]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*head_modules)
    self.body = nn.Sequential(*body_modules)
    self.tail = nn.Sequential(*tail_modules)
def __init__(self, args, conv=common.default_conv):
    """Multi-scale EDSR: one shared trunk plus per-scale pre-process and
    upsample branches selected at runtime via ``scale_idx``."""
    super(MDSR, self).__init__()
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    act = nn.ReLU(True)
    self.scale_idx = 0
    self.url = url["r{}f{}".format(n_resblocks, n_feats)]

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    head_modules = [conv(args.n_colors, n_feats, kernel_size)]

    # One two-ResBlock (5x5) pre-processing branch per training scale.
    self.pre_process = nn.ModuleList([
        nn.Sequential(
            common.ResBlock(conv, n_feats, 5, act=act),
            common.ResBlock(conv, n_feats, 5, act=act),
        )
        for _ in args.scale
    ])

    body_modules = []
    for _ in range(n_resblocks):
        body_modules.append(common.ResBlock(conv, n_feats, kernel_size, act=act))
    body_modules.append(conv(n_feats, n_feats, kernel_size))

    # One upsampler per scale, indexed the same way as pre_process.
    self.upsample = nn.ModuleList([
        common.Upsampler(conv, s, n_feats, act=False) for s in args.scale
    ])

    tail_modules = [conv(n_feats, args.n_colors, kernel_size)]

    self.head = nn.Sequential(*head_modules)
    self.body = nn.Sequential(*body_modules)
    self.tail = nn.Sequential(*tail_modules)
def __init__(self, args, n_feats, n_layer, kernel_size=3, conv=common.default_conv):
    # Edge extraction network: conv in -> ResBlock -> FEB core -> ResBlock -> conv out.
    super(Edge_Net, self).__init__()
    # Lift the color input into the n_feats feature space.
    self.trans = conv(args.n_colors, n_feats, kernel_size)
    self.head = common.ResBlock(conv, n_feats, kernel_size)
    # Feature-enhancement body built with n_layer stages.
    self.rdb = FEB(args, n_layer)
    self.tail = common.ResBlock(conv, n_feats, kernel_size)
    # Project the features back to the color space.
    self.rebuilt = conv(n_feats, args.n_colors, kernel_size)
def __init__(self, args, conv=common.default_conv, gated_conv=gated_conv):
    """EDSR variant whose residual body uses context-gated convolutions,
    either for every block or only for the final one."""
    super(CGC_EDSR, self).__init__()
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)

    # Pretrained-weight URL is optional for this configuration.
    url_name = 'cgc_edsr_r{}f{}x{}'.format(n_resblocks, n_feats, scale)
    self.url = url[url_name] if url_name in url else None

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Head: single conv from color space into the feature space.
    modules_head = [conv(args.n_colors, n_feats, kernel_size)]

    # Body: all-gated blocks, or plain blocks with one gated block last.
    modules_body = []
    if args.one_cgc_block:
        for _ in range(n_resblocks - 1):
            modules_body.append(common.ResBlock(
                conv, n_feats, kernel_size, act=act,
                res_scale=args.res_scale))
        modules_body.append(common.ResBlock(
            gated_conv, n_feats, kernel_size, act=act,
            res_scale=args.res_scale))
        modules_body.append(conv(n_feats, n_feats, kernel_size))
    else:
        modules_body.extend(
            common.ResBlock(gated_conv, n_feats, kernel_size, act=act,
                            res_scale=args.res_scale)
            for _ in range(n_resblocks))
        modules_body.append(gated_conv(n_feats, n_feats, kernel_size))

    # Tail: upsample, then project back to the color space.
    modules_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*modules_head)
    self.body = nn.Sequential(*modules_body)
    self.tail = nn.Sequential(*modules_tail)
def __init__(self, args, conv=common.default_conv):
    """EDSR with an extra denoising (DNN) branch alongside the SR branch.

    Builds a DNN branch (7x7 head conv + ReLU, BN-capable ResBlocks, conv
    tail) and two SR branches (head/body/tail and head_2/body_2/tail_2)
    that wrap the same underlying modules.
    """
    super(EDSR, self).__init__()
    self.fullTrain = args.fullTrain
    self.fullInputScale = args.fullInputScale
    self.fullTargetScale = args.fullTargetScale
    n_resblock = args.n_resblocks
    batch_norm = args.BN
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)
    rgb_mean = (0.4488, 0.4371, 0.4040)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, -1)

    # define head module
    modules_head = [conv(args.n_colors, n_feats, kernel_size)]
    modules_head_DNN = [conv(args.n_colors, n_feats, 7)]
    modules_head_DNN.append(act)

    # define body module
    modules_body = [
        common.ResBlock(
            conv, n_feats, kernel_size, act=act,
            res_scale=args.res_scale)
        for _ in range(n_resblock)]
    modules_body.append(conv(n_feats, n_feats, kernel_size))

    modules_body_DNN = [
        common.ResBlock(
            conv, n_feats, kernel_size, act=act, bias=True, bn=batch_norm,
            res_scale=args.res_scale)
        for _ in range(n_resblock)]
    # BUGFIX: the trailing fusion conv was appended to modules_body a
    # second time instead of to modules_body_DNN, leaving the DNN body
    # without its final conv and the SR body with two of them.
    modules_body_DNN.append(conv(n_feats, n_feats, kernel_size))

    # define tail module
    modules_tail = [
        common.Upsampler(conv, 2, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]
    modules_tail_DNN = [conv(n_feats, args.n_colors, kernel_size)]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, 1)

    self.head_DNN = nn.Sequential(*modules_head_DNN)
    self.body_DNN = nn.Sequential(*modules_body_DNN)
    #self.tail_4 = nn.Sequential(*modules_tail)
    self.tail_DNN = nn.Sequential(*modules_tail_DNN)
    # NOTE(review): head/body/tail and head_2/body_2/tail_2 wrap the SAME
    # module instances, so the two SR branches share weights — confirm
    # this sharing is intentional.
    self.head_2 = nn.Sequential(*modules_head)
    self.body_2 = nn.Sequential(*modules_body)
    self.tail_2 = nn.Sequential(*modules_tail)
    self.head = nn.Sequential(*modules_head)
    self.body = nn.Sequential(*modules_body)
    self.tail = nn.Sequential(*modules_tail)
def __init__(self, name="CSPDarkNet53", **kwargs):
    """CSPDarkNet53 backbone: five CSP stages with mish activations.

    Each stage is a downsampling conv, two 1x1 split convs, a stack of
    residual blocks, and a 1x1 transition conv.
    """
    # BUGFIX: `self` must not be passed explicitly to the bound
    # super().__init__ — it was being forwarded as an extra positional
    # argument to the parent initializer.
    super(CSPDarkNet53, self).__init__(name=name, **kwargs)
    self.conv1_1 = cm.ConvBlock((3, 3, 3, 32), activate_type="mish")
    self.conv1_2 = cm.ConvBlock((3, 3, 32, 64), downsample=True, activate_type="mish")
    # Stage 1: 1 residual block.
    self.conv2_1 = cm.ConvBlock((1, 1, 64, 64), activate_type="mish")
    self.conv2_2 = cm.ConvBlock((1, 1, 64, 64), activate_type="mish")
    self.res_blocks1 = []
    for _ in range(1):
        self.res_blocks1.append(cm.ResBlock(64, 32, 64, activate_type="mish"))
    self.conv2_3 = cm.ConvBlock((1, 1, 64, 64), activate_type="mish")
    self.conv3_1 = cm.ConvBlock((1, 1, 128, 64), activate_type="mish")
    self.conv3_2 = cm.ConvBlock((3, 3, 64, 128), downsample=True, activate_type="mish")
    # Stage 2: 2 residual blocks.
    self.conv4_1 = cm.ConvBlock((1, 1, 128, 64), activate_type="mish")
    self.conv4_2 = cm.ConvBlock((1, 1, 128, 64), activate_type="mish")
    self.res_blocks2 = []
    for _ in range(2):
        self.res_blocks2.append(cm.ResBlock(64, 64, 64, activate_type="mish"))
    self.conv4_3 = cm.ConvBlock((1, 1, 64, 64), activate_type="mish")
    self.conv5_1 = cm.ConvBlock((1, 1, 128, 128), activate_type="mish")
    self.conv5_2 = cm.ConvBlock((3, 3, 128, 256), downsample=True, activate_type="mish")
    # Stage 3: 8 residual blocks.
    self.conv6_1 = cm.ConvBlock((1, 1, 256, 128), activate_type="mish")
    self.conv6_2 = cm.ConvBlock((1, 1, 256, 128), activate_type="mish")
    self.res_blocks3 = []
    for _ in range(8):
        self.res_blocks3.append(cm.ResBlock(128, 128, 128, activate_type="mish"))
    self.conv6_3 = cm.ConvBlock((1, 1, 128, 128), activate_type="mish")
    self.conv7_1 = cm.ConvBlock((1, 1, 256, 256), activate_type="mish")
    self.conv7_2 = cm.ConvBlock((3, 3, 256, 512), downsample=True, activate_type="mish")
    # Stage 4: 8 residual blocks.
    self.conv8_1 = cm.ConvBlock((1, 1, 512, 256), activate_type="mish")
    self.conv8_2 = cm.ConvBlock((1, 1, 512, 256), activate_type="mish")
    self.res_blocks4 = []
    for _ in range(8):
        self.res_blocks4.append(cm.ResBlock(256, 256, 256, activate_type="mish"))
    self.conv8_3 = cm.ConvBlock((1, 1, 256, 256), activate_type="mish")
    self.conv9_1 = cm.ConvBlock((1, 1, 512, 512), activate_type="mish")
    self.conv9_2 = cm.ConvBlock((3, 3, 512, 1024), downsample=True, activate_type="mish")
    # Stage 5: 4 residual blocks.
    self.conv10_1 = cm.ConvBlock((1, 1, 1024, 512), activate_type="mish")
    self.conv10_2 = cm.ConvBlock((1, 1, 1024, 512), activate_type="mish")
    self.res_blocks5 = []
    for _ in range(4):
        self.res_blocks5.append(cm.ResBlock(512, 512, 512, activate_type="mish"))
    self.conv10_3 = cm.ConvBlock((1, 1, 512, 512), activate_type="mish")
    self.conv_last = cm.ConvBlock((1, 1, 1024, 1024), activate_type="mish")
def __init__(self, args, conv=common.default_conv):
    """LDSR: EDSR-style SR network whose body interleaves Inception-ResNet-v2
    Block35Plus / Mixed_6aPlus stages with stacks of plain ResBlocks."""
    super(LDSR, self).__init__()
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)
    self.url = url['r{}f{}x{}'.format(n_resblocks, n_feats, scale)]
    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # Channel sizes dictated by the Inception blocks (original note:
    # "Block35 inputs=320 outputs=384") — BUGFIX: these names were used
    # but never defined.
    inputs = 320
    outputs = 384

    # define head module
    m_head = [conv(args.n_colors, inputs, kernel_size)]

    # define body module: `step` repetitions of
    # Block35Plus -> Mixed_6aPlus -> conv -> (n_resblocks // step) ResBlocks
    step = 4
    m_body = [InceptionResNetV2.Block35Plus(inputs, kernel_size,
                                            act=act, res_scale=args.res_scale)]
    m_body.append(InceptionResNetV2.Mixed_6aPlus(inputs, outputs))
    m_body.append(conv(outputs, n_feats, kernel_size))
    # BUGFIX: use integer division — range() rejects the float produced by
    # `/` — and extend() instead of append() so the ResBlocks are added as
    # modules rather than a nested list (nn.Sequential cannot hold a list).
    m_body.extend(
        common.ResBlock(conv, n_feats, kernel_size, act=act,
                        res_scale=args.res_scale)
        for _ in range(n_resblocks // step))
    # NOTE(review): later Block35Plus stages receive n_feats channels while
    # being built for `inputs` channels — confirm the channel flow.
    for _ in range(step - 1):
        m_body.append(InceptionResNetV2.Block35Plus(
            inputs, kernel_size, act=act, res_scale=args.res_scale))
        m_body.append(InceptionResNetV2.Mixed_6aPlus(inputs, outputs))
        m_body.append(conv(outputs, n_feats, kernel_size))
        m_body.extend(
            common.ResBlock(conv, n_feats, kernel_size, act=act,
                            res_scale=args.res_scale)
            for _ in range(n_resblocks // step))
    m_body.append(conv(n_feats, n_feats, kernel_size))

    # define tail module
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
def __init__(self, args):
    """Patch discriminator: strided conv stack, two ResBlocks, MLP head."""
    super(Discriminator, self).__init__()
    in_channels = args.n_colors
    out_channels = 64
    depth = 7
    conv = common.default_conv
    act = nn.LeakyReLU(True)

    def _block(_in_channels, _out_channels, stride=1):
        # conv -> BN -> LeakyReLU unit used throughout the feature stack.
        return nn.Sequential(
            nn.Conv2d(_in_channels, _out_channels, 3,
                      padding=1, stride=stride, bias=False),
            nn.BatchNorm2d(_out_channels),
            nn.LeakyReLU(negative_slope=0.2, inplace=True))

    features = [_block(in_channels, out_channels)]
    for step in range(depth):
        in_channels = out_channels
        # Odd steps double the channels at stride 1; even steps halve the
        # spatial resolution instead.
        if step % 2 == 1:
            out_channels *= 2
            stride = 1
        else:
            stride = 2
        features.append(_block(in_channels, out_channels, stride=stride))
    features.append(common.ResBlock(
        conv, out_channels, kernel_size=3, bn=True, act=act, res_scale=1))
    features.append(common.ResBlock(
        conv, out_channels, kernel_size=3, bn=True, act=act, res_scale=1))

    # Spatial size remaining after all the stride-2 convolutions.
    patch_size = args.patch_size // (2 ** ((depth + 1) // 2))
    classifier = [
        nn.Linear(out_channels * patch_size ** 2, 1024),
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Linear(1024, 1)
    ]

    self.features = nn.Sequential(*features)
    self.classifier = nn.Sequential(*classifier)
def __init__(self, conv=common.default_conv, **kwargs,): super(MSMNetModel, self).__init__() # -------------- Define multi-scale model architecture here ------------ # scale = 3 input_channles = kwargs['input_channels'] num_resblocks = kwargs['num_ms_resblocks'] intermediate_channels = kwargs['intermediate_channels'] kernel_size = kwargs['default_kernel_size'] activation = nn.ReLU(True) rgb_range = kwargs['rgb_range'] self.sub_mean = common.MeanShift(rgb_range) self.add_mean = common.MeanShift(rgb_range, sign=1) # head to read scaled image _head = common.BasicBlock(conv, input_channles, intermediate_channels, kernel_size) # pre-process 2*Resblock each self.pre_process = nn.ModuleList([ nn.Sequential( common.PreResBlock(conv, 2*intermediate_channels, intermediate_channels, 5, bn=True, act=activation), common.ResBlock(conv, intermediate_channels, 5, bn=True, act=activation) ), nn.Sequential( common.PreResBlock(conv, 2*intermediate_channels, intermediate_channels, 5, bn=True, act=activation), common.ResBlock(conv, intermediate_channels, 5, bn=True, act=activation) ), nn.Sequential( common.ResBlock(conv, intermediate_channels, 5, bn=True, act=activation), common.ResBlock(conv, intermediate_channels, 5, bn=True, act=activation) ) ]) # body 16*Resblocks each _body = [ common.ResBlock( conv, intermediate_channels, kernel_size, bn=True, act=activation ) for _ in range(num_resblocks) ] _body.append(conv(intermediate_channels, intermediate_channels, kernel_size)) # upsample to enlarge the scale self.upsample = common.Upsampler(conv, intermediate_channels, bn=False, act=False) _output = nn.ModuleList([ # conv(3, 1, 3), common.BasicBlock(conv, intermediate_channels, intermediate_channels, kernel_size, act=nn.Sigmoid()) ]) self.head = nn.Sequential(*_head) self.body = nn.Sequential(*_body) self.output = nn.Sequential(*_output)
def __init__(self, args, conv=common.default_conv):
    """EDSR backbone with a PyramidAttention module inserted mid-body."""
    super(PAEDSR, self).__init__()
    num_blocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    # Pyramid attention; kept as an attribute and placed inside the body.
    self.msa = attention.PyramidAttention(
        channel=256, reduction=8, res_scale=args.res_scale)

    head_layers = [conv(args.n_colors, n_feats, kernel_size)]

    body_layers = []
    for _ in range(num_blocks // 2):
        body_layers.append(common.ResBlock(
            conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
    body_layers.append(self.msa)
    for _ in range(num_blocks // 2):
        body_layers.append(common.ResBlock(
            conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
    body_layers.append(conv(n_feats, n_feats, kernel_size))

    tail_layers = [
        common.Upsampler(conv, scale, n_feats, act=False),
        nn.Conv2d(n_feats, args.n_colors, kernel_size,
                  padding=(kernel_size // 2))
    ]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, n_resblock=24, n_feats=256, scale=2, bias=True,
             norm_type=False, act_type='prelu'):
    """Pixel-shuffle SR network; Xavier-initializes every Conv2d weight
    and zeroes every bias."""
    super(NET, self).__init__()
    self.scale = scale
    # Stride-2 conv then PixelShuffle(2): the shuffle undoes the spatial
    # downsample while mixing channels.
    layers = [common.default_conv(1, n_feats, 3, stride=2)]
    layers += [nn.PixelShuffle(2),
               common.ConvBlock(n_feats // 4, n_feats,
                                bias=True, act_type=act_type)]
    layers += [common.ResBlock(n_feats, 3, norm_type, act_type,
                               res_scale=1, bias=bias)
               for _ in range(n_resblock)]
    # One PixelShuffle stage per factor of two in the target scale.
    for _ in range(int(math.log(scale, 2))):
        layers += [nn.PixelShuffle(2),
                   common.ConvBlock(n_feats // 4, n_feats,
                                    bias=True, act_type=act_type)]
    layers += [common.default_conv(n_feats, 3, 3)]
    self.model = nn.Sequential(*layers)

    # Xavier init for all convolutions; biases start at zero.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.xavier_normal_(module.weight)
            module.weight.requires_grad = True
            if module.bias is not None:
                module.bias.data.zero_()
                module.bias.requires_grad = True
def __init__(self, args):
    """Classic EDSR: head conv, ResBlock trunk, upsampler, tail conv,
    with mean shift applied around the whole network."""
    super(EDSR, self).__init__()
    nResBlock = args.nResBlock
    nFeat = args.nFeat
    scale = args.scale[0]
    self.args = args

    # Subtract the dataset mean on the way in.
    self.subMean = common.meanShift(
        args.rgbRange, (0.4488, 0.4371, 0.4040), -1 * args.subMean)

    # Feature-extraction head.
    self.headConv = common.conv3x3(args.nChannel, nFeat)

    # Residual trunk terminated by a fusion conv.
    trunk = [common.ResBlock(nFeat) for _ in range(nResBlock)]
    trunk.append(common.conv3x3(nFeat, nFeat))
    self.body = nn.Sequential(*trunk)

    # Upsample to the target scale, then reconstruct the image.
    self.upsample = common.upsampler(scale, nFeat)
    self.tailConv = common.conv3x3(nFeat, args.nChannel)

    # Add the dataset mean back on the way out.
    self.addMean = common.meanShift(
        args.rgbRange, (0.4488, 0.4371, 0.4040), 1 * args.subMean)
def __init__(self, args, conv=common.default_conv):
    """SRResNet generator: 9x9 head conv + PReLU, a BN ResBlock trunk with
    a fusion conv + BN, and an upsampling tail."""
    super(SRResNet, self).__init__()
    num_blocks = args.n_resblocks
    n_feats = args.n_feats
    scale = args.scale[0]
    kernel_size = 3
    # Single PReLU instance shared by head, trunk, and upsampler.
    act = nn.PReLU()

    head_layers = [conv(args.n_colors, n_feats, kernel_size=9), act]

    trunk = [
        common.ResBlock(conv, n_feats, kernel_size,
                        bias=True, bn=True, act=act)
        for _ in range(num_blocks)
    ]
    trunk += [conv(n_feats, n_feats, kernel_size), nn.BatchNorm2d(n_feats)]

    tail_layers = [
        common.Upsampler(conv, scale, n_feats, act=act),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*trunk)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, scale_list, model_path = 'weight/EDSR_weight.pt' ): super(EDSR, self).__init__() # args scale = scale_list[0] input_channel = 5 output_channel = 3 num_block = 16 inp = 64 rgb_range = 255 res_scale = 0.1 act = nn.ReLU(True) #act = nn.LeakyReLU(negative_slope=0.05, inplace=True) # head self.head = nn.Sequential( common.conv(3, inp, input_channel) ) # body self.body = nn.Sequential( *[ common.ResBlock(inp, bias = True, act = act, res_scale = res_scale) for _ in range( num_block) ] ) self.body.add_module( str(num_block), common.conv(inp, inp, 3) ) # tail if scale > 1: self.tail = nn.Sequential( *[ common.Upsampler(scale, inp, act = False, choice = 0), common.conv(inp, 3, output_channel) ] ) else: self.tail = nn.Sequential( *[ common.conv(inp, 3, output_channel) ] ) self.sub_mean = common.MeanShift(rgb_range, sign = -1) self.add_mean = common.MeanShift(rgb_range, sign = 1) self.model_path = model_path self.load()
def __init__(self, args, conv=common.default_conv):
    """EDSR variant without an upsampling tail (same-resolution output)."""
    super(EDSR, self).__init__()
    print("EDSR MODEL is being used")
    num_blocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

    head_layers = [conv(args.n_colors, n_feats, kernel_size)]

    body_layers = []
    for _ in range(num_blocks):
        body_layers.append(common.ResBlock(
            conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
    body_layers.append(conv(n_feats, n_feats, kernel_size))

    # Tail is a single conv — this variant does not upsample.
    tail_layers = [conv(n_feats, args.n_colors, kernel_size)]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, conv=common.default_conv):
    """EDSR with hard-coded hyperparameters: 32 blocks, 64 features, x2."""
    super(EDSR, self).__init__()
    num_blocks = 32
    width = 64
    ksize = 3
    scale = 2
    activation = 'relu'

    head_layers = [conv(3, width, ksize)]

    body_layers = [
        common.ResBlock(width, ksize, act_type=activation,
                        bias=True, res_scale=1)
        for _ in range(num_blocks)
    ]
    body_layers.append(conv(width, width, ksize))

    tail_layers = [
        common.Upsampler(scale, width, norm_type=False, act_type=False),
        conv(width, 3, ksize)
    ]

    # Normalize with the DIV2K channel means on the way in and out.
    self.sub_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=True)
    self.add_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=False)
    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, args, conv=common.default_conv):
    """Standard EDSR: mean shift, conv head, residual trunk, upsampling tail."""
    super(EDSR, self).__init__()
    num_blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    relu = nn.ReLU(True)

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    head_layers = [conv(args.n_colors, width, ksize)]

    body_layers = []
    for _ in range(num_blocks):
        body_layers.append(common.ResBlock(
            conv, width, ksize, act=relu, res_scale=args.res_scale))
    body_layers.append(conv(width, width, ksize))

    tail_layers = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize)
    ]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, args, conv=common.default_conv):
    # EDSR: conv head, n_resblocks ResBlocks plus a fusion conv, then an
    # upsampling tail; input is normalized and the output de-normalized.
    super(EDSR, self).__init__()
    n_resblocks = args.n_resblocks  # 16
    n_feats = args.n_feats  # 64
    kernel_size = 3
    scale = args.scale[0]  # 4
    act = nn.ReLU(True)
    self.url = url['r{}f{}x{}'.format(n_resblocks, n_feats, scale)]
    self.sub_mean = common.MeanShift(args.rgb_range)  # normalize the input
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)  # restore the output

    # define head module
    m_head = [conv(args.n_colors, n_feats, kernel_size)]  # channels: 3 -> 64

    # define body module
    m_body = [  # 16 ResBlocks
        common.ResBlock(  # args: 64, 3, relu, res_scale
            conv, n_feats, kernel_size, act=act, res_scale=args.res_scale
        ) for _ in range(n_resblocks)
    ]
    m_body.append(conv(n_feats, n_feats, kernel_size))  # channels: 64 -> 64

    # define tail module
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),  # all upsampling happens here
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
def __init__(self, args, conv=common.default_conv):
    # Fine-tuning wrapper around a pretrained EDSR: reuses its mean-shift
    # layers, head, and all but the last layer of its body, then appends
    # freshly-initialized fine-tune blocks and a new tail.
    super(HRST_CNN, self).__init__()
    n_resblocks = args.n_resblocks  # remember the caller's block count
    # Temporarily shrink args.n_resblocks so the EDSR built below contains
    # (n_resblocks - n_resblocks_ft) blocks; restored at the end.
    args.n_resblocks = args.n_resblocks - args.n_resblocks_ft
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]  # NOTE(review): unused in this method — confirm
    act = nn.ReLU(True)

    # Newly-initialized fine-tuning blocks plus a fusion conv.
    body_ft = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale) for _ in range(args.n_resblocks_ft)
    ]
    body_ft.append(conv(n_feats, n_feats, kernel_size))

    tail_ft = [
        conv(n_feats, n_feats, kernel_size),
        conv(n_feats, n_feats, kernel_size),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    # Build the (shrunken) pretrained EDSR and borrow its modules.
    premodel = EDSR(args)
    self.sub_mean = premodel.sub_mean
    self.head = premodel.head
    body = premodel.body
    body_child = list(body.children())
    body_child.pop()  # drop EDSR's trailing fusion conv
    self.body = nn.Sequential(*body_child)
    self.body_ft = nn.Sequential(*body_ft)
    self.tail_ft = nn.Sequential(*tail_ft)
    self.add_mean = premodel.add_mean
    # Restore the caller's args.n_resblocks (args is shared, so this
    # mutation would otherwise leak out).
    args.n_resblocks = n_resblocks
def __init__(self, args, conv3x3=common.default_conv, conv1x1=common.default_conv):
    # SRResNet variant used for clustering experiments; loads pretrained
    # weights only when built with the default (un-clustered) conv.
    super(SRRESNET_CLUSTER, self).__init__()
    n_resblock = args.n_resblocks
    n_feats = args.n_feats
    scale = args.scale[0]
    act_res_flag = args.act_res
    kernel_size = 3
    # NOTE(review): act_res is the string 'prelu' (or None), not a module —
    # confirm common.ResBlock / Upsampler accept a string activation here.
    if act_res_flag == 'No':
        act_res = None
    else:
        act_res = 'prelu'

    head = [common.default_conv(args.n_colors, n_feats, kernel_size=9), nn.PReLU()]

    body = [common.ResBlock(conv3x3, n_feats, kernel_size, bn=True, act=act_res) for _ in range(n_resblock)]
    body.extend([conv3x3(n_feats, n_feats, kernel_size), nn.BatchNorm2d(n_feats)])

    tail = [
        common.Upsampler(conv3x3, scale, n_feats, act=act_res),
        conv3x3(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*head)
    self.body = nn.Sequential(*body)
    self.tail = nn.Sequential(*tail)

    # Only the default-conv build matches the checkpoint layout.
    if conv3x3 == common.default_conv:
        # print('Loading from checkpoint {}'.format(args.pretrain_cluster))
        # for (k1, v1), (k2, v2) in zip(self.state_dict().items(), torch.load(args.pretrain_cluster).items()):
        #     print('{:<50}\t{:<50}\t{} {}'.format(k1, k2, list(v1.shape), list(v2.shape)))
        self.load_state_dict(torch.load(args.pretrain_cluster))
def __init__(self, opt):
    """Residual net: 4-channel input head, an RRDB or ResBlock trunk wrapped
    in a global shortcut, and a 3-channel output tail; Xavier-initialized."""
    super(NET, self).__init__()
    num_blocks = opt.n_resblocks
    width = opt.channels
    use_bias = opt.bias
    norm_type = opt.norm_type
    act_type = opt.act_type
    block_kind = opt.block_type.lower()

    head = [common.ConvBlock(4, width, 5, act_type=act_type, bias=True)]

    # Trunk: block flavor chosen by configuration.
    if block_kind == 'rrdb':
        trunk = [common.RRDB(width, width, 3, 1, use_bias,
                             norm_type, act_type, 0.2)
                 for _ in range(num_blocks)]
    elif block_kind == 'res':
        trunk = [common.ResBlock(width, 3, norm_type, act_type,
                                 res_scale=1, bias=use_bias)
                 for _ in range(num_blocks)]
    else:
        raise RuntimeError('block_type is not supported')
    trunk += [common.ConvBlock(width, width, 3, bias=True)]

    tail = [common.ConvBlock(width, 3, 3, bias=True)]

    self.model = nn.Sequential(
        *head, common.ShortcutBlock(nn.Sequential(*trunk)), *tail)

    # Xavier initialization for every conv; zero biases.
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            nn.init.xavier_normal_(module.weight)
            module.weight.requires_grad = True
            if module.bias is not None:
                module.bias.data.zero_()
                module.bias.requires_grad = True
def __init__(self, args, conv=common.default_conv):
    """EDSR without mean-shift layers (normalization handled elsewhere)."""
    super(EDSR, self).__init__()
    num_blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    relu = nn.ReLU(True)

    head_layers = [conv(args.n_colors, width, ksize)]

    body_layers = []
    for _ in range(num_blocks):
        body_layers.append(common.ResBlock(
            conv, width, ksize, act=relu, res_scale=args.res_scale))
    body_layers.append(conv(width, width, ksize))

    tail_layers = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize)
    ]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, in_channel, kernel_size=3, scale=2, conv=common.default_conv):
    """Cross-scale projection unit: up/down attention branches, a deconv
    upsampler, and a ResBlock encoder."""
    super(MultisourceProjection, self).__init__()
    # Deconvolution geometry per scale; x4 upsamples in two x2 steps.
    geometry = {2: (6, 2, 2, 2), 3: (9, 3, 3, 3), 4: (6, 2, 2, 2)}
    deconv_ksize, stride, padding, up_factor = geometry[scale]

    self.up_attention = CrossScaleAttention(scale=up_factor)
    self.down_attention = NonLocalAttention()
    self.upsample = nn.Sequential(
        nn.ConvTranspose2d(in_channel, in_channel, deconv_ksize,
                           stride=stride, padding=padding),
        nn.PReLU())
    self.encoder = common.ResBlock(
        conv, in_channel, kernel_size, act=nn.PReLU(), res_scale=1)
def __init__(self, args, conv=common.default_conv):
    """Kernel-predicting reconstruction net: LeakyReLU head, ResBlock body,
    and a conv tail whose output width depends on the model variant."""
    super(KPRCN, self).__init__()
    n_resgroups = args.n_resgroups  # read but unused below
    n_resblocks = args.n_resblocks
    n_feats = args.nc_feats
    kernel_size = args.kernel_size
    reduction = args.reduction  # read but unused below
    self.nalu = args.nalu
    act = nn.LeakyReLU(True)

    head_layers = [
        conv(args.nc_input, args.nc_feats, args.kernel_size),
        nn.LeakyReLU()
    ]

    body_layers = []
    for _ in range(n_resblocks - 2):
        body_layers.append(common.ResBlock(
            conv, n_feats, kernel_size, act=act, res_scale=args.res_scale))
    body_layers.append(conv(n_feats, n_feats, kernel_size))

    # DPCN predicts RGB directly; otherwise predict per-pixel kernels.
    self.nc_output = 3 if args.model == 'DPCN' else args.recon_kernel_size**2

    tail_layers = [conv(args.nc_feats, self.nc_output, args.kernel_size)]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, args, in_dim=6, conv=common.default_conv):
    """Plain conv/ResBlock network mapping an in_dim-channel input to
    args.n_colors channels at the same spatial resolution."""
    super(DNet, self).__init__()
    num_blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    relu = nn.ReLU(True)

    head_layers = [conv(in_dim, width, ksize)]

    body_layers = []
    for _ in range(num_blocks):
        body_layers.append(common.ResBlock(
            conv, width, ksize, act=relu, res_scale=args.res_scale))
    body_layers.append(conv(width, width, ksize))

    tail_layers = [conv(width, args.n_colors, ksize)]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
def __init__(self, args, conv=common.default_conv):
    """PANET: EDSR-style trunk with a PyramidAttention module at its
    midpoint and a conv-only tail (no upsampling).

    Fixes a NameError in the original: the block count was bound to the
    local ``n_resblock`` while the body loops referenced ``n_resblocks``.
    """
    super(PANET, self).__init__()
    n_resblocks = args.n_resblocks  # BUGFIX: was `n_resblock`; loops used `n_resblocks`
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
    msa = attention.PyramidAttention()

    # define head module
    m_head = [conv(args.n_colors, n_feats, kernel_size)]

    # define body module: half the ResBlocks, attention, the other half.
    m_body = [
        common.ResBlock(conv, n_feats, kernel_size, nn.PReLU(),
                        res_scale=args.res_scale)
        for _ in range(n_resblocks // 2)
    ]
    m_body.append(msa)
    for i in range(n_resblocks // 2):
        m_body.append(common.ResBlock(conv, n_feats, kernel_size, nn.PReLU(),
                                      res_scale=args.res_scale))
    m_body.append(conv(n_feats, n_feats, kernel_size))

    # define tail module (upsampling variant kept for reference)
    #m_tail = [
    #    common.Upsampler(conv, scale, n_feats, act=False),
    #    conv(n_feats, args.n_colors, kernel_size)
    #]
    m_tail = [conv(n_feats, args.n_colors, kernel_size)]

    self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
def __init__(self, scale_list, model_path='weight/MDSR_weight.pt'): super(MDSR, self).__init__() # args self.scale_list = scale_list input_channel = 3 output_channel = 3 num_block = 32 inp = 64 rgb_range = 255 res_scale = 0.1 act = nn.ReLU(True) #act = nn.LeakyReLU(negative_slope=0.05, inplace=True) # head self.head = nn.Sequential(common.conv(3, inp, input_channel)) # pre_process self.pre_process = nn.ModuleDict([ str(scale), nn.Sequential( common.ResBlock(inp, bias=True, act=act, res_scale=res_scale), common.ResBlock(inp, bias=True, act=act, res_scale=res_scale)) ] for scale in self.scale_list) # body self.body = nn.Sequential(*[ common.ResBlock(inp, bias=True, act=act, res_scale=res_scale) for _ in range(num_block) ]) self.body.add_module(str(num_block), common.conv(inp, inp, 3)) #upsample self.upsample = nn.ModuleDict( [str(scale), common.Upsampler(scale, inp, act=False, choice=0)] for scale in self.scale_list) # tail self.tail = nn.Sequential(common.conv(inp, 3, output_channel)) self.sub_mean = common.MeanShift(rgb_range, sign=-1) self.add_mean = common.MeanShift(rgb_range, sign=1) self.model_path = model_path self.load()
def __init__(self, in_channels, num=4, blocks=2):
    """CES attention stage: PReLU ResBlock stack(s) plus merge blocks.

    When ``blocks == 2`` one stack of ``num`` ResBlocks is built with two
    merge blocks; otherwise the stack is split into two halves and a third
    merge block is added.
    """
    super(CES, self).__init__()
    print('num_RB:', num)
    print('num_blocks', blocks)
    self.blocks = blocks

    def _make_stack(count):
        # `count` PReLU ResBlocks wrapped in a Sequential.
        return nn.Sequential(*[
            common.ResBlock(common.default_conv, n_feats=in_channels,
                            kernel_size=3, act=nn.PReLU(), res_scale=1)
            for _ in range(count)
        ])

    def _merge():
        return merge_block(in_channels=in_channels, out_channels=in_channels)

    if blocks == 2:
        self.RBS = _make_stack(num)
        self.c1 = _merge()
        self.c2 = _merge()
    else:
        self.RBS1 = _make_stack(num // 2)
        self.RBS2 = _make_stack(num // 2)
        self.c1 = _merge()
        self.c2 = _merge()
        self.c3 = _merge()
def __init__(self, args, conv=common.default_conv):
    """EDSR plus a small transposed-conv "switch" branch; supports RGB or
    single-channel input via the matching mean-shift pair."""
    super(EDSR_switch, self).__init__()
    num_blocks = args.n_resblocks
    width = args.n_feats
    ksize = 3
    scale = args.scale[0]
    relu = nn.ReLU(True)

    # Mean shift: RGB uses the default statistics, grayscale its own mean.
    if args.n_colors == 3:
        self.sub_mean = common.MeanShift(args.rgb_range)
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)
    elif args.n_colors == 1:
        self.sub_mean = common.MeanShift(
            args.rgb_range, args.n_colors,
            rgb_mean=(0.4300, ), rgb_std=(1.0, ))
        self.add_mean = common.MeanShift(
            args.rgb_range, args.n_colors,
            rgb_mean=(0.4300, ), rgb_std=(1.0, ), sign=1)

    head_layers = [conv(args.n_colors, width, ksize)]

    body_layers = [
        common.ResBlock(conv, width, ksize, act=relu,
                        res_scale=args.res_scale)
        for _ in range(num_blocks)
    ]
    body_layers.append(conv(width, width, ksize))

    tail_layers = [
        common.Upsampler(conv, scale, width, act=False),
        conv(width, args.n_colors, ksize)
    ]

    # Switch branch: three 3x3 convs with PReLU, then a x3 transposed conv.
    branch_layers = [
        nn.Conv2d(1, 12, 3, padding=1),
        nn.PReLU(num_parameters=12),
        nn.Conv2d(12, 12, 3, padding=1),
        nn.PReLU(num_parameters=12),
        nn.Conv2d(12, 12, 3, padding=1),
        nn.PReLU(num_parameters=12),
        nn.ConvTranspose2d(12, 1, 9, stride=3, padding=3)
    ]

    self.head = nn.Sequential(*head_layers)
    self.body = nn.Sequential(*body_layers)
    self.tail = nn.Sequential(*tail_layers)
    self.branch = nn.Sequential(*branch_layers)
def __init__(self):
    """Level-2 head of MWRN: wavelet-domain conv blocks wrapped around a
    four-ResBlock stack, plus a DWT operator."""
    super(MWRN_lv2_head, self).__init__()
    self.color_channel = 3
    # 16 wavelet sub-bands per color channel in, 256 features out.
    self.conv2_head_1 = common.BBlock(
        common.default_conv0, 16 * self.color_channel, 256, 3, bn=True)
    self.conv2_head_2 = common.BBlock(common.default_conv0, 512, 256, 3, bn=True)
    self.res2_head = nn.Sequential(*[
        common.ResBlock(common.default_conv0, 256, 3) for _ in range(4)
    ])
    self.conv2_head_3 = common.BBlock(common.default_conv0, 1024, 512, 3, bn=True)
    self.DWT = common.DWT()