def __init__(self, args, conv=common.default_conv):
    super(EDSR, self).__init__()
    self.args = args
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale[0]
    act = nn.ReLU(True)

    self.sub_mean = common.MeanShift(args.rgb_range)
    self.add_mean = common.MeanShift(args.rgb_range, sign=1)

    # define head module
    m_head = [conv(args.n_colors, n_feats, kernel_size)]

    # define body module
    self.block_num = [
        int(n_resblocks / 3),
        int(2 * n_resblocks / 3) - int(n_resblocks / 3),
        n_resblocks - int(2 * n_resblocks / 3)
    ]
    m_body1 = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale)
        for _ in range(self.block_num[0])
    ]
    m_body2 = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale)
        for _ in range(self.block_num[1])
    ]
    m_body3 = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale)
        for _ in range(self.block_num[2])
    ]
    m_body3.append(conv(n_feats, n_feats, kernel_size))

    # define tail module
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*m_head)
    self.body1 = nn.Sequential(*m_body1)
    self.body2 = nn.Sequential(*m_body2)
    self.body3 = nn.Sequential(*m_body3)
    self.tail = nn.Sequential(*m_tail)
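# Usage sketch for the variant above (hedged: assumes this EDSR class and the
# repo's `common` module are importable; the field values are illustrative
# placeholders, not the project's defaults):
from types import SimpleNamespace

args = SimpleNamespace(
    n_resblocks=32,   # split into roughly equal thirds across body1/body2/body3
    n_feats=256,
    scale=[4],        # __init__ reads args.scale[0]
    rgb_range=255,
    n_colors=3,
    res_scale=0.1,
)
model = EDSR(args)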
def __init__(self, args, conv=common.default_conv):
    conv = common.default_conv
    conv1x1 = common.conv_1x1_9_layer
    super(EDSR, self).__init__()
    self.args = args
    n_resblocks = args.n_resblocks
    n_feats = args.n_feats
    kernel_size = 3
    scale = args.scale
    act = nn.ReLU(True)

    # url_name = 'r{}f{}x{}'.format(n_resblocks, n_feats, scale)
    # if url_name in url:
    #     self.url = url[url_name]
    # else:
    #     self.url = None

    self.sub_mean = common.MeanShift(args.rgb_range, args.rgb_mean, args.rgb_std)
    self.add_mean = common.MeanShift(args.rgb_range, args.rgb_mean, args.rgb_std, 1)

    # define head module
    m_head = [conv(args.n_colors, n_feats, kernel_size)]

    # define body module
    # 3x3 conv blocks to secure the receptive field (original design)
    m_body = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale)
        for _ in range(60)
    ]
    # 1x1 conv blocks to secure depth
    m_body += [
        common.ResBlock(conv1x1, n_feats, kernel_size, act=act, res_scale=args.res_scale)
        for _ in range(60, n_resblocks)
    ]
    m_body.append(conv1x1(n_feats, n_feats, kernel_size))

    # define tail module
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args.n_colors, kernel_size)
    ]

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
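# Depth-split sketch for the variant above: the first 60 blocks use 3x3 convs,
# and range(60, n_resblocks) adds the remainder as 1x1-conv blocks, so
# n_resblocks must exceed 60 for the 1x1 section to be non-empty
# (illustrative arithmetic only; 80 is a hypothetical value):
n_resblocks = 80
n_3x3 = len(range(60))                # 60 receptive-field blocks
n_1x1 = len(range(60, n_resblocks))   # 20 depth blocks
assert n_3x3 + n_1x1 == n_resblocks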
def __init__(self, args, conv=common.default_conv):
    super(EDSR, self).__init__()
    n_resblock = args['n_resblocks']
    n_feats = args['n_feats']
    kernel_size = 3
    scale = args['scale']
    act = nn.ReLU(True)

    # RGB mean for DIV2K
    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    self.sub_mean = common.MeanShift(args['rgb_range'], rgb_mean, rgb_std)

    # define head module
    m_head = [conv(args['n_colors'], n_feats, kernel_size)]

    # define body module
    m_body = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args['res_scale'])
        for _ in range(n_resblock)
    ]
    m_body.append(conv(n_feats, n_feats, kernel_size))

    # define tail module
    m_tail = [
        common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, args['n_colors'], kernel_size)
    ]

    self.add_mean = common.MeanShift(args['rgb_range'], rgb_mean, rgb_std, 1)

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
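# This variant indexes `args` as a dict rather than a namespace; a minimal
# instantiation sketch (values illustrative, not the repo's defaults):
args = {
    'n_resblocks': 16,
    'n_feats': 64,
    'scale': 2,        # passed straight to common.Upsampler
    'rgb_range': 255,
    'n_colors': 3,
    'res_scale': 1,
}
model = EDSR(args)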
def __init__(self, num_block, num_feature, scale, n_colors=15, res_scale=0.1,
             conv=common.default_conv, isTrain=True):
    super(EDSR, self).__init__()
    n_resblocks = num_block
    n_feats = num_feature
    kernel_size = 3
    act = nn.ReLU(True)

    # define head module
    m_head = [conv(n_colors, n_feats, kernel_size)]

    # define body module
    m_body = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)
        for _ in range(n_resblocks)
    ]
    m_body.append(conv(n_feats, n_feats, kernel_size))

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)

    self.uv_up2 = common.Upsampler(conv, scale=2, n_feats=10, act=False)
    self.y_out_up4 = common.Upsampler(conv, scale=4, n_feats=n_feats, act=False)
    self.uv_out_up2 = common.Upsampler(conv, scale=2, n_feats=n_feats, act=False)
    self.y_res_up4 = nn.Sequential(
        common.Upsampler(conv, scale=4, n_feats=5, act=False),
        conv(5, n_feats, kernel_size))
    self.y_out = conv(n_feats, 1, kernel_size)
    self.uv_res_up4 = nn.Sequential(
        common.Upsampler(conv, scale=4, n_feats=10, act=False),
        conv(10, n_feats, kernel_size))
    self.uv_out = conv(n_feats, 2, kernel_size)
    self.isTrain = isTrain
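# Shape sketch for the variant above: the shared head consumes an n_colors=15
# stack, presumably 5 Y channels plus 10 UV channels to match the 5- and
# 10-channel upsampling branches (assumes common.default_conv uses 'same'
# padding, as in the reference EDSR code; sizes are illustrative):
import torch

model = EDSR(num_block=16, num_feature=64, scale=4)
x = torch.randn(1, 15, 32, 32)
feat = model.head(x)   # -> (1, 64, 32, 32)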
def __init__(self, num_block, num_feature, scale, n_colors=15, res_scale=0.1,
             conv=common.default_conv, isTrain=True):
    super(EDSR, self).__init__()
    n_resblocks = num_block
    n_feats = num_feature
    kernel_size = 3
    act = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    # define body module
    m_body = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)
        for _ in range(n_resblocks)
    ]
    m_body.append(conv(n_feats, n_feats, kernel_size))
    self.body = nn.Sequential(*m_body)

    self.down_x2 = nn.Sequential(
        nn.Conv2d(n_feats, n_feats, kernel_size=3, stride=2, padding=1), act)
    self.up_x2 = common.Upsampler(conv, scale=2, n_feats=n_feats, act=act)
    self.down_x4 = nn.Sequential(
        nn.Conv2d(n_feats, n_feats, kernel_size=3, stride=2, padding=1), act)
    self.up_x4 = common.Upsampler(conv, scale=2, n_feats=n_feats, act=act)
    self.upsampler_2 = common.Upsampler(conv, scale=2, n_feats=n_feats, act=act)
    self.upsampler_4 = common.Upsampler(conv, scale=4, n_feats=n_feats, act=act)

    # define head modules (separate Y and UV branches)
    self.head_y = conv(5, n_feats, kernel_size)
    self.head_uv = conv(10, n_feats, kernel_size)
    self.y_out = conv(n_feats, 1, kernel_size)
    self.uv_out = conv(n_feats, 2, kernel_size)
    self.isTrain = isTrain
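# The variant above splits the head instead: a 5-channel Y branch and a
# 10-channel UV branch (shape sketch only; the forward pass is defined
# elsewhere in the repo, and 'same'-padded common.default_conv is assumed):
import torch

model = EDSR(num_block=16, num_feature=64, scale=4)  # illustrative sizes
y_feat = model.head_y(torch.randn(1, 5, 64, 64))     # -> (1, 64, 64, 64)
uv_feat = model.head_uv(torch.randn(1, 10, 32, 32))  # -> (1, 64, 32, 32)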
def __init__(self, conv=common.default_conv, n_resblock=8, n_filters=64,
             scale=4, rgb_range=255, n_colors=1, res_scale=1):
    super(EDSR, self).__init__()
    # n_resblock = args.n_resblocks
    # n_feats = args.n_feats
    kernel_size = 3
    # scale = args.scale[0]
    act = nn.ReLU(True)

    rgb_mean = (0.4488, 0.4371, 0.4040)
    rgb_std = (1.0, 1.0, 1.0)
    # self.sub_mean = common.MeanShift(rgb_range, rgb_mean, -1)  # rgb_std)

    # define head module
    m_head = [conv(n_colors, n_filters, kernel_size)]

    # define body module
    m_body = [
        common.ResBlock(conv, n_filters, kernel_size, act=act, res_scale=res_scale)
        for _ in range(n_resblock)
    ]
    m_body.append(conv(n_filters, n_filters, kernel_size))

    # define tail module
    m_tail = [
        common.Upsampler(conv, scale, n_filters, act=False),
        conv(n_filters, n_colors, kernel_size)
    ]

    # self.add_mean = common.MeanShift(rgb_range, rgb_mean, 1)  # rgb_std, 1)

    self.head = nn.Sequential(*m_head)
    self.body = nn.Sequential(*m_body)
    self.tail = nn.Sequential(*m_tail)
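# Every hyperparameter above has a keyword default, so a bare call works
# (assuming the class and the repo's `common` module are importable):
model = EDSR()  # 8 resblocks, 64 filters, x4, single-channel I/O
model_x2 = EDSR(n_resblock=16, n_filters=128, scale=2)  # or override as needed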
def __init__(self, n_colors, n_resblocks=32, n_feats=256, res_scale=1,
             conv=common.default_conv):
    super(EDSR, self).__init__()
    n_resblock = n_resblocks
    kernel_size = 3
    # scale = args.scale[0]
    act = nn.ReLU(True)
    # rgb_mean = (0.4488, 0.4371, 0.4040)
    # self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, -1)

    # define head module
    modules_head = [conv(n_colors, n_feats, kernel_size)]

    # define body module
    modules_body = [
        common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=res_scale)
        for _ in range(n_resblock)
    ]
    modules_body.append(conv(n_feats, n_feats, kernel_size))

    # define tail module
    modules_tail = [
        # common.Upsampler(conv, scale, n_feats, act=False),
        conv(n_feats, 3, kernel_size)
    ]
    # self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, 1)

    self.head = nn.Sequential(*modules_head)
    self.body = nn.Sequential(*modules_body)
    self.tail = nn.Sequential(*modules_tail)
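# In the variant above the tail's Upsampler is commented out, so the module
# stack preserves spatial size; a module-by-module shape check only (the real
# forward, including any residual skip, lives elsewhere in the repo):
import torch

model = EDSR(n_colors=3)
x = torch.randn(1, 3, 48, 48)
out = model.tail(model.body(model.head(x)))  # -> (1, 3, 48, 48): no upscaling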
def __init__(self, opts):
    super(Generator, self).__init__()
    self.light = opts.light
    conv_type = opts.gen_conv_layer

    # Get constructors of the required modules
    conv_layer = common.get_conv_layer(conv_type)
    norm_layer = common.get_norm_layer(opts.gen_norm_layer)
    upsampling_layer = common.get_upsampling_layer(opts.gen_upsampling_layer)

    # Calculate the number of downsampling and upsampling convolutional blocks
    num_down_blocks = int(log(opts.gen_input_size // opts.gen_latent_size, 2))
    num_up_blocks = int(log(opts.gen_output_size // opts.gen_latent_size, 2))
    if opts.light:
        num_up_blocks_light = int(log(opts.gen_output_size_light // opts.gen_latent_size, 2))

    # Read parameters for convolutional blocks
    in_channels = opts.gen_num_channels
    padding = (opts.gen_kernel_size - 1) // 2
    bias = norm_layer != nn.BatchNorm2d
    # bias = True  # only for 1 experiment

    # Downsampling blocks
    down_layers = []
    for i in range(num_down_blocks):
        # Double the number of channels, capped at gen_max_channels
        out_channels = min(in_channels * 2, opts.gen_max_channels)
        down_layers += [
            conv_layer(in_channels, out_channels, opts.gen_kernel_size,
                       stride=2, padding=padding, bias=bias),
            norm_layer(out_channels),
            nn.ReLU(True)]
        in_channels = out_channels
    in_channels = opts.gen_max_channels

    # Residual blocks (encoder half)
    num_res_blocks = opts.gen_num_res_blocks
    for i in range(num_res_blocks - num_res_blocks // 2):
        down_layers += [common.ResBlock(in_channels, conv_layer, norm_layer)]

    # Get the list of input channels per branch
    input_channels_list = list(map(int, opts.gen_num_input_channels.split(',')))

    # Downsampling branches
    self.input_branches = nn.ModuleList()
    for current_input_channels in input_channels_list:
        # First layer without normalization
        current_layers = []
        if num_down_blocks == num_up_blocks:
            current_layers += [
                conv_layer(current_input_channels, opts.gen_num_channels, 7, 1, 3, bias=False),
                nn.ReLU(True)]
        current_layers += copy.deepcopy(down_layers)
        self.input_branches += [nn.Sequential(*current_layers)]

    # Residual decoder blocks
    residual_layers = []
    for i in range(num_res_blocks // 2):
        residual_layers += [common.ResBlock(in_channels, conv_layer, norm_layer)]

    # Upsampling decoder blocks
    upsampling_layers = []
    for i in range(num_up_blocks):
        # Halve the number of channels, clamped to [gen_num_channels, gen_max_channels]
        out_channels = opts.gen_num_channels * 2 ** (num_up_blocks - i - 1)
        out_channels = max(min(out_channels, opts.gen_max_channels), opts.gen_num_channels)
        upsampling_layers += upsampling_layer(
            in_channels, out_channels, opts.gen_kernel_size, 2, bias, conv_type)
        upsampling_layers += [
            norm_layer(out_channels),
            nn.ReLU(True)]
        if opts.light and i == num_up_blocks_light - 1:
            out_channels_light = out_channels
        in_channels = out_channels

    # Get output channels and nonlinearities per branch
    branches_out_channels = opts.gen_num_output_channels.split(';')
    branches_nonlinearities = opts.gen_output_nonlinearities.split(';')

    # Output branches (deep)
    self.branches_residual = nn.ModuleList()
    self.branches_upsampling = nn.ModuleList()

    # Final layers (may be several per branch)
    self.final_layers = nn.ModuleList()

    for branch_out_channels, branch_nonlinearities in zip(branches_out_channels,
                                                          branches_nonlinearities):
        # Deep copy the main chunk of the branch
        self.branches_residual += [nn.Sequential(*copy.deepcopy(residual_layers))]
        self.branches_upsampling += [nn.Sequential(*copy.deepcopy(upsampling_layers))]

        # Each branch has multiple heads
        heads_out_channels = map(int, branch_out_channels.split(','))
        heads_nonlinearity_types = branch_nonlinearities.split(',')

        branch_heads = nn.ModuleList()
        for head_out_channels, head_nonlinearity_type in zip(heads_out_channels,
                                                             heads_nonlinearity_types):
            branch_heads += [nn.Sequential(
                nn.Conv2d(out_channels, head_out_channels, 7, 1, 3, bias=False),
                common.get_nonlinear_layer(head_nonlinearity_type))]
        self.final_layers += [branch_heads]

    if opts.light:
        light_branch = [nn.Sequential(*copy.deepcopy(residual_layers))]
        light_branch += [nn.Sequential(*copy.deepcopy(upsampling_layers[:4 * num_up_blocks_light]))]
        light_branch += [nn.Sequential(
            nn.Conv2d(out_channels_light, 1, 7, 1, 3, bias=False),
            common.get_nonlinear_layer('tanh'))]
        self.light_branch = nn.Sequential(*light_branch)

    # Initialize weights
    self.apply(common.weights_init)
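# Hedged sketch of an `opts` object for the Generator above: every attribute
# name is read in __init__, but the values (and the layer-type strings) are
# illustrative guesses; the accepted strings are defined by the repo's
# common.get_*_layer helpers:
from types import SimpleNamespace

opts = SimpleNamespace(
    light=False,               # set True to also build the light branch
    gen_conv_layer='conv',
    gen_norm_layer='batch',
    gen_upsampling_layer='nearest',
    gen_input_size=256,
    gen_output_size=256,
    gen_latent_size=32,        # log2(256/32) = 3 down and 3 up blocks
    gen_num_channels=32,
    gen_max_channels=256,
    gen_kernel_size=3,
    gen_num_res_blocks=8,
    gen_num_input_channels='3',    # ',' separates input branches
    gen_num_output_channels='3',   # ';' separates branches, ',' separates heads
    gen_output_nonlinearities='tanh',
)
netG = Generator(opts)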