def __init__(self, scale_list, model_path='weight/EDSR_weight.pt'):
    super(EDSR, self).__init__()

    # args
    scale = scale_list[0]
    input_channel = 3    # RGB in
    output_channel = 3   # RGB out
    kernel_size = 3      # common.conv signature is (in_ch, out_ch, kernel_size)
    num_block = 16
    inp = 64             # number of feature channels
    rgb_range = 255
    res_scale = 0.1
    act = nn.ReLU(True)
    # act = nn.LeakyReLU(negative_slope=0.05, inplace=True)

    # head
    self.head = nn.Sequential(common.conv(input_channel, inp, kernel_size))

    # body
    self.body = nn.Sequential(*[
        common.ResBlock(inp, bias=True, act=act, res_scale=res_scale)
        for _ in range(num_block)
    ])
    self.body.add_module(str(num_block), common.conv(inp, inp, kernel_size))

    # tail
    if scale > 1:
        self.tail = nn.Sequential(
            common.Upsampler(scale, inp, act=False, choice=0),
            common.conv(inp, output_channel, kernel_size))
    else:
        self.tail = nn.Sequential(common.conv(inp, output_channel, kernel_size))

    self.sub_mean = common.MeanShift(rgb_range, sign=-1)
    self.add_mean = common.MeanShift(rgb_range, sign=1)

    self.model_path = model_path
    self.load()
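# The forward pass is not shown in this file. Under the usual EDSR design,
# which the modules above imply, the dataflow would be the hedged sketch
# below; the class's real forward may differ.
def forward(self, x):  # sketch only
    x = self.sub_mean(x)     # subtract the dataset RGB mean
    x = self.head(x)         # 3 -> inp feature channels
    res = self.body(x)       # 16 residual blocks + closing conv
    res += x                 # global (long) skip connection
    x = self.tail(res)       # upsample if scale > 1, project back to RGB
    return self.add_mean(x)  # add the mean back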
def __init__(self, args):
    super().__init__()
    n_resgroup = args.n_resgroups
    n_feats = args.n_feats
    colors = 3
    out_dim = 120  # MoL
    use_checkpoint = args.checkpoint
    self.stages = args.stages

    # input = lr + masked_hr + mask [+ vq_latent]
    self.head = conv(colors * 2 + 1, n_feats)
    self.fuse = conv(n_feats * 2, n_feats, kernel_size=1, bias=False)
    self.stage_embedding = nn.Embedding(args.stages, n_feats)

    self.body_nl_low = _ResGroup(n_feats, is_nonlocal=True,
                                 use_checkpoint=use_checkpoint)
    self.body = nn.ModuleList(
        [_ResGroup(n_feats) for _ in range(n_resgroup - 2)])
    self.body_tail = conv(n_feats, n_feats)
    self.body_nl_high = _ResGroup(n_feats, is_nonlocal=True,
                                  use_checkpoint=use_checkpoint)
    self.tail = conv(n_feats, out_dim)

    if args.position_encoding:
        self.position_encoding = nn.Parameter(
            torch.zeros(args.stages, n_feats))
    else:
        self.position_encoding = None
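# A hedged construction sketch: `args` here is a stand-in namespace and
# `Body` a placeholder for the enclosing class; the real values come from
# the project's own option parser.
from types import SimpleNamespace

args = SimpleNamespace(n_resgroups=10, n_feats=64, checkpoint=False,
                       stages=4, position_encoding=True)
model = Body(args)  # placeholder class name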
def __init__(self,
             inplanes,
             planes,
             stride=1,
             dilation=1,
             downsample=None,
             conv_type=ConvType.HYPERCUBE,
             bn_momentum=0.1,
             D=3):
    super(BasicBlockBase, self).__init__()
    self.conv1 = conv(
        inplanes, planes, kernel_size=3, stride=stride, dilation=dilation,
        conv_type=conv_type, D=D)
    self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
    self.conv2 = conv(
        planes, planes, kernel_size=3, stride=1, dilation=dilation,
        bias=False, conv_type=conv_type, D=D)
    self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
    self.relu = MinkowskiReLU(inplace=True)
    self.downsample = downsample
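# The block's forward is not shown; the standard basic-block dataflow this
# __init__ implies would look like the hedged sketch below (the real method
# may differ slightly).
def forward(self, x):  # sketch only
    residual = x
    out = self.relu(self.norm1(self.conv1(x)))
    out = self.norm2(self.conv2(out))
    if self.downsample is not None:
        residual = self.downsample(x)  # match stride/channels of `out`
    return self.relu(out + residual)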
def network_initialization(self, in_channels, out_channels, D):

    def space_n_time_m(n, m):
        return n if D == 3 else [n, n, n, m]

    if D == 4:
        self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)

    dilations = [1, 1, 1, 1]
    bn_momentum = 0.02
    self.inplanes = self.INIT_DIM

    self.conv1 = conv(in_channels, self.inplanes,
                      kernel_size=space_n_time_m(3, 1), stride=1, D=D)
    self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D,
                        bn_momentum=bn_momentum)
    self.relu = ME.MinkowskiReLU(inplace=True)
    self.pool = sum_pool(kernel_size=space_n_time_m(2, 1),
                         stride=space_n_time_m(2, 1), D=D)

    self.layer1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0],
                                   stride=space_n_time_m(2, 1),
                                   dilation=space_n_time_m(dilations[0], 1))
    self.layer2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1],
                                   stride=space_n_time_m(2, 1),
                                   dilation=space_n_time_m(dilations[1], 1))
    self.layer3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2],
                                   stride=space_n_time_m(2, 1),
                                   dilation=space_n_time_m(dilations[2], 1))
    self.layer4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3],
                                   stride=space_n_time_m(2, 1),
                                   dilation=space_n_time_m(dilations[3], 1))

    self.final = conv(self.PLANES[3] * self.BLOCK.expansion, out_channels,
                      kernel_size=1, bias=True, D=D)
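# A hedged sketch of the forward dataflow these modules imply
# (stem -> pool -> four residual stages -> 1x1 classifier conv);
# the class's real forward may differ.
def forward(self, x):  # sketch only
    x = self.pool(self.relu(self.bn1(self.conv1(x))))
    x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
    return self.final(x)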
def __init__(self, scale_list, model_path='weight/MDSR_weight.pt'):
    super(MDSR, self).__init__()

    # args
    self.scale_list = scale_list
    input_channel = 3    # RGB in
    output_channel = 3   # RGB out
    kernel_size = 3      # common.conv signature is (in_ch, out_ch, kernel_size)
    num_block = 32
    inp = 64             # number of feature channels
    rgb_range = 255
    res_scale = 0.1
    act = nn.ReLU(True)
    # act = nn.LeakyReLU(negative_slope=0.05, inplace=True)

    # head
    self.head = nn.Sequential(common.conv(input_channel, inp, kernel_size))

    # pre_process: two scale-specific residual blocks per scale
    self.pre_process = nn.ModuleDict({
        str(scale): nn.Sequential(
            common.ResBlock(inp, bias=True, act=act, res_scale=res_scale),
            common.ResBlock(inp, bias=True, act=act, res_scale=res_scale))
        for scale in self.scale_list
    })

    # body (shared across scales)
    self.body = nn.Sequential(*[
        common.ResBlock(inp, bias=True, act=act, res_scale=res_scale)
        for _ in range(num_block)
    ])
    self.body.add_module(str(num_block), common.conv(inp, inp, kernel_size))

    # upsample: one scale-specific upsampler per scale
    self.upsample = nn.ModuleDict({
        str(scale): common.Upsampler(scale, inp, act=False, choice=0)
        for scale in self.scale_list
    })

    # tail
    self.tail = nn.Sequential(common.conv(inp, output_channel, kernel_size))

    self.sub_mean = common.MeanShift(rgb_range, sign=-1)
    self.add_mean = common.MeanShift(rgb_range, sign=1)

    self.model_path = model_path
    self.load()
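# MDSR shares one body across scales and switches the scale-specific
# branches via the ModuleDicts. A hedged forward sketch (the real method
# may differ):
def forward(self, x, scale):  # sketch only
    x = self.sub_mean(x)
    x = self.pre_process[str(scale)](self.head(x))
    res = self.body(x)
    res += x                              # global skip connection
    x = self.tail(self.upsample[str(scale)](res))
    return self.add_mean(x)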
def forward(self, inputs):
    with tf.variable_scope('backbone_Darknet53'):
        # [19, 19, 512], [38, 38, 512], [76, 76, 256]
        route_19, route_38, route_76 = self.backbone(
            inputs, self.batch_norm_params, self.weight_decay)

    with tf.variable_scope('yolov4_head'):
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=self.batch_norm_params,
                            activation_fn=lambda x: tf.nn.leaky_relu(x),
                            weights_regularizer=slim.l2_regularizer(
                                self.weight_decay)):
            # feature_map_76
            fpn_19 = upsample_block(route_19, 256)
            route_38 = conv(route_38, 256)
            route_38 = tf.concat([route_38, fpn_19], axis=-1)
            route_38 = cbl_block(route_38, 256)

            fpn_38 = upsample_block(route_38, 128)
            route_76 = conv(route_76, 128)
            route_76 = tf.concat([route_76, fpn_38], axis=-1)
            route_76 = cbl_block(route_76, 128)
            pan_76 = route_76
            feature_76 = conv(route_76, 256)

            # feature_map_38
            pan_76 = conv(pan_76, 256, down_sample=True)
            route_38 = tf.concat([route_38, pan_76], axis=-1)
            route_38 = cbl_block(route_38, 256)
            pan_38 = route_38
            feature_38 = conv(route_38, 512)

            # feature_map_19
            pan_38 = conv(pan_38, 512, down_sample=True)
            route_19 = tf.concat([route_19, pan_38], axis=-1)
            route_19 = cbl_block(route_19, 512)
            feature_19 = conv(route_19, 1024)

            feature_19 = slim.conv2d(feature_19, 3 * (4 + 1 + self.class_num),
                                     1, stride=1, normalizer_fn=None,
                                     activation_fn=None,
                                     biases_initializer=tf.zeros_initializer())
            feature_38 = slim.conv2d(feature_38, 3 * (4 + 1 + self.class_num),
                                     1, stride=1, normalizer_fn=None,
                                     activation_fn=None,
                                     biases_initializer=tf.zeros_initializer())
            feature_76 = slim.conv2d(feature_76, 3 * (4 + 1 + self.class_num),
                                     1, stride=1, normalizer_fn=None,
                                     activation_fn=None,
                                     biases_initializer=tf.zeros_initializer())

    return feature_19, feature_38, feature_76
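# A hedged usage sketch (TF1.x graph mode). `YoloV4` and its constructor
# arguments are placeholders for the enclosing class; each output map has
# 3 * (4 + 1 + class_num) channels per cell: 3 anchors x (4 box offsets
# + 1 objectness + class scores).
inputs = tf.placeholder(tf.float32, [None, 608, 608, 3])
model = YoloV4(class_num=80)  # placeholder
feature_19, feature_38, feature_76 = model.forward(inputs)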
def __init__(self, scale_list, model_path='weight/NEWNET_weight.pt'):
    super(NewNet, self).__init__()

    # args
    # Deduplicate first, then sort (sorting before set() would be undone,
    # since sets are unordered).
    self.scale_list = sorted(set(scale_list))
    input_channel = 3    # RGB in
    output_channel = 3   # RGB out
    kernel_size = 3      # common.conv signature is (in_ch, out_ch, kernel_size)
    num_block_list = [6, 6, 6, 6]
    inp = 64             # number of feature channels
    rgb_range = 255
    res_scale = 0.1
    act = nn.LeakyReLU(negative_slope=0.05, inplace=True)

    # head
    self.head = nn.Sequential(common.conv(input_channel, inp, kernel_size))

    # body: one scale-specific stack of residual blocks per scale
    self.body = nn.ModuleDict({
        str(scale): nn.Sequential(*[
            common.ResBlock(inp, bias=True, act=act, res_scale=res_scale)
            for _ in range(num_block_list[scale - 1])
        ])
        for scale in self.scale_list
    })

    # upsample
    self.upsample = nn.ModuleDict({
        str(scale): common.Upsampler(scale, inp, act=False, choice=0)
        for scale in self.scale_list
    })

    # tail
    self.tail = nn.ModuleDict({
        str(scale): common.conv(inp, output_channel, kernel_size)
        for scale in self.scale_list
    })

    self.sub_mean = common.MeanShift(rgb_range, sign=-1)
    self.add_mean = common.MeanShift(rgb_range, sign=1)

    self.model_path = model_path
    self.load()
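# Unlike MDSR above, every stage here is scale-specific. A hedged forward
# sketch of the dataflow the ModuleDicts imply (the real method may differ):
def forward(self, x, scale):  # sketch only
    x = self.head(self.sub_mean(x))
    res = self.body[str(scale)](x)
    res += x                              # skip over the scale-specific body
    x = self.tail[str(scale)](self.upsample[str(scale)](res))
    return self.add_mean(x)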
def _make_layer(self,
                block,
                planes,
                blocks,
                stride=1,
                dilation=1,
                norm_type=NormType.BATCH_NORM,
                bn_momentum=0.1):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        # Project the identity path so it matches the main path's
        # resolution and channel count.
        downsample = nn.Sequential(
            conv(self.inplanes, planes * block.expansion, kernel_size=1,
                 stride=stride, bias=False, D=self.D),
            get_norm(norm_type, planes * block.expansion, D=self.D,
                     bn_momentum=bn_momentum),
        )
    layers = []
    layers.append(
        block(self.inplanes, planes, stride=stride, dilation=dilation,
              downsample=downsample, conv_type=self.CONV_TYPE, D=self.D))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(
            block(self.inplanes, planes, stride=1, dilation=dilation,
                  conv_type=self.CONV_TYPE, D=self.D))
    return nn.Sequential(*layers)
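# Hypothetical call, e.g. from network_initialization: with stride=2 the
# first block gets the 1x1-conv `downsample` branch, and the remaining
# blocks run at stride 1 on the widened `inplanes`:
#
#     self.layer1 = self._make_layer(self.BLOCK, planes=64, blocks=2, stride=2)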
def backbone(self, inputs, batch_norm_params, weight_decay):
    # =============== Mish activation ===============
    with slim.arg_scope(
            [slim.conv2d],
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params,
            activation_fn=Mish,  # see the hedged Mish sketch below
            weights_regularizer=slim.l2_regularizer(weight_decay)):
        # CBM
        net = conv(inputs, 32)
        # CSP1
        net = csp_block(net, 32, res_block_sum=1, double_channels=True)
        # CSP2
        net = csp_block(net, 64, res_block_sum=2, double_channels=False)
        # CSP8, feature_map_76
        net = csp_block(net, 128, res_block_sum=8, double_channels=False)
        route_76 = net
        # CSP8, feature_map_38
        net = csp_block(net, 256, res_block_sum=8, double_channels=False)
        route_38 = net
        # CSP4
        net = csp_block(net, 512, res_block_sum=4, double_channels=False)

    # =============== LeakyRelu activation ===============
    with slim.arg_scope(
            [slim.conv2d],
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params,
            activation_fn=lambda x: tf.nn.leaky_relu(x),
            weights_regularizer=slim.l2_regularizer(weight_decay)):
        # CBL * 3
        net = conv(net, 512, kernel_size=1)
        net = conv(net, 1024)
        net = conv(net, 512, kernel_size=1)
        # SPP, shape: [19, 19, 1024]
        net = spp_block(net)
        # CBL * 3, feature_map_19
        net = conv(net, 512, kernel_size=1)
        net = conv(net, 1024)
        route_19 = conv(net, 512, kernel_size=1)

    return route_19, route_38, route_76
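# `Mish` is referenced above but was marked TODO in the source. One common
# definition, a minimal sketch compatible with the slim arg_scope usage:
def Mish(x):
    # mish(x) = x * tanh(softplus(x))
    return x * tf.tanh(tf.nn.softplus(x))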
def __init__(self, n_feats, is_nonlocal=False, use_checkpoint=True):
    super().__init__()
    self.block = ResAttModuleDownUpPlus(n_feats, is_nonlocal, use_checkpoint)
    self.tail = conv(n_feats, n_feats)
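# A hedged forward sketch for _ResGroup; whether the group adds its own
# outer skip (or relies on one inside ResAttModuleDownUpPlus) depends on
# the rest of the source, so the `+ x` below is an assumption.
def forward(self, x):  # sketch only
    return self.tail(self.block(x)) + x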
def network_initialization(self, in_channels, out_channels, D):
    # Setup net_metadata
    dilations = self.DILATIONS
    bn_momentum = 0.02

    def space_n_time_m(n, m):
        return n if D == 3 else [n, n, n, m]

    if D == 4:
        self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)

    # Output of the first conv concated to conv6
    self.inplanes = self.INIT_DIM
    self.conv0p1s1 = conv(in_channels, self.inplanes,
                          kernel_size=space_n_time_m(3, 1), stride=1,
                          dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D,
                        bn_momentum=bn_momentum)

    self.conv1p1s2 = conv(self.inplanes, self.inplanes,
                          kernel_size=space_n_time_m(2, 1),
                          stride=space_n_time_m(2, 1), dilation=1,
                          conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D,
                        bn_momentum=bn_momentum)
    self.block1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0],
                                   dilation=dilations[0],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.conv2p2s2 = conv(self.inplanes, self.inplanes,
                          kernel_size=space_n_time_m(2, 1),
                          stride=space_n_time_m(2, 1), dilation=1,
                          conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D,
                        bn_momentum=bn_momentum)
    self.block2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1],
                                   dilation=dilations[1],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.conv3p4s2 = conv(self.inplanes, self.inplanes,
                          kernel_size=space_n_time_m(2, 1),
                          stride=space_n_time_m(2, 1), dilation=1,
                          conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D,
                        bn_momentum=bn_momentum)
    self.block3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2],
                                   dilation=dilations[2],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.conv4p8s2 = conv(self.inplanes, self.inplanes,
                          kernel_size=space_n_time_m(2, 1),
                          stride=space_n_time_m(2, 1), dilation=1,
                          conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D,
                        bn_momentum=bn_momentum)
    self.block4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3],
                                   dilation=dilations[3],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.convtr4p16s2 = conv_tr(self.inplanes, self.PLANES[4],
                                kernel_size=space_n_time_m(2, 1),
                                upsample_stride=space_n_time_m(2, 1),
                                dilation=1, bias=False,
                                conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D,
                          bn_momentum=bn_momentum)
    self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
    self.block5 = self._make_layer(self.BLOCK, self.PLANES[4], self.LAYERS[4],
                                   dilation=dilations[4],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.convtr5p8s2 = conv_tr(self.inplanes, self.PLANES[5],
                               kernel_size=space_n_time_m(2, 1),
                               upsample_stride=space_n_time_m(2, 1),
                               dilation=1, bias=False,
                               conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D,
                          bn_momentum=bn_momentum)
    self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
    self.block6 = self._make_layer(self.BLOCK, self.PLANES[5], self.LAYERS[5],
                                   dilation=dilations[5],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.convtr6p4s2 = conv_tr(self.inplanes, self.PLANES[6],
                               kernel_size=space_n_time_m(2, 1),
                               upsample_stride=space_n_time_m(2, 1),
                               dilation=1, bias=False,
                               conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D,
                          bn_momentum=bn_momentum)
    self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
    self.block7 = self._make_layer(self.BLOCK, self.PLANES[6], self.LAYERS[6],
                                   dilation=dilations[6],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.convtr7p2s2 = conv_tr(self.inplanes, self.PLANES[7],
                               kernel_size=space_n_time_m(2, 1),
                               upsample_stride=space_n_time_m(2, 1),
                               dilation=1, bias=False,
                               conv_type=self.NON_BLOCK_CONV_TYPE, D=D)
    self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D,
                          bn_momentum=bn_momentum)
    self.inplanes = self.PLANES[7] + self.INIT_DIM
    self.block8 = self._make_layer(self.BLOCK, self.PLANES[7], self.LAYERS[7],
                                   dilation=dilations[7],
                                   norm_type=self.NORM_TYPE,
                                   bn_momentum=bn_momentum)

    self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1,
                      bias=True, D=D)
    self.relu = MinkowskiReLU(inplace=True)
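# A hedged sketch of the U-shaped dataflow the `inplanes` arithmetic above
# implies: each transposed conv upsamples, and its output is concatenated
# with the matching encoder feature (block3/block2/block1/stem) before the
# next decoder stage. Assumes `import MinkowskiEngine as ME`; the real
# forward may differ in detail.
def forward(self, x):  # sketch only
    out_p1 = self.relu(self.bn0(self.conv0p1s1(x)))           # stem
    out = self.relu(self.bn1(self.conv1p1s2(out_p1)))
    out_b1 = self.block1(out)
    out = self.relu(self.bn2(self.conv2p2s2(out_b1)))
    out_b2 = self.block2(out)
    out = self.relu(self.bn3(self.conv3p4s2(out_b2)))
    out_b3 = self.block3(out)
    out = self.relu(self.bn4(self.conv4p8s2(out_b3)))
    out = self.block4(out)

    out = self.relu(self.bntr4(self.convtr4p16s2(out)))
    out = self.block5(ME.cat(out, out_b3))                    # skip: block3
    out = self.relu(self.bntr5(self.convtr5p8s2(out)))
    out = self.block6(ME.cat(out, out_b2))                    # skip: block2
    out = self.relu(self.bntr6(self.convtr6p4s2(out)))
    out = self.block7(ME.cat(out, out_b1))                    # skip: block1
    out = self.relu(self.bntr7(self.convtr7p2s2(out)))
    out = self.block8(ME.cat(out, out_p1))                    # skip: stem
    return self.final(out)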