def __init__(self, cl=32, cm=32, ch=16, nframes=7, input_nc=3, output_nc=3, upscale_factor=4):
    super(RBPN, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # initial feature extraction: conv1 encodes the target frame alone,
    # conv2 encodes the target frame concatenated with one neighboring frame
    self.conv1 = M.Sequential(
        M.Conv2d(input_nc, cl, kernel_size=3, stride=1, padding=1),
        M.PReLU(),
    )
    self.conv2 = M.Sequential(
        M.Conv2d(input_nc * 2, cm, kernel_size=3, stride=1, padding=1),
        M.PReLU(),
    )
    # projection module
    self.Projection = Projection_Module(cl, cm, ch)
    # reconstruction module
    self.reconstruction = M.Conv2d((self.nframes - 1) * ch, output_nc, kernel_size=3, stride=1, padding=1)
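# --- A minimal usage sketch (added here, not from the source): checking the
# channel contracts of RBPN's two input convolutions under the defaults above
# (input_nc=3, cl=cm=32). conv1 sees a single frame; conv2 is assumed to see
# the target frame concatenated with one neighboring frame.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M

conv1 = M.Sequential(M.Conv2d(3, 32, kernel_size=3, stride=1, padding=1), M.PReLU())
conv2 = M.Sequential(M.Conv2d(6, 32, kernel_size=3, stride=1, padding=1), M.PReLU())
target = mge.tensor(np.random.random((1, 3, 32, 32)).astype(np.float32))
neighbor = mge.tensor(np.random.random((1, 3, 32, 32)).astype(np.float32))
pair = F.concat([target, neighbor], axis=1)    # (1, 6, 32, 32)
print(conv1(target).shape, conv2(pair).shape)  # both (1, 32, 32, 32)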
def __init__(self, channel_num):
    super(CARBBlock, self).__init__()
    self.conv1 = M.Sequential(
        M.Conv2d(channel_num, channel_num, kernel_size=3, padding=1, stride=1),
        M.ReLU(),
        M.Conv2d(channel_num, channel_num, kernel_size=3, padding=1, stride=1),
    )
    # channel attention: globally average-pooled features (B,C,H,W -> B,C)
    # are mapped to per-channel gating weights in (0, 1)
    self.linear = M.Sequential(
        M.Linear(channel_num, channel_num // 2),
        M.ReLU(),
        M.Linear(channel_num // 2, channel_num),
        M.Sigmoid(),
    )
    self.conv2 = M.Conv2d(channel_num * 2, channel_num, kernel_size=1, padding=0, stride=1)
    self.lrelu = M.LeakyReLU()
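# --- A minimal sketch (an assumption, not from the source): the squeeze-and-
# excitation style gating that CARBBlock's `linear` stack implies, with global
# average pooling standing in for the commented-out AdaptiveAvgPool2d.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M

c = 32
gate = M.Sequential(M.Linear(c, c // 2), M.ReLU(), M.Linear(c // 2, c), M.Sigmoid())
x = mge.tensor(np.random.random((2, c, 16, 16)).astype(np.float32))
w = gate(F.mean(x, axis=[2, 3]))  # (2, c): per-channel weights in (0, 1)
y = x * w.reshape(-1, c, 1, 1)    # broadcast the gate over H and W
print(y.shape)                    # (2, 32, 16, 16)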
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(ReductionCell1, self).__init__()
    self.conv_prev_1x1 = M.Sequential(
        M.ReLU(),
        M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False),
        M.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True),
    )
    self.conv_1x1 = M.Sequential(
        M.ReLU(),
        M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False),
        M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True),
    )
    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_1_left = M.MaxPool2d(3, stride=2, padding=1)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
    self.comb_iter_2_left = M.AvgPool2d(3, stride=2, padding=1)
    self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
    self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_4_right = M.MaxPool2d(3, stride=2, padding=1)
def __init__(self, in_channels=3, out_channels=3, d=56, s=12, upscale_factor=4):
    super(FSRCNN, self).__init__()
    l = []
    # feature extraction
    l.append(M.Sequential(Conv2d(in_channels, d, 5, 1, 2), M.PReLU(num_parameters=1, init=0.25)))
    # shrinking
    l.append(M.Sequential(Conv2d(d, s, 1, 1, 0), M.PReLU(num_parameters=1, init=0.25)))
    # mapping: four 3x3 convolutions at the shrunk width
    for _ in range(4):
        l.append(M.Sequential(Conv2d(s, s, 3, 1, 1), M.PReLU(num_parameters=1, init=0.25)))
    # expanding
    l.append(M.Sequential(Conv2d(s, d, 1, 1, 0), M.PReLU(num_parameters=1, init=0.25)))
    # deconvolution head: kernel 8, stride = upscale_factor, padding 2
    l.append(ConvTranspose2d(d, out_channels, 8, upscale_factor, padding=2))
    self.convs = M.Sequential(*l)
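# --- A minimal shape check (added, not from the source): FSRCNN's deconv head
# with kernel 8, stride 4, padding 2 performs exact 4x upsampling, since
# (H - 1) * 4 - 2 * 2 + 8 = 4 * H.
import numpy as np
import megengine as mge
import megengine.module as M

deconv = M.ConvTranspose2d(56, 3, 8, 4, padding=2)
x = mge.tensor(np.random.random((1, 56, 24, 24)).astype(np.float32))
print(deconv(x).shape)  # (1, 3, 96, 96)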
def __init__(self, in_channels=3, out_channels=3, mid_channels=128, hidden_channels=3 * 4 * 4,
             blocknums=5, upscale_factor=4, hsa=False, pixel_shuffle=False):
    super(RSDN, self).__init__()
    if hsa:
        self.hsa = HSA(3)
    else:
        self.hsa = Identi()
    self.blocknums = blocknums
    self.hidden_channels = hidden_channels

    SDBlocks = []
    for _ in range(blocknums):
        SDBlocks.append(SDBlock(mid_channels))
    self.SDBlocks = M.Sequential(*SDBlocks)

    self.pre_SD_S = M.Sequential(
        Conv2d(2 * (3 + hidden_channels), mid_channels, 3, 1, 1),
        M.ReLU(),
    )
    self.pre_SD_D = M.Sequential(
        Conv2d(2 * (3 + hidden_channels), mid_channels, 3, 1, 1),
        M.ReLU(),
    )
    self.conv_SD = M.Sequential(
        Conv2d(mid_channels, hidden_channels, 3, 1, 1),
        M.ReLU(),
    )
    self.convS = Conv2d(mid_channels, hidden_channels, 3, 1, 1)
    self.convD = Conv2d(mid_channels, hidden_channels, 3, 1, 1)
    self.convHR = Conv2d(2 * hidden_channels, hidden_channels, 3, 1, 1)
    # both upsampling choices map (B, hidden_channels, H, W) to (B, 3, 4H, 4W)
    # when hidden_channels == 3 * upscale_factor ** 2
    if pixel_shuffle:
        self.trans_S = PixelShuffle(upscale_factor)
        self.trans_D = PixelShuffle(upscale_factor)
        self.trans_HR = PixelShuffle(upscale_factor)
    else:
        self.trans_S = ConvTranspose2d(hidden_channels, 3, 4, 4, 0, bias=False)
        self.trans_D = ConvTranspose2d(hidden_channels, 3, 4, 4, 0, bias=False)
        self.trans_HR = ConvTranspose2d(hidden_channels, 3, 4, 4, 0, bias=False)
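# --- A minimal shape check (added, not from the source): with
# hidden_channels = 3 * 4 * 4 = 48, the ConvTranspose2d branch above maps
# (B, 48, H, W) to (B, 3, 4H, 4W), the same shape a PixelShuffle(4) would give.
import numpy as np
import megengine as mge
import megengine.module as M

trans = M.ConvTranspose2d(48, 3, 4, 4, 0, bias=False)
x = mge.tensor(np.random.random((1, 48, 16, 16)).astype(np.float32))
print(trans(x).shape)  # (1, 3, 64, 64)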
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    super(FirstCell, self).__init__()
    self.conv_1x1 = M.Sequential(
        M.ReLU(),
        M.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False),
        M.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True),
    )
    self.relu = M.ReLU()
    self.path_1 = M.Sequential(
        M.AvgPool2d(1, stride=2),
        M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False),
    )
    self.path_2 = M.Sequential(
        # M.ZeroPad2d((0, 1, 0, 1)),
        M.AvgPool2d(1, stride=2),
        M.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False),
    )
    self.final_path_bn = M.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)
    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_2_left = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_3_left = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_3_right = M.AvgPool2d(3, stride=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
def __init__(self, cfg, input_shape: List[layers.ShapeSpec]):
    super().__init__()
    in_channels = input_shape[0].channels
    num_classes = cfg.num_classes
    num_convs = 4
    prior_prob = cfg.cls_prior_prob
    num_anchors = [
        len(cfg.anchor_scales[i]) * len(cfg.anchor_ratios[i])
        for i in range(len(input_shape))
    ]
    assert len(set(num_anchors)) == 1, \
        "different numbers of anchors between levels are not supported"
    num_anchors = num_anchors[0]

    cls_subnet = []
    bbox_subnet = []
    for _ in range(num_convs):
        cls_subnet.append(M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
        cls_subnet.append(M.ReLU())
        bbox_subnet.append(M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1))
        bbox_subnet.append(M.ReLU())
    self.cls_subnet = M.Sequential(*cls_subnet)
    self.bbox_subnet = M.Sequential(*bbox_subnet)
    self.cls_score = M.Conv2d(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1)
    self.bbox_pred = M.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1)

    # initialization
    for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]:
        for layer in modules.modules():
            if isinstance(layer, M.Conv2d):
                M.init.normal_(layer.weight, mean=0, std=0.01)
                M.init.fill_(layer.bias, 0)

    # use prior in model initialization to improve stability
    bias_value = -math.log((1 - prior_prob) / prior_prob)
    M.init.fill_(self.cls_score.bias, bias_value)
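# --- A minimal numeric check (added, not from the source): with the prior
# initialization above, cls_score starts by predicting probability prior_prob
# for every anchor, because sigmoid(-log((1 - p) / p)) == p.
import math

p = 0.01  # a typical cls_prior_prob value; an assumption here
b = -math.log((1 - p) / p)
print(1 / (1 + math.exp(-b)))  # ~0.01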
def __init__(self, mode):
    super().__init__()
    self.mode = mode
    self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
    self.normal_conv = M.Conv2d(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1))
    self.group_conv = M.Conv2d(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3)

    self.valid_pad_conv = M.Conv2d(3, 30, 4, padding=(1, 1))
    self.valid_pad_1_conv = M.Conv2d(3, 30, 3, stride=2, padding=(1, 1))
    self.same_pad_conv = M.Conv2d(3, 30, 3, padding=(1, 1))
    self.same_pad_1_conv = M.Conv2d(3, 30, 4, stride=2, padding=(1, 1))
    self.same_pad_2_conv = M.Conv2d(3, 30, 2, dilation=3, stride=2, padding=(1, 1))

    self.normal_conv.bias = mge.Parameter(
        np.random.random(self.normal_conv.bias.shape).astype(np.float32))
    self.group_conv.bias = mge.Parameter(
        np.random.random(self.group_conv.bias.shape).astype(np.float32))

    self.transpose_conv = M.Sequential(
        M.ConvTranspose2d(3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1),
        M.ConvTranspose2d(5, 3, (3, 3)),
    )
    self.transpose_conv[0].bias = mge.Parameter(
        np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32))
    self.transpose_conv[1].bias = mge.Parameter(
        np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32))

    self.tflite_transpose_conv = M.Sequential(
        M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
        M.ConvTranspose2d(5, 3, (3, 3)),
    )
    # use each layer's own bias shape (the original read the shapes from
    # self.transpose_conv, which happens to match but is misleading)
    self.tflite_transpose_conv[0].bias = mge.Parameter(
        np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32))
    self.tflite_transpose_conv[1].bias = mge.Parameter(
        np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32))
def __init__(self, in_planes, planes, stride=1):
    super(BasicBlock, self).__init__()
    self.conv1 = M.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
    self.bn1 = M.BatchNorm2d(planes)
    self.conv2 = M.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
    self.bn2 = M.BatchNorm2d(planes)
    # projection shortcut when the spatial size or channel count changes
    # (self.expansion is expected to be a class attribute, typically 1)
    self.shortcut = M.Sequential()
    if stride != 1 or in_planes != planes:
        self.shortcut = M.Sequential(
            M.Conv2d(
                in_planes,
                self.expansion * planes,
                kernel_size=1,
                stride=stride,
                bias=False,
            ),
            M.BatchNorm2d(self.expansion * planes),
        )
def __init__(self, in_ch=3, num_classes=1000):
    """
    AlexNet.

    Args:
        in_ch: int, the number of channels of the inputs
        num_classes: int, the number of classes to predict

    Reference:
        "One weird trick for parallelizing convolutional neural networks"
        <https://arxiv.org/abs/1404.5997>
    """
    super(AlexNet, self).__init__()
    # feature extractor
    self.features = M.Sequential(
        M.Conv2d(in_ch, 64, kernel_size=11, stride=4, padding=11 // 4),
        M.ReLU(),
        M.MaxPool2d(kernel_size=3, stride=2),
        M.Conv2d(64, 192, kernel_size=5, padding=2),
        M.ReLU(),
        M.MaxPool2d(kernel_size=3, stride=2),
        M.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
        M.ReLU(),
        M.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
        M.ReLU(),
        M.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
        M.ReLU(),
        M.MaxPool2d(kernel_size=3, stride=2),
    )
    # adaptive average pooling to a fixed 6x6 map
    self.avgpool = M.AdaptiveAvgPool2d((6, 6))
    # classifier head
    self.classifier = M.Sequential(
        M.Dropout(),
        M.Linear(256 * 6 * 6, 4096),
        M.ReLU(),
        M.Dropout(),
        M.Linear(4096, 4096),
        M.ReLU(),
        M.Linear(4096, num_classes),
    )
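# --- A minimal shape check (added, not from the source): for a 224x224 input
# the feature stack above already produces a 6x6 map, so AdaptiveAvgPool2d((6, 6))
# only matters for other input resolutions.
import numpy as np
import megengine as mge
import megengine.module as M

features = M.Sequential(
    M.Conv2d(3, 64, kernel_size=11, stride=4, padding=11 // 4),
    M.ReLU(),
    M.MaxPool2d(kernel_size=3, stride=2),
    M.Conv2d(64, 192, kernel_size=5, padding=2),
    M.ReLU(),
    M.MaxPool2d(kernel_size=3, stride=2),
    M.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
    M.ReLU(),
    M.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
    M.ReLU(),
    M.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
    M.ReLU(),
    M.MaxPool2d(kernel_size=3, stride=2),
)
x = mge.tensor(np.random.random((1, 3, 224, 224)).astype(np.float32))
print(features(x).shape)  # (1, 256, 6, 6)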
def __init__(
    self,
    depth,
    in_channels=3,
    stem_out_channels=32,
    out_features=("dark3", "dark4", "dark5"),
):
    """
    Args:
        depth (int): depth of the darknet used in the model, typically 21 or 53.
        in_channels (int): number of input channels, e.g. 3 for an RGB image.
        stem_out_channels (int): number of output channels of the darknet stem;
            it determines the channels of darknet layer2 to layer5.
        out_features (Tuple[str]): desired output layer names.
    """
    super().__init__()
    assert out_features, "please provide output features of Darknet"
    self.out_features = out_features
    self.stem = M.Sequential(
        BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"),
        *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),
    )
    in_channels = stem_out_channels * 2  # 64

    num_blocks = Darknet.depth2blocks[depth]
    # create darknet with `stem_out_channels` and `num_blocks` layers.
    # to keep the model structure explicit, the stages are written out
    # instead of being built in a loop.
    self.dark2 = M.Sequential(*self.make_group_layer(in_channels, num_blocks[0], stride=2))
    in_channels *= 2  # 128
    self.dark3 = M.Sequential(*self.make_group_layer(in_channels, num_blocks[1], stride=2))
    in_channels *= 2  # 256
    self.dark4 = M.Sequential(*self.make_group_layer(in_channels, num_blocks[2], stride=2))
    in_channels *= 2  # 512
    self.dark5 = M.Sequential(
        *self.make_group_layer(in_channels, num_blocks[3], stride=2),
        *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),
    )
def __init__(self):
    super().__init__()
    self.classifier = None
    # each rank owns one pipeline stage of a ResNet-18-style network;
    # only the last rank holds the classifier
    if dist.get_rank() == 0:
        self.features = M.Sequential(
            M.ConvBn2d(3, 64, 7, stride=2, padding=3, bias=False),
            M.MaxPool2d(kernel_size=3, stride=2, padding=1),
            BasicBlock(64, 64, 1),
            BasicBlock(64, 64, 1),
        )
    elif dist.get_rank() == 1:
        self.features = M.Sequential(
            BasicBlock(64, 128, 2),
            BasicBlock(128, 128, 1),
        )
    elif dist.get_rank() == 2:
        self.features = M.Sequential(
            BasicBlock(128, 256, 2),
            BasicBlock(256, 256, 1),
        )
    elif dist.get_rank() == 3:
        self.features = M.Sequential(
            BasicBlock(256, 512, 2),
            BasicBlock(512, 512, 1),
        )
        self.classifier = M.Linear(512, 1000)
def __init__(self, ch=128, nframes=7, input_nc=3, output_nc=3, upscale_factor=4, use_cost_volume=False):
    super(MUCAN, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # build a three-level feature pyramid for every LR frame
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(input_nc, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=4),
    )
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    # alignment units, optionally based on a cost volume
    if use_cost_volume:
        self.AU0 = AU_CV(K=6, d=5, ch=ch)
        self.AU1 = AU_CV(K=5, d=3, ch=ch)
        self.AU2 = AU_CV(K=4, d=3, ch=ch)
    else:
        self.AU0 = AU(ch=ch)
        self.AU1 = AU(ch=ch)
        self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.aggre = M.Conv2d(ch * self.nframes, ch, kernel_size=3, stride=1, padding=1)
    self.main_conv = M.Sequential(
        CARBBlocks(channel_num=ch, block_num=10),
        M.ConvTranspose2d(ch, ch // 2, kernel_size=4, stride=2, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 2, ch // 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        M.ConvTranspose2d(ch // 2, ch // 4, kernel_size=4, stride=2, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 4, output_nc, kernel_size=3, stride=1, padding=1),
    )
def __init__(self, channel_nums):
    super(SDBlock, self).__init__()
    self.netS = M.Sequential(
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
        M.ReLU(),
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
    )
    self.netD = M.Sequential(
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
        M.ReLU(),
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
    )
def __init__(self, num_channels, filter_size, stride, padding):
    super(UPU, self).__init__()
    self.deconv1 = M.Sequential(
        ConvTranspose2d(num_channels, num_channels, filter_size, stride, padding),
        M.PReLU(num_parameters=1, init=0.25),
    )
    self.conv1 = M.Sequential(
        Conv2d(num_channels, num_channels, filter_size, stride, padding),
        M.PReLU(num_parameters=1, init=0.25),
    )
    self.deconv2 = M.Sequential(
        ConvTranspose2d(num_channels, num_channels, filter_size, stride, padding),
        M.PReLU(num_parameters=1, init=0.25),
    )
def __init__(self, channel_nums):
    super(SDBlock, self).__init__()
    self.netS = M.Sequential(
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
        M.LeakyReLU(negative_slope=0.05),
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
    )
    self.netD = M.Sequential(
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
        M.LeakyReLU(negative_slope=0.05),
        Conv2d(channel_nums, channel_nums, 3, 1, 1),
    )
def __init__(self, ch=128, nframes=7, input_nc=3, output_nc=3, upscale_factor=4,
             blocknums1=5, blocknums2=15, non_local=True):
    super(MUCANV2, self).__init__()
    self.nframes = nframes
    self.upscale_factor = upscale_factor
    # build a three-level feature pyramid for every LR frame
    self.feature_encoder_carb = M.Sequential(
        M.Conv2d(input_nc, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(negative_slope=0.05),
        CARBBlocks(channel_num=ch, block_num=blocknums1),
    )
    self.fea_L1_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L1_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.fea_L2_conv1 = M.Conv2d(ch, ch, 3, 2, 1)
    self.fea_L2_conv2 = M.Conv2d(ch, ch, 3, 1, 1)
    self.lrelu = M.LeakyReLU(negative_slope=0.05)
    self.AU0 = AU(ch=ch)
    self.AU1 = AU(ch=ch)
    self.AU2 = AU(ch=ch)
    self.UP0 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    self.UP1 = M.ConvTranspose2d(ch, ch, kernel_size=4, stride=2, padding=1)
    if non_local:
        self.non_local = Separate_non_local(ch, nframes)
    else:
        self.non_local = Identi()
    self.aggre = M.Conv2d(ch * self.nframes, ch, kernel_size=3, stride=1, padding=1)
    self.carbs = M.Sequential(
        CARBBlocks(channel_num=ch, block_num=blocknums2),
    )
    # upsample 4x with two PixelShuffle(2) steps:
    # ch*4 -> ch after the first shuffle, ch*2 -> ch//2 after the second
    self.main_conv = M.Sequential(
        M.Conv2d(ch, ch * 4, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        PixelShuffle(scale=2),  # channels: ch
        M.Conv2d(ch, ch * 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        PixelShuffle(scale=2),  # channels: ch // 2
        M.Conv2d(ch // 2, ch // 2, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
        M.Conv2d(ch // 2, 3, kernel_size=3, stride=1, padding=1),
    )
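# --- A minimal sketch (an assumption, not from the source): the channel
# arithmetic behind PixelShuffle(scale=2), written with reshape and transpose
# so it does not depend on the project's own PixelShuffle class.
import numpy as np
import megengine as mge
import megengine.functional as F

def pixel_shuffle(x, r):
    b, c, h, w = x.shape
    x = F.reshape(x, (b, c // (r * r), r, r, h, w))
    x = F.transpose(x, (0, 1, 4, 2, 5, 3))
    return F.reshape(x, (b, c // (r * r), h * r, w * r))

x = mge.tensor(np.random.random((1, 512, 8, 8)).astype(np.float32))
print(pixel_shuffle(x, 2).shape)  # (1, 128, 16, 16): ch*4 -> ch, 2x spatial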
def __init__(
    self,
    dep_mul,
    wid_mul,
    out_features=("dark3", "dark4", "dark5"),
    depthwise=False,
    act="silu",
):
    super().__init__()
    assert out_features, "please provide output features of Darknet"
    self.out_features = out_features
    Conv = DWConv if depthwise else BaseConv

    base_channels = int(wid_mul * 64)  # 64
    base_depth = max(round(dep_mul * 3), 1)  # 3

    # stem
    self.stem = Focus(3, base_channels, ksize=3, act=act)

    # dark2
    self.dark2 = M.Sequential(
        Conv(base_channels, base_channels * 2, 3, 2, act=act),
        CSPLayer(base_channels * 2, base_channels * 2, n=base_depth, depthwise=depthwise, act=act),
    )

    # dark3
    self.dark3 = M.Sequential(
        Conv(base_channels * 2, base_channels * 4, 3, 2, act=act),
        CSPLayer(base_channels * 4, base_channels * 4, n=base_depth * 3, depthwise=depthwise, act=act),
    )

    # dark4
    self.dark4 = M.Sequential(
        Conv(base_channels * 4, base_channels * 8, 3, 2, act=act),
        CSPLayer(base_channels * 8, base_channels * 8, n=base_depth * 3, depthwise=depthwise, act=act),
    )

    # dark5
    self.dark5 = M.Sequential(
        Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),
        SPPBottleneck(base_channels * 16, base_channels * 16, activation=act),
        CSPLayer(
            base_channels * 16,
            base_channels * 16,
            n=base_depth,
            shortcut=False,
            depthwise=depthwise,
            act=act,
        ),
    )
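# --- A minimal numeric check (added, not from the source): how the depth and
# width multipliers set the base sizes; the 0.33/0.50 pair below is the usual
# YOLOX-S style configuration (an assumption, not taken from this file).
dep_mul, wid_mul = 0.33, 0.50
print(int(wid_mul * 64), max(round(dep_mul * 3), 1))  # 32 1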
def __init__(self, channels, residuals, init_block_kernel_size, init_block_channels,
             maxpool_pad, in_channels=3, in_size=(224, 224), num_classes=1000):
    super(SqueezeNet, self).__init__()
    self.in_size = in_size
    self.num_classes = num_classes

    self.feature = []
    init_block = SqueezeInitBlock(
        in_channels=in_channels,
        out_channels=init_block_channels,
        kernel_size=init_block_kernel_size)
    self.feature.append(init_block)
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        stage = []
        pool = M.MaxPool2d(kernel_size=3, stride=2, padding=maxpool_pad[i])
        stage.append(pool)
        for j, out_channels in enumerate(channels_per_stage):
            # each fire unit squeezes to out_channels // 8 channels, then
            # expands back through two out_channels // 2 branches (1x1 and 3x3)
            expand_channels = out_channels // 2
            squeeze_channels = out_channels // 8
            unit = FireUnit(
                in_channels=in_channels,
                squeeze_channels=squeeze_channels,
                expand1x1_channels=expand_channels,
                expand3x3_channels=expand_channels,
                residual=((residuals is not None) and (residuals[i][j] == 1)))
            stage.append(unit)
            in_channels = out_channels
        self.feature += stage
    self.feature.append(M.Dropout(drop_prob=0.5))
    self.feature = M.Sequential(*self.feature)

    self.output = M.Sequential(
        M.Conv2d(in_channels=in_channels, out_channels=num_classes, kernel_size=1),
        M.ReLU(),
        M.AvgPool2d(kernel_size=13, stride=1),
    )
def __init__(self, in_ch, out_ch, ksize, stride=1, expansion=1.0, bias=False,
             norm_layer=M.BatchNorm2d, activation=M.ReLU()):
    super(XXBlock, self).__init__()
    if norm_layer is None:
        norm_layer = M.BatchNorm2d
    if activation is None:
        activation = M.ReLU()
    expansion_out_ch = round(out_ch * expansion)
    self.conv_block = M.Sequential(
        M.Conv2d(in_ch, expansion_out_ch, ksize, stride=stride, padding=ksize // 2),
        norm_layer(expansion_out_ch),
        activation,
        M.Conv2d(expansion_out_ch, out_ch, ksize, stride=1, padding=(ksize - 1) // 2, bias=bias),
        norm_layer(out_ch),
    )
    self.activation = activation
    # identity shortcut by default; project with a 1x1 conv when the shape
    # changes, average-pooling first when the block strides
    self.shortcut = M.Sequential()
    if stride > 1 or in_ch != out_ch:
        if stride > 1:
            self.shortcut = M.Sequential(
                M.AvgPool2d(kernel_size=stride + 1, stride=stride, padding=stride // 2),
                M.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=bias),
                norm_layer(out_ch),
            )
        else:
            self.shortcut = M.Sequential(
                M.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0, bias=bias),
                norm_layer(out_ch),
            )
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
    super(ShuffleV2Block, self).__init__()
    self.stride = stride
    assert stride in [1, 2]

    self.mid_channels = mid_channels
    self.ksize = ksize
    pad = ksize // 2
    self.pad = pad
    self.inp = inp
    outputs = oup - inp

    branch_main = [
        # pw
        M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
        M.BatchNorm2d(mid_channels),
        FReLU(mid_channels),
        # dw
        M.Conv2d(
            mid_channels,
            mid_channels,
            ksize,
            stride,
            pad,
            groups=mid_channels,
            bias=False,
        ),
        M.BatchNorm2d(mid_channels),
        # pw-linear
        M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
        M.BatchNorm2d(outputs),
        FReLU(outputs),
    ]
    self.branch_main = M.Sequential(*branch_main)

    if stride == 2:
        branch_proj = [
            # dw
            M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
            M.BatchNorm2d(inp),
            # pw-linear
            M.Conv2d(inp, inp, 1, 1, 0, bias=False),
            M.BatchNorm2d(inp),
            FReLU(inp),
        ]
        self.branch_proj = M.Sequential(*branch_proj)
    else:
        self.branch_proj = None
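# --- A minimal sketch (an assumption, not from the source): the channel
# shuffle that usually follows a ShuffleV2Block, implemented with reshape and
# transpose only.
import numpy as np
import megengine as mge
import megengine.functional as F

def channel_shuffle(x, groups=2):
    b, c, h, w = x.shape
    x = F.reshape(x, (b, groups, c // groups, h, w))
    x = F.transpose(x, (0, 2, 1, 3, 4))
    return F.reshape(x, (b, c, h, w))

x = mge.tensor(np.random.random((1, 116, 28, 28)).astype(np.float32))
print(channel_shuffle(x).shape)  # (1, 116, 28, 28), channels interleaved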
def _make_layer(self, block, channels, blocks, stride=1, dilate=False, norm=M.BatchNorm2d):
    if dilate:
        # replace striding with dilation for this stage
        self.dilation *= stride
        stride = 1
    layers = []
    # only the first block may stride or change the channel count
    layers.append(
        block(
            self.in_channels,
            channels,
            stride,
            groups=self.groups,
            base_width=self.base_width,
            dilation=self.dilation,
            norm=norm,
        ))
    self.in_channels = channels * block.expansion
    for _ in range(1, blocks):
        layers.append(
            block(
                self.in_channels,
                channels,
                groups=self.groups,
                base_width=self.base_width,
                dilation=self.dilation,
                norm=norm,
            ))
    return M.Sequential(*layers)
def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
    super(Inception, self).__init__()
    self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
    self.branch2 = M.Sequential(
        BasicConv2d(in_channels, ch3x3red, kernel_size=1),
        BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1),
    )
    self.branch3 = M.Sequential(
        BasicConv2d(in_channels, ch5x5red, kernel_size=1),
        # a 3x3 kernel despite the ch5x5 name, matching the torchvision port
        BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1),
    )
    self.branch4 = M.Sequential(
        M.MaxPool2d(kernel_size=3, stride=1, padding=1),
        BasicConv2d(in_channels, pool_proj, kernel_size=1),
    )
def __init__(self, ch, cl):
    super(Decoder, self).__init__()
    self.model = M.Sequential(
        ResBlocks(channel_num=ch, resblock_num=5, kernel_size=3),
        # strided conv: kernel 8, stride 4, padding 2 downsamples by 4x
        M.Conv2d(ch, cl, kernel_size=8, stride=4, padding=2),
        M.PReLU(),
    )
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    super(BasicBlock, self).__init__()
    if groups != 1 or base_width != 64:
        raise ValueError("BasicBlock only supports groups=1 and base_width=64")
    if dilation > 1:
        raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
    self.conv1 = M.Conv2d(in_channels, channels, 3, stride, padding=dilation, bias=False)
    self.bn1 = norm(channels)
    self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=False)
    self.bn2 = norm(channels)
    self.downsample = (
        M.Identity()
        if in_channels == channels and stride == 1
        else M.Sequential(
            M.Conv2d(in_channels, channels, 1, stride, bias=False),
            norm(channels),
        ))
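# --- A minimal shape check (added, not from the source): the projection
# branch built above keeps the residual addition shape-compatible when the
# main path strides or widens, e.g. 64 -> 128 channels at stride 2.
import numpy as np
import megengine as mge
import megengine.module as M

downsample = M.Sequential(
    M.Conv2d(64, 128, 1, 2, bias=False),
    M.BatchNorm2d(128),
)
x = mge.tensor(np.random.random((1, 64, 56, 56)).astype(np.float32))
print(downsample(x).shape)  # (1, 128, 28, 28)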
def __init__(self, in_channels, out_channels, kernel_size=3, activation='prelu'):
    super(ResBlock, self).__init__()
    if activation == 'relu':
        self.act = M.ReLU()
    elif activation == 'prelu':
        self.act = M.PReLU(num_parameters=1, init=0.25)
    else:
        raise NotImplementedError("activation not implemented")
    m = []
    m.append(M.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                      stride=1, padding=kernel_size // 2))
    m.append(self.act)
    m.append(M.Conv2d(out_channels, out_channels, kernel_size=kernel_size,
                      stride=1, padding=kernel_size // 2))
    self.body = M.Sequential(*m)
def __init__(self, K, d, ch):
    super(AU_CV, self).__init__()
    self.K = K
    self.d = d
    self.conv = M.Sequential(
        M.Conv2d(ch * self.K, ch, kernel_size=3, stride=1, padding=1),
        M.LeakyReLU(),
    )
def _make_layer(self, num_blocks, in_channels, bottleneck_channels, out_channels, stride):
    layers = []
    for _ in range(num_blocks):
        layers.append(Bottleneck(in_channels, bottleneck_channels, out_channels, stride))
        # only the first block in the stage strides; the rest keep resolution
        stride = 1
        in_channels = out_channels
    return M.Sequential(*layers)
def __init__(
    self,
    in_channels,
    channels,
    stride=1,
    groups=1,
    base_width=64,
    dilation=1,
    norm=M.BatchNorm2d,
):
    super().__init__()
    # width of the 3x3 conv; grows with base_width and groups (ResNeXt-style)
    width = int(channels * (base_width / 64.0)) * groups
    self.conv1 = M.Conv2d(in_channels, width, 1, 1, bias=False)
    self.bn1 = norm(width)
    self.conv2 = M.Conv2d(
        width,
        width,
        3,
        stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
    self.bn2 = norm(width)
    self.conv3 = M.Conv2d(width, channels * self.expansion, 1, 1, bias=False)
    self.bn3 = norm(channels * self.expansion)
    self.downsample = (
        M.Identity()
        if in_channels == channels * self.expansion and stride == 1
        else M.Sequential(
            M.Conv2d(in_channels, channels * self.expansion, 1, stride, bias=False),
            norm(channels * self.expansion),
        )
    )
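# --- A minimal numeric check (added, not from the source): the `width`
# formula under plain ResNet-50 defaults versus ResNeXt-50 32x4d settings.
for channels, groups, base_width in [(64, 1, 64), (64, 32, 4)]:
    width = int(channels * (base_width / 64.0)) * groups
    print(channels, groups, base_width, "->", width)  # -> 64, then -> 128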
def __init__(self, inp, oup, stride, expand_ratio):
    super(InvertedResidual, self).__init__()
    self.stride = stride
    assert stride in [1, 2]

    hidden_dim = int(round(inp * expand_ratio))
    self.use_res_connect = self.stride == 1 and inp == oup

    layers = []
    if expand_ratio != 1:
        # pw
        layers.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
    layers.extend([
        # dw
        M.ConvBnRelu2d(
            hidden_dim,
            hidden_dim,
            kernel_size=3,
            padding=1,
            stride=stride,
            groups=hidden_dim,
            bias=False,
        ),
        # pw-linear
        M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False),
    ])
    self.conv = M.Sequential(*layers)
    # quantization-friendly elementwise add for the residual connection
    self.add = M.Elemwise("ADD")
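# --- A minimal sketch (an assumption, not from the source): a forward pass
# typically pairs with this __init__ by routing the residual addition through
# the Elemwise("ADD") module, keeping the add visible to quantization passes.
def forward(self, x):
    if self.use_res_connect:
        return self.add(x, self.conv(x))
    return self.conv(x)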