def __init__(self):
    """Build the VGG16-backbone MINet: encoder stages, AIM transition,
    per-stage SIM modules, decoder refinement convs, and the 1-channel
    saliency classifier head."""
    super(MINet_VGG16, self).__init__()
    # Fusion helpers used by the decoder.
    self.upsample_add = upsample_add
    self.upsample = cus_sample

    # Five VGG16 encoder stages (names reflect the downsampling factor).
    (
        self.encoder1,
        self.encoder2,
        self.encoder4,
        self.encoder8,
        self.encoder16,
    ) = Backbone_VGG16_in3()

    # Transition from backbone channel widths to decoder widths.
    self.trans = AIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))

    # One SIM per decoder stage.
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)
    self.sim1 = SIM(32, 16)

    # 3x3 refinement convs applied after each upsampling step; built via
    # setattr so the per-stage channel table lives in one place.
    for stage, in_c, out_c in (
        ("16", 64, 64),
        ("8", 64, 64),
        ("4", 64, 64),
        ("2", 64, 32),
        ("1", 32, 32),
    ):
        setattr(
            self,
            "upconv" + stage,
            BasicConv2d(in_c, out_c, kernel_size=3, stride=1, padding=1),
        )

    # Final 1x1 conv producing the single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self):
    """Build the checkpointed light MINet with a VGG16 backbone: encoder
    stages, LightAIM transition, SIM modules, decoder convs, classifier."""
    super(CPLightMINet_VGG16, self).__init__()
    # Fusion helpers used by the decoder.
    self.upsample_add = upsample_add
    self.upsample = cus_sample
    # Dummy tensor with requires_grad=True; presumably passed through
    # torch.utils.checkpoint so gradients propagate — confirm at call site.
    self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

    # Five VGG16 encoder stages (names reflect the downsampling factor).
    (
        self.encoder1,
        self.encoder2,
        self.encoder4,
        self.encoder8,
        self.encoder16,
    ) = Backbone_VGG16_in3()

    # Lightweight transition from backbone widths to decoder widths.
    self.trans = LightAIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))

    # One SIM per decoder stage.
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)
    self.sim1 = SIM(32, 16)

    # 3x3 refinement convs after each upsampling step; built via setattr to
    # keep the per-stage channel table in one place.
    for stage, in_c, out_c in (
        ("16", 64, 64),
        ("8", 64, 64),
        ("4", 64, 64),
        ("2", 64, 32),
        ("1", 32, 32),
    ):
        setattr(
            self,
            "upconv" + stage,
            BasicConv2d(in_c, out_c, kernel_size=3, stride=1, padding=1),
        )

    # Final 1x1 conv producing the single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self, in_C, out_C, down_factor=4, k=4):
    """DenseNet-style block that builds dense connections within a feature.

    Args:
        in_C: input channel count.
        out_C: output channel count.
        down_factor: reduction factor for the internal channel width.
        k: number of densely connected 3x3 conv layers.
    """
    super(DenseLayer, self).__init__()
    self.k = k
    self.down_factor = down_factor
    mid_C = out_C // down_factor

    # 1x1 projection down to the internal width.
    self.down = nn.Conv2d(in_C, mid_C, 1)
    # The i-th layer consumes the concatenation of all i previous outputs,
    # so its input width grows linearly with its index.
    self.denseblock = nn.ModuleList(
        [BasicConv2d(mid_C * i, mid_C, 3, 1, 1) for i in range(1, k + 1)]
    )
    # Fuse the original input with the final dense output.
    self.fuse = BasicConv2d(in_C + mid_C, out_C, kernel_size=3, stride=1, padding=1)
def __init__(self):
    """Build the ResNet-50-backbone MINet: backbone stages, AIM transition,
    SIM modules, decoder refinement convs, and the classifier head."""
    super(MINet_Res50, self).__init__()
    # Five ResNet-50 stages (names reflect the downsampling factor).
    (
        self.div_2,
        self.div_4,
        self.div_8,
        self.div_16,
        self.div_32,
    ) = Backbone_ResNet50_in3()

    # Fusion helpers used by the decoder.
    self.upsample_add = upsample_add
    self.upsample = cus_sample

    # Transition from backbone channel widths to a uniform decoder width.
    self.trans = AIM(iC_list=(64, 256, 512, 1024, 2048), oC_list=(64, 64, 64, 64, 64))

    # One SIM per decoder stage.
    self.sim32 = SIM(64, 32)
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)

    # 3x3 refinement convs after each upsampling step; built via setattr so
    # the per-stage channel table lives in one place.
    for stage, in_c, out_c in (
        ("32", 64, 64),
        ("16", 64, 64),
        ("8", 64, 64),
        ("4", 64, 64),
        ("2", 64, 32),
        ("1", 32, 32),
    ):
        setattr(
            self,
            "upconv" + stage,
            BasicConv2d(in_c, out_c, kernel_size=3, stride=1, padding=1),
        )

    # Final 1x1 conv producing the single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self):
    """Build the checkpointed (CP) light MINet with a WS+GN ResNet-50
    backbone: backbone stages, LightAIM transition, SIM modules, decoder
    refinement convs, and the single-channel classifier head.

    Fix: the original body assigned ``self.upsample_add`` and
    ``self.upsample`` twice; the redundant duplicate assignments were
    removed (no behavioral change — the values were identical).
    """
    super(CPLMINet_WSGNRes50, self).__init__()
    # Fusion helpers used by the decoder.
    self.upsample_add = upsample_add
    self.upsample = cus_sample
    # Dummy tensor with requires_grad=True; presumably passed through
    # torch.utils.checkpoint so gradients propagate — confirm at call site.
    self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

    # Five ResNet-50 stages (names reflect the downsampling factor).
    self.div_2, self.div_4, self.div_8, self.div_16, self.div_32 = Backbone_ResNet50_in3()

    # Lightweight transition from backbone widths to a uniform decoder width.
    self.trans = LightAIM(iC_list=(64, 256, 512, 1024, 2048), oC_list=(64, 64, 64, 64, 64))

    # One SIM per decoder stage.
    self.sim32 = SIM(64, 32)
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)

    # 3x3 refinement convs applied after each upsampling step.
    self.upconv32 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
    self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

    # Final 1x1 conv producing the single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self):
    """Build the checkpointed ResNet-50 variant: backbone stages, per-scale
    1x1 transition convs, SIM modules, decoder refinement convs, and the
    single-channel classifier head.

    Fix: the original body assigned ``self.upsample_add`` and
    ``self.upsample`` twice; the redundant duplicate assignments were
    removed (no behavioral change — the values were identical).
    """
    super(cp_res50, self).__init__()
    # Fusion helpers used by the decoder.
    self.upsample_add = upsample_add
    self.upsample = cus_sample
    # Dummy tensor with requires_grad=True; presumably passed through
    # torch.utils.checkpoint so gradients propagate — confirm at call site.
    self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

    # Five ResNet-50 stages (names reflect the downsampling factor).
    self.div_2, self.div_4, self.div_8, self.div_16, self.div_32 = Backbone_ResNet50_in3()

    # Plain 1x1 transitions from backbone widths to a uniform 64 channels
    # (this variant uses per-scale convs instead of an AIM module).
    self.trans32 = nn.Conv2d(2048, 64, 1, 1)
    self.trans16 = nn.Conv2d(1024, 64, 1, 1)
    self.trans8 = nn.Conv2d(512, 64, 1, 1)
    self.trans4 = nn.Conv2d(256, 64, 1, 1)
    self.trans2 = nn.Conv2d(64, 64, 1, 1)

    # One SIM per decoder stage.
    self.sim32 = SIM(64, 32)
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)

    # 3x3 refinement convs applied after each upsampling step.
    self.upconv32 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
    self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

    # Final 1x1 conv producing the single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self, pretrained=True):
    """Build HDFNet with twin VGG16 encoders (3-channel RGB, 1-channel
    depth), 1x1 transition convs, dense depth transitions, decoder convs,
    DDPM fusion modules, and a single-channel classifier head.

    Args:
        pretrained: forwarded to both backbone constructors.
    """
    super(HDFNet_VGG16, self).__init__()
    # Fusion helpers used by the decoder.
    self.upsample_add = upsample_add
    self.upsample = cus_sample

    # RGB encoder stages (3-channel input).
    (
        self.encoder1,
        self.encoder2,
        self.encoder4,
        self.encoder8,
        self.encoder16,
    ) = Backbone_VGG_in3(pretrained=pretrained)
    # Depth encoder stages (1-channel input).
    (
        self.depth_encoder1,
        self.depth_encoder2,
        self.depth_encoder4,
        self.depth_encoder8,
        self.depth_encoder16,
    ) = Backbone_VGG_in1(pretrained=pretrained)

    # 1x1 transitions from RGB-encoder widths to decoder widths; built via
    # setattr so the per-stage channel table lives in one place.
    for stage, in_c, out_c in (
        ("16", 512, 64),
        ("8", 512, 64),
        ("4", 256, 64),
        ("2", 128, 64),
        ("1", 64, 32),
    ):
        setattr(self, "trans" + stage, nn.Conv2d(in_c, out_c, 1))

    # Dense transition layers on the three deepest depth-branch stages.
    self.depth_trans16 = DenseTransLayer(512, 64)
    self.depth_trans8 = DenseTransLayer(512, 64)
    self.depth_trans4 = DenseTransLayer(256, 64)

    # 3x3 refinement convs applied after each upsampling step.
    for stage, in_c, out_c in (
        ("16", 64, 64),
        ("8", 64, 64),
        ("4", 64, 64),
        ("2", 64, 32),
        ("1", 32, 32),
    ):
        setattr(
            self,
            "upconv" + stage,
            BasicConv2d(in_c, out_c, kernel_size=3, stride=1, padding=1),
        )

    # Dynamic-convolution fusion modules for the three deepest scales.
    self.selfdc_16 = DDPM(64, 64, 64, 3, 4)
    self.selfdc_8 = DDPM(64, 64, 64, 3, 4)
    self.selfdc_4 = DDPM(64, 64, 64, 3, 4)

    # Final 1x1 conv producing the single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self, in_xC, in_yC, out_C, kernel_size=3, down_factor=4):
    """Dynamic convolution module (DDPM) implemented with nn.Unfold.

    Args:
        in_xC (int): channel count of the first input.
        in_yC (int): channel count of the second input.
        out_C (int): channel count of the final output.
        kernel_size (int): size of the generated convolution kernels.
        down_factor (int): channel-reduction factor used inside the
            kernel-generation branches to cut parameter count.
    """
    super(DDPM, self).__init__()
    self.kernel_size = kernel_size
    # Internal width; note this is a fixed quarter of out_C, independent of
    # down_factor (which only affects the branches).
    self.mid_c = out_C // 4
    # 1x1 projection of the first input down to the internal width.
    self.down_input = nn.Conv2d(in_xC, self.mid_c, 1)
    # Three parallel depth-wise dynamic-conv branches; names suggest
    # different dilations (1/3/5) — confirm in the DepthDC3x3_* classes.
    mid = self.mid_c
    self.branch_1 = DepthDC3x3_1(mid, in_yC, mid, down_factor=down_factor)
    self.branch_3 = DepthDC3x3_3(mid, in_yC, mid, down_factor=down_factor)
    self.branch_5 = DepthDC3x3_5(mid, in_yC, mid, down_factor=down_factor)
    # Fuse the down-projected input together with the three branch outputs.
    self.fuse = BasicConv2d(4 * mid, out_C, 3, 1, 1)
def __init__(self, in_C, out_C):
    """Transition layer built around a DenseLayer.

    Args:
        in_C: input channel count.
        out_C: output channel count; also fixes the DenseLayer reduction
            factor as ``in_C // out_C``.
    """
    super(DenseTransLayer, self).__init__()
    reduction = in_C // out_C
    # 3x3 conv kept at the input width (name hints at multiplicative
    # fusion in forward — confirm there).
    self.fuse_down_mul = BasicConv2d(in_C, in_C, 3, 1, 1)
    # Dense residual main path, width preserved.
    self.res_main = DenseLayer(in_C, in_C, down_factor=reduction)
    # Final projection to the requested output width.
    self.fuse_main = BasicConv2d(in_C, out_C, kernel_size=3, stride=1, padding=1)