import torch
import torch.nn as nn

# upsample_add, cus_sample, LightAIM, SIM, BasicConv2d and the Backbone_*
# constructors are project-local helpers defined elsewhere in this repository.


class CPLMINet_WSGNRes50(nn.Module):
    def __init__(self):
        super(CPLMINet_WSGNRes50, self).__init__()
        self.upsample_add = upsample_add
        self.upsample = cus_sample
        # Dummy tensor with requires_grad=True, used to keep gradients flowing
        # through checkpointed segments (see the note after CPLightMINet_VGG16).
        self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

        # ResNet-50 encoder split into its five resolution stages.
        (
            self.div_2,
            self.div_4,
            self.div_8,
            self.div_16,
            self.div_32,
        ) = Backbone_ResNet50_in3()

        # Transition: compress every encoder stage to 64 channels.
        self.trans = LightAIM(iC_list=(64, 256, 512, 1024, 2048), oC_list=(64, 64, 64, 64, 64))

        self.sim32 = SIM(64, 32)
        self.sim16 = SIM(64, 32)
        self.sim8 = SIM(64, 32)
        self.sim4 = SIM(64, 32)
        self.sim2 = SIM(64, 32)

        self.upconv32 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

        self.classifier = nn.Conv2d(32, 1, 1)
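    # NOTE: illustrative sketch only, not the original forward(). It shows how
    # the modules above could be chained top-down, assuming that LightAIM
    # returns one transformed tensor per stage, that SIM preserves its input
    # channel count, that upsample_add(high, low) upsamples `high` to `low`'s
    # spatial size before adding, and that cus_sample accepts a scale_factor
    # keyword. The checkpointing implied by the "CP" prefix and dummy_tensor
    # is omitted here for clarity.
    def forward(self, in_data):
        in_data_2 = self.div_2(in_data)       # 1/2 res,  64ch
        in_data_4 = self.div_4(in_data_2)     # 1/4 res,  256ch
        in_data_8 = self.div_8(in_data_4)     # 1/8 res,  512ch
        in_data_16 = self.div_16(in_data_8)   # 1/16 res, 1024ch
        in_data_32 = self.div_32(in_data_16)  # 1/32 res, 2048ch

        t2, t4, t8, t16, t32 = self.trans(
            in_data_2, in_data_4, in_data_8, in_data_16, in_data_32
        )

        out_32 = self.upconv32(self.sim32(t32))                             # 64ch
        out_16 = self.upconv16(self.sim16(self.upsample_add(out_32, t16)))  # 64ch
        out_8 = self.upconv8(self.sim8(self.upsample_add(out_16, t8)))      # 64ch
        out_4 = self.upconv4(self.sim4(self.upsample_add(out_8, t4)))       # 64ch
        out_2 = self.upconv2(self.sim2(self.upsample_add(out_4, t2)))       # 32ch
        out_1 = self.upconv1(self.upsample(out_2, scale_factor=2))          # full res, 32ch
        return self.classifier(out_1)  # 1-channel saliency logits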
class CPLightMINet_VGG16(nn.Module):
    def __init__(self):
        super(CPLightMINet_VGG16, self).__init__()
        self.upsample_add = upsample_add
        self.upsample = cus_sample
        # Dummy tensor with requires_grad=True for gradient checkpointing
        # (see the note after this class).
        self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

        # VGG-16 encoder split into its five resolution stages.
        (
            self.encoder1,
            self.encoder2,
            self.encoder4,
            self.encoder8,
            self.encoder16,
        ) = Backbone_VGG16_in3()

        # Transition: compress the encoder stages to (32, 64, 64, 64, 64) channels.
        self.trans = LightAIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))

        self.sim16 = SIM(64, 32)
        self.sim8 = SIM(64, 32)
        self.sim4 = SIM(64, 32)
        self.sim2 = SIM(64, 32)
        self.sim1 = SIM(32, 16)

        self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

        self.classifier = nn.Conv2d(32, 1, 1)
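# NOTE: illustrative sketch, not part of the original file. The "CP" classes
# keep `self.dummy_tensor` because torch.utils.checkpoint.checkpoint only
# builds a backward graph when at least one of its inputs requires grad, and
# the raw image input does not. Threading a dummy requires_grad tensor through
# the checkpointed call is a common workaround; a hypothetical wrapper
# following that pattern:
from torch.utils.checkpoint import checkpoint


class ModuleWrapperIgnores2ndArg(nn.Module):
    """Run `module(x)` while accepting (and ignoring) the dummy tensor."""

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, x, dummy):
        return self.module(x)


# Hypothetical usage inside a forward pass:
#   stage1 = ModuleWrapperIgnores2ndArg(self.encoder1)
#   feat = checkpoint(stage1, in_data, self.dummy_tensor)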
class cp_res50(nn.Module):
    def __init__(self):
        super(cp_res50, self).__init__()
        self.upsample_add = upsample_add
        self.upsample = cus_sample
        self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

        # ResNet-50 encoder split into its five resolution stages.
        (
            self.div_2,
            self.div_4,
            self.div_8,
            self.div_16,
            self.div_32,
        ) = Backbone_ResNet50_in3()

        # Plain 1x1 transition convs (instead of LightAIM): compress every
        # encoder stage to 64 channels.
        self.trans32 = nn.Conv2d(2048, 64, 1, 1)
        self.trans16 = nn.Conv2d(1024, 64, 1, 1)
        self.trans8 = nn.Conv2d(512, 64, 1, 1)
        self.trans4 = nn.Conv2d(256, 64, 1, 1)
        self.trans2 = nn.Conv2d(64, 64, 1, 1)

        self.sim32 = SIM(64, 32)
        self.sim16 = SIM(64, 32)
        self.sim8 = SIM(64, 32)
        self.sim4 = SIM(64, 32)
        self.sim2 = SIM(64, 32)

        self.upconv32 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

        self.classifier = nn.Conv2d(32, 1, 1)
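# NOTE: illustrative smoke test, not part of the original file. It assumes
# cp_res50 also defines a forward() that fuses the five transitioned stages
# top-down and returns a single-channel logit map at input resolution, as the
# 1-channel classifier above suggests.
if __name__ == "__main__":
    model = cp_res50()
    x = torch.randn(1, 3, 320, 320)  # 320x320 is a common input size in SOD
    with torch.no_grad():
        pred = model(x)
    print(pred.shape)  # expected: torch.Size([1, 1, 320, 320])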
class LightMINet_VGG16(nn.Module):
    def __init__(self):
        super(LightMINet_VGG16, self).__init__()
        self.upsample_add = upsample_add
        self.upsample = cus_sample

        # VGG-16 encoder split into its five resolution stages.
        (
            self.encoder1,
            self.encoder2,
            self.encoder4,
            self.encoder8,
            self.encoder16,
        ) = Backbone_VGG16_in3()

        # Transition: compress the encoder stages to (32, 64, 64, 64, 64) channels.
        self.trans = LightAIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))

        self.sim16 = SIM(64, 32)
        self.sim8 = SIM(64, 32)
        self.sim4 = SIM(64, 32)
        self.sim2 = SIM(64, 32)
        self.sim1 = SIM(32, 16)

        self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

        self.classifier = nn.Conv2d(32, 1, 1)
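    # NOTE: illustrative sketch only, not the original forward(). Assumptions:
    # LightAIM returns one transformed tensor per stage, SIM preserves its
    # input channel count, and upsample_add(high, low) upsamples `high` to
    # `low`'s spatial size before adding. Unlike the ResNet variants, the
    # VGG encoder keeps a full-resolution stage, so sim1 refines at full
    # resolution and no final 2x upsample is needed.
    def forward(self, in_data):
        in_data_1 = self.encoder1(in_data)      # full res, 64ch
        in_data_2 = self.encoder2(in_data_1)    # 1/2 res,  128ch
        in_data_4 = self.encoder4(in_data_2)    # 1/4 res,  256ch
        in_data_8 = self.encoder8(in_data_4)    # 1/8 res,  512ch
        in_data_16 = self.encoder16(in_data_8)  # 1/16 res, 512ch

        t1, t2, t4, t8, t16 = self.trans(
            in_data_1, in_data_2, in_data_4, in_data_8, in_data_16
        )

        out_16 = self.upconv16(self.sim16(t16))                         # 64ch
        out_8 = self.upconv8(self.sim8(self.upsample_add(out_16, t8)))  # 64ch
        out_4 = self.upconv4(self.sim4(self.upsample_add(out_8, t4)))   # 64ch
        out_2 = self.upconv2(self.sim2(self.upsample_add(out_4, t2)))   # 32ch
        out_1 = self.upconv1(self.sim1(self.upsample_add(out_2, t1)))   # 32ch
        return self.classifier(out_1)  # 1-channel saliency logits at full res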