def __init__(self):
    """Assemble the MINet VGG16 variant.

    Wires together the five-stage VGG16 backbone, the AIM channel
    transition, per-scale SIM modules, and the decoder convolutions
    ending in a single-channel classifier head.
    """
    super().__init__()

    # Up-sampling helpers used by the forward pass.
    self.upsample_add = upsample_add
    self.upsample = cus_sample

    # VGG16 backbone split into five stages; names suggest output
    # strides 1/2/4/8/16 — TODO confirm against Backbone_VGG16_in3.
    (
        self.encoder1,
        self.encoder2,
        self.encoder4,
        self.encoder8,
        self.encoder16,
    ) = Backbone_VGG16_in3()

    # Channel transition: backbone channels -> unified decoder widths.
    self.trans = AIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))

    # One SIM per decoder scale (the stride-1 scale runs at half width).
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)
    self.sim1 = SIM(32, 16)

    # 3x3 decoder convolutions, narrowing to 32 channels at stride 2.
    self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
    self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

    # 1x1 projection to a single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)
def __init__(self):
    """Assemble the checkpoint-friendly light MINet VGG16 variant.

    Identical wiring to the plain VGG16 model except that the channel
    transition is the LightAIM module and a dummy tensor is kept for
    gradient-checkpointed calls.
    """
    super().__init__()

    # Up-sampling helpers used by the forward pass.
    self.upsample_add = upsample_add
    self.upsample = cus_sample

    # NOTE(review): a requires_grad=True scalar-ish tensor like this is
    # typically fed to torch.utils.checkpoint so gradients flow through
    # modules without tensor inputs — confirm against the forward pass.
    self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)

    # VGG16 backbone split into five stages; names suggest output
    # strides 1/2/4/8/16 — TODO confirm against Backbone_VGG16_in3.
    (
        self.encoder1,
        self.encoder2,
        self.encoder4,
        self.encoder8,
        self.encoder16,
    ) = Backbone_VGG16_in3()

    # Lightweight channel transition: backbone -> unified decoder widths.
    self.trans = LightAIM((64, 128, 256, 512, 512), (32, 64, 64, 64, 64))

    # One SIM per decoder scale (the stride-1 scale runs at half width).
    self.sim16 = SIM(64, 32)
    self.sim8 = SIM(64, 32)
    self.sim4 = SIM(64, 32)
    self.sim2 = SIM(64, 32)
    self.sim1 = SIM(32, 16)

    # 3x3 decoder convolutions, narrowing to 32 channels at stride 2.
    self.upconv16 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv8 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv4 = BasicConv2d(64, 64, kernel_size=3, stride=1, padding=1)
    self.upconv2 = BasicConv2d(64, 32, kernel_size=3, stride=1, padding=1)
    self.upconv1 = BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)

    # 1x1 projection to a single-channel prediction map.
    self.classifier = nn.Conv2d(32, 1, 1)