Example #1
    def test_shape(self, input_data, expected_shape):
        # build the network requested by the parameterized test case
        if input_data["model"] == "densenet2d":
            model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        if input_data["model"] == "densenet3d":
            model = DenseNet(spatial_dims=3,
                             in_channels=1,
                             out_channels=3,
                             init_features=2,
                             growth_rate=2,
                             block_config=(6, ))
        if input_data["model"] == "senet2d":
            model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
        if input_data["model"] == "senet3d":
            model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()
        # Grad-CAM over the configured target layers; when class_idx is not
        # given, the map is computed for the predicted (argmax) class
        cam = GradCAM(nn_module=model,
                      target_layers=input_data["target_layers"])
        image = torch.rand(input_data["shape"], device=device)
        result = cam(x=image, layer_idx=-1)
        np.testing.assert_array_equal(cam.nn_module.class_idx.cpu(),
                                      model(image).max(1)[-1].cpu())
        fea_shape = cam.feature_map_size(input_data["shape"], device=device)
        self.assertTupleEqual(fea_shape, input_data["feature_shape"])
        self.assertTupleEqual(result.shape, expected_shape)
        # check result is same whether class_idx=None is used or not
        result2 = cam(x=image,
                      layer_idx=-1,
                      class_idx=model(image).max(1)[-1].cpu())
        np.testing.assert_array_almost_equal(result, result2)
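
The same GradCAM call pattern works outside a test harness. Below is a minimal, self-contained sketch (not part of the original tests): it assumes MONAI and PyTorch are installed and reuses the DenseNet layer name "class_layers.relu" that appears in Example #3.

import torch
from monai.networks.nets import DenseNet121
from monai.visualize import GradCAM

# minimal sketch: Grad-CAM heatmap for a 2D DenseNet121 on a random image
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3).to(device)
model.eval()

cam = GradCAM(nn_module=model, target_layers="class_layers.relu")
image = torch.rand(1, 1, 64, 64, device=device)
heatmap = cam(x=image)  # upsampled back to the input spatial size: (1, 1, 64, 64)
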
Example #2
    def test_shape(self, input_data, expected_shape):
        # build the network requested by the parameterized test case
        if input_data["model"] == "densenet2d":
            model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        if input_data["model"] == "densenet3d":
            model = DenseNet(spatial_dims=3,
                             in_channels=1,
                             out_channels=3,
                             init_features=2,
                             growth_rate=2,
                             block_config=(6, ))
        if input_data["model"] == "senet2d":
            model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
        if input_data["model"] == "senet3d":
            model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()
        # CAM needs the fully connected layer(s) as well as the target layer
        cam = CAM(nn_module=model,
                  target_layers=input_data["target_layers"],
                  fc_layers=input_data["fc_layers"])
        image = torch.rand(input_data["shape"], device=device)
        result = cam(x=image, layer_idx=-1)
        fea_shape = cam.feature_map_size(input_data["shape"], device=device)
        self.assertTupleEqual(fea_shape, input_data["feature_shape"])
        self.assertTupleEqual(result.shape, expected_shape)
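
The class-activation-map variant changes only the saliency object: CAM also needs the fully connected layer of the classification head, which it applies across the spatial locations of the target layer's feature map. A minimal sketch under the same assumptions as the GradCAM sketch above (layer names taken from the DenseNet examples on this page):

import torch
from monai.networks.nets import DenseNet121
from monai.visualize import CAM

device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3).to(device)
model.eval()

# CAM takes the feature-map layer and the fully connected layer of the head
cam = CAM(nn_module=model,
          target_layers="class_layers.relu",
          fc_layers="class_layers.out")
image = torch.rand(1, 1, 64, 64, device=device)
heatmap = cam(x=image)  # shape: (1, 1, 64, 64)
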
Example #3
    def test_shape(self, cam_name):
        model = DenseNet(spatial_dims=3,
                         in_channels=1,
                         out_channels=3,
                         init_features=2,
                         growth_rate=2,
                         block_config=(6, ))
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()

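        # a small random 3D volume; the layer names below refer to DenseNet's
        # classification head ("class_layers")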
        image = torch.rand((2, 1, 6, 6, 6), device=device)
        target_layer = "class_layers.relu"
        fc_layer = "class_layers.out"
        if cam_name == "CAM":
            inferer = SaliencyInferer(cam_name,
                                      target_layer,
                                      None,
                                      fc_layer,
                                      upsampler=default_upsampler)
            result = inferer(inputs=image, network=model, layer_idx=-1)
        else:
            inferer = SaliencyInferer(cam_name,
                                      target_layer,
                                      None,
                                      upsampler=default_upsampler)
            result = inferer(image, model, -1, retain_graph=False)

        self.assertTupleEqual(result.shape, (2, 1, 6, 6, 6))
Example #4
# note: `device` and `out_channels_2d` are defined earlier in the original
# test module and are not shown in this excerpt
out_channels_3d = 3
model_2d = DenseNet121(spatial_dims=2,
                       in_channels=1,
                       out_channels=out_channels_2d).to(device)
model_2d_2c = DenseNet121(spatial_dims=2,
                          in_channels=2,
                          out_channels=out_channels_2d).to(device)
model_3d = DenseNet(spatial_dims=3,
                    in_channels=1,
                    out_channels=out_channels_3d,
                    init_features=2,
                    growth_rate=2,
                    block_config=(6, )).to(device)
model_2d.eval()
model_2d_2c.eval()
model_3d.eval()

# 2D w/ bounding box
TEST_CASE_0 = [
    {
        "nn_module": model_2d
    },
    {
        "x": torch.rand(1, 1, 48, 64).to(device),
        "b_box": [-1, -1, 2, 40, 1, 62]
    },
    (1, 1, 39, 62, out_channels_2d),
    (1, 1, 39, 62),
]
# 3D w/ bounding box and stride
TEST_CASE_1 = [