Code Example #1
File: test_lr_finder.py  Project: zomglings/MONAI
    def test_lr_finder(self):
        # 0.001 gives 54 examples
        train_ds = MedNISTDataset(
            root_dir=self.root_dir,
            transform=self.transforms,
            section="validation",
            val_frac=0.001,
            download=True,
            num_workers=10,
        )
        train_loader = DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=10)
        num_classes = train_ds.get_num_classes()

        model = DenseNet(
            spatial_dims=2, in_channels=1, out_channels=num_classes, init_features=2, growth_rate=2, block_config=(2,)
        )
        loss_function = torch.nn.CrossEntropyLoss()
        learning_rate = 1e-5
        optimizer = torch.optim.Adam(model.parameters(), learning_rate)

        lr_finder = LearningRateFinder(model, optimizer, loss_function, device=device)
        lr_finder.range_test(train_loader, val_loader=train_loader, end_lr=10, num_iter=5)
        print(lr_finder.get_steepest_gradient(0, 0)[0])

        if has_matplotlib:
            ax = plt.subplot()
            plt.show(block=False)
            lr_finder.plot(0, 0, ax=ax)  # to inspect the loss-learning rate graph
            plt.pause(3)
            plt.close()

        lr_finder.reset()  # to reset the model and optimizer to their initial state
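The range test only records a loss curve; a natural follow-up is to rebuild the optimizer from the suggested rate. A minimal sketch, assuming get_steepest_gradient returns a (learning_rate, loss) pair, as the print call above implies, and (None, None) when no suggestion can be computed:

suggested_lr, _ = lr_finder.get_steepest_gradient(0, 0)
if suggested_lr is not None:
    # sketch only: re-create the optimizer with the suggested learning rate for the real run
    optimizer = torch.optim.Adam(model.parameters(), lr=suggested_lr)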
Code Example #2
def class_model():
    net = DenseNet(spatial_dims=3,
                   in_channels=1,
                   out_channels=2,
                   init_features=4,
                   growth_rate=2,
                   block_config=(2, 2, 2, 2),
                   bn_size=2)
    model = NoduleClassificationModule(net, num_classes=2)
    return model
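For context, a minimal sketch of what the DenseNet configured above produces on its own (NoduleClassificationModule is project-specific and omitted here; the input size is illustrative):

import torch
from monai.networks.nets import DenseNet

net = DenseNet(spatial_dims=3, in_channels=1, out_channels=2,
               init_features=4, growth_rate=2, block_config=(2, 2, 2, 2), bn_size=2)
net.eval()
with torch.no_grad():
    logits = net(torch.rand(1, 1, 32, 32, 32))  # (batch, channel, D, H, W)
print(logits.shape)  # expected: torch.Size([1, 2]), one logit per output channel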
Code Example #3
File: test_vis_cam.py  Project: zomglings/MONAI
    def test_shape(self, input_data, expected_shape):
        if input_data["model"] == "densenet2d":
            model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        if input_data["model"] == "densenet3d":
            model = DenseNet(spatial_dims=3,
                             in_channels=1,
                             out_channels=3,
                             init_features=2,
                             growth_rate=2,
                             block_config=(6, ))
        if input_data["model"] == "senet2d":
            model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
        if input_data["model"] == "senet3d":
            model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()
        cam = CAM(nn_module=model,
                  target_layers=input_data["target_layers"],
                  fc_layers=input_data["fc_layers"])
        image = torch.rand(input_data["shape"], device=device)
        result = cam(x=image, layer_idx=-1)
        fea_shape = cam.feature_map_size(input_data["shape"], device=device)
        self.assertTupleEqual(fea_shape, input_data["feature_shape"])
        self.assertTupleEqual(result.shape, expected_shape)
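Outside the parameterized harness, the same CAM call can be written as a small standalone sketch. The class_layers.relu / class_layers.out layer names are the ones used with MONAI DenseNets in the SaliencyInferer example further down; for other architectures they are an assumption:

import torch
from monai.networks.nets import DenseNet121
from monai.visualize import CAM

model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3).eval()
cam = CAM(nn_module=model, target_layers="class_layers.relu", fc_layers="class_layers.out")
heatmap = cam(x=torch.rand(1, 1, 48, 64))  # upsampled back to the input's spatial size
print(heatmap.shape)  # expected: torch.Size([1, 1, 48, 64])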
Code Example #4
File: test_vis_gradcam.py  Project: zomglings/MONAI
    def test_shape(self, input_data, expected_shape):
        if input_data["model"] == "densenet2d":
            model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        if input_data["model"] == "densenet3d":
            model = DenseNet(spatial_dims=3,
                             in_channels=1,
                             out_channels=3,
                             init_features=2,
                             growth_rate=2,
                             block_config=(6, ))
        if input_data["model"] == "senet2d":
            model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
        if input_data["model"] == "senet3d":
            model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()
        cam = GradCAM(nn_module=model,
                      target_layers=input_data["target_layers"])
        image = torch.rand(input_data["shape"], device=device)
        result = cam(x=image, layer_idx=-1)
        np.testing.assert_array_equal(cam.nn_module.class_idx.cpu(),
                                      model(image).max(1)[-1].cpu())
        fea_shape = cam.feature_map_size(input_data["shape"], device=device)
        self.assertTupleEqual(fea_shape, input_data["feature_shape"])
        self.assertTupleEqual(result.shape, expected_shape)
        # check result is same whether class_idx=None is used or not
        result2 = cam(x=image,
                      layer_idx=-1,
                      class_idx=model(image).max(1)[-1].cpu())
        np.testing.assert_array_almost_equal(result, result2)
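GradCAM differs mainly in that it needs no fc_layers argument, and with class_idx omitted it explains the most probable class, which is what the assert_array_equal check above verifies. A minimal standalone sketch under the same layer-name assumption as the CAM example:

import torch
from monai.networks.nets import DenseNet121
from monai.visualize import GradCAM

model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3).eval()
cam = GradCAM(nn_module=model, target_layers="class_layers.relu")
heatmap = cam(x=torch.rand(1, 1, 48, 64))  # class_idx=None, so the most probable class is used
print(heatmap.shape)  # expected: torch.Size([1, 1, 48, 64])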
Code Example #5
    def test_shape(self, cam_name):
        model = DenseNet(spatial_dims=3,
                         in_channels=1,
                         out_channels=3,
                         init_features=2,
                         growth_rate=2,
                         block_config=(6, ))
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()

        image = torch.rand((2, 1, 6, 6, 6), device=device)
        target_layer = "class_layers.relu"
        fc_layer = "class_layers.out"
        if cam_name == "CAM":
            inferer = SaliencyInferer(cam_name,
                                      target_layer,
                                      None,
                                      fc_layer,
                                      upsampler=default_upsampler)
            result = inferer(inputs=image, network=model, layer_idx=-1)
        else:
            inferer = SaliencyInferer(cam_name,
                                      target_layer,
                                      None,
                                      upsampler=default_upsampler)
            result = inferer(image, model, -1, retain_graph=False)

        self.assertTupleEqual(result.shape, (2, 1, 6, 6, 6))
Code Example #6
import torch

from monai.networks.nets import DenseNet, DenseNet121
from monai.visualize import OcclusionSensitivity

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
out_channels_2d = 4
out_channels_3d = 3
model_2d = DenseNet121(spatial_dims=2,
                       in_channels=1,
                       out_channels=out_channels_2d).to(device)
model_2d_2c = DenseNet121(spatial_dims=2,
                          in_channels=2,
                          out_channels=out_channels_2d).to(device)
model_3d = DenseNet(spatial_dims=3,
                    in_channels=1,
                    out_channels=out_channels_3d,
                    init_features=2,
                    growth_rate=2,
                    block_config=(6, )).to(device)
model_2d.eval()
model_2d_2c.eval()
model_3d.eval()

# 2D w/ bounding box
TEST_CASE_0 = [
    {
        "nn_module": model_2d
    },
    {
        "x": torch.rand(1, 1, 48, 64).to(device),
        "b_box": [-1, -1, 2, 40, 1, 62]
    },
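The excerpt stops inside the test-case list, but the intended call pattern can be sketched from the definitions above. Treat the two return values (an occlusion-sensitivity map plus a most-probable-class map) as an assumption about the MONAI version these tests target:

occ_sens = OcclusionSensitivity(nn_module=model_2d)
x = torch.rand(1, 1, 48, 64).to(device)
occ_map, most_probable_class = occ_sens(x=x, b_box=[-1, -1, 2, 40, 1, 62])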
Code Example #7
File: test_vis_gradbased.py  Project: Nic-Ma/MONAI
import unittest

import torch
from parameterized import parameterized

from monai.networks.nets import DenseNet, DenseNet121, SEResNet50
from monai.visualize import GuidedBackpropGrad, GuidedBackpropSmoothGrad, SmoothGrad, VanillaGrad

DENSENET2D = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
DENSENET3D = DenseNet(spatial_dims=3,
                      in_channels=1,
                      out_channels=3,
                      init_features=2,
                      growth_rate=2,
                      block_config=(6, ))
SENET2D = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
SENET3D = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)

TESTS = []
for type in (VanillaGrad, SmoothGrad, GuidedBackpropGrad,
             GuidedBackpropSmoothGrad):
    # 2D densenet
    TESTS.append([type, DENSENET2D, (1, 1, 48, 64), (1, 1, 48, 64)])
    # 3D densenet
    TESTS.append([type, DENSENET3D, (1, 1, 6, 6, 6), (1, 1, 6, 6, 6)])
    # 2D senet
    TESTS.append([type, SENET2D, (1, 3, 64, 64), (1, 1, 64, 64)])
    # 3D senet
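The list is truncated before the 3D SE-ResNet case and the test body is not shown; a sketch of how such cases are presumably consumed, based on the input and output shapes listed above:

DENSENET2D.eval()
vis = VanillaGrad(DENSENET2D)  # same pattern for the other three gradient classes
saliency = vis(torch.rand(1, 1, 48, 64))
print(saliency.shape)  # expected from the 2D densenet case: (1, 1, 48, 64)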
Code Example #8
        load_json(Path(config["data"]["split_dir"]) / "valid.json")
    ]
    label_mapping = ([1, 2, 3, 4, 5], [0, 0, 0, 1, 1])

    dm = ClassificationDataModule(data_dir=Path(config["data"]["data_dir"]),
                                  cache_dir=Path(config["data"]["cache_dir"]),
                                  splits=splits,
                                  min_anns=config["data"]["min_anns"],
                                  exclude_labels=[],
                                  label_mapping=label_mapping,
                                  aug_prob=config["data"]["aug_prob"],
                                  batch_size=config["data"]["batch_size"])

    net = DenseNet(
        spatial_dims=config["class_model"]["spatial_dims"],
        in_channels=config["class_model"]["in_channels"],
        out_channels=config["class_model"]["out_channels"],
        dropout_prob=config["class_model"]["dropout"],
    )
    model = NoduleClassificationModule(
        net,
        num_classes=config["class_model"]["num_classes"],
        lr=config["class_model"]["lr"])

    wandb.login()

    logger = WandbLogger(project=config["wandb"]["project"],
                         job_type="training")

    es = EarlyStopping(monitor="val_loss", verbose=True)
    mc = ModelCheckpoint(
        monitor="val_loss",
Code Example #9
    def test_ill_pretrain(self, input_param):
        with self.assertRaisesRegex(ValueError, ""):
            net = DenseNet(**input_param)