def run_inference_test(root_dir, test_x, test_y,
                       device=torch.device("cuda:0")):
    # define transforms for image and classification
    val_transforms = Compose(
        [LoadPNG(image_only=True),
         AddChannel(),
         ScaleIntensity(),
         ToTensor()])
    val_ds = MedNISTDataset(test_x, test_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=10)

    model = densenet121(
        spatial_dims=2,
        in_channels=1,
        out_channels=len(np.unique(test_y)),
    ).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    model.eval()
    y_true = list()
    y_pred = list()
    with torch.no_grad():
        for test_data in val_loader:
            test_images = test_data[0].to(device)
            test_labels = test_data[1].to(device)
            pred = model(test_images).argmax(dim=1)
            for i in range(len(pred)):
                y_true.append(test_labels[i].item())
                y_pred.append(pred[i].item())
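    # count true positives per class (the diagonal of the confusion matrix)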
    tps = [
        np.sum((np.asarray(y_true) == idx) & (np.asarray(y_pred) == idx))
        for idx in np.unique(test_y)
    ]
    return tps
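This inference helper and the training helper in Example #4 both wrap their file lists in a MedNISTDataset, which is not defined on this page. A minimal sketch of such a dataset, assumed from how it is called above rather than taken from the original source, could look like:

import torch


class MedNISTDataset(torch.utils.data.Dataset):
    """Pairs PNG file paths with integer class labels and applies a transform chain."""

    def __init__(self, image_files, labels, transforms):
        self.image_files = image_files
        self.labels = labels
        self.transforms = transforms

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, index):
        # the transforms (LoadPNG -> AddChannel -> ... -> ToTensor) operate on a single path
        return self.transforms(self.image_files[index]), self.labels[index]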
Example #2
def test_shape(self, data_shape, filenames, expected_shape, meta_shape):
    test_image = np.random.randint(0, 256, size=data_shape)
    with tempfile.TemporaryDirectory() as tempdir:
        for i, name in enumerate(filenames):
            filenames[i] = os.path.join(tempdir, name)
            Image.fromarray(test_image.astype("uint8")).save(filenames[i])
        result = LoadPNG()(filenames)
    self.assertTupleEqual(result[1]["spatial_shape"], meta_shape)
    self.assertTupleEqual(result[0].shape, expected_shape)
Example #3
def test_shape(self, data_shape, filenames, expected_shape, meta_shape):
    test_image = np.random.randint(0, 256, size=data_shape)
    with tempfile.TemporaryDirectory() as tempdir:
        for i, name in enumerate(filenames):
            filenames[i] = os.path.join(tempdir, name)
            Image.fromarray(test_image.astype("uint8")).save(filenames[i])
        result = LoadPNG()(filenames)
    self.assertTupleEqual(result[1]["spatial_shape"], meta_shape)
    self.assertTupleEqual(result[0].shape, expected_shape)
    if result[0].shape == test_image.shape:
        np.testing.assert_allclose(result[0], test_image)
    else:
        np.testing.assert_allclose(
            result[0], np.tile(test_image, [result[0].shape[0], 1, 1]))
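Both test_shape variants exercise LoadPNG's default behaviour of returning the image array together with a metadata dictionary (image_only=True drops the metadata, as in Examples #1 and #4). A minimal standalone sketch of that pattern, with a hypothetical file name sample.png:

import numpy as np
from PIL import Image
from monai.transforms import LoadPNG

# write a throwaway 64x64 grayscale PNG to load back (hypothetical path)
arr = np.random.randint(0, 256, size=(64, 64)).astype("uint8")
Image.fromarray(arr).save("sample.png")

img, meta = LoadPNG()("sample.png")            # image array plus metadata dict
print(img.shape, meta["spatial_shape"])        # e.g. (64, 64) and the stored spatial shape
img_only = LoadPNG(image_only=True)("sample.png")  # array only, no metadata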
Example #4
def run_training_test(root_dir,
                      train_x,
                      train_y,
                      val_x,
                      val_y,
                      device="cuda:0",
                      num_workers=10):

    monai.config.print_config()
    # define transforms for image and classification
    train_transforms = Compose([
        LoadPNG(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensor(),
    ])
    train_transforms.set_random_state(1234)
    val_transforms = Compose(
        [LoadPNG(image_only=True),
         AddChannel(),
         ScaleIntensity(),
         ToTensor()])

    # create train, val data loaders
    train_ds = MedNISTDataset(train_x, train_y, train_transforms)
    train_loader = DataLoader(train_ds,
                              batch_size=300,
                              shuffle=True,
                              num_workers=num_workers)

    val_ds = MedNISTDataset(val_x, val_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)

    model = densenet121(spatial_dims=2,
                        in_channels=1,
                        out_channels=len(np.unique(train_y))).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)
    epoch_num = 4
    val_interval = 1

    # start training validation
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    for epoch in range(epoch_num):
        print("-" * 10)
        print(f"Epoch {epoch + 1}/{epoch_num}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss:{epoch_loss:0.4f}")

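        # validate: track ROC AUC and checkpoint the best-performing model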
        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images = val_data[0].to(device)
                    val_labels = val_data[1].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                auc_metric = compute_roc_auc(y_pred,
                                             y,
                                             to_onehot_y=True,
                                             softmax=True)
                metric_values.append(auc_metric)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                if auc_metric > best_metric:
                    best_metric = auc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), model_filename)
                    print("saved new best metric model")
                print(
                    f"current epoch {epoch +1} current AUC: {auc_metric:0.4f} "
                    f"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}"
                )
    print(
        f"train completed, best_metric: {best_metric:0.4f} at epoch: {best_metric_epoch}"
    )
    return epoch_loss_values, best_metric, best_metric_epoch
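Examples #1 and #4 are meant to be used together: run_training_test saves best_metric_model.pth under root_dir, and run_inference_test reloads it. A hedged usage sketch follows, in which tiny synthetic two-class PNGs stand in for the real MedNIST data (the paths, labels, and 64x64 image size are assumptions for illustration):

import os
import tempfile

import numpy as np
import torch
from PIL import Image

# build a small synthetic two-class PNG dataset (stand-in for MedNIST)
data_dir = tempfile.mkdtemp()
paths, labels = [], []
for i in range(40):
    path = os.path.join(data_dir, f"img_{i}.png")
    Image.fromarray(np.random.randint(0, 256, (64, 64)).astype("uint8")).save(path)
    paths.append(path)
    labels.append(i % 2)

root_dir = tempfile.mkdtemp()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# train on the first split, validate on the second, then test on the remainder
run_training_test(root_dir, paths[:24], labels[:24], paths[24:32], labels[24:32],
                  device=device, num_workers=0)
tps = run_inference_test(root_dir, paths[32:], labels[32:], device=device)
print("per-class true positives:", tps)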
Example #5
    params['rotate_prob'] = 0.5
    params['min_zoom'] = 0.9
    params['max_zoom'] = 1.1
    params['zoom_prob'] = 0.5

    monai.config.print_config()

    train_data, val_data, test_data, raw_data = get_train_val_test_data(
        params['data_dir'], params['test_val_split'])

    size_n = 3
    sample_data = train_data.sample(size_n**2)
    #show_sample_dataframe(sample_data, size_n=size_n, title="Training samples")

    train_transforms = Compose([
        LoadPNG(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=params['rotate_range_x'],
                   prob=params['rotate_prob'],
                   keep_size=True),
        # RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=params['min_zoom'],
                 max_zoom=params['max_zoom'],
                 prob=params['zoom_prob'],
                 keep_size=True),
        ToTensor()
    ])

    val_transforms = Compose(
        [LoadPNG(image_only=True),