Example #1
 def test_shape(self, input_data, expected_shape):
     if input_data["model"] == "densenet2d":
         model = densenet121(spatial_dims=2, in_channels=1, out_channels=3)
     if input_data["model"] == "densenet3d":
         model = DenseNet(spatial_dims=3,
                          in_channels=1,
                          out_channels=3,
                          init_features=2,
                          growth_rate=2,
                          block_config=(6, ))
     if input_data["model"] == "senet2d":
         model = se_resnet50(spatial_dims=2, in_channels=3, num_classes=4)
     if input_data["model"] == "senet3d":
         model = se_resnet50(spatial_dims=3, in_channels=3, num_classes=4)
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     model.to(device)
     model.eval()
     cam = CAM(nn_module=model,
               target_layers=input_data["target_layers"],
               fc_layers=input_data["fc_layers"])
     image = torch.rand(input_data["shape"], device=device)
     result = cam(x=image, layer_idx=-1)
     fea_shape = cam.feature_map_size(input_data["shape"], device=device)
     self.assertTupleEqual(fea_shape, input_data["feature_shape"])
     self.assertTupleEqual(result.shape, expected_shape)
Example #2
def run_inference_test(root_dir, test_x, test_y,
                       device=torch.device("cuda:0")):
    # define transforms for image and classification
    val_transforms = Compose(
        [LoadPNG(image_only=True),
         AddChannel(),
         ScaleIntensity(),
         ToTensor()])
    val_ds = MedNISTDataset(test_x, test_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=10)

    model = densenet121(
        spatial_dims=2,
        in_channels=1,
        out_channels=len(np.unique(test_y)),
    ).to(device)

    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    model.load_state_dict(torch.load(model_filename))
    model.eval()
    y_true = list()
    y_pred = list()
    with torch.no_grad():
        for test_data in val_loader:
            test_images, test_labels = test_data[0].to(
                device), test_data[1].to(device)
            pred = model(test_images).argmax(dim=1)
            for i in range(len(pred)):
                y_true.append(test_labels[i].item())
                y_pred.append(pred[i].item())
    tps = [
        np.sum((np.asarray(y_true) == idx) & (np.asarray(y_pred) == idx))
        for idx in np.unique(test_y)
    ]
    return tps
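A minimal usage sketch (not part of the original test): run_inference_test returns one true-positive count per class, so per-class accuracy can be derived by dividing each count by the number of test samples of that class. Here root_dir, test_x and test_y are assumed to come from an earlier MedNIST split, with a trained checkpoint saved as best_metric_model.pth.

tps = run_inference_test(root_dir, test_x, test_y)
for idx, tp in zip(np.unique(test_y), tps):
    # tps is ordered by np.unique(test_y), so idx and tp line up
    total = int(np.sum(np.asarray(test_y) == idx))
    print(f"class {idx}: {tp}/{total} correct ({tp / total:.2%})")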
Example #3
 def test_121_3d_shape_pretrain(self, input_param, input_shape,
                                expected_shape):
     net = densenet121(**input_param)
     net.eval()
     with torch.no_grad():
         result = net.forward(torch.randn(input_shape))
         self.assertEqual(result.shape, expected_shape)
Example #4
def classify_image(image):
    # convert the uploaded image to grayscale 64x64 and save it temporarily
    im = Image.open(image)
    im = im.convert(mode='L')
    im = im.resize((64, 64))
    im.save('conversion.jpeg', 'JPEG')

    # Define MONAI transforms, Dataset and Dataloader to process image
    val_transforms = Compose([
        LoadImage(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        ToTensor()
    ])
    test_ds = MedNISTDataset(['conversion.jpeg'], [0], val_transforms)
    test_loader = torch.utils.data.DataLoader(test_ds)

    # Define Network
    device = torch.device("cpu")
    model = densenet121(spatial_dims=2, in_channels=1,
                        out_channels=num_class).to(device)

    # Make prediction
    model.load_state_dict(
        torch.load(
            "SmartEMR_Imaging/MONAI_DATA_DIRECTORY/best_metric_model_cpu.pth"))
    model.eval()
    y_true = list()
    y_pred = list()
    with torch.no_grad():
        for test_data in test_loader:
            test_images, test_labels = (
                test_data[0].to(device),
                test_data[1].to(device),
            )
            pred = model(test_images).argmax(dim=1)
            for i in range(len(pred)):
                y_true.append(test_labels[i].item())
                y_pred.append(pred[i].item())

    # clean up
    os.remove('conversion.jpeg')

    return class_tags[y_pred[0]]
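classify_image relies on module-level num_class and class_tags; a minimal usage sketch with illustrative values (the actual class count and label names must match whatever best_metric_model_cpu.pth was trained on):

# Hypothetical values, shown only to make the snippet callable.
num_class = 6
class_tags = ["AbdomenCT", "BreastMRI", "CXR", "ChestCT", "Hand", "HeadCT"]
print(classify_image("sample_scan.png"))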
Example #5
 def test_shape(self, input_data, expected_shape):
     if input_data["model"] == "densenet2d":
         model = densenet121(spatial_dims=2, in_channels=1, out_channels=3)
     if input_data["model"] == "densenet3d":
         model = DenseNet(
             spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,)
         )
     if input_data["model"] == "senet2d":
         model = se_resnet50(spatial_dims=2, in_channels=3, num_classes=4)
     if input_data["model"] == "senet3d":
         model = se_resnet50(spatial_dims=3, in_channels=3, num_classes=4)
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     model.to(device)
     model.eval()
     cam = GradCAM(nn_module=model, target_layers=input_data["target_layers"])
     image = torch.rand(input_data["shape"], device=device)
     result = cam(x=image, layer_idx=-1)
     np.testing.assert_array_equal(cam.nn_module.class_idx.cpu(), model(image).max(1)[-1].cpu())
     fea_shape = cam.feature_map_size(input_data["shape"], device=device)
     self.assertTupleEqual(fea_shape, input_data["feature_shape"])
     self.assertTupleEqual(result.shape, expected_shape)
     # check that the result is the same whether class_idx is passed explicitly or left as None
     result2 = cam(x=image, layer_idx=-1, class_idx=model(image).max(1)[-1].cpu())
     np.testing.assert_array_almost_equal(result, result2)
Example #6
def run_training_test(root_dir,
                      train_x,
                      train_y,
                      val_x,
                      val_y,
                      device="cuda:0",
                      num_workers=10):

    monai.config.print_config()
    # define transforms for image and classification
    train_transforms = Compose([
        LoadPNG(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        ToTensor(),
    ])
    train_transforms.set_random_state(1234)
    val_transforms = Compose(
        [LoadPNG(image_only=True),
         AddChannel(),
         ScaleIntensity(),
         ToTensor()])

    # create train, val data loaders
    train_ds = MedNISTDataset(train_x, train_y, train_transforms)
    train_loader = DataLoader(train_ds,
                              batch_size=300,
                              shuffle=True,
                              num_workers=num_workers)

    val_ds = MedNISTDataset(val_x, val_y, val_transforms)
    val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)

    model = densenet121(spatial_dims=2,
                        in_channels=1,
                        out_channels=len(np.unique(train_y))).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)
    epoch_num = 4
    val_interval = 1

    # start training and validation
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    model_filename = os.path.join(root_dir, "best_metric_model.pth")
    for epoch in range(epoch_num):
        print("-" * 10)
        print(f"Epoch {epoch + 1}/{epoch_num}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss:{epoch_loss:0.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                y_pred = torch.tensor([], dtype=torch.float32, device=device)
                y = torch.tensor([], dtype=torch.long, device=device)
                for val_data in val_loader:
                    val_images, val_labels = val_data[0].to(
                        device), val_data[1].to(device)
                    y_pred = torch.cat([y_pred, model(val_images)], dim=0)
                    y = torch.cat([y, val_labels], dim=0)
                auc_metric = compute_roc_auc(y_pred,
                                             y,
                                             to_onehot_y=True,
                                             softmax=True)
                metric_values.append(auc_metric)
                acc_value = torch.eq(y_pred.argmax(dim=1), y)
                acc_metric = acc_value.sum().item() / len(acc_value)
                if auc_metric > best_metric:
                    best_metric = auc_metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), model_filename)
                    print("saved new best metric model")
                print(
                    f"current epoch {epoch +1} current AUC: {auc_metric:0.4f} "
                    f"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}"
                )
    print(
        f"train completed, best_metric: {best_metric:0.4f}  at epoch: {best_metric_epoch}"
    )
    return epoch_loss_values, best_metric, best_metric_epoch
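A hedged driver sketch showing how this function and run_inference_test from Example #2 could be chained; the train/val/test splits and root_dir are assumed to be prepared elsewhere (e.g. by partitioning the MedNIST file lists).

# Hypothetical driver, not from the original source.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
epoch_loss_values, best_metric, best_metric_epoch = run_training_test(
    root_dir, train_x, train_y, val_x, val_y, device=device)
tps = run_inference_test(root_dir, test_x, test_y, device=device)
print(f"best AUC {best_metric:0.4f} at epoch {best_metric_epoch}, per-class TPs: {tps}")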
Example #7
import torch
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
# MONAI
import monai
from monai.networks.nets import densenet121
from monai.transforms import \
    Compose, LoadPNG, AddChannel, ScaleIntensity, ToTensor, RandRotate, RandFlip, RandZoom
from torch.utils.data import Dataset, DataLoader
import matplotlib.image as mpimg

# load model
num_class = 4
torch.cuda.empty_cache()
device = torch.device("cpu")
model = densenet121(spatial_dims=2, in_channels=1,
                    out_channels=num_class).to(device)

PATH = 'best_metric_model.pth'
model.load_state_dict(torch.load(PATH, map_location=torch.device('cpu')))
model.eval()


# convert image -> tensor
def transforms_image(image_bytes):

    image_transforms = Compose([
        # LoadPNG(image_only=True),
        ScaleIntensity(),
        ToTensor()
    ])
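The function above is cut off before the composed transforms are applied. A self-contained sketch of one plausible completion, assuming image_bytes holds raw PNG/JPEG bytes and the model expects a 1-channel 64x64 input (the helper name and sizes are illustrative, not from the original):

import io

def transforms_image_sketch(image_bytes):
    # decode, force grayscale and the 64x64 size used elsewhere in these examples
    img = Image.open(io.BytesIO(image_bytes)).convert(mode='L').resize((64, 64))
    arr = np.asarray(img)[None]  # add the channel dimension: (1, 64, 64)
    image_transforms = Compose([ScaleIntensity(), ToTensor()])
    return image_transforms(arr).unsqueeze(0)  # add the batch dimension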
Example #8
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch
from parameterized import parameterized

from monai.metrics import compute_occlusion_sensitivity
from monai.networks.nets import DenseNet, densenet121

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_2d = densenet121(spatial_dims=2, in_channels=1, out_channels=3).to(device)
model_3d = DenseNet(
    spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,)
).to(device)
model_2d.eval()
model_3d.eval()

# 2D w/ bounding box
TEST_CASE_0 = [
    {
        "model": model_2d,
        "image": torch.rand(1, 1, 48, 64).to(device),
        "label": torch.tensor([[0]], dtype=torch.int64).to(device),
        "b_box": [-1, -1, 2, 40, 1, 62],
    },
    (39, 62),
Example #9
 def test_121_3d_shape(self, input_param, input_data, expected_shape):
     net = densenet121(**input_param)
     net.eval()
     with torch.no_grad():
         result = net.forward(input_data)
         self.assertEqual(result.shape, expected_shape)
Example #10
    input_image = transforms.Resize(255)(img)
    input_new = transformations_new(input_image)
    input_new = input_new.unsqueeze(0)

if uploaded_file is not None:
    c6.image(input_image, channels='BGR')
else:
    c6.button(
        """Awaiting X-ray image to be uploaded. Currently using sample X-ray image (shown below)"""
    )
    c6.image(input_image)

import monai
from monai.networks.nets import densenet121
model = densenet121(
    spatial_dims=2, in_channels=1, out_channels=2
)  # .to(device)
model.eval()
model.load_state_dict(
    torch.load("best_metric_model_400epochs.pth",
               map_location=torch.device('cpu')))
output = model(input_new)

malignant_probability = nn.Softmax(dim=0)(output.data[0])[1].cpu().numpy()

st.subheader('Predicted Malignant Probability')
st.write(np.around(malignant_probability * 100, 2), '%')

localisation_model = densenet121(spatial_dims=2, in_channels=1, out_channels=3)
localisation_model.eval()
if malignant_probability > 0.5: