Example #1
    def __call__(self, data):
        d = dict(data)
        for key in self.keys:
            img = d[key]
            # Shrink the last two spatial dims by `self.border` voxels on
            # each side, then center-crop each keyed image to that region.
            roi_size = (img.shape[-2] - self.border * 2,
                        img.shape[-1] - self.border * 2)
            crop = CenterSpatialCrop(roi_size=roi_size)
            d[key] = crop(img)
        return d
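
A minimal sketch of how a dictionary transform with this __call__ might be assembled and used; the class name BorderCropd, the keys, and the border value are illustrative assumptions, not part of the excerpt above.

import numpy as np
from monai.transforms import CenterSpatialCrop, MapTransform

class BorderCropd(MapTransform):  # hypothetical wrapper around the method above
    def __init__(self, keys, border):
        super().__init__(keys)
        self.border = border

    def __call__(self, data):
        d = dict(data)
        for key in self.keys:
            img = d[key]
            roi_size = (img.shape[-2] - self.border * 2,
                        img.shape[-1] - self.border * 2)
            d[key] = CenterSpatialCrop(roi_size=roi_size)(img)
        return d

# Trim a 10-pixel border from each side of a (C, H, W) image:
# channels preserved, spatial dims 64 -> 44.
sample = {"image": np.zeros((1, 64, 64))}
print(BorderCropd(keys=["image"], border=10)(sample)["image"].shape)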
Example #2

import time

import nibabel as nib
import torch
from monai.transforms import CenterSpatialCrop, KeepLargestConnectedComponent

# `params`, `get_test_transforms` and `get_seg_transforms` are defined
# elsewhere in the source module and are not part of this excerpt.
def predict(file_name, model_path='', _params=params, output_name=None):
    print('Segmenting ' + file_name + ' ...')
    start = time.time()

    # Create test sample as tensor batch
    test_transforms = get_test_transforms(_params['image_shape'])
    test_file = [{"image": file_name}]
    test_batch_image = test_transforms(test_file)[0]["image"].unsqueeze(0)

    # Load model and inference
    # https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-model-across-devices
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = torch.load(model_path, map_location=device)
    model.eval()
    with torch.no_grad():  # inference only; no gradient tracking needed
        seg = model(test_batch_image.to(device))

    # Postprocessing: keep largest seg component
    seg = torch.argmax(seg, dim=1, keepdim=True).detach().cpu()
    keeplargest = KeepLargestConnectedComponent(applied_labels=1)
    seg = keeplargest(seg)[0]

    # Crop the output back to the original image size. The segmentation is
    # produced in canonical orientation, so bring the input image to
    # canonical form as well before comparing shapes.
    img = nib.load(file_name)
    img_canon = nib.as_closest_canonical(img)
    crop = CenterSpatialCrop(img_canon.shape)
    seg = crop(seg)[0]

    # Save output seg in canonical orientation
    seg1 = nib.Nifti1Image(seg.numpy(), img_canon.affine, img_canon.header)
    nib.save(seg1, output_name)

    # Change output seg orientation to original image orientation
    seg_file = [{"image": output_name}]
    seg_transforms = get_seg_transforms(
        end_seg_axcodes=nib.aff2axcodes(img.affine))
    seg1 = seg_transforms(seg_file)

    # Save output seg with same orientation as original image orientation
    seg1 = nib.Nifti1Image(seg1[0]["image"][0], img.affine, img.header)
    nib.save(seg1, output_name)

    print('Segmentation saved to ' + output_name)
    end = time.time()
    print('√ (time taken: ', round(end - start, ndigits=4), 'seconds)')
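
The helper get_test_transforms is defined elsewhere in the source repository and not shown here. A plausible minimal sketch, assuming it loads the NIfTI, moves it to channel-first canonical (RAS) orientation, and pads/crops to the network's input shape; everything beyond the standard MONAI dictionary transforms is an assumption:

from monai.transforms import (AddChanneld, Compose, LoadImaged, Orientationd,
                              ResizeWithPadOrCropd, ToTensord)

def get_test_transforms(image_shape):
    # Hypothetical reconstruction: load -> channel-first -> canonical
    # orientation -> pad/crop to the model's input shape -> tensor.
    return Compose([
        LoadImaged(keys=["image"]),
        AddChanneld(keys=["image"]),
        Orientationd(keys=["image"], axcodes="RAS"),
        ResizeWithPadOrCropd(keys=["image"], spatial_size=image_shape),
        ToTensord(keys=["image"]),
    ])

With a helper like this in scope, a call such as predict('subject.nii.gz', model_path='model.pt', output_name='seg.nii.gz') (file names illustrative) runs the whole load, infer, reorient, and save pipeline.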
Example #3
    inputZ03, inputZ03_val = split_train_val(inputZ03_path)
    inputZ04, inputZ04_val = split_train_val(inputZ04_path)
    inputZ05, inputZ05_val = split_train_val(inputZ05_path)
    inputZ06, inputZ06_val = split_train_val(inputZ06_path)
    inputZ07, inputZ07_val = split_train_val(inputZ07_path)

    targetC01, targetC01_val = split_train_val(targetC01_path)
    targetC02, targetC02_val = split_train_val(targetC02_path)
    targetC03, targetC03_val = split_train_val(targetC03_path)

    # data preprocessing/augmentation
    trans_train = Compose([
        #LoadPNG(image_only=True),
        LoadImage(PILReader(), image_only=True),
        AddChannel(),
        CenterSpatialCrop(roi_size=2154),
        #ScaleIntensity(),
        #RandRotate(range_x=15, prob=aug_prob, keep_size=True),
        #RandRotate90(prob=aug_prob, spatial_axes=(0, 1)),
        #RandFlip(spatial_axis=0, prob=aug_prob),
        #RandScaleIntensity(factors=0.5, prob=aug_prob)
        ToTensor()
    ])

    trans_val = Compose([
        #LoadPNG(image_only=True),
        LoadImage(PILReader(), image_only=True),
        AddChannel(),
        #CenterSpatialCrop(roi_size=2154),
        #ScaleIntensity(),
        ToTensor()
    ])
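
A minimal sketch of how pipelines like trans_train might feed a MONAI dataset and loader; the file lists and batch size below are illustrative assumptions, not taken from the excerpt:

from monai.data import ArrayDataset, DataLoader, PILReader
from monai.transforms import (AddChannel, CenterSpatialCrop, Compose,
                              LoadImage, ToTensor)

# Hypothetical file lists; in the excerpt these come from split_train_val().
input_files = ["z03_0001.png", "z03_0002.png"]
target_files = ["c01_0001.png", "c01_0002.png"]

trans_train = Compose([
    LoadImage(PILReader(), image_only=True),
    AddChannel(),
    CenterSpatialCrop(roi_size=2154),
    ToTensor(),
])

# ArrayDataset applies img_transform/seg_transform to the paired lists.
train_ds = ArrayDataset(img=input_files, img_transform=trans_train,
                        seg=target_files, seg_transform=trans_train)
train_loader = DataLoader(train_ds, batch_size=2, shuffle=True)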
Example #4
    def test_value(self, input_param, input_data, expected_value):
        # The cropped result should match the expected center region exactly.
        result = CenterSpatialCrop(**input_param)(input_data)
        np.testing.assert_allclose(result, expected_value)
Example #5
    def test_shape(self, input_param, input_data, expected_shape):
        result = CenterSpatialCrop(**input_param)(input_data)
        self.assertTupleEqual(result.shape, expected_shape)

    def test_value(self, input_param, input_data, expected_value):
        result = CenterSpatialCrop(**input_param)(input_data)
        # The transform should hand back the same container type it was
        # given (tensor in, tensor out) and crop the values exactly.
        self.assertEqual(isinstance(result, torch.Tensor),
                         isinstance(input_data, torch.Tensor))
        np.testing.assert_allclose(result, expected_value)
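
For context, a small self-contained check of the crop semantics these tests exercise; the concrete array below is illustrative rather than one of the project's actual test cases:

import numpy as np
from monai.transforms import CenterSpatialCrop

# 1-channel 4x4 image whose values encode position (0..15, row-major).
img = np.arange(16).reshape(1, 4, 4)

# Center-crop to 2x2: rows 1..2 and cols 1..2 survive.
cropped = CenterSpatialCrop(roi_size=(2, 2))(img)
print(tuple(cropped.shape))  # (1, 2, 2)
print(np.asarray(cropped))   # [[[ 5  6]
                             #   [ 9 10]]]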