Example #1
def _resize_image_test(self, image, target_shape):
    # Resize to the target shape, then back to the original shape, checking both.
    original_image_shape = image.shape
    new_image = resize(image, target_shape)
    self.assertEqual(new_image.shape, target_shape)
    new_image = resize(new_image,
                       original_image_shape,
                       interpolation="linear")
    self.assertEqual(new_image.shape, original_image_shape)
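For context, the same round trip can be exercised directly. This is a minimal sketch, assuming `resize` accepts a nibabel image and can be imported from the project's utility module; the import path below is an assumption, not taken from the source.

import numpy as np
import nibabel as nib
from unet3d.utils.utils import resize  # import path is an assumption

image = nib.Nifti1Image(np.random.rand(4, 4, 4), affine=np.eye(4))
resized = resize(image, (8, 8, 8))                                # default interpolation
restored = resize(resized, image.shape, interpolation="linear")   # back to the original grid
assert restored.shape == image.shape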
Example #2
def restore_dimension(image_label, slices, affine):
    # Map the predicted label back into a full-sized volume: resize it to the
    # crop's shape, rotate 180° in the first two axes, and place it at the crop
    # slices. `original_shape` is a module-level constant in the source; `np`
    # and `nib` are the usual numpy/nibabel imports.
    old_shape = tuple(x.stop - x.start for x in slices)
    old_cropped_image = resize(image_label, old_shape, interpolation='nearest')
    rotated_image = np.rot90(old_cropped_image.get_data(), 2)
    result = np.zeros(original_shape, dtype=np.uint8)
    tp_slices = tuple(slices)
    result[tp_slices] = rotated_image
    return nib.Nifti1Image(result, affine)
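As an aside, the slice-based placement used above (with the resizing and rotation left out) can be illustrated with plain NumPy: crop a volume with a tuple of slices, then write the crop back into a zero-filled array of the original shape.

import numpy as np

volume = np.random.rand(16, 16, 16)
slices = (slice(2, 10), slice(4, 12), slice(3, 9))    # e.g. a foreground bounding box

cropped = volume[slices]                              # what the cropping step keeps
restored = np.zeros(volume.shape, dtype=volume.dtype)
restored[slices] = cropped                            # same pattern as result[tp_slices] = ...

assert np.array_equal(restored[slices], volume[slices])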
Example #3
def find_downsized_info(training_data_files, input_shape):
    foreground = get_complete_foreground(training_data_files)
    crop_slices = crop_img(foreground, return_slices=True, copy=True)
    cropped = crop_img_to(foreground, crop_slices, copy=True)
    final_image = resize(cropped,
                         new_shape=input_shape,
                         interpolation="nearest")
    return crop_slices, final_image.affine, final_image.header
Example #4
def test_images_align():
    data = np.arange(1, 9).reshape((2, 2, 2))
    affine = np.diag(np.ones(4) * 2)
    affine[3, 3] = 1
    image_nib = nib.Nifti1Image(data, affine=affine)
    new_image_nib = resize(image_nib, (4, 4, 4), interpolation="nearest")
    assert np.all(new_image_nib.get_data()[0] == np.asarray(
        [[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]))
    assert np.all(new_image_nib.affine == np.asarray(
        [[1., 0., 0., -0.5], [0., 1., 0., -0.5], [0., 0., 1., -0.5],
         [0., 0., 0., 1.]]))
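The asserted affine follows from keeping the field of view fixed: the original 2×2×2 grid has 2 mm spacing with voxel centres at 0 and 2 mm, so its voxel edges span -1 mm to 3 mm per axis; the resized 4×4×4 grid covers the same span with 1 mm spacing, which puts the first voxel centre at -0.5 mm. A small NumPy check of that arithmetic (not part of the library under test):

import numpy as np

def voxel_edge_bounds(affine, shape):
    # World-space extent of the grid, measured at the outer voxel edges.
    spacing = np.diag(affine)[:3]
    origin = affine[:3, 3]
    shape = np.asarray(shape)
    return origin - spacing / 2, origin + spacing * (shape - 0.5)

old_affine = np.diag([2., 2., 2., 1.])
new_affine = np.array([[1., 0., 0., -0.5],
                       [0., 1., 0., -0.5],
                       [0., 0., 1., -0.5],
                       [0., 0., 0., 1.]])
print(voxel_edge_bounds(old_affine, (2, 2, 2)))  # (array([-1., -1., -1.]), array([3., 3., 3.]))
print(voxel_edge_bounds(new_affine, (4, 4, 4)))  # (array([-1., -1., -1.]), array([3., 3., 3.]))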
Example #5
def crop_subject_modals(subject_modal_imgs, input_shape, slices):
    subject_data = []
    affine = None
    for i, modal_img in enumerate(subject_modal_imgs):
        modal_img = fix_shape(modal_img)
        modal_img = crop_img_to(modal_img, slices, copy=True)
        new_img = resize(modal_img,
                         new_shape=input_shape,
                         interpolation='linear')
        subject_data.append(new_img.get_data())
        if i == 0:
            affine = new_img.get_affine()

    subject_data = np.asarray(subject_data)
    return subject_data, affine
Example #6
def resize_modal_image(subject_modal_imgs,
                       target_shape,
                       interpolation='linear'):
    subject_data = []
    affine = None
    for i, modal_img in enumerate(subject_modal_imgs):
        modal_img = fix_shape(modal_img)
        new_img = resize(modal_img,
                         new_shape=target_shape,
                         interpolation=interpolation)
        subject_data.append(new_img.get_data())
        if i == 0:
            affine = new_img.get_affine()

    subject_data = np.asarray(subject_data)
    return subject_data, affine
Example #7
def segmentation_for_patient(subject_fd,
                             config,
                             output_path,
                             model=None,
                             mode='size_same_input'):

    if model is None:
        model = load_old_model(config)
    subject_name = os.path.basename(subject_fd)
    image_mris, original_affine, foreground = get_subject_tensor(
        subject_fd, subject_name)
    if mode == 'size_same_input':
        # Crop each modality to the foreground bounding box; `input_shape` is a
        # module-level constant in the source.
        slices = get_slices(foreground)
        subject_data_fixed_size, affine = crop_subject_modals(
            image_mris, input_shape, slices)
    elif mode == 'size_interpolate':
        # Resample each modality to the configured inference shape.
        target_shape = tuple(config['inference_shape'])
        subject_data_fixed_size, affine = resize_modal_image(
            image_mris, target_shape)
    else:
        print('Unsupported mode {} for inference'.format(mode))
        return

    subject_tensor = normalize_data(subject_data_fixed_size)

    subject_tensor = np.expand_dims(subject_tensor, axis=0)
    output_predict = predict(model, subject_tensor, affine)

    if mode == 'size_same_input':
        output = restore_dimension(output_predict, slices, original_affine)
    elif mode == 'size_interpolate':
        # `original_shape` is a module-level constant in the source.
        output = resize(output_predict,
                        new_shape=original_shape,
                        interpolation='nearest')
    else:
        print('Unsupported mode {} for inference'.format(mode))
        return

    output_fd = os.path.join(output_path, subject_name)
    if not os.path.exists(output_fd):
        os.makedirs(output_fd)
    # `extension` is a module-level constant in the source holding the output
    # file extension.
    output_file = os.path.join(
        output_fd, '{}_prediction{}'.format(subject_name, extension))
    output.to_filename(output_file)

    print('Patient {} is done!'.format(subject_fd))
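In short, `size_same_input` crops every modality to the foreground slices and uses `restore_dimension` to write the prediction back into the original volume, while `size_interpolate` resamples the modalities to `config['inference_shape']` and resizes the prediction back to the original shape with nearest-neighbour interpolation.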
Example #8
def _resize_image_test(image, target_shape):
    original_image_shape = image.shape
    new_image = resize(image, target_shape)
    assert new_image.shape == target_shape
    new_image = resize(new_image, original_image_shape, interpolation="linear")
    assert new_image.shape == original_image_shape