Code example #1
def VisualizeSpectra(rf_files, freq_sampling):
    plt.figure(1, figsize=(20, 16))
    for rf_file in rf_files:
        image = itk.imread(rf_file)
        arr = itk.GetArrayViewFromImage(image)
        freq, Pxx = signal.periodogram(arr.transpose(),
                freq_sampling,
                window='hamming',
                detrend='linear',
                axis=0)
        Pxx = np.mean(Pxx, 1)
        plt.semilogy(freq, Pxx, label=rf_file)
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Power spectral density [V**2/Hz]')
    plt.legend(loc='upper right')

    dirname = os.path.dirname(rf_files[0])
    plt.savefig(os.path.join(dirname, 'PowerSpectralDensity.png'), dpi=300)
    plt.show()
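A hypothetical call, assuming the imports the function body relies on (os, itk, numpy as np, matplotlib.pyplot as plt, scipy.signal as signal) and some RF capture files on disk (the file names and sampling rate below are made up):

VisualizeSpectra(['rf_frame_0.mha', 'rf_frame_1.mha'], freq_sampling=30e6)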
Code example #2
File: extras.py Project: zyx3216825/ITK
assert s.GetSize()[0] == s.GetSize()[1] == 256
s = itk.region(reader.GetOutput())
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256

# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)


# test write
itk.imwrite(reader, sys.argv[3])
itk.imwrite(reader, sys.argv[3], True)

# test read
image = itk.imread(filename)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
image = itk.imread(filename, itk.F)
assert type(image) == itk.Image[itk.F, 2]
image = itk.imread(filename, itk.F, fallback_only=True)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
try:
    image = itk.imread(filename, fallback_only=True)
    # Should never reach this point if test passes since an exception
    # is expected.
    raise Exception("`itk.imread()` fallback_only should have failed")
except Exception as e:
    if str(e) == "pixel_type must be set when using the fallback_only option":
        pass
    else:
        raise e
Code example #3
import itk
import sys

if len(sys.argv) != 6:
    print(
        f"Usage: {sys.argv[0]} input_image_file spacing_fraction sigma_fraction output_image_file_label_image_interpolator output_image_file_nearest_neighbor_interpolator"
    )
    sys.exit(1)

input_image_file = sys.argv[1]
spacing_fraction = float(sys.argv[2])
sigma_fraction = float(sys.argv[3])
output_image_file_label_image_interpolator = sys.argv[4]
output_image_file_nearest_neighbor_interpolator = sys.argv[5]

input_image = itk.imread(input_image_file)

resize_filter = itk.ResampleImageFilter.New(input_image)

input_spacing = itk.spacing(input_image)
output_spacing = [s * spacing_fraction for s in input_spacing]
resize_filter.SetOutputSpacing(output_spacing)

input_size = itk.size(input_image)
output_size = [
    int(s * input_spacing[dim] / spacing_fraction) for dim, s in enumerate(input_size)
]
resize_filter.SetSize(output_size)

gaussian_interpolator = itk.LabelImageGaussianInterpolateImageFunction.New(input_image)
sigma = [s * sigma_fraction for s in output_spacing]
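The snippet stops right after building the label-image Gaussian interpolator. A sketch of how such a setup is typically finished, assuming the GaussianInterpolateImageFunction SetSigma/SetAlpha API and the two output file names parsed above (the alpha value is an assumed kernel-cutoff choice):

gaussian_interpolator.SetSigma(sigma)
gaussian_interpolator.SetAlpha(3.0)  # Gaussian kernel cutoff, in units of sigma (assumed value)
resize_filter.SetInterpolator(gaussian_interpolator)
itk.imwrite(resize_filter, output_image_file_label_image_interpolator)

# For comparison, the same resampling with a nearest-neighbor interpolator.
nearest_neighbor_interpolator = itk.NearestNeighborInterpolateImageFunction.New(input_image)
resize_filter.SetInterpolator(nearest_neighbor_interpolator)
itk.imwrite(resize_filter, output_image_file_nearest_neighbor_interpolator)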
Code example #4
def generate_val_sample(image_list, h5_path, patch_size):
    # printing process id
    print('PID: {0}, number of samples: {1}'.format(os.getpid(),
                                                    len(image_list)))

    for idx in range(image_list.shape[0]):
        i_sample = image_list.iloc[idx]['ID']
        print('PID: {0} -- '.format(os.getpid()),
              image_list.iloc[idx]['image'][:])
        # read image
        itk_image = itk.imread(image_list.iloc[idx]['image'])
        itk_annotation = itk.imread(image_list.iloc[idx]['label'])
        np_image = itk.array_from_image(itk_image)
        np_annotation = itk.array_from_image(itk_annotation)

        # normalized
        #np_image = (np_image - np_image.mean())/ np_image.std()
        np_image = np_image / (np_image.max() - np_image.min())

        # reshape
        np_image = np_image.reshape(
            [1, np_image.shape[0], np_image.shape[1], np_image.shape[2]])
        np_annotation = np_annotation.reshape([
            1, np_annotation.shape[0], np_annotation.shape[1],
            np_annotation.shape[2]
        ])
        tensor_image = torch.from_numpy(np_image)
        tensor_annotation = torch.from_numpy(np_annotation)

        # get patches with proper strides to slide all image
        image_patches = tensor_image.unfold(
            1, patch_size[0],
            get_stride(np_image.shape[1], patch_size[0])).unfold(
                2, patch_size[1],
                get_stride(np_image.shape[2], patch_size[1])).unfold(
                    3, patch_size[2],
                    get_stride(np_image.shape[3], patch_size[2]))
        annotation_patches = tensor_annotation.unfold(
            1, patch_size[0],
            get_stride(np_image.shape[1], patch_size[0])).unfold(
                2, patch_size[1],
                get_stride(np_image.shape[2], patch_size[1])).unfold(
                    3, patch_size[2],
                    get_stride(np_image.shape[3], patch_size[2]))
        image_patches = image_patches.reshape(-1, 1, patch_size[0],
                                              patch_size[1], patch_size[2])
        annotation_patches = annotation_patches.reshape(
            -1, 1, patch_size[0], patch_size[1], patch_size[2])
        patch_image = image_patches.numpy()
        patch_label = annotation_patches.numpy()

        # save to h5
        if not os.path.exists(h5_path):
            os.makedirs(h5_path)

        for i_patch in range(image_patches.shape[0]):
            patch_file_name = os.path.join(
                h5_path, 'val_sample_{0}_patch_{1}x{2}x{3}_{4}.h5'.format(
                    i_sample, patch_size[0], patch_size[1], patch_size[2],
                    i_patch))

            #check old patch file
            if os.path.isfile(patch_file_name):
                os.remove(patch_file_name)

            #output h5 file
            with h5py.File(patch_file_name, 'w') as f:
                f['image'] = patch_image[i_patch, :, :, :, :]
                f['label'] = patch_label[i_patch, :, :, :, :]
Code example #5
def copy_images(images_test, task_id, task_name_3D, patient_ind,
                testimage_name, ROI_list, ROI_list_alternatenames, label_dict):

    image_identifier_3D = 'CBCT_3D'
    #realCBCT_N4imagename = 'realCBCT_correctedN4_beforeshifting.mha'

    # copy the 3D cbct images to imagesTr in .nii.gz format
    global taskfolder_3D
    taskfolder_3D = f'{nnUNet_raw_data}/Task{task_id:03d}_{task_name_3D}'
    json_filename_3D = f'{taskfolder_3D}/dataset.json'
    imagesTs_foldername_3D = f'{taskfolder_3D}/imagesTs/'
    labelsTs_foldername_3D = f'{taskfolder_3D}/labelsTs/'

    maybe_mkdir_p(imagesTs_foldername_3D)
    maybe_mkdir_p(labelsTs_foldername_3D)
    open(f"{taskfolder_3D}/errorlog_test.txt", "w").close()

    test_patient_names = []

    for patient_folder in sorted(glob.glob(f'{images_test}/*')):

        # if rennes images, the name of the cbct is different and there are more than one cbct per patient
        if 'Rennes' in patient_folder:
            #cbct_image_path = glob.glob(f'{patient_folder}/cbct*.nii.gz')[0]

            if 'pseudo' in patient_folder:
                image_prefix = 'pseudo_rec'
            else:
                image_prefix = 'cbct'

            if (apply_matching == False) & ('correctedN4' in testimage_name):

                for cbct_image_path in sorted(
                        glob.glob(
                            f'{patient_folder}/{image_prefix}*_N4folder/{realCBCT_N4imagename}'
                        )):
                    cbct_id = os.path.dirname(cbct_image_path).replace(
                        f'{patient_folder}/{image_prefix}',
                        '').replace('_N4folder', '')
                    with open(f'{taskfolder_3D}/errorlog_test.txt', 'a') as f:
                        f.write(
                            str(patient_ind) + " - " + patient_folder +
                            " - cbct id " + cbct_id + " : \n")
                    print(patient_folder)

                    casename_3D = f'{image_identifier_3D}_{patient_ind:04d}'
                    test_patient_names.append(casename_3D)

                    cbct_3D_filename = f'{imagesTs_foldername_3D}/{casename_3D}_0000.nii.gz'

                    if spacing2mm == True:
                        cbct_img = itk.imread(cbct_image_path)
                        img_origin = cbct_img.GetOrigin()
                        new_cbct_img = gt.applyTransformation(
                            input=cbct_img,
                            neworigin=img_origin,
                            newspacing=img_spacing,
                            adaptive=True,
                            force_resample=True)
                        itk.imwrite(new_cbct_img, cbct_3D_filename)
                    else:

                        convert_command = f'gt_image_convert {cbct_image_path} -o {cbct_3D_filename}'
                        os.system(convert_command)

                    # copy and register label (ROI) with the cbct
                    ROI_dir = f'{patient_folder}/ROI'
                    allRoisImage = create_segmentation_map(
                        ROI_list, ROI_list_alternatenames, ROI_dir,
                        cbct_3D_filename, cbct_id)
                    label_3D_filename = f'{labelsTs_foldername_3D}/{casename_3D}.nii.gz'
                    itk.imwrite(allRoisImage, label_3D_filename)

                    patient_ind = patient_ind + 1

            elif (apply_matching == False) & ('swn_normalized'
                                              in testimage_name):

                for cbct_image_path in sorted(
                        glob.glob(
                            f'{patient_folder}/{image_prefix}*_SWNfolder/{realCBCT_SWNimagename}'
                        )):
                    cbct_id = os.path.dirname(cbct_image_path).replace(
                        f'{patient_folder}/{image_prefix}',
                        '').replace('_SWNfolder', '')
                    with open(f'{taskfolder_3D}/errorlog_test.txt', 'a') as f:
                        f.write(
                            str(patient_ind) + " - " + patient_folder +
                            " - cbct id " + cbct_id + " : \n")
                    print(patient_folder)

                    casename_3D = f'{image_identifier_3D}_{patient_ind:04d}'
                    test_patient_names.append(casename_3D)

                    cbct_3D_filename = f'{imagesTs_foldername_3D}/{casename_3D}_0000.nii.gz'

                    if spacing2mm == True:
                        cbct_img = itk.imread(cbct_image_path)
                        img_origin = cbct_img.GetOrigin()
                        new_cbct_img = gt.applyTransformation(
                            input=cbct_img,
                            neworigin=img_origin,
                            newspacing=img_spacing,
                            adaptive=True,
                            force_resample=True)
                        itk.imwrite(new_cbct_img, cbct_3D_filename)
                    else:
                        convert_command = f'gt_image_convert {cbct_image_path} -o {cbct_3D_filename}'
                        os.system(convert_command)

                    # copy and register label (ROI) with the cbct
                    ROI_dir = f'{patient_folder}/ROI'
                    allRoisImage = create_segmentation_map(
                        ROI_list, ROI_list_alternatenames, ROI_dir,
                        cbct_3D_filename, cbct_id)
                    label_3D_filename = f'{labelsTs_foldername_3D}/{casename_3D}.nii.gz'
                    itk.imwrite(allRoisImage, label_3D_filename)

                    patient_ind = patient_ind + 1

            else:

                for cbct_image_path in sorted(
                        glob.glob(f'{patient_folder}/{image_prefix}*.nii.gz')):
                    cbct_id = os.path.basename(cbct_image_path).replace(
                        f'{image_prefix}', '').replace('.nii.gz', '')
                    with open(f'{taskfolder_3D}/errorlog_test.txt', 'a') as f:
                        f.write(
                            str(patient_ind) + " - " + patient_folder +
                            " - cbct id " + cbct_id + " : \n")
                    print(patient_folder)

                    casename_3D = f'{image_identifier_3D}_{patient_ind:04d}'
                    test_patient_names.append(casename_3D)

                    # convert to float
                    convert_command = f'clitkImageConvert -i {cbct_image_path} -o {cbct_image_path} -t float'
                    os.system(convert_command)

                    # reduce image size and save in 3D_outputfolder
                    cbct_3D_filename = f'{imagesTs_foldername_3D}/{casename_3D}_0000.nii.gz'

                    if spacing2mm == True:
                        cbct_img = itk.imread(cbct_image_path)
                        img_origin = cbct_img.GetOrigin()
                        new_cbct_img = gt.applyTransformation(
                            input=cbct_img,
                            neworigin=img_origin,
                            newspacing=img_spacing,
                            adaptive=True,
                            force_resample=True)
                        itk.imwrite(new_cbct_img, cbct_3D_filename)
                    else:
                        convert_command = f'gt_image_convert {cbct_image_path} -o {cbct_3D_filename}'
                        os.system(convert_command)

                    # copy and register label (ROI) with the cbct
                    ROI_dir = f'{patient_folder}/ROI'
                    allRoisImage = create_segmentation_map(
                        ROI_list, ROI_list_alternatenames, ROI_dir,
                        cbct_3D_filename, cbct_id)
                    label_3D_filename = f'{labelsTs_foldername_3D}/{casename_3D}.nii.gz'
                    itk.imwrite(allRoisImage, label_3D_filename)

                    patient_ind = patient_ind + 1
        else:

            with open(f'{taskfolder_3D}/errorlog_test.txt', 'a') as f:
                f.write(str(patient_ind) + " - " + patient_folder + ": \n")
            print(patient_folder)

            casename_3D = f'{image_identifier_3D}_{patient_ind:04d}'
            test_patient_names.append(casename_3D)

            if 'CLB' in patient_folder:
                if (apply_matching == False) & ('correctedN4'
                                                in testimage_name):
                    cbct_image_path = f'{patient_folder}/{realCBCT_N4imagename}'
                elif (apply_matching == False) & ('swn_normalized'
                                                  in testimage_name):
                    cbct_image_path = f'{patient_folder}/{realCBCT_SWNimagename}'
                else:
                    cbct_image_path = glob.glob(
                        f'{patient_folder}/cbct*.mhd')[0]
            else:
                cbct_image_path = f'{patient_folder}/{testimage_name}'

            #cbct_image_path = f'{patient_folder}/{testimage_name}'

            # reduce image size and save in 3D_outputfolder
            cbct_3D_filename = f'{imagesTs_foldername_3D}/{casename_3D}_0000.nii.gz'

            #if (apply_matching == False) & ('correctedN4' in testimage_name): # no need to resize, already 2 mm spacing
            #    #shutil.copy(cbct_image_path, imagesTr_foldername_3D)
            #    #os.rename(f'{imagesTr_foldername_3D}/{trainingimage_name}', cbct_3D_filename)
            #    convert_command= f'gt_image_convert {cbct_image_path} -o {cbct_3D_filename}'
            #else:
            #    convert_command= f'gt_affine_transform -i {cbct_image_path} --newspacing=2,2,2 -o {cbct_3D_filename} -fr -a'

            if spacing2mm == True:
                convert_command = f'gt_affine_transform -i {cbct_image_path} --newspacing=2,2,2 -o {cbct_3D_filename} -fr -a'
                os.system(convert_command)
            else:
                convert_command = f'gt_image_convert {cbct_image_path} -o {cbct_3D_filename}'
                os.system(convert_command)

            # copy and register label (ROI) with the cbct
            ROI_dir = f'{patient_folder}/ROI'
            allRoisImage = create_segmentation_map(ROI_list,
                                                   ROI_list_alternatenames,
                                                   ROI_dir, cbct_3D_filename,
                                                   '')
            label_3D_filename = f'{labelsTs_foldername_3D}/{casename_3D}.nii.gz'
            itk.imwrite(allRoisImage, label_3D_filename)

            patient_ind = patient_ind + 1

    # dataset.json 3D
    json_dict = {}
    json_dict['name'] = task_name_3D
    json_dict['description'] = ""
    json_dict['tensorImageSize'] = "4D"
    json_dict['reference'] = "saphir"
    json_dict['licence'] = ""
    json_dict['release'] = "0.0"
    json_dict['modality'] = {
        "0": "CT",
    }

    json_dict['labels'] = label_dict

    json_dict['numTest'] = len(test_patient_names)
    json_dict['test'] = [
        "./imagesTs/%s.nii.gz" % i for i in test_patient_names
    ]
    save_json(json_dict, json_filename_3D)
Code example #6
assert s.GetSize()[0] == s.GetSize()[1] == 256
s = itk.region(reader.GetOutput())
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256

# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)

# test write
itk.imwrite(reader, sys.argv[4])
itk.imwrite(reader, sys.argv[4], imageio=itk.PNGImageIO.New())
itk.imwrite(reader, sys.argv[4], True)

# test read
image = itk.imread(pathlib.Path(filename))
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
image = itk.imread(filename, itk.F)
assert type(image) == itk.Image[itk.F, 2]
image = itk.imread(filename, itk.F, fallback_only=True)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
try:
    image = itk.imread(filename, fallback_only=True)
    # Should never reach this point if test passes since an exception
    # is expected.
    raise Exception("`itk.imread()` fallback_only should have failed")
except Exception as e:
    if str(e) == "pixel_type must be set when using the fallback_only option":
        pass
    else:
        raise e
Code example #7
import itk
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

im=itk.imread('data/CBCT-TextureInput.png', itk.F)
sobel=itk.sobel_edge_detection_image_filter(im)
arr = itk.array_from_image(sobel)
plt.gray()
plt.imshow(arr)
plt.axis('off')
Code example #8
    def __init__(self, mhd, meta_data={}):
        self._meta_data = meta_data
        self._mhd = mhd
        self._img = itk.imread(mhd)
        self._slices = []
Code example #9
    def load_data(self, path_infos, indices, train=False):
        mini_batch_size = len(indices)
        in_channels = 1

        if train == True:
            xs = np.zeros((mini_batch_size, in_channels, self.crop_size,
                           self.crop_size, self.crop_size)).astype(np.float32)
            ys = np.zeros((mini_batch_size, in_channels, self.crop_size,
                           self.crop_size, self.crop_size)).astype(np.float32)

            for i, index in enumerate(indices):
                path = path_infos[index]
                labelPath = self.labelPathFromPath(path)
                affPath, warpPath, invWarpPath = self.transformPathsFromPath(
                    path)
                labelAffPath, labelWarpPath, labelInvWarpPath = self.transformPathsFromPath(
                    labelPath)
                atlasPath = self.atlasPathFromPath(path)

                img = np.array(nib.load(path).dataobj)
                labelImg = np.array(nib.load(labelPath).dataobj)

                imgAff = np.linalg.inv(self.antsmat2mat(io.loadmat(affPath)))
                labelAff = self.antsmat2mat(io.loadmat(labelAffPath))
                imgInvWarp = np.array(nib.load(invWarpPath).dataobj)
                labelWarp = np.array(nib.load(labelWarpPath).dataobj)

                imgITK = itk.imread(path)
                labelITK = itk.imread(labelPath)
                atlasITK = itk.imread(atlasPath)

                if img is None or labelImg is None:
                    raise RuntimeError("invalid image: {i}".format(i=path))
                x, y, z = img.shape

                # Restrict random cropping to a central location in the image
                # to avoid training on too much blank space
                xRange = 20
                yRange = 70
                zRange = 70

                rand_range_x = x - self.crop_size - (xRange * 2)
                rand_range_y = y - self.crop_size - (yRange * 2)
                rand_range_z = z - self.crop_size - (zRange * 2)
                x_offset = np.random.randint(rand_range_x) + xRange
                y_offset = np.random.randint(rand_range_y) + yRange
                z_offset = np.random.randint(rand_range_z) + zRange

                img = img[x_offset:x_offset + self.crop_size,
                          y_offset:y_offset + self.crop_size,
                          z_offset:z_offset + self.crop_size]

                # Get a transformed patch of the label image to match
                # the training image
                labelImgWarped = np.zeros(
                    (self.crop_size, self.crop_size, self.crop_size))
                # Use dedicated loop variables so the mini-batch index `i`
                # from the enumerate() loop above is not overwritten.
                for ci in range(self.crop_size):
                    for cj in range(self.crop_size):
                        for ck in range(self.crop_size):
                            originPoint = [
                                x_offset + ci, y_offset + cj, z_offset + ck
                            ]
                            refPoint = self.transformPoint(
                                originPoint, imgAff, labelAff, imgInvWarp,
                                labelWarp, imgITK, labelITK, atlasITK)
                            labelImgWarped[ci, cj, ck] = labelImg[refPoint[0],
                                                                  refPoint[1],
                                                                  refPoint[2]]

                labelImg = labelImgWarped

                # Normalize images
                if img.max() > 0 and labelImg.max() > 0:
                    img = img.astype(np.float32)
                    labelImg = labelImg.astype(np.float32)
                    img = (img / img.max())
                    labelImg = (labelImg / labelImg.max())
                else:
                    img = np.zeros(img.shape)
                    labelImg = np.zeros(labelImg.shape)

                xs[i, 0, :, :, :] = img.astype(np.float32)
                ys[i, 0, :, :, :] = labelImg.astype(np.float32)

            # Return only after every sample in the mini-batch has been filled.
            return xs, ys

        else:
            for i, index in enumerate(indices):
                path = path_infos[index]

                imgNib = nib.load(path)
                imgAffine = imgNib.affine
                img = np.array(imgNib.dataobj)
                if img is None:
                    raise RuntimeError("invalid image: {i}".format(i=path))

            img = img.astype(np.float32)
            maxIntensity = img.max()

            # Normalize image
            x, y, z = img.shape
            xs = np.zeros(
                (mini_batch_size, in_channels, x, y, z)).astype(np.float32)
            xs[0, 0, :, :, :] = (img / img.max()).astype(np.float32)

            imgFileName = os.path.splitext(os.path.basename(path))[0]
            return xs, maxIntensity, imgFileName, imgAffine
Code example #10
    labels_np[labels_np > 7] = 0


if __name__ == '__main__':
    # TODO: set to True for CT and False for MR
    is_ct = True
    # TODO: change input folder
    input_folder = 'TODO'
    output_folder = './mmwhs_dataset/ct_mha/' if is_ct else './mmwhs_dataset/mr_mha/'
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    filenames = glob(input_folder + '*image.nii.gz')
    for filename in sorted(filenames):
        basename = os.path.basename(filename)
        basename_wo_ext = basename[:basename.find('.nii.gz')]
        print(basename_wo_ext)
        image = itk.imread(filename)
        reoriented = reorient_to_rai(image)
        itk.imwrite(reoriented, output_folder + basename_wo_ext + '.mha')

    filenames_label = glob(input_folder + '*label.nii.gz')
    for filename in sorted(filenames_label):
        basename = os.path.basename(filename)
        basename_wo_ext = basename[:basename.find('.nii.gz')]
        print(basename_wo_ext)
        image = itk.imread(filename)
        reoriented = reorient_to_rai(image)
        relabel(reoriented)
        itk.imwrite(reoriented,
                    output_folder + basename_wo_ext + '_sorted.mha')
Code example #11
File: Code.py Project: onurtemizkan/ITKExamples
# limitations under the License.

import argparse
import sys

import itk
from distutils.version import StrictVersion as VS
if VS(itk.Version.GetITKVersion()) < VS("5.0.0"):
    print("ITK 5.0.0 or newer is required.")
    sys.exit(1)

parser = argparse.ArgumentParser(description='Segment blood vessels.')
parser.add_argument('input_image')
parser.add_argument('output_image')
parser.add_argument('--sigma', type=float, default=1.0)
parser.add_argument('--alpha1', type=float, default=0.5)
parser.add_argument('--alpha2', type=float, default=2.0)
args = parser.parse_args()

input_image = itk.imread(args.input_image, itk.ctype('float'))

hessian_image = itk.hessian_recursive_gaussian_image_filter(input_image,
                                                            sigma=args.sigma)

vesselness_filter = itk.Hessian3DToVesselnessMeasureImageFilter[itk.ctype(
    'float')].New()
vesselness_filter.SetInput(hessian_image)
vesselness_filter.SetAlpha1(args.alpha1)
vesselness_filter.SetAlpha2(args.alpha2)

itk.imwrite(vesselness_filter, args.output_image)
Code example #12
import argparse
import sys

import itk
from distutils.version import StrictVersion as VS
if VS(itk.Version.GetITKVersion()) < VS("5.0.0"):
    print("ITK 5.0.0 or newer is required.")
    sys.exit(1)

parser = argparse.ArgumentParser(description='Segment blood vessels with multi-scale Hessian-based measure.')
parser.add_argument('input_image')
parser.add_argument('output_image')
parser.add_argument('--sigma_minimum', type=float, default=1.0)
parser.add_argument('--sigma_maximum', type=float, default=10.0)
parser.add_argument('--number_of_sigma_steps', type=int, default=10)
args = parser.parse_args()

input_image = itk.imread(args.input_image, itk.F)

ImageType = type(input_image)
Dimension = input_image.GetImageDimension()
HessianPixelType = itk.SymmetricSecondRankTensor[itk.D, Dimension]
HessianImageType = itk.Image[HessianPixelType, Dimension]

objectness_filter = itk.HessianToObjectnessMeasureImageFilter[HessianImageType, ImageType].New()
objectness_filter.SetBrightObject(False)
objectness_filter.SetScaleObjectnessMeasure(False)
objectness_filter.SetAlpha(0.5)
objectness_filter.SetBeta(1.0)
objectness_filter.SetGamma(5.0)

multi_scale_filter = itk.MultiScaleHessianBasedMeasureImageFilter[ImageType, HessianImageType, ImageType].New()
multi_scale_filter.SetInput(input_image)
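The example is cut off while the multi-scale filter is being configured. A sketch of how such a pipeline is typically finished, assuming the MultiScaleHessianBasedMeasureImageFilter setters named below and a rescale-to-unsigned-char step before writing:

multi_scale_filter.SetHessianToMeasureFilter(objectness_filter)
multi_scale_filter.SetSigmaStepMethodToLogarithmic()
multi_scale_filter.SetSigmaMinimum(args.sigma_minimum)
multi_scale_filter.SetSigmaMaximum(args.sigma_maximum)
multi_scale_filter.SetNumberOfSigmaSteps(args.number_of_sigma_steps)

# Rescale the vesselness response to [0, 255] so it can be written as unsigned char.
OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, Dimension]
rescale_filter = itk.RescaleIntensityImageFilter[ImageType, OutputImageType].New()
rescale_filter.SetInput(multi_scale_filter)

itk.imwrite(rescale_filter, args.output_image)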
Code example #13
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/

from __future__ import print_function

import itk
from sys import argv, stderr, exit
itk.auto_progress(2)


if(len(argv) < 2):
    print((
        "Missing Parameters \n Usage: IntensityWindowingImageFilter.py inputImageFile"), file=stderr)
    exit(1)

image = itk.imread(argv[1], itk.F)
# Verifies that ITK supports getting tuples for filter parameters. Not testing the filter
# results.
intensity_filter = itk.IntensityWindowingImageFilter.New(image, WindowLevel=[255, 127])
Code example #14
#==========================================================================
#
#   Copyright Insight Software Consortium
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/

#
#  Example on the use of the MeanImageFilter with function calls
#

import itk
from sys import argv

image = itk.imread(argv[1])
filtered_image = itk.MeanImageFilter(image, Radius=int(argv[3]))
itk.imwrite(filtered_image, argv[2])
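In ITK 5 the same operation is also available through the snake_case functional interface; an equivalent sketch:

import itk
from sys import argv

image = itk.imread(argv[1])
filtered_image = itk.mean_image_filter(image, radius=int(argv[3]))
itk.imwrite(filtered_image, argv[2])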
Code example #15
File: extras.py Project: PlutoniumHeart/ITK
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256


# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)


# test write
itk.imwrite(reader, sys.argv[2])
itk.write(reader, sys.argv[2])
itk.imwrite(reader, sys.argv[2], True)

# test read
image=itk.imread(fileName)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC],2]
image=itk.imread(fileName, itk.F)
assert type(image) == itk.Image[itk.F,2]

# test search
res = itk.search("Index")
assert res[0] == "Index"
assert res[1] == "index"
assert "ContinuousIndex" in res

res = itk.search("index", True)
assert "Index" not in res


# test down_cast
Code example #16
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256


# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)


# test write
itk.imwrite(reader, sys.argv[2])
itk.write(reader, sys.argv[2])
itk.imwrite(reader, sys.argv[2], True)

# test read
image=itk.imread(fileName)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC],2]
image=itk.imread(fileName, itk.F)
assert type(image) == itk.Image[itk.F,2]

# test search
res = itk.search("Index")
assert res[0] == "Index"
assert res[1] == "index"
assert "ContinuousIndex" in res

res = itk.search("index", True)
assert "Index" not in res


# test down_cast
Code example #17
File: uebung_7.py Project: NajTec/bm
fixedImageFile = "fixed.png"
movingImageFile = "moving.png"
outputImageFile = "output.png"
differenceImageAfterFile = "after.png"
differenceImageBeforeFile = "before.png"

PixelType = itk.ctype('float')
fixedImage = itk.imread(fixedImageFile, PixelType)
movingImage = itk.imread(movingImageFile, PixelType)
Dimension = fixedImage.GetImageDimension()
FixedImageType = itk.Image[PixelType, Dimension]
MovingImageType = itk.Image[PixelType, Dimension]

TransformType = itk.TranslationTransform[itk.D, Dimension]
initialTransform = TransformType.New()

optimizer = itk.RegularStepGradientDescentOptimizerv4.New(LearningRate=4,MinimumStepLength=0.001, RelaxationFactor=0.5, NumberOfIterations=200)

metric = itk.MeanSquaresImageToImageMetricv4[FixedImageType, MovingImageType].New()

registration = itk.ImageRegistrationMethodv4.New(
    FixedImage=fixedImage,
    MovingImage=movingImage,
    Metric=metric,
    Optimizer=optimizer,
    InitialTransform=initialTransform)
Code example #18

#def main():
fns = glob(os.path.join(img_dir, '*.nii*'))
for fn in fns:
    _, base, ext = split_filename(fn)
    img = nib.load(fn).get_data().astype(np.float32).squeeze()
    if img.ndim != 3:
        print(
            f'Only 3D data supported. File {base}{ext} has dimension {img.ndim}. Skipping.'
        )
        continue

    input_filename = fn

    # itk.imread returns the image directly; no reader object or explicit
    # Update() call is needed.
    image = itk.imread(input_filename, itk.F)

    if (img.shape[0] % int(x_shape)) != 0:
        print(
            f'File {base}{ext} does not ensure equal split of input image along x axis'
        )
        continue

    if (img.shape[1] % int(y_shape)) != 0:
        print(
            f'File {base}{ext} does not ensure equal split of input image along y axis'
        )
        continue
Code example #19
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import sys

import itk
from distutils.version import StrictVersion as VS
if VS(itk.Version.GetITKVersion()) < VS("5.0.0"):
    print("ITK 5.0.0 or newer is required.")
    sys.exit(1)

parser = argparse.ArgumentParser(description='Segment blood vessels.')
parser.add_argument('input_image')
parser.add_argument('output_image')
parser.add_argument('--sigma', type=float, default=1.0)
parser.add_argument('--alpha1', type=float, default=0.5)
parser.add_argument('--alpha2', type=float, default=2.0)
args = parser.parse_args()

input_image = itk.imread(args.input_image, itk.ctype('float'))

hessian_image = itk.hessian_recursive_gaussian_image_filter(input_image, sigma=args.sigma)

vesselness_filter = itk.Hessian3DToVesselnessMeasureImageFilter[itk.ctype('float')].New()
vesselness_filter.SetInput(hessian_image)
vesselness_filter.SetAlpha1(args.alpha1)
vesselness_filter.SetAlpha2(args.alpha2)

itk.imwrite(vesselness_filter, args.output_image)
Code example #20
from __future__ import print_function

import sys
try:
    import numpy as np
except ImportError:
    # We don't have numpy -- bail
    sys.exit(0)
import itk

if len(sys.argv) < 2:
    print('Usage: ' + sys.argv[0] + ' <inputImage>')
    sys.exit(1)
inputImageFileName = sys.argv[1]

image = itk.imread(inputImageFileName)
array = itk.GetArrayFromImage(image)

extractor = itk.ExtractImageFilter.New(image)
extractionRegion = image.GetLargestPossibleRegion()
extractor.SetExtractionRegion(extractionRegion)

# GetArrayFromImage calls UpdateLargestPossibleRegion to ensure the image buffer
# has been populated
array = itk.GetArrayFromImage(extractor.GetOutput())

# GetArrayFromImage calls UpdateLargestPossibleRegion to ensure the image buffer
# has been populated with the correct region
extractionRegion.SetSize(10)
extractor.SetExtractionRegion(extractionRegion)
array = itk.GetArrayFromImage(extractor.GetOutput())
Code example #21
File: fileio.py Project: zhushaoquan/mermaid
    def read(self,
             filename,
             intensity_normalize=False,
             squeeze_image=False,
             normalize_spacing=True,
             adaptive_padding=-1,
             verbose=False,
             silent_mode=False):
        """
        Reads the image assuming it is a single channel image.

        :param filename: filename to be read
        :param intensity_normalize: uses image intensity normalization
        :param squeeze_image: squeezes image first (e.g., from 1x128x128 to 128x128)
        :param normalize_spacing: normalizes spacing so largest extent is in [0,1]
        :param adaptive_padding: if larger than 0, adaptive padding is applied to the image; -1 disables it
        :param verbose: if True, prints status information while reading
        :param silent_mode: if True, suppresses output
        :return: the read image, its header information, the spacing, and the normalized spacing
            (as a tuple: im, hdr, spacing, squeezed_spacing)
        """
        self.set_intensity_normalization(intensity_normalize)
        self.set_squeeze_image(squeeze_image)
        self.set_adaptive_padding(adaptive_padding)
        self.set_normalize_spacing(normalize_spacing)

        if verbose and not silent_mode:
            print('Reading image: ' + filename)

        # read with the itk reader (can also read other file formats)
        im_itk = itk.imread(native_str(filename))
        im, hdr = self._convert_itk_image_to_numpy(im_itk)

        if self.replace_nans_with_zeros:
            im[np.isnan(im)] = 0

        if self.datatype_conversion:
            im = im.astype(self.default_datatype)

        if 'spacing' not in hdr:
            if not silent_mode:
                print('Image does not seem to have spacing information.')
            if 'sizes' in hdr:
                dim_guess = len(hdr['sizes'])
            else:
                dim_guess = len(im.shape)
            if not silent_mode:
                print('Guessed dimension to be dim = ' + str(dim_guess))
            spacing = np.ones(dim_guess)
            hdr['spacing'] = spacing
            if not silent_mode:
                print('Using guessed spacing of ' + str(spacing))

        spacing = np.flipud(hdr['spacing'])

        squeezed_spacing = spacing  # will be changed if image is squeezed
        sz = im.shape
        sz_squeezed = sz

        if self.squeeze_image == True:
            if verbose and not silent_mode:
                print('Squeezing image')
            if hdr['is_vector_image']:
                dim = len(im.shape[1:])
            else:
                dim = len(im.shape)

            im = im.squeeze()

            if hdr['is_vector_image']:
                dimSqueezed = len(im.shape[1:])
            else:
                dimSqueezed = len(im.shape)

            hdr['squeezed_dim'] = dimSqueezed

            sz_squeezed = im.shape
            if dim != dimSqueezed:
                if verbose and not silent_mode:
                    print('Squeezing changed dimension from ' + str(dim) +
                          ' -> ' + str(dimSqueezed))

            if hdr['is_vector_image']:
                squeezed_spacing = self._compute_squeezed_spacing(
                    spacing, dim, sz[1:], dimSqueezed)
            else:
                squeezed_spacing = self._compute_squeezed_spacing(
                    spacing, dim, sz, dimSqueezed)

            if verbose and not silent_mode:
                print('squeezed_spacing = ' + str(squeezed_spacing))

            #squeezed_spacing = squeezed_spacing / (np.array(sz_squeezed) - 1)
            #if verbose and not silent_mode:
            #    print('Normalized spacing = ' + str(squeezed_spacing))

        if adaptive_padding > 0:
            im = self._do_adaptive_padding(im)

        if self.intensity_normalize_image == True:
            im = IM.IntensityNormalizeImage().default_intensity_normalization(
                im)
            if not silent_mode:
                print('INFO: Image WAS intensity normalized when loading:' \
                      + ' [' + str(im.min()) + ',' + str(im.max()) + ']')
        else:
            if not silent_mode:
                print('WARNING: Image was NOT intensity normalized when loading:' \
                      + ' [' + str(im.min()) + ',' + str(im.max()) + ']')

        if self.normalize_spacing:
            if not silent_mode:
                print(
                    'INFO: Normalizing the spacing to [0,1] in the largest dimension. (Turn normalize_spacing off if this is not desired.)'
                )
            hdr['original_spacing'] = spacing

            if hdr['is_vector_image']:
                spacing = self._normalize_spacing(spacing, sz[1:], silent_mode)
                squeezed_spacing = self._normalize_spacing(
                    squeezed_spacing, sz_squeezed[1:], silent_mode)
            else:
                spacing = self._normalize_spacing(spacing, sz, silent_mode)
                squeezed_spacing = self._normalize_spacing(
                    squeezed_spacing, sz_squeezed, silent_mode)

            hdr['spacing'] = spacing

            if verbose and not silent_mode:
                print('Normalized spacing = ' + str(spacing))
                print('Normalized squeezed spacing = ' + str(squeezed_spacing))

            if hdr['is_vector_image']:
                if self.scale_vectors_on_read_and_write:
                    hdr['vector_image_was_scaled'] = True
                    if not silent_mode:
                        print(
                            'Scaling the vector image to conform to the scaled spacing'
                        )
                    # we also need to normalize the vector components in this case
                    vector_scaling = np.array(hdr['spacing']) / np.array(
                        hdr['original_spacing'])
                    for d in range(dim):
                        im[d, ...] *= vector_scaling[d]

        return im, hdr, spacing, squeezed_spacing
Code example #22
import os

import dash
import dash_html_components as html
import itk

import dash_vtk
from dash_vtk.utils import to_volume_state

# Place a DICOM series (a set of per-file slices) in a directory. ITK sorts, sets spatial metadata, etc.
demo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
itk_image = itk.imread(os.path.join(demo_dir, "data", "ct_lung"))

# Convert itk.Image to vtkImageData
vtk_image = itk.vtk_image_from_image(itk_image)
volume_state = to_volume_state(vtk_image)


vtk_view = dash_vtk.View(
    dash_vtk.VolumeRepresentation(
        children=[dash_vtk.VolumeController(), dash_vtk.Volume(state=volume_state),]
    )
)

app = dash.Dash(__name__)
server = app.server

app.layout = html.Div(
    style={"height": "calc(100vh - 50px)", "width": "100%"},
    children=[html.Div(vtk_view, style={"height": "100%", "width": "100%"})],
)
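The demo ends at the layout; presumably it is launched with the standard Dash entry point (a sketch; the debug flag is a choice, not taken from the original):

if __name__ == "__main__":
    app.run_server(debug=True)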
Code example #23
#!/usr/bin/env python3

import numpy as np
import itk
import sys

input_filename = sys.argv[1]
output_filename = sys.argv[2]

image = itk.imread(input_filename)

# Cast to an unsigned char pixel type
cast_image = image.astype(itk.UC)

# Equivalent
cast_image = image.astype(np.uint8)

itk.imwrite(cast_image, output_filename)
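Note that astype performs a plain per-pixel cast, so values outside [0, 255] are not rescaled. If rescaling is wanted first, one option is itk.rescale_intensity_image_filter (a sketch, assuming its snake_case keyword arguments):

rescaled = itk.rescale_intensity_image_filter(image, output_minimum=0, output_maximum=255)
cast_image = rescaled.astype(itk.UC)
itk.imwrite(cast_image, output_filename)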
Code example #24
    img_count += 1
    if img_count > num_imgs:
        break
    if filename.endswith(".nii.gz"):
        input_filename = os.path.join(input_dir, filename)
    else:
        continue

    verbose = False  # verbose details of all steps.

    #% -------------------- Reader -------------------------
    InputImageType = get_itk_image_type(input_filename)
    print(InputImageType)
    OutputImageType = InputImageType

    inputImage = itk.imread(input_filename, itk.SS)

    #%% Set input information
    sizeOutput = [1024, 1400,
                  1]  # The size of output image (originally [1024,1400,1])
    threshold = 0.

    rot = [0., 0., 0.]  # rotation in degrees in x, y, and z direction.
    t = [-230, -350, 1500.]  # translation in x, y, and z directions.
    cor = [0., 0., 0.]  #  offset of the rotation from the center of image (3D)

    spaceOutput = [0.167, 0.167, 1]
    delta = sizeOutput[0] * spaceOutput[0] / 2

    inputImage.SetOrigin(
        [0, 0, 0])  # set the origin to (0,0,0) to fix translated image problem
Code example #25
#!/usr/bin/env python

# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itk
import argparse

parser = argparse.ArgumentParser(description="Sobel Edge Detection Image Filter.")
parser.add_argument("input_image")
parser.add_argument("output_image")
args = parser.parse_args()

input_image = itk.imread(args.input_image, pixel_type=itk.F)

output_image = itk.sobel_edge_detection_image_filter(input_image)

itk.imwrite(output_image, args.output_image)
Code example #26
#==========================================================================*/

import sys
try:
    import numpy as np
except ImportError:
    # We don't have numpy -- bail
    sys.exit(0)
import itk

if len(sys.argv) < 2:
    print('Usage: ' + sys.argv[0] + ' <inputImage>')
    sys.exit(1)
inputImageFileName = sys.argv[1]

image = itk.imread(inputImageFileName)
array = itk.GetArrayFromImage(image)

extractor = itk.ExtractImageFilter.New(image)
extractionRegion = image.GetLargestPossibleRegion()
extractor.SetExtractionRegion(extractionRegion)

# GetArrayFromImage calls UpdateLargestPossibleRegion to ensure the image buffer
# has been populated
array = itk.GetArrayFromImage(extractor.GetOutput())

# GetArrayFromImage calls UpdateLargestPossibleRegion to ensure the image buffer
# has been populated with the correct region
extractionRegion.SetSize(10)
extractor.SetExtractionRegion(extractionRegion)
array = itk.GetArrayFromImage(extractor.GetOutput())
Code example #27
def generate_random_patches(image_list, h5_path, patch_size,
                            target_num_patches_each_label, valid_pct):
    # printing process id
    print('PID: {0}, number of samples: {1}'.format(os.getpid(),
                                                    len(image_list)))

    for idx in range(image_list.shape[0]):
        i_sample = image_list.iloc[idx]['ID']
        print('PID: {0} -- '.format(os.getpid()),
              image_list.iloc[idx]['image'][:])
        # read image
        itk_image = itk.imread(image_list.iloc[idx]['image'])
        itk_annotation = itk.imread(image_list.iloc[idx]['label'])
        np_image = itk.array_from_image(itk_image)
        np_annotation = itk.array_from_image(itk_annotation)

        #normalized
        np_image = (np_image - np_image.mean()) / np_image.std()
        threshold_gray_value = np_image.mean() - 2 * np_image.std()

        # get valid range
        valid_range = np.zeros([3, 2], dtype=np.int32)
        valid_range[0, 1] = np_image.shape[0] - patch_size[0]
        valid_range[1][1] = np_image.shape[1] - patch_size[1]
        valid_range[2][1] = np_image.shape[2] - patch_size[2]

        patch_image = np.zeros([
            target_num_patches_each_label.sum(), 1, patch_size[0],
            patch_size[1], patch_size[2]
        ])  #Batch x C x W x D x H
        patch_label = np.zeros([
            target_num_patches_each_label.sum(), 1, patch_size[0],
            patch_size[1], patch_size[2]
        ])  #Batch x C x W x D x H

        patch_volume = patch_size[0] * patch_size[1] * patch_size[2]

        # randomly sampled
        i_num_valid_patches = 0
        visited_location = []
        for i_label in range(target_num_patches_each_label.shape[0]):

            i_num_valid_patches_each_label = 0
            while i_num_valid_patches_each_label < target_num_patches_each_label[
                    i_label]:
                k = randint(valid_range[0, 0], valid_range[0, 1])
                j = randint(valid_range[1, 0], valid_range[1, 1])
                i = randint(valid_range[2, 0], valid_range[2, 1])
                i_location = [k, j, i]  # bottom left corner, i.e., 000

                if not i_location in visited_location:
                    i_patch_image = np_image[k:(k + patch_size[0]),
                                             j:(j + patch_size[1]),
                                             i:(i + patch_size[2])]
                    i_patch_label = np_annotation[k:(k + patch_size[0]),
                                                  j:(j + patch_size[1]),
                                                  i:(i + patch_size[2])]

                    if (np.sum(i_patch_label == i_label) >
                            patch_volume * valid_pct) and (np.sum(
                                i_patch_image > threshold_gray_value)):
                        visited_location.append(i_location)  # valid visit

                        patch_image[i_num_valid_patches,
                                    0, :, :, :] = i_patch_image
                        patch_label[i_num_valid_patches,
                                    0, :, :, :] = i_patch_label

                        i_num_valid_patches_each_label += 1
                        i_num_valid_patches += 1


                        # print('Current total number of patches: {0}\n  For label of {1}: patch No. {2}'.format(i_num_valid_patches, i_label, i_num_valid_patches_each_label))

        # shuffle
        randnum = list(range(patch_image.shape[0]))
        np.random.shuffle(randnum)
        patch_image = patch_image[randnum, :]
        patch_label = patch_label[randnum, :]

        # save to h5
        if not os.path.exists(h5_path):
            os.makedirs(h5_path)

        for i_patch in range(patch_image.shape[0]):
            patch_file_name = os.path.join(
                h5_path, 'sample_{0}_patch_{1}x{2}x{3}_{4}.h5'.format(
                    i_sample, patch_size[0], patch_size[1], patch_size[2],
                    i_patch))

            #check old patch file
            if os.path.isfile(patch_file_name):
                os.remove(patch_file_name)

            #output h5 file
            with h5py.File(patch_file_name, 'w') as f:
                f['image'] = patch_image[i_patch, :, :, :, :]
                f['label'] = patch_label[i_patch, :, :, :, :]
Code example #28
import itk
import faulthandler

faulthandler.enable()
fixed_image = itk.imread('data/CT_2D_head_fixed.mha', itk.F)
moving_image = itk.imread('data/CT_2D_head_moving.mha', itk.F)

parameter_object = itk.ParameterObject.New()
default_rigid_parameter_map = parameter_object.GetDefaultParameterMap('rigid')
parameter_object.AddParameterMap(default_rigid_parameter_map)

result_image, result_transform_parameters = itk.elastix_registration_method(
    fixed_image, moving_image,
    parameter_object=parameter_object,
    log_to_console=False)
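The registered image and transform parameters are returned as regular Python objects; a minimal follow-up sketch (the output path is made up):

itk.imwrite(result_image, 'CT_2D_head_registered.mha')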
Code example #29
#
#==========================================================================*/

import itk
import sys

if len(sys.argv) < 4:
    print('Usage: ' + sys.argv[0] +
          ' <MovingImage> <TransformParameters> <ResultImage>')
    sys.exit(1)

moving_filename = sys.argv[1]
transform_parameters_filename = sys.argv[2]
result_filename = sys.argv[3]

moving = itk.imread(moving_filename, itk.F)

parameters = itk.ParameterObject.New()
parameters.ReadParameterFile(transform_parameters_filename)
print(parameters)

# Object oriented interface
ImageType = itk.Image[itk.F, 3]
transformix = itk.TransformixFilter[ImageType].New()
transformix.SetMovingImage(moving)
transformix.SetLogToConsole(True)
transformix.SetTransformParameterObject(parameters)
transformix.UpdateLargestPossibleRegion()
result = transformix.GetOutput()
itk.imwrite(result, result_filename)
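itk-elastix also exposes transformix through a one-call functional interface; a sketch, assuming itk.transformix_filter accepts the moving image and the parameter object:

result = itk.transformix_filter(moving, parameters)
itk.imwrite(result, result_filename)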
Code example #30
def compute_dice_scores(modelresults_folder, resultsdir_3Dlabels, with3D,
                        ROI_list):

    # create files to save the dice scores
    dice_file_2D = f'{modelresults_folder}/segm_results/dicescore_2Dmodel.txt'

    dicescores_header = ''
    for ROI in ROI_list:
        dicescores_header = dicescores_header + f"{ROI}\t"
    dicescores_header = dicescores_header + "Mean\n"

    #num_ROIs = len(ROI_list)

    with open(dice_file_2D, 'w') as f:
        f.write(dicescores_header)

    meandice_2D = 0
    num_patients = 0

    if with3D:
        dice_file_3D = f'{modelresults_folder}/segm_results/dicescore_3Dmodel.txt'
        with open(dice_file_3D, 'w') as f:
            f.write(dicescores_header)
        meandice_3D = 0

    # calculate dice between predicted and real labels for 2D and 3D
    for reallabel in glob.glob(f'{resultsdir_3Dlabels}/segmap*.mha'):
        print(reallabel)
        num_patients = num_patients + 1
        label_2Dmodel = reallabel.replace(
            f'{resultsdir_3Dlabels}/segmap',
            f'{modelresults_folder}/segm_results/merged_labels/label_patient')

        reallabel_array = itk.GetArrayFromImage(itk.imread(reallabel))
        label_2Dmodel_array = itk.GetArrayFromImage(itk.imread(label_2Dmodel))
        dicescores_2D = ''
        meandice_2D_perpatient = 0
        if with3D:
            label_3Dmodel = reallabel.replace(
                f'{resultsdir_3Dlabels}/segmap',
                f'{modelresults_folder}/OUTPUT_DIRECTORY_3D/CBCT_3D_').replace(
                    '.mha', '.nii.gz')
            label_3Dmodel_array = itk.GetArrayFromImage(
                itk.imread(label_3Dmodel))
            dicescores_3D = ''
            meandice_3D_perpatient = 0

        num_labels_2D = 0
        num_labels_3D = 0
        for ROI_ind, ROI in enumerate(ROI_list):
            # the class value = ROI_ind + 1
            k = ROI_ind + 1
            #print(np.sum(label_2Dmodel_array[reallabel_array==k]==k)*2.0 )
            #print(np.sum(label_2Dmodel_array[label_2Dmodel_array==k]==k) + np.sum(reallabel_array[reallabel_array==k]==k))
            dice_2D = np.sum(
                label_2Dmodel_array[reallabel_array == k] == k) * 2.0 / (
                    np.sum(label_2Dmodel_array[label_2Dmodel_array == k] == k)
                    + np.sum(reallabel_array[reallabel_array == k] == k))
            if not math.isnan(dice_2D):
                meandice_2D_perpatient = meandice_2D_perpatient + dice_2D
                num_labels_2D = num_labels_2D + 1
            dicescores_2D = dicescores_2D + f'{dice_2D}\t'

            if with3D:
                dice_3D = np.sum(
                    label_3Dmodel_array[reallabel_array == k] == k) * 2.0 / (
                        np.sum(
                            label_3Dmodel_array[label_3Dmodel_array == k] == k)
                        + np.sum(reallabel_array[reallabel_array == k] == k))
                dicescores_3D = dicescores_3D + f'{dice_3D}\t'
                print(
                    np.sum(label_3Dmodel_array[reallabel_array == k] == k) *
                    2.0)
                print(
                    np.sum(label_3Dmodel_array[label_3Dmodel_array == k] == k))
                print(np.sum(reallabel_array[reallabel_array == k] == k))
                print(dice_3D)
                if not math.isnan(dice_3D):
                    meandice_3D_perpatient = meandice_3D_perpatient + dice_3D
                    num_labels_3D = num_labels_3D + 1

        meandice_2D_perpatient = meandice_2D_perpatient / num_labels_2D
        meandice_2D = meandice_2D + meandice_2D_perpatient
        dicescores_2D = dicescores_2D + f"{meandice_2D_perpatient}\n"
        with open(dice_file_2D, "a") as f:
            f.write(dicescores_2D)

        if with3D:
            meandice_3D_perpatient = meandice_3D_perpatient / num_labels_3D
            meandice_3D = meandice_3D + meandice_3D_perpatient
            dicescores_3D = dicescores_3D + f"{meandice_3D_perpatient}\n"
            with open(dice_file_3D, "a") as f:
                f.write(dicescores_3D)

    # calculate mean of means
    with open(dice_file_2D, "a") as f:
        f.write(f"Total mean: {meandice_2D/num_patients}\n")

    if with3D:
        with open(dice_file_3D, "a") as f:
            f.write(f"Total mean: {meandice_3D/num_patients}\n")
Code example #31
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/

import itk
import sys

if len(sys.argv) < 4:
    print('Usage: ' + sys.argv[0] +
          ' <FixedImage> <MovingImage> <ResultImage>')
    sys.exit(1)

fixed_filename = sys.argv[1]
moving_filename = sys.argv[2]
result_filename = sys.argv[3]

fixed = itk.imread(fixed_filename, itk.F)
moving = itk.imread(moving_filename, itk.F)

result = itk.elastix_registration_method(fixed, moving)

itk.imwrite(result, result_filename)
Code example #32
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/

#
#  Example on the use of the HoughTransform2DLinesImageFilter
#

import itk
from sys import argv
itk.auto_progress(2)

edges = itk.imread(argv[1], itk.F)
houghF = itk.HoughTransform2DLinesImageFilter[itk.F, itk.F].New()
houghF.SetInput(edges)
houghF.SetAngleResolution(100)
houghF.SetNumberOfLines(2)
houghF.Update()
detected_lines = houghF.GetLines()

# Check that we detected 2 lines as we requested.
assert len(detected_lines) == 2

# Check that we can access the line by index.
line1 = detected_lines[0]

# Check that we can access the points of the line
assert len(line1.GetPoints()) == 2
Code example #33
#
#==========================================================================*/

#
#  Test the performance of the BModeImageFilter using Python
#

import itk
from sys import argv

input_filename = argv[1]
output_filename = argv[2]
PixelType = itk.F
dim = 3
ImageType = itk.Image[PixelType, dim]
inputImg = itk.imread(input_filename, PixelType)
inputImg.DisconnectPipeline()

BModeFilterType = itk.BModeImageFilter[ImageType, ImageType]
bMode = BModeFilterType.New()
bMode.SetInput(inputImg)

WindowingType = itk.IntensityWindowingImageFilter[ImageType, ImageType]
window = WindowingType.New()
window.SetInput(bMode.GetOutput())

clock = itk.TimeProbe()

runs = 1000
for i in range(runs):
    bMode.Modified()
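The snippet is truncated inside the timing loop. A plausible completion, assuming the standard itk.TimeProbe Start/Stop/GetMean interface (this is a sketch, not the original test file):

for i in range(runs):
    bMode.Modified()                      # force the filter to re-run
    clock.Start()
    bMode.UpdateLargestPossibleRegion()
    clock.Stop()

print("Mean time per run: " + str(clock.GetMean()) + " s")
itk.imwrite(bMode.GetOutput(), output_filename)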
Code example #34
    print(error)

# Mask Creation and Location
directory3 = (studyname + '-MinMax/')
try:
    os.mkdir(directory3)
except OSError as error:
    print(error)

pic_folder = os.listdir(directory)
pic_folder = [f for f in pic_folder if ".nii" in f]
pic_folder.sort()
print(pic_folder)
num_images = len(pic_folder)

im0Tmp = itk.imread(directory + pic_folder[int(num_images / 2)], itk.F)

resample = ttk.ResampleImage.New(Input=im0Tmp, MakeIsotropic=True)
resample.Update()
im0 = resample.GetOutput()
immath = ttk.ImageMath.New(Input=im0)
immath.Blur(1)
im0Blur = immath.GetOutput()

immath.Threshold(150, 800, 1, 0)
immath.Dilate(10, 1, 0)
mask0 = immath.GetOutputUChar()
mask0Tmp = itk.GetArrayViewFromImage(mask0)
mask0Tmp[0:4, :, :] = 0
sizeZ = mask0Tmp.shape[0]
mask0Tmp[
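(The snippet is cut off mid-statement here; presumably the last slices of the mask are zeroed symmetrically to the first ones, for example mask0Tmp[sizeZ - 4:sizeZ, :, :] = 0. This completion is an assumption, not the original code.)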
Code example #35
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import itk

parser = argparse.ArgumentParser(
    description="{{ cookiecutter.example_title }}.")
parser.add_argument("input_image")
parser.add_argument("output_image")
args = parser.parse_args()

input_image = itk.imread(args.input_image)

output_image = itk.itkHelpers.camel_to_snake_case({{ cookiecutter.class_name }})(input_image)

itk.imwrite(output_image, args.output_image)
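Once the cookiecutter template is rendered, camel_to_snake_case maps the generated class name onto ITK's snake_case functional interface. For a hypothetical class name of MedianImageFilter, the call above would resolve to roughly:

output_image = itk.median_image_filter(input_image)  # hypothetical rendered form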
Code example #36
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#==========================================================================*/

from __future__ import print_function

import itk
from sys import argv, stderr, exit
itk.auto_progress(2)

if len(argv) < 2:
    print("Missing Parameters \n Usage: IntensityWindowingImageFilter.py inputImageFile",
          file=stderr)
    exit(1)

image = itk.imread(argv[1], itk.F)
# Verifies that ITK supports getting tuples for filter parameters. Not testing the filter
# results.
intensity_filter = itk.IntensityWindowingImageFilter.New(
    image, WindowLevel=[255, 127])
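Beyond the tuple-parameter check above, the same filter is normally given an explicit output range before it is updated. A minimal sketch with assumed values:

intensity_filter.SetOutputMinimum(0)
intensity_filter.SetOutputMaximum(255)
intensity_filter.Update()
windowed = intensity_filter.GetOutput()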
Code example #37
File: extras.py Project: romangrothausmann/ITK
s = itk.region(reader.GetOutput())
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256


# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)


# test write
itk.imwrite(reader, sys.argv[2])
itk.imwrite(reader, sys.argv[2], True)

# test read
image = itk.imread(fileName)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
image = itk.imread(fileName, itk.F)
assert type(image) == itk.Image[itk.F, 2]

# test search
res = itk.search("Index")
assert res[0] == "Index"
assert res[1] == "index"
assert "ContinuousIndex" in res

res = itk.search("index", True)
assert "Index" not in res


# test down_cast
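The snippet ends at the down_cast check. A minimal sketch of itk.down_cast, assuming the reader defined earlier in this test:

down_casted = itk.down_cast(reader.GetOutput())
print(type(down_casted))  # the most derived wrapped type of the reader's output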
Code example #38
def convertPatientAPI(inputfolder, ipp):
    # Create the output directory named after the research IPP (patient identifier)
    patientParentDirectory = os.path.dirname(inputfolder)
    id = inputfolder[-7:]
    outputDirectory = os.path.join(patientParentDirectory, "output", "patient." + str(ipp))
    if os.path.isdir(outputDirectory):
      print(outputDirectory + " already exists")
      return 0
    os.makedirs(outputDirectory)
    os.makedirs(os.path.join(outputDirectory, "input"))

    # Find all irradiation studies in the patient folder
    studies = {}
    inputRTDose = []
    inputRTStruct = []
    for root, dirs, files in os.walk(inputfolder):
        for dir in dirs:
            if root.endswith("CT_SET"):
                studies[dir] = {}
                studies[dir]["inputCT"] = []
                studies[dir]["inputCBCT"] = []
                studies[dir]["inputRTStruct"] = ""
                studies[dir]["inputPlan"] = ""
                studies[dir]["inputINI"] = ""
                os.makedirs(os.path.join(outputDirectory, "input", dir))

    #Find all input images
    for root, dirs, files in os.walk(inputfolder):
        for file in files:
            if file.startswith("CT_IMAGE_") and file.endswith(".DCM"):
                dir = os.path.basename(root)
                studies[dir]["inputCT"] += [os.path.join(root, file)]
            if file.startswith("DCMTPS_Calculated") and file.endswith(".dcm"):
                inputRTDose += [os.path.join(root, file)]
            if root.endswith("CT_SET") and file.endswith(".DCM"):
                inputRTStruct += [os.path.join(root, file)]
            if root.endswith("DICOM_PLAN") and file.endswith(".DCM"):
                ds = pydicom.read_file(os.path.join(root, file))
                dir = ds[(0x0008, 0x0018)].value
                if dir in studies.keys():
                    studies[dir]["inputPlan"] = os.path.join(root, file)
                    struct = ds[(0x300c,0x0060)][0][(0x0008,0x1155)].value
                    studies[dir]["inputRTStruct"] = struct
            if root.endswith("Reconstruction") and file.endswith(".SCAN"):
                year = file.split(".")[-2][:4]
                month = file.split(".")[-3]
                day = file.split(".")[-4]
                hour = file.split(".")[-2][4:6]
                minute = file.split(".")[-2][6:8]
                second = file.split(".")[-2][8:10]
                if os.path.isfile(os.path.join(root, file[:-4] + "INI")):
                    bashCommand = "cat " + os.path.join(root, file[:-4] + "INI")
                    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
                    output, error = process.communicate()
                    output = output.split(b'\r\n')
                    for element in output:
                      if element.startswith(b"ReferenceUID="):
                        dir = element.split(b'=')[1].decode("utf-8")
                    studies[dir]["inputCBCT"] += [(os.path.join(root, file), str(year)+str(month)+str(day)+str(hour)+str(minute)+str(second))]
            if "CT_SET" in root and file.startswith("1.") and file.endswith(".INI"):
                dir = os.path.basename(root)
                studies[dir]["inputINI"] = os.path.join(root, file)

    for struct in inputRTStruct:
        ds = pydicom.read_file(struct)
        structId = ds[(0x0008, 0x0018)].value
        for study in studies.keys():
            if structId == studies[study]["inputRTStruct"]:
                studies[study]["inputRTStruct"] = struct

    for dir in studies.keys():
        studies[dir]["inputCT"].sort(key=natural_keys)
        studies[dir]["inputCBCT"].sort(key=natural_keys_second)

        # Remove spurious CBCTs: if two CBCTs were acquired on the same day, keep only the second one (the first one has a wrong alignment)
        removeIndexCBCT = []
        for indexCBCT in range(1, len(studies[dir]["inputCBCT"])):
          if int(studies[dir]["inputCBCT"][indexCBCT][1]) - int(studies[dir]["inputCBCT"][indexCBCT-1][1]) < 10000:
            removeIndexCBCT += [indexCBCT-1]
        studies[dir]["inputCBCT"] = [i for j, i in enumerate(studies[dir]["inputCBCT"]) if j not in removeIndexCBCT]

        #Convert CT
        inputDicomCT = gt.read_dicom(studies[dir]["inputCT"])
        #outputCT = gt.image_convert(inputDicomCT)
        itk.imwrite(inputDicomCT, os.path.join(outputDirectory, "input", dir, "CT.nii"))
        transfoCT = np.array([[1.,0,0,0], [0,1.,0,0], [0,0,1.,0], [0,0,0,1.]])

        # Convert the CBCTs through clitk
        cbctIndex = 0
        for cbct in studies[dir]["inputCBCT"]:
          if os.path.isfile(cbct[0][:-4] + "INI.XVI"):
            #Read CBCT transformation matrix
            with open(cbct[0][:-4] + "INI.XVI") as f:
              for line in f.readlines():
                  if "OnlineToRefTransformCorrection=" in line:
                      outputCBCT = os.path.join(outputDirectory, "input", dir, "cbct." + str(cbctIndex) + ".nii")
                      tempLine = line.split('=')[1]
                      if tempLine[0] == " ":
                          tempLine = tempLine[1:]
                      transfoMatrix = np.array([float(t) for t in tempLine.split()]).reshape(4,4).T
                      transfoMatrix[:-1, -1] *= 10 #convert from cm to mm
                      transfoMatCT2CBCT = transfoCT.dot(transfoMatrix)
                      transfoMatCBCT2CT = invertTransfoMat(transfoMatCT2CBCT)
                      np.savetxt(outputCBCT[:-3] + "mat", transfoMatCBCT2CT)
                      bashCommand = "clitkImageConvert -i " + cbct[0] + " -o " + outputCBCT
                      process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
                      output, error = process.communicate()
                      bashCommand = "clitkAffineTransform -i " + outputCBCT + " -o " + outputCBCT + " -m " + outputCBCT[:-3] + "mat" + " --pad=-1024 --transform_grid"
                      process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
                      output, error = process.communicate()
                      os.remove(outputCBCT[:-3] + "mat")
                      break
            cbctIndex += 1

        # Convert the RTStruct through clitk, then to .nii
        inputTmpRTStruct = []
        if not studies[dir]["inputRTStruct"] == "":
          bashCommand = "clitkDicomRTStruct2Image -c --mha -i " + studies[dir]["inputRTStruct"] + " -o " + os.path.join(outputDirectory, "input", dir, "tmp.rtstruct.") + " -j " + os.path.join(outputDirectory, "input", dir, "CT.nii")
          process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
          output, error = process.communicate()
          for root, dirs, files in os.walk(os.path.join(outputDirectory, "input", dir)):
            for file in files:
              if file.startswith("tmp.rtstruct.") and file.endswith(".mha"):
                if not file == file.encode('utf-8', 'replace').decode():
                    continue
                inputTmpRTStruct += [file]

        structName = []
        for struct in inputTmpRTStruct:
          inputStruct = itk.imread(os.path.join(outputDirectory, "input", dir, struct))
          itk.imwrite(inputStruct, os.path.join(outputDirectory, "input", dir, struct[13:-3] + "nii"))
          structName += [struct[13:-4]]
          os.remove(os.path.join(outputDirectory, "input", dir, struct))

        #Anonymise rtstruct
        if not studies[dir]["inputRTStruct"] == "":
          anonymizeDicomFile(studies[dir]["inputRTStruct"], os.path.join(outputDirectory, "input", dir, "rtstruct.dcm"), "anonymous", ipp)

        #Anonymise dicom plan
        if not studies[dir]["inputPlan"] == "":
          anonymizeDicomFile(studies[dir]["inputPlan"], os.path.join(outputDirectory, "input", dir, "rtplan.dcm"), "anonymous", ipp)

        #Anonymise rt dose
        indexDose = 0
        for dose in inputRTDose:
          anonymizeDicomFile(dose, os.path.join(outputDirectory, "input", dir, "rtdose" + str(indexDose) + ".dcm"), "anonymous", ipp)
          indexDose += 1

        # Extract the treatment ID from the .INI file
        bashCommand = "cat " + studies[dir]["inputINI"]
        process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        output = output.split(b'\r\n')
        for element in output:
          if element.startswith(b"TreatmentID="):
            treatmentName = element.split(b'=')[1].decode("utf-8")

        # Write the id and ipp into the json file (assumes delpel.json holds the accumulated data)
        try:
            with open('delpel.json') as jsonFile:
                jsonData = json.load(jsonFile)
        except (IOError, ValueError):
            jsonData = {}
            jsonData['patients'] = {}
            jsonData['header'] = {
                "key": "ipp research",
                "treatment id": "TreatmentID value found in .INI file",
                "clarity": "Does the patient treated with Clarity (artifacts)",
                "machine": "Name of the treatment machine",
                "city": "Treatment city",
                "complete bladder": "True if the bladder mask is not truncated",
                "ROI name in rtStruct": "found corresponding name"
            }
        if ipp not in jsonData['patients']:
            jsonData['patients'][ipp] = {}
        if dir not in jsonData['patients'][ipp]:
          jsonData['patients'][ipp][dir] = {
                              'treatment id': treatmentName,
                              'ReferenceUID': dir,
                              'Structures': {},
                              'clarity': False,
                              'city': "Lyon",
                              'complete bladder': True,
                              'machine': os.path.basename(os.path.dirname(inputfolder))
                              }
          for struct in structName:
              jsonData['patients'][ipp][dir]['Structures'][struct] = identifyStruct("_".join(struct.split("_")[1:]).upper())

          with open("delpel.json", "w") as jsonFile:
              json.dump(jsonData, jsonFile, indent=4, sort_keys=True)
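This snippet assumes module-level imports (os, subprocess, json, numpy as np, pydicom, itk and GateTools imported as gt) and helpers defined elsewhere in the file (natural_keys, natural_keys_second, invertTransfoMat, anonymizeDicomFile, identifyStruct). Purely as an illustration, the sorting and matrix helpers could look like the hypothetical sketches below:

import re
import numpy as np

def natural_keys(text):
    # Hypothetical: order strings with embedded integers numerically, e.g. "CT_IMAGE_2" before "CT_IMAGE_10"
    return [int(tok) if tok.isdigit() else tok for tok in re.split(r"(\d+)", str(text))]

def natural_keys_second(item):
    # Hypothetical: apply the same natural ordering to the timestamp stored as the tuple's second element
    return natural_keys(item[1])

def invertTransfoMat(matrix):
    # Hypothetical: invert a 4x4 homogeneous transform matrix
    return np.linalg.inv(matrix)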