Code Example #1
def interpolate_secondaries(input_folder, output_folder, factor,
                            target_numprojs):
    sec_numprojs = len(glob.glob(f'./{input_folder}/secondary????.mha'))

    img0 = itk.imread(f'./{input_folder}/secondary0000.mha')
    img_origin = itk.origin(img0)
    img_spacing = itk.spacing(img0)

    image_ind = 0
    for projnum in range(sec_numprojs - 1):
        # read two consecutive secondary projections
        image1 = f'./{input_folder}/secondary{projnum:04d}.mha'
        image2 = f'./{input_folder}/secondary{projnum+1:04d}.mha'

        img1_array = itk.GetArrayFromImage(itk.imread(image1))
        img2_array = itk.GetArrayFromImage(itk.imread(image2))

        # save the first image
        itk.imwrite(itk.imread(image1),
                    f'./{output_folder}/secondary{image_ind:04d}.mha')

        # interpolate xfactor images between those 2 images
        image_interpolate_recurrence(img1_array, img2_array, image_ind,
                                     image_ind + factor, output_folder,
                                     img_origin, img_spacing)
        image_ind = image_ind + factor

    # read the last image and save it until the target_numprojs is reached
    lastimage = itk.imread(
        f'./{input_folder}/secondary{sec_numprojs-1:04d}.mha')
    while image_ind < target_numprojs:
        itk.imwrite(lastimage,
                    f'./{output_folder}/secondary{image_ind:04d}.mha')
        image_ind = image_ind + 1
Code Example #2
def balanced_dataset(subjects):

    positive_list = []
    negative_list = []

    for count, (x, y) in enumerate(subjects):
        x = itk.GetArrayFromImage(x)
        y = itk.GetArrayFromImage(y)

        x = np.array(x)
        y = np.array(y)

        if y.max() == 1.0:

            # input = torch.from_numpy(x).type(torch.FloatTensor)
            positive_list.append((x, 1.0))

        else:
            negative_list.append((x, 0.0))

    positive_count = len(positive_list)
    negative_list_1 = random.sample(negative_list, positive_count)

    balanced_list = positive_list + negative_list_1

    random.shuffle(balanced_list)
    return balanced_list
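
A hypothetical usage sketch for balanced_dataset (the file patterns are placeholders; image and label files are assumed to pair up after sorting, with at least as many negatives as positives so that random.sample succeeds):

import glob
import itk

subjects = [(itk.imread(img_path), itk.imread(lbl_path))
            for img_path, lbl_path in zip(sorted(glob.glob('./images/*.mha')),
                                          sorted(glob.glob('./labels/*.mha')))]
balanced = balanced_dataset(subjects)
print(f'{len(balanced)} samples after balancing')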
Code Example #3
    def makeHardClassificationPatches(self, DirPath, inputPath, maskPath,
                                      outputPath):
        imgInputPath = DirPath + "/" + inputPath  #"/DATA/vascu_deepV2/maskWholeImage.nii"
        imgMaskPath = DirPath + "/" + maskPath
        imgROIPath = DirPath + "/" + outputPath

        imgInput = itk.imread(imgInputPath)
        imgMask = itk.imread(imgMaskPath)

        PixelType = itk.UC
        Dimension = 3
        ImageType = itk.Image[PixelType, Dimension]

        radiusValue = 4
        StructuringElementType = itk.FlatStructuringElement[Dimension]
        structuringElement = StructuringElementType.Ball(radiusValue)

        DilateFilterType = itk.BinaryDilateImageFilter[ImageType, ImageType,
                                                       StructuringElementType]

        dilateFilter = DilateFilterType.New()

        dilateFilter.SetInput(imgMask)
        dilateFilter.SetKernel(structuringElement)
        dilateFilter.SetForegroundValue(255)
        dilateFilter.Update()  # run the dilation before grabbing its output
        imgMask = dilateFilter.GetOutput()

        imgInput = itk.GetArrayFromImage(imgInput)
        imgMask = itk.GetArrayFromImage(imgMask)

        imgROI = np.copy(imgInput)
        imgROI[imgMask > 0] = 0

        itk.imwrite(itk.GetImageFromArray(imgROI.astype(np.uint8)), imgROIPath)
Code Example #4
def get_intersection_volume(roilist,xvoxel=1.,yvoxel=1.):
    # There is probably a clever way to compute this by constructing
    # an "intersection contour" for each layer: for each contour, keep only
    # points that are inside all other contours in the list. But it is tough to then
    # put those points in the right order.
    # Instead we'll just make a grid of points and get the volume of the combined mask.
    # With xvoxel and yvoxel the caller can tweak the voxel size of the mask in x and y.
    # In z the voxel size is given by the incoming ROIs.
    dz = min([r.dz for r in roilist])
    assert(dz>0)
    assert(xvoxel>0)
    assert(yvoxel>0)
    bb = bounding_box(bb=roilist[0].bb)
    for roi in roilist[1:]:
        bb.intersect(roi.bb)
    if bb.empty:
        # too bad
        return 0.
    spacing = np.array([xvoxel,yvoxel,dz],dtype=float)
    bb.add_margins(2*spacing)
    dimsize = np.array(np.round((bb.maxcorner-bb.mincorner)/spacing),dtype=int)
    #img = sitk.Image(dimsize,sitk.sitkUInt8)
    # numpy index order is (z, y, x), so reverse the (x, y, z) dimsize
    img = itk.GetImageFromArray(np.zeros(dimsize[::-1],dtype=np.uint8))
    img.SetOrigin(bb.mincorner)
    img.SetSpacing(spacing)
    itkmask = itk.GetArrayFromImage(roilist[0].get_mask(img))
    for roi in roilist[1:]:
        itkmask *= itk.GetArrayFromImage(roi.get_mask(img))
    return np.sum(itkmask)*np.prod(spacing)
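
A toy, pure-NumPy illustration of the voxel-mask idea described in the comments above, using two axis-aligned boxes instead of ROI contours (all numbers are made up for the example):

import numpy as np

spacing = np.array([1.0, 1.0, 1.0])              # x, y, z voxel size
mask_a = np.zeros((20, 20, 20), dtype=np.uint8)  # grid in (z, y, x) order
mask_b = np.zeros((20, 20, 20), dtype=np.uint8)
mask_a[2:12, 2:12, 2:12] = 1                     # 10x10x10 box
mask_b[7:17, 7:17, 7:17] = 1                     # overlapping 10x10x10 box
volume = np.sum(mask_a * mask_b) * np.prod(spacing)
print(volume)                                    # 125.0, the 5x5x5 overlap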
Code Example #5
def merge_pred(results_folder, ROI_list):

    # get the list of Task folders
    tasks_folders = glob.glob(f'{results_folder}/segm_results/Task*')

    if len(tasks_folders) != 3:
        print('Not all directions available, no merging was performed')
        sys.exit()

    # get the list of labels
    all_patients = glob.glob(
        f'{results_folder}/segm_results/{os.path.basename(tasks_folders[0])}/predicted_labels/*'
    )

    merged_labels = f'{results_folder}/segm_results/merged_labels'
    merged_binaries = f'{results_folder}/segm_results/merged_binaries'
    maybe_mkdir_p(merged_labels)
    maybe_mkdir_p(merged_binaries)

    # loop through the patients
    for patient in all_patients:
        label_name = os.path.basename(patient)
        patient_name = label_name.replace('label_', '')
        merged_binaries_patient = f'{merged_binaries}/{os.path.splitext(patient_name)[0]}'
        maybe_mkdir_p(merged_binaries_patient)

        # load labels in each direction
        label_d1_img = itk.imread(
            f'{tasks_folders[0]}/predicted_labels/{label_name}')
        label_origin = label_d1_img.GetOrigin()
        label_spacing = label_d1_img.GetSpacing()
        label_direction = label_d1_img.GetDirection()

        label_d1 = itk.GetArrayFromImage(label_d1_img)
        label_d2 = itk.GetArrayFromImage(
            itk.imread(f'{tasks_folders[1]}/predicted_labels/{label_name}'))
        label_d3 = itk.GetArrayFromImage(
            itk.imread(f'{tasks_folders[2]}/predicted_labels/{label_name}'))

        # get the intersection of every 2 labels
        intersec1 = np.where(label_d1 == label_d2, label_d1, 0)
        intersec2 = np.where(label_d1 == label_d3, label_d1, 0)
        intersec3 = np.where(label_d2 == label_d3, label_d2, 0)

        # get where 2 labels agree
        merged_label = np.zeros(label_d1.shape)
        merged_label = np.where(intersec1 != 0, intersec1, merged_label)
        merged_label = np.where(intersec2 != 0, intersec2, merged_label)
        merged_label = np.where(intersec3 != 0, intersec3, merged_label)

        merged_label_img = itk.GetImageFromArray(merged_label)
        merged_label_img.SetOrigin(label_origin)
        merged_label_img.SetSpacing(label_spacing)
        merged_label_img.SetDirection(label_direction)

        itk.imwrite(merged_label_img, f'{merged_labels}/{label_name}')

        segmap_to_binaries(merged_label, merged_binaries_patient, label_origin,
                           label_spacing, label_direction, ROI_list)
Code Example #6
def isIslandWithinDistance(image,
                           distanceImage,
                           label,
                           maxDistance
                          ):
  values = itk.GetArrayFromImage(image)
  dist_values = itk.GetArrayFromImage(distanceImage)
  return np.any(dist_values[values == label] < maxDistance)
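
A minimal usage sketch for isIslandWithinDistance; the file names, label value and distance threshold are hypothetical, and both images are assumed to share the same voxel grid:

import itk

islands = itk.imread('islands.mha')        # labelled connected components
distance = itk.imread('distance_map.mha')  # e.g. a distance map to some structure
if isIslandWithinDistance(islands, distance, 3, 5.0):
    print('island 3 has at least one voxel within 5.0 of the structure')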
Code Example #7
def MaskOriginal(OriginalImg, FinalSegm):
    OriginalImg_np = itk.GetArrayFromImage(OriginalImg)
    FinalSegm_np = itk.GetArrayFromImage(FinalSegm)
    OriginalImg_np[FinalSegm_np < 1] = 0.
    OriginalImg_masked = itk.GetImageFromArray(OriginalImg_np)
    OriginalImg_masked.SetOrigin(OriginalImg.GetOrigin())
    OriginalImg_masked.SetSpacing(OriginalImg.GetSpacing())
    OriginalImg_masked.SetDirection(OriginalImg.GetDirection())
    return OriginalImg_masked
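
A minimal usage sketch for MaskOriginal with placeholder file names; the segmentation is assumed to lie on the same grid as the original image:

import itk

original = itk.imread('ct_volume.mha')
segmentation = itk.imread('final_segmentation.mha')
masked = MaskOriginal(original, segmentation)
itk.imwrite(masked, 'ct_volume_masked.mha')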
Code Example #8
def LDMap(Input1, Input2, ImageType, spacing=True):
    distance_1 = itk.GetArrayFromImage(
        SignedMaurerDistanceMap(Input1, ImageType, spacing))
    distance_2 = itk.GetArrayFromImage(
        SignedMaurerDistanceMap(Input2, ImageType, spacing))
    A1 = (distance_1 > 1e-5).astype(np.float32)
    B1 = (distance_2 > 1e-5).astype(np.float32)
    LDMap_out = np.abs(A1 - B1) * np.maximum(distance_1, distance_2)
    return LDMap_out
Code Example #9
def RefineSegmentation(islandImage,
                       subIslandLabels,
                       ROI):
  # assignIdsToPixels
  IslandsValues = itk.GetArrayFromImage(islandImage)
  _pixelIdImage = np.zeros(IslandsValues.shape)
  roi = itk.GetArrayFromImage(ROI)
  _pixelIdImage[roi==0] = -1
  _totalPixelsInROI = np.sum(roi!=0)
  _pixelIdImage[roi!=0] = range(_totalPixelsInROI)
  # SheetnessBasedDataCost_compute for initializeDataCosts prep
  # 0
  dataCostSink = np.zeros(IslandsValues.shape)
  cond = (IslandsValues == subIslandLabels[1]) & (roi != 0)
  dataCostSink[cond] = 1000
  # 1
  dataCostSource = np.zeros(IslandsValues.shape)
  cond = (IslandsValues == subIslandLabels[0]) & (roi != 0)
  dataCostSource[cond] = 1000
  dataCostPixels = _pixelIdImage[roi!=0].flatten()
  flat_dataCostSink = dataCostSink[roi!=0].flatten()
  flat_dataCostSource = dataCostSource[roi!=0].flatten()
  # initializeNeighbours prep
  Xcenters, XFromCenter, XToCenter = SmoothnessCostFunction(pixelLeft  = _pixelIdImage[:, :, :-1],
                                                            pixelRight = _pixelIdImage[:, :, 1:])
  Ycenters, YFromCenter, YToCenter = SmoothnessCostFunction(pixelLeft  = _pixelIdImage[:, :-1, :],
                                                            pixelRight = _pixelIdImage[:, 1:, :])
  Zcenters, ZFromCenter, ZToCenter = SmoothnessCostFunction(pixelLeft  = _pixelIdImage[:-1,:,:],
                                                            pixelRight = _pixelIdImage[1:,:,:])
  CentersPixels = np.concatenate([Zcenters[0], Ycenters[0], Xcenters[0] ])
  NeighborsPixels = np.concatenate([Zcenters[1], Ycenters[1], Xcenters[1] ])
  _totalNeighbors = len(NeighborsPixels)
  flat_smoothCostFromCenter = np.concatenate([ZFromCenter, YFromCenter, XFromCenter ])
  flat_smoothCostToCenter = np.concatenate([ZToCenter, YToCenter, XToCenter ])
  # Call Maxflow
  uint_gcresult = GraphCutSupport.RunGraphCut(_totalPixelsInROI,
                                              np.ascontiguousarray(dataCostPixels, dtype=np.uint32),
                                              np.ascontiguousarray(flat_dataCostSource, dtype=np.uint32),
                                              np.ascontiguousarray(flat_dataCostSink, dtype=np.uint32),
                                              _totalNeighbors,
                                              np.ascontiguousarray(CentersPixels, dtype=np.uint32),
                                              np.ascontiguousarray(NeighborsPixels, dtype=np.uint32),
                                              np.ascontiguousarray(flat_smoothCostFromCenter, dtype=np.uint32),
                                              np.ascontiguousarray(flat_smoothCostToCenter, dtype=np.uint32)
                                              )
  _labelIdImage = _pixelIdImage
  _labelIdImage[roi!=0] = uint_gcresult
  _labelIdImage[roi==0] = 0
  _labelIdImage = np.asarray(_labelIdImage, dtype=np.uint8)
  gcresult = itk.GetImageFromArray(_labelIdImage)
  gcresult.SetOrigin(islandImage.GetOrigin())
  gcresult.SetSpacing(islandImage.GetSpacing())
  gcresult.SetDirection(islandImage.GetDirection())
  return gcresult
Code Example #10
File: utils.py Project: jandylin/LiquidSegmentation
def load_image(root_dir,
               image_file,
               label_file,
               npoints=50000,
               threshold_min=1700,
               threshold_max=2700):
    # using "map" to repeat for each label
    # join root directory with filename(s)
    data_file = os.path.join(root_dir, image_file)
    label_file = os.path.join(root_dir, label_file)

    # load data using itk
    image = itk.imread(data_file)
    label = itk.imread(label_file)

    # first annotated voxel "DomainFirst" (offset)
    offsets = label.GetMetaDataDictionary()["DomainFirst"]
    # convert strings to integer arrays
    # flip array because annotation coordinates are in reversed order
    offsets = np.flip(np.array(offsets.split(" "), dtype=int), 0)

    # transform itk data to numpy arrays
    volume = itk.GetArrayFromImage(image)
    # normalize to [0, 1]
    volume = (volume - threshold_min).astype(float) / float(
        threshold_max - threshold_min)

    label = itk.GetArrayFromImage(label)

    # apply threshold + transform voxelgrid to pointcloud
    points = np.argwhere((volume >= 0.0) & (volume <= 1.0))

    # sample npoints out of all points
    idx = np.random.choice(points.shape[0], npoints, replace=False)
    sampled = points[idx, :].astype(int)

    target = np.zeros(npoints, dtype=int)

    xmin = offsets[0]
    ymin = offsets[1]
    zmin = offsets[2]
    xmax = offsets[0] + label.shape[0]
    ymax = offsets[1] + label.shape[1]
    zmax = offsets[2] + label.shape[2]

    # search corresponding points
    for i in range(npoints):
        s = sampled[i, :]
        # only consider points inside the bounding box
        if xmin <= s[0] < xmax and ymin <= s[1] < ymax and zmin <= s[2] < zmax:
            x, y, z = s - np.array([xmin, ymin, zmin])
            target[i] = label[x, y, z]

    return sampled, target, volume, label
Code Example #11
def split_predictions(modelresults_folder, task_id, task_name_3D, ROI_list,
                      resultsdir_3Dimages, resultsdir_3Dlabels):

    # copy the 3D test images and their labels
    taskfolder_3D = f'{nnUNet_raw_data}/Task{task_id:03d}_{task_name_3D}'
    imagesTs_foldername_3D = f'{taskfolder_3D}/imagesTs/'
    labelsTs_foldername_3D = f'{taskfolder_3D}/labelsTs/'

    if not os.path.exists(resultsdir_3Dimages):
        shutil.copytree(imagesTs_foldername_3D, resultsdir_3Dimages)

    if not os.path.exists(resultsdir_3Dlabels):
        shutil.copytree(labelsTs_foldername_3D, resultsdir_3Dlabels)

    # split the predictions and the real labels into binaries
    binaries_folder = f'{modelresults_folder}/binaries'
    predictions_folder = f'{modelresults_folder}/OUTPUT_DIRECTORY_3D'
    maybe_mkdir_p(binaries_folder)

    reallabels_binaries_folder = f'{modelresults_folder}/real_labels_binaries'
    reallabels_folder = f'{modelresults_folder}/real_labels'
    maybe_mkdir_p(reallabels_binaries_folder)

    # get the list of predicted labels
    all_patients = sorted(glob.glob(f'{predictions_folder}/*.nii.gz'))

    # loop through the patients
    for patient in all_patients:
        label_name = os.path.basename(patient)
        binariesfolder_patient = f'{binaries_folder}/{os.path.splitext(label_name)[0]}'
        maybe_mkdir_p(binariesfolder_patient)

        reallabels_binariesfolder_patient = f'{reallabels_binaries_folder}/{os.path.splitext(label_name)[0]}'
        maybe_mkdir_p(reallabels_binariesfolder_patient)

        label_d1_img = itk.imread(patient)
        label_origin = label_d1_img.GetOrigin()
        label_spacing = label_d1_img.GetSpacing()
        label_direction = label_d1_img.GetDirection()
        #print(patient)
        segmap = itk.GetArrayFromImage(label_d1_img)
        merge_predictions.segmap_to_binaries(segmap, binariesfolder_patient,
                                             label_origin, label_spacing,
                                             label_direction, ROI_list)

        label_d1_img = itk.imread(f'{reallabels_folder}/{label_name}')
        label_origin = label_d1_img.GetOrigin()
        label_spacing = label_d1_img.GetSpacing()
        label_direction = label_d1_img.GetDirection()
        #print(patient)
        segmap = itk.GetArrayFromImage(label_d1_img)
        merge_predictions.segmap_to_binaries(
            segmap, reallabels_binariesfolder_patient, label_origin,
            label_spacing, label_direction, ROI_list)
Code Example #12
    def update(self, itkFixedImage, maskImage, current_settings=None):
        """Sets new fixed image and new mask image"""

        # Load Mahfouz current settings
        FixCannyThr1 = current_settings['CannyThresh1']
        FixCannyThr2 = current_settings['CannyThresh2']
        FixCannyAperture = 2 * current_settings['Aperture'] + 1
        minImgContrast = current_settings['minImgContrast']
        maxImgContrast = current_settings['maxImgContrast']

        # Since the HipHop pipeline deals with itk images, itkFixedImage and maskImage
        # need to be converted to numpy arrays for the Mahfouz metric.

        # Get COPY OF numpy array of fixed image (not just GetArrayViewFromImage!)
        self.FixedImg = itk.GetArrayFromImage(itkFixedImage)

        # Get Mask
        self.MaskImg = itk.GetArrayFromImage(maskImage)

        # rescale 16bit into 8bit with contrast stretching
        img_for_canny = self._look_up_table(self.FixedImg, minImgContrast,
                                            maxImgContrast)

        # generate canny edge fixed image
        FixedEdgeImg = cv2.Canny(img_for_canny,
                                 FixCannyThr1,
                                 FixCannyThr2,
                                 L2gradient=False,
                                 apertureSize=FixCannyAperture)

        # Apply mask to edge image (eliminates edges outside of ROI)
        if self.useMask:
            FixedEdgeImg[self.MaskImg == 0.] = 0.

        # blur the edge image (rescaling to uint16 is necessary)
        self.FixedEdgeImg = cv2.GaussianBlur(
            FixedEdgeImg * ((np.power(2, 16) - 1) / (np.power(2, 8) - 1)),
            self.EdgeGaussSize, self.EdgeGaussSigma)

        # Apply mask to Fixed image
        if self.useMask:
            self.FixedImg[self.MaskImg == 0.] = 2.**16 - 1.

        if self.saveFixedImages:

            # Save fixed edge images
            cv2.imwrite('FixedEdgImg' + str(self.temp_counter_fix) + '.tif',
                        self.FixedEdgeImg)
            cv2.imwrite('FixedImg' + str(self.temp_counter_fix) + '.tif',
                        self._look_up_table(self.FixedImg, 0, 2**16 - 1))
            self.temp_counter_fix = self.temp_counter_fix + 1
Code Example #13
 def testNormalCrop(self):
     ifrom = np.array([5, 6, 7])
     ito = np.array([30, 40, 50])
     cropped_image = _CropImageManuallyWithNumpy(self.orig_image, ifrom,
                                                 ito)
     cropped_image2 = _CropAndPadImageManuallyWithNumpy(
         self.orig_image, ifrom, ito, -1024)
     itk_cropped_image = _CropImageWithITK(self.orig_image, ifrom, ito)
     # spacing
     self.assertTrue(
         np.allclose(np.array(cropped_image.GetSpacing()),
                     self.orig_spacing))
     self.assertTrue(
         np.allclose(np.array(cropped_image2.GetSpacing()),
                     self.orig_spacing))
     self.assertTrue(
         np.allclose(np.array(itk_cropped_image.GetSpacing()),
                     self.orig_spacing))
     # origin
     self.assertTrue(
         np.allclose(np.array(cropped_image.GetOrigin()),
                     self.orig_origin + self.orig_spacing * ifrom))
     self.assertTrue(
         np.allclose(np.array(cropped_image2.GetOrigin()),
                     self.orig_origin + self.orig_spacing * ifrom))
     self.assertTrue(
         np.allclose(np.array(itk_cropped_image.GetOrigin()),
                     self.orig_origin + self.orig_spacing * ifrom))
     # size
     new_size = np.array(cropped_image.GetLargestPossibleRegion().GetSize())
     new_size2 = np.array(
         cropped_image2.GetLargestPossibleRegion().GetSize())
     itk_new_size = np.array(
         itk_cropped_image.GetLargestPossibleRegion().GetSize())
     exp_size = ito - ifrom
     self.assertTrue((new_size == exp_size).all())
     self.assertTrue((new_size2 == exp_size).all())
     self.assertTrue((itk_new_size == exp_size).all())
     # values
     new_array = itk.GetArrayFromImage(cropped_image)
     new_array2 = itk.GetArrayFromImage(cropped_image2)
     itk_array = itk.GetArrayFromImage(itk_cropped_image)
     self.assertTrue(
         (new_array == self.orig_array[ifrom[2]:ito[2], ifrom[1]:ito[1],
                                       ifrom[0]:ito[0]]).all())
     self.assertTrue(
         (new_array2 == self.orig_array[ifrom[2]:ito[2], ifrom[1]:ito[1],
                                        ifrom[0]:ito[0]]).all())
     self.assertTrue(
         (itk_array == self.orig_array[ifrom[2]:ito[2], ifrom[1]:ito[1],
                                       ifrom[0]:ito[0]]).all())
Code Example #14
File: GroomUtils.py Project: orthogrid/ShapeWorks
def FindReferenceImage(inDataList):
    """
    This finds the median file among all the input files
    """
    x = y = z = 0
    for i in range(len(inDataList)):
        dim = itk.GetArrayFromImage(itk.imread(inDataList[i])).shape
        if dim[0] > x:
            x = dim[0]
        if dim[1] > y:
            y = dim[1]
        if dim[2] > z:
            z = dim[2]

    COM = np.zeros((x, y, z))
    for i in range(len(inDataList)):
        tmp = itk.GetArrayFromImage(itk.imread(inDataList[i]))
        COM += np.pad(tmp, (((x - tmp.shape[0]) // 2,
                             (x - tmp.shape[0]) - (x - tmp.shape[0]) // 2),
                            ((y - tmp.shape[1]) // 2,
                             (y - tmp.shape[1]) - (y - tmp.shape[1]) // 2),
                            ((z - tmp.shape[2]) // 2,
                             (z - tmp.shape[2]) - (z - tmp.shape[2]) // 2)))
    COM /= len(inDataList)
    dist = np.inf
    idx = 0
    for i in range(len(inDataList)):
        tmp = itk.GetArrayFromImage(itk.imread(inDataList[i]))
        tmp_dist = np.linalg.norm(COM -
                                  np.pad(tmp, (((x - tmp.shape[0]) // 2,
                                                (x - tmp.shape[0]) -
                                                (x - tmp.shape[0]) // 2),
                                               ((y - tmp.shape[1]) // 2,
                                                (y - tmp.shape[1]) -
                                                (y - tmp.shape[1]) // 2),
                                               ((z - tmp.shape[2]) // 2,
                                                (z - tmp.shape[2]) -
                                                (z - tmp.shape[2]) // 2))))
        if tmp_dist < dist:
            idx = i
            dist = tmp_dist

    print(" ")
    print("############# Reference File #############")
    cprint(("The reference file for rigid alignment is found"), 'cyan')
    cprint(("Output Median Filename : ", inDataList[idx]), 'yellow')
    print("###########################################")
    print(" ")
    return inDataList[idx]
Code Example #15
def itk_to_vtk_image(key, itk_image):
    dims = list(itk_image.GetLargestPossibleRegion().GetSize())
    extent = []
    for v in dims:
        extent.append(0)
        extent.append(v - 1)

    values = itk.GetArrayFromImage(itk_image).flatten(order='C')

    return {
        'vtkClass': 'vtkImageData',
        'spacing': list(itk_image.GetSpacing()),
        'origin': list(itk_image.GetOrigin()),
        'extent': extent,
        'direction': list(
            itk.GetArrayFromVnlMatrix(
                itk_image.GetDirection().GetVnlMatrix().as_matrix()).flatten()),
        'pointData': {
            'values': values,
            'dataType': _itk_image_to_type(itk_image),
            'numberOfComponents': itk_image.GetNumberOfComponentsPerPixel(),
        },
    }
Code Example #16
def RelabelComponents(inputImage,
                      outputImageType = None):
  # relabel = itk.RelabelComponentImageFilter[input_type, output_type].New()
  # relabel.SetInput(inputImage)
  # relabel.Update()
  # return relabel.GetOutput()
  label_field = itk.GetArrayFromImage(inputImage)
  offset = 1
  max_label = int(label_field.max()) # Ensure max_label is an integer
  labels, labels_counts= np.unique(label_field,return_counts=True)
  labels=labels[np.argsort(labels_counts)[::-1]]
  labels0 = labels[labels != 0]
  new_max_label = offset - 1 + len(labels0)
  new_labels0 = np.arange(offset, new_max_label + 1)
  output_type = label_field.dtype
  required_type = np.min_scalar_type(new_max_label)
  if np.dtype(required_type).itemsize > np.dtype(label_field.dtype).itemsize:
      output_type = required_type
  forward_map = np.zeros(max_label + 1, dtype=output_type)
  forward_map[labels0] = new_labels0
  inverse_map = np.zeros(new_max_label + 1, dtype=output_type)
  inverse_map[offset:] = labels0
  relabeled = forward_map[label_field]
  result = itk.GetImageFromArray(relabeled)
  result.SetOrigin(inputImage.GetOrigin())
  result.SetSpacing(inputImage.GetSpacing())
  result.SetDirection(inputImage.GetDirection())
  if not outputImageType is None:
    s,d = itk.template(inputImage)[1]
    output_type = itk.Image[outputImageType,d]
    result = castImage(result, OutputType=output_type)
  return result
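
A small usage sketch for RelabelComponents on a synthetic label image (not taken from the project): because labels are reassigned in decreasing order of voxel count, the 8-voxel component becomes label 1 and the 2-voxel component becomes label 2:

import itk
import numpy as np

arr = np.zeros((1, 4, 4), dtype=np.uint8)
arr[0, :2, :] = 7   # 8 voxels -> relabelled to 1
arr[0, 3, :2] = 3   # 2 voxels -> relabelled to 2
relabeled = RelabelComponents(itk.GetImageFromArray(arr))
print(itk.GetArrayFromImage(relabeled))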
Code Example #17
def binaryThresholding(inputImage,
                       lowerThreshold,
                       upperThreshold,
                       outputImageType = None,
                       insideValue = 1,
                       outsideValue = 0):
  # Old version:
  # s,d = itk.template(inputImage)[1]
  # input_type = itk.Image[s,d]
  # output_type = input_type if outputImageType is None else itk.Image[outputImageType,d]
  # thresholder = itk.BinaryThresholdImageFilter[input_type, output_type].New()
  # thresholder.SetInput(inputImage)
  # thresholder.SetLowerThreshold( lowerThreshold )
  # thresholder.SetUpperThreshold( upperThreshold )
  # thresholder.SetInsideValue(insideValue)
  # thresholder.SetOutsideValue(outsideValue)
  # thresholder.Update()
  # return thresholder.GetOutput()
  values = itk.GetArrayFromImage(inputImage)
  cond = (values>=lowerThreshold) & (values<=upperThreshold)
  values[ cond ] = insideValue
  values[ np.logical_not(cond) ] = outsideValue
  result = itk.GetImageFromArray(values)
  result.SetOrigin(inputImage.GetOrigin())
  result.SetSpacing(inputImage.GetSpacing())
  result.SetDirection(inputImage.GetDirection())
  if not outputImageType is None:
    s,d = itk.template(inputImage)[1]
    output_type = itk.Image[outputImageType,d]
    result = castImage(result, OutputType=output_type)
  return result
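
A hypothetical usage sketch for binaryThresholding; the intensity range is only an example, and outputImageType is left at its default because castImage is a project-specific helper that is not shown here:

import itk

ct = itk.imread('ct.mha')
bone_mask = binaryThresholding(ct, 250, 3000)  # voxels in [250, 3000] -> 1, others -> 0
itk.imwrite(bone_mask, 'bone_mask.mha')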
Code Example #18
def multiscaleSheetness(multiScaleInput,
                        scales,
                        SmoothingImageType,
                        roi = None,
                        alpha = 0.5,
                        beta = 0.5,
                        gamma = 0.5):
  if not roi is None:
    roi = itk.GetArrayFromImage(roi)
  multiscaleSheetness = singlescaleSheetness(singleScaleInput = multiScaleInput,
                                             scale = scales[0],
                                             SmoothingImageType = SmoothingImageType,
                                             roi = roi,
                                             alpha = alpha,
                                             beta = beta,
                                             gamma = gamma)

  if len(scales) > 1:
    for scale in scales[1:]:
      singleScaleSheetness  = singlescaleSheetness(multiScaleInput,
                                                   scale = scale,
                                                   SmoothingImageType = SmoothingImageType,
                                                   roi = roi,
                                                   alpha = alpha,
                                                   beta = beta,
                                                   gamma = gamma)
      refinement = abs(singleScaleSheetness) > abs(multiscaleSheetness)
      multiscaleSheetness[refinement] = singleScaleSheetness[refinement]
  multiscaleSheetness = itk.GetImageFromArray(multiscaleSheetness.astype(np.float32))
  multiscaleSheetness.SetOrigin(multiScaleInput.GetOrigin())
  multiscaleSheetness.SetSpacing(multiScaleInput.GetSpacing())
  multiscaleSheetness.SetDirection(multiScaleInput.GetDirection())
  return multiscaleSheetness
Code Example #19
def computeSheetnessMeasure(SheetMeasInput,
                            roi = None,
                            alpha = 0.5,
                            beta = 0.5,
                            gamma = 0.5):
  if isinstance(SheetMeasInput, np.ndarray):
    sortedEigs = SheetMeasInput
    RsImg, EigsImg, NoNullEigs = computeRs(RsInputImg = SheetMeasInput, roi=roi)
  else:
    sortedEigs = itk.GetArrayFromImage(SheetMeasInput)
    # Sort them by abs (already done)
    # l1, l2, l3 = sortedEigs[:,:,:,0], sortedEigs[:,:,:,1], sortedEigs[:,:,:,2]
    # condA = np.abs(l1) > np.abs(l2)
    # l1[condA], l2[condA] = l2[condA], l1[condA]
    # condB = np.abs(l2) > np.abs(l3)
    # l2[condB], l3[condB] = l3[condB], l2[condB]
    # condC = np.abs(l1) > np.abs(l2)
    # l1[condC], l2[condC] = l2[condC], l1[condC]
    # sortedEigs[:,:,:,0], sortedEigs[:,:,:,1], sortedEigs[:,:,:,2] = l1, l2, l3
    RsImg, EigsImg, NoNullEigs = computeRs(RsInputImg = sortedEigs, roi=roi)
  # use zeros (not np.empty) so voxels with all-null eigenvalues stay 0
  SheetnessImage = np.zeros(EigsImg.shape[:-1], dtype=float)
  SheetnessImage[NoNullEigs] = - np.sign( sortedEigs[NoNullEigs,2] )
  tmp = 1. / (beta*beta)
  SheetnessImage[NoNullEigs] *= np.exp(-RsImg[NoNullEigs,0] * RsImg[NoNullEigs,0] * tmp)
  tmp = 1. / (alpha*alpha)
  SheetnessImage[NoNullEigs] *= np.exp(-RsImg[NoNullEigs,1] * RsImg[NoNullEigs,1] * tmp)
  tmp = 1. / (gamma*gamma)
  SheetnessImage[NoNullEigs] *= np.exp(-RsImg[NoNullEigs,2] * RsImg[NoNullEigs,2] * tmp)
  # SheetnessImage *= EigsImg[:,:,:,2] ScaleObjectnessMeasureOff
  SheetnessImage[NoNullEigs] *= ( 1 - np.exp(-RsImg[NoNullEigs,3] * RsImg[NoNullEigs,3] * 4) )
  # SheetnessImage = itk.GetImageFromArray(SheetnessImage)
  return SheetnessImage
Code Example #20
    def noisyImage(self, inputPath, outputName, noiseType):

        for id, i in enumerate(self.noiseLevels):
            img = itk.imread(inputPath)
            dat = itk.GetArrayFromImage(img)
            dat = dat.astype(np.float32)
            # simulated CT noise poisson + gaussian noise
            if (noiseType == "poisson"):
                datNoisy = 0.5 * np.random.poisson(
                    dat, None) + 0.5 * np.random.normal(dat, i, None)
            elif (noiseType == "rician"):
                datNoisy = rice.rvs(dat / i, scale=i)
            else:
                print("error: noise type not supported")
                continue

            datNoisy[datNoisy < 0] = 0
            datNoisy[datNoisy > 255] = 255
            # writing image on disk
            noisyImg = itk.GetImageFromArray(datNoisy.astype(np.uint8))

            print(i)
            outputPath = outputName + "_" + str(i) + ".nii"
            itk.imwrite(noisyImg, outputPath)

            print(outputPath)
Code Example #21
File: segment.py Project: Will-Dolan/glance-vessels
    def get_labelmap(self, tube):
        if self.input_image is None:
            raise Exception('No input image?????')

        f = itk.SpatialObjectToImageFilter[itk.SpatialObject[3],
                                           itk.Image[itk.UC, 3]].New()

        # same origin and spacing and dir as input_image
        f.SetOrigin(self.input_image.GetOrigin())
        f.SetSpacing(self.input_image.GetSpacing())
        f.SetDirection(self.input_image.GetDirection())
        f.SetSize(self.input_image.GetLargestPossibleRegion().GetSize())

        f.SetUseObjectValue(False)
        f.SetOutsideValue(0)
        f.SetInsideValue(1)

        f.SetInput(tube)
        f.Update()

        mask = f.GetOutput()
        voxels = itk.GetArrayFromImage(mask)

        padded = np.concatenate(([0], voxels.flatten(), [0]))
        run_edges = np.diff(padded)
        run_starts, = np.where(run_edges > 0)
        run_stops, = np.where(run_edges < 0)
        run_lengths = run_stops - run_starts
        return np.array(list(zip(run_starts, run_lengths)),
                        dtype='uint32').flatten()
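
For reference, a small sketch of how the (start, length) run pairs returned above could be decoded back into a flat binary mask; total_voxels (the number of voxels in the label map) is an assumption of this sketch:

import numpy as np

def decode_runs(runs, total_voxels):
    flat = np.zeros(total_voxels, dtype=np.uint8)
    for start, length in np.asarray(runs, dtype=np.int64).reshape(-1, 2):
        flat[start:start + length] = 1
    return flat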
Code Example #22
def FindReferenceImage(inDataList):
    """
        This finds the median file among all the input files
    """
    IMG = []
    DIM = []
    for i in range(len(inDataList)):
        tmp = itk.GetArrayFromImage(itk.imread(inDataList[i]))
        IMG.append(tmp)
        DIM.append(tmp.shape)

    ref_dim = np.max(DIM, axis=0)

    for i in range(len(inDataList)):
        IMG[i] = np.pad(IMG[i], ((0, ref_dim[0] - DIM[i][0]),
                                 (0, ref_dim[1] - DIM[i][1]),
                                 (0, ref_dim[2] - DIM[i][2])),
                        mode='constant',
                        constant_values=0)

    COM = np.sum(np.asarray(IMG), axis=0) / len(inDataList)

    idx = np.argmin(np.sqrt(np.sum((np.asarray(IMG) - COM)**2,
                                   axis=(1, 2, 3))))
    print(" ")
    print("############# Reference File #############")
    cprint(("The reference file for rigid alignment is found"), 'green')
    cprint(("Output Median Filename : ", inDataList[idx]), 'yellow')
    print("###########################################")
    print(" ")
    return inDataList[idx]
Code Example #23
def getImages(loader_dir, image_list, down_sample):
    # get all images
    all_images = []
    for image_path in image_list:
        if down_sample:
            img = downSample(image_path)
        else:
            image = itk.imread(image_path, itk.F)
            img = itk.GetArrayFromImage(image)
        all_images.append(img)
    all_images = np.array(all_images)
    # get mean and std
    mean_path = loader_dir + 'mean_img.npy'
    std_path = loader_dir + 'std_img.npy'
    if not os.path.exists(mean_path) or not os.path.exists(std_path):
        mean_image = np.mean(all_images)
        std_image = np.std(all_images)
        np.save(mean_path, mean_image)
        np.save(std_path, std_image)
    else:
        mean_image = np.load(mean_path)
        std_image = np.load(std_path)
    # normalize
    norm_images = []
    for image in all_images:
        norm_images.append([(image - mean_image) / std_image])
    return norm_images
Code Example #24
def main():
    featureFiles = glob.glob(
        '/home/samuel/Projects/Ultrasound/Spectroscopy/Data/LinearProbe1/Chicken2/SpectraIteration1Features/*Spectra.npy'
    )

    labelFile = '/home/samuel/Projects/Ultrasound/Spectroscopy/Data/LinearProbe1/Chicken2/ManualLabels/rf_voltage_15_freq_0007500000_2017-5-31_12-50-44_ManualLabel.mha'

    features = []
    for ff in featureFiles:
        features.append(np.load(ff))

    imageType = itk.Image[itk.UC, 3]
    reader = itk.ImageFileReader[imageType].New()
    reader.SetFileName(labelFile)
    reader.Update()
    labelImage = itk.GetArrayFromImage(reader.GetOutput()).squeeze()

    print(ff)
    for i in range(0, 256, 10):
        fig = plt.figure(1)
        plt.subplot(211)
        plt.imshow(labelImage)
        plt.plot([i * 8 - 56, i * 8 + 56], [55, 55], color="red")

        plt.subplot(212)
        plt.plot(features[0][55, i, :], color="C0")
        plt.plot(features[1][55, i, :], color="C1")
        plt.plot(features[2][55, i, :], color="C2")
        plt.plot(features[3][55, i, :], color="C3")
        plt.plot(features[4][55, i, :], color="C4")
        plt.plot(features[5][55, i, :], color="C5")

        fig.savefig("fig-" + str(i) + ".png")
        plt.show()
Code Example #25
def get_bounding_box(image_3d_ref, depth, xy_padding=15, z_padding=8):
    image_3d_ref = image_threshold_binaray_3d(image_3d_ref, 127)

    PixelType = itk.ctype("unsigned char")
    ImageType = itk.Image[PixelType, 3]
    itkimage = itk.GetImageFromArray(image_3d_ref.astype(np.uint8))
    filter = itk.BinaryShapeKeepNObjectsImageFilter[ImageType].New()
    filter.SetAttribute('NumberOfPixels')
    filter.SetForegroundValue(255)
    filter.SetBackgroundValue(0)
    filter.SetNumberOfObjects(1)
    filter.SetInput(itkimage)
    filter.Update()
    output = filter.GetOutput()
    result_label_volume = itk.GetArrayFromImage(output)
    z, y, x = result_label_volume.nonzero()
    z_min = max(0, min(z) - z_padding)
    z_max = min(image_3d_ref.shape[0] - 1, z_min + depth)
    y_min = min(y)
    y_max = max(y)
    x_min = min(x)
    x_max = max(x)
    width = x_max - x_min
    height = y_max - y_min
    length = max(width, height)

    start_z = z_min
    end_z = z_max
    start_y = max(y_min - xy_padding, 0)
    end_y = min(start_y + length + 2 * xy_padding, image_3d_ref.shape[1] - 1)
    start_x = max(x_min - xy_padding, 0)
    end_x = min(start_x + length + 2 * xy_padding, image_3d_ref.shape[2] - 1)
    bbox_size = [start_z, end_z, start_y, end_y, start_x, end_x]

    return bbox_size
Code Example #26
File: test.py Project: EJShim/gan_super_ct
def itk_to_vtk(itkImage):

    resample_factor = 10

    array = itk.GetArrayFromImage(itkImage)

    #Downsample test
    spacing = itkImage.GetSpacing()
    spacing[2] *= resample_factor
    array = array[::resample_factor, :, :]
    dims = (array.shape[1], array.shape[2], array.shape[0])

    downsampled_image = itk.GetImageFromArray(array)
    print(downsampled_image)

    vtk_array = numpy_support.numpy_to_vtk(num_array=array.ravel(),
                                           deep=True,
                                           array_type=vtk.VTK_FLOAT)

    print(dims, spacing)

    #MN
    vtkImage = vtk.vtkImageData()
    vtkImage.SetDimensions(dims)
    vtkImage.SetSpacing(spacing)
    vtkImage.GetPointData().SetScalars(vtk_array)

    return vtkImage
Code Example #27
def readNII(dcm_path):
    ImageType = itk.Image[itk.F, 3]
    reader = itk.ImageSeriesReader[ImageType].New()
    reader.SetFileName(dcm_path)
    reader.Update()
    image3d = reader.GetOutput()
    return itk.GetArrayFromImage(image3d)
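
For a single NIfTI file, a simpler equivalent is to let itk.imread handle the I/O directly; a minimal sketch, assuming the same float pixel type is wanted:

import itk

def read_nii_array(path):
    return itk.GetArrayFromImage(itk.imread(path, itk.F))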
Code Example #28
def image_keep_max_region(label_volume_data):
    label_volume = image_threshold_binaray_3d(label_volume_data, 127)
    # labels = measure.label(image_3d_ref, connectivity=2)
    # props = measure.regionprops(labels)
    # max_area = 0
    # max_area_index = 0
    # for i in range(len(props)):
    #     area = props[i].area
    #     if area > max_area:
    #         max_area_index = i
    #         max_area = area
    # box = props[max_area_index].bbox
    # label_volume_with_max_region = np.zeros(image_3d_ref.shape, np.uint8)
    # label_volume_with_max_region[box[0]:box[3], box[1]:box[4], box[2]:box[5]] = \
    #     image_3d_ref[box[0]:box[3], box[1]:box[4], box[2]:box[5]]
    # assert box[3] - box[0] < depth + 1
    # for i in range(label_volume_with_max_region.shape[0]):
    #     cv.imwrite('D:\\tmp\\MIS\\other\\%d.jpg' % (i), label_volume_with_max_region[i])

    PixelType = itk.ctype("unsigned char")
    ImageType = itk.Image[PixelType, 3]
    itkimage = itk.GetImageFromArray(label_volume.astype(np.uint8))
    filter = itk.BinaryShapeKeepNObjectsImageFilter[ImageType].New()
    filter.SetAttribute('NumberOfPixels')
    filter.SetForegroundValue(255)
    filter.SetBackgroundValue(0)
    filter.SetNumberOfObjects(1)
    filter.SetInput(itkimage)
    filter.Update()
    output = filter.GetOutput()
    result_label_volume = itk.GetArrayFromImage(output)
    return result_label_volume
Code Example #29
def distanceMapByFastMarcher(image,
                             objectLabel,
                             stoppingValue,
                             ImageType
                            ):
  FastMarchingImageFilter = itk.FastMarchingImageFilter[ImageType, ImageType]
  fastMarcher = FastMarchingImageFilter.New()
  fastMarcher.SetOutputSize(image.GetLargestPossibleRegion().GetSize())
  fastMarcher.SetOutputOrigin(image.GetOrigin() )
  fastMarcher.SetOutputSpacing(image.GetSpacing() )
  fastMarcher.SetOutputDirection(image.GetDirection() )
  fastMarcher.SetSpeedConstant(1.0)
  if (stoppingValue > 0):
    fastMarcher.SetStoppingValue(stoppingValue)
  NodeType = itk.LevelSetNode.F3
  FastMarchingNodeContainer = itk.VectorContainer[itk.UI, NodeType]
  TrialIndexes = np.array(np.where(itk.GetArrayFromImage(image) == objectLabel)).T
  seeds = FastMarchingNodeContainer.New()
  seeds.Initialize()
  for Idx in TrialIndexes:
    node = seeds.CreateElementAt(seeds.Size())
    node.SetValue(0.)
    node.SetIndex(Idx[::-1].tolist())
  fastMarcher.SetTrialPoints(seeds)
  fastMarcher.Update()
  return fastMarcher.GetOutput()
Code Example #30
def VisualizeReferenceSpectrum(rf_files, freq_sampling):
    plt.figure(1, figsize=(5, 4))
    handles = []
    labels = []
    for rf_file in rf_files:
        ComponentType = itk.ctype('float')
        Dimension = 2
        ImageType = itk.VectorImage[ComponentType, Dimension]
        reader = itk.ImageFileReader[ImageType].New(FileName=rf_file)
        reader.Update()
        image = reader.GetOutput()
        arr = itk.GetArrayFromImage(image)
        arr /= arr[:, :, arr.shape[2] // 3 - arr.shape[2] // 5:arr.shape[2] // 2 +
                   arr.shape[2] // 5].max()
        freq = np.linspace(freq_sampling / 2 / arr.shape[2], freq_sampling / 2,
                           arr.shape[2])
        ax = plt.plot(freq, arr[0, 0, :].ravel(), label=rf_file)
        handles.append(ax[0])
        labels.append(rf_file)
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Power spectrum [V]')
    plt.figlegend(handles, labels, 'upper right')
    plt.ylim(0.0, 1.0)

    dirname = os.path.dirname(rf_files[0])
    plt.savefig(os.path.join(dirname, 'ReferenceSpectrum.png'), dpi=300)
    plt.show()