def test_skeletonize_wrong_dim():
    im = np.zeros(5, dtype=np.uint8)
    with pytest.raises(ValueError):
        skeletonize_3d(im)

    im = np.zeros((5, 5, 5, 5), dtype=np.uint8)
    with pytest.raises(ValueError):
        skeletonize_3d(im)
def test_skeletonize_num_neighbours():
    # an empty image
    image = np.zeros((300, 300))

    # foreground object 1
    image[10:-10, 10:100] = 1
    image[-100:-10, 10:-10] = 1
    image[10:-10, -100:-10] = 1

    # foreground object 2
    rs, cs = draw.line(250, 150, 10, 280)
    for i in range(10):
        image[rs + i, cs] = 1
    rs, cs = draw.line(10, 150, 250, 280)
    for i in range(20):
        image[rs + i, cs] = 1

    # foreground object 3
    ir, ic = np.indices(image.shape)
    circle1 = (ic - 135)**2 + (ir - 150)**2 < 30**2
    circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2
    image[circle1] = 1
    image[circle2] = 0
    result = skeletonize_3d(image)
    # skeletonize_3d returns values in {0, 255}; normalize to {0, 1}
    # so the 2x2-block check below is meaningful
    result = (result > 0).astype(np.uint8)

    # there should never be a 2x2 block of foreground pixels in a skeleton
    mask = np.array([[1, 1],
                     [1, 1]], np.uint8)
    blocks = ndi.correlate(result, mask, mode='constant')
    assert_(not np.any(blocks == 4))
def test_two_hole_image():
    # test a simple 2D image against FIJI
    img_o = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0],
                      [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                      [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                      dtype=np.uint8)
    img_f = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                      dtype=np.uint8)
    res = skeletonize_3d(img_o)
    assert_equal(res, img_f)
def test_skeletonize_already_thinned():
    im = np.zeros((5, 5), dtype=np.uint8)
    im[3, 1:-1] = 1
    im[2, -1] = 1
    im[4, 0] = 1
    result = skeletonize_3d(im)
    assert_equal(result, im)
def check_input(img):
    orig = img.copy()
    with warnings.catch_warnings():
        # UserWarning for possible precision loss, expected
        warnings.simplefilter('ignore', UserWarning)
        res = skeletonize_3d(img)
    assert_equal(img, orig)
def test_3d_vs_fiji():
    # generate an image with blobs and compare its skeleton to
    # the skeleton generated by FIJI
    img = binary_blobs(32, 0.05, n_dim=3, seed=1234)
    img = img[:-2, ...]
    img = img.astype(np.uint8)*255

    img_s = skeletonize_3d(img)
    img_f = io.imread(os.path.join(data_dir, "_blobs_3d_fiji_skeleton.tif"))
    assert_equal(img_s, img_f)
def test_dtype_conv():
    # check that the operation does the right thing with floats etc
    # also check non-contiguous input
    img = np.random.random((16, 16))[::2, ::2]
    img[img < 0.5] = 0

    orig = img.copy()

    with warnings.catch_warnings():
        # UserWarning for possible precision loss, expected
        warnings.simplefilter('ignore', UserWarning)
        res = skeletonize_3d(img)

    assert_equal(res.dtype, np.uint8)
    assert_equal(img, orig)  # operation does not clobber the original
    assert_equal(res.max(),
                 img_as_ubyte(img).max())  # the intensity range is preserved
Example #9
def tree_parse():
    input_path = "H:\\airway\\data_for_torch\\"  # ground truth or predictions
    save_path = "H:\\airway\\data_for_torch\\"
    file_list = os.listdir(input_path)
    file_list.sort()

    for ids in range(len(file_list) // 2):
        # ids = 10
        img = nibabel.load(input_path + file_list[2 * ids])
        img = img.get_data()
        label = nibabel.load(input_path + file_list[2 * ids + 1])
        label = label.get_data()
        label = (label > 0).astype(np.uint8)

        label = large_connected_domain(label)
        skeleton = skeletonize_3d(label)
        skeleton_parse, cd, num = skeleton_parsing(skeleton)
        tree_parsing = tree_parsing_func(skeleton_parse, label, cd)
        trachea = loc_trachea(tree_parsing, num)
        ad_matric = adjacent_map(tree_parsing, num)
        parent_map, children_map, generation = parent_children_map(ad_matric, trachea, num)
        while whether_refinement(parent_map, children_map, tree_parsing, num, trachea) is True:
            tree_parsing, num = tree_refinement(parent_map, children_map, tree_parsing, num, trachea)
            trachea = loc_trachea(tree_parsing, num)
            ad_matric = adjacent_map(tree_parsing, num)
            parent_map, children_map, generation = parent_children_map(ad_matric, trachea, num)
        print(ids, file_list[2 * ids][:-11], "finished!")
        # tree_parsing[tree_parsing==trachea] = 1

        save_name_img = save_path + file_list[2 * ids][:-11] + "_img.nii.gz"
        save_name_parse = save_path + file_list[2 * ids][:-11] + "_parse.nii.gz"
        save_name_skel = save_path + file_list[2 * ids][:-11] + "_skel.nii.gz"
        save_name_label = save_path + file_list[2 * ids][:-11] + "_label.nii.gz"
        img_nii = nibabel.Nifti1Image(img, np.eye(4))
        nibabel.save(img_nii, save_name_img)
        parse_nii = nibabel.Nifti1Image(tree_parsing, np.eye(4))
        nibabel.save(parse_nii, save_name_parse)
        skel_nii = nibabel.Nifti1Image(skeleton, np.eye(4))
        nibabel.save(skel_nii, save_name_skel)
        label_nii = nibabel.Nifti1Image(label, np.eye(4))
        nibabel.save(label_nii, save_name_label)

        save_name_pm = save_path + file_list[2 * ids][:-11] + "_parent.npy"
        save_name_cm = save_path + file_list[2 * ids][:-11] + "_children.npy"
        np.save(save_name_pm, parent_map)
        np.save(save_name_cm, children_map)
Example #10
def skeletonize(img):
    """Skeletonize image"""
    print("\nSkeletonizing image...")
    s = np.array([[1, 0, 0, 1], [0, 1, 1, 0], [0, 1, 1, 0], [1, 0, 0, 1]])
    skl = []
    for i in img:
        j = skeletonize_3d(i)
        k = morp.binary_hit_or_miss(j, structure1=s)
        k_ind = k.nonzero()
        k_ind = np.column_stack(k_ind)
        if len(k_ind) > 0:
            for el in k_ind:
                j[el[0], el[1]] = 0
        skl.append(j)
    skl = np.array(skl, dtype=int, copy=False)
    skl[skl > 0] = 1
    print("Image skeletonized.\n")
    return skl
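# Usage sketch for the wrapper above (illustrative; assumes numpy as np,
# skimage's skeletonize_3d, and scipy.ndimage imported as `morp`, matching
# the names used in the function body):
stack = (np.random.rand(4, 64, 64) > 0.7).astype(np.uint8)
skl = skeletonize(stack)
print(skl.shape, skl.max())  # e.g. (4, 64, 64) 1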
Example #11
    def __mapConnectedComponents(self):
        print(colored('# Tracking Connected Components !!','green'))
        kernel = np.ones((5, 5), np.uint8)
        dilated_data = cv2.dilate(self.data, kernel, iterations=6)
        labeled_data, num_of_components = scipy.ndimage.measurements.label(dilated_data)
        diff = self.data * num_of_components - labeled_data
        diff[diff < 0] = 0
        self.plotData(diff,identifier='Detected Words/Segments')
        print(colored('# Storing Words !!','green'))
        self.words=[]
        self.sk_words=[]
        self.row_sum=[]
        self.col_sum=[]
        for component in range(1, num_of_components):
            idx = np.where(diff == component)
            y, h, x, w = np.min(idx[0]), np.max(idx[0]), np.min(idx[1]), np.max(idx[1])
            word = np.ones((h - y + 1, w - x + 1))
            idx = (idx[0] - y, idx[1] - x)
            word[idx] = 0
            inv_word = 1 - word
            sk_word = skeletonize_3d(inv_word) / 255
            
            #rotate
            coords = np.column_stack(np.where(sk_word > 0))
            angle = cv2.minAreaRect(coords)[-1]
            if angle < -45:
                angle = -(90 + angle)
            else:
                angle = -angle
            # rotate the image to deskew it
            (h, w) = inv_word.shape[:2]
            center = (w // 2, h // 2)
            M = cv2.getRotationMatrix2D(center, angle, 1.0)
            rotated = cv2.warpAffine(sk_word, M, (w, h), flags=cv2.INTER_CUBIC,
                                     borderMode=cv2.BORDER_REPLICATE)

            self.row_sum.append(np.sum(1 - rotated, axis=1))
            self.col_sum.append(np.sum(1 - rotated, axis=0))
            self.sk_words.append(rotated)
            self.words.append(word)

        self.words.reverse()
        self.sk_words.reverse()
Example #12
def prepare_data(data, dilate_iterations=1, sigma=0.5):
    """Returns the given binary data, its skeleton and the thickened skeleton.

    The skeleton of a given 2D or 3D array is computed, then it is thickened
    using morphological dilation with `dilate_iterations` and smoothed with
    help of Gaussian filter of specified `sigma`.

    Parameters
    ----------
    data : ndarray
        2D or 3D binary array which will be processed.

    dilate_iterations : integer
        Indicates the number of iterations for thickening the skeleton.

    sigma : float
        Indicates the sigma of Gaussian filter used in smoothing of skeleton.

    Returns
    -------
    arrays : tuple of 2D or 3D arrays
        The original array, its skeleton and the thickened skeleton.
    """
    data_8bit = data.astype(np.uint8)
    data_8bit = ndi.binary_fill_holes(data_8bit).astype(np.uint8)

    if data.ndim == 3:
        skeleton = morphology.skeletonize_3d(data_8bit)
    elif data.ndim == 2:
        skeleton = morphology.skeletonize(data_8bit)
    else:
        raise ValueError(
            'Incorrect number of data dimensions; only 2D and 3D arrays are supported.'
        )

    skeleton_thick = ndi.binary_dilation(skeleton,
                                         iterations=dilate_iterations).astype(
                                             np.float32)
    skeleton_thick = ndi.filters.gaussian_filter(skeleton_thick, sigma)

    return (data, skeleton, skeleton_thick)
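# A minimal usage sketch (illustrative; assumes the same imports as the
# function above: numpy as np, scipy.ndimage as ndi, and skimage's
# morphology module):
from skimage.data import binary_blobs

blobs = binary_blobs(64, blob_size_fraction=0.2, seed=1)
data, skeleton, skeleton_thick = prepare_data(blobs, dilate_iterations=2, sigma=1.0)
print(skeleton.sum(), skeleton_thick.max())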
def bw_skel_and_analyze(bw):
    if bw.ndim == 3:
        skeleton = skeletonize_3d(bw)
    elif bw.ndim == 2:
        skeleton = skeletonize(bw)
    skeleton[skeleton > 0] = 1

    if skeleton.any() and np.count_nonzero(skeleton) > 1:
        try:
            pixel_graph, coordinates, degrees = skeleton_to_csgraph(skeleton)
        except Exception:
            pixel_graph = np.zeros(np.shape(skeleton))
            coordinates = []
            degrees = np.zeros(np.shape(skeleton))
    else:
        pixel_graph = np.zeros(np.shape(skeleton))
        coordinates = []
        degrees = np.zeros(np.shape(skeleton))

    return degrees, coordinates
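# Quick 2D sanity check (illustrative; requires skan for skeleton_to_csgraph):
bw = np.zeros((5, 5), dtype=np.uint8)
bw[2, 1:4] = 1  # a short horizontal filament
degrees, coordinates = bw_skel_and_analyze(bw)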
def Zkele(image, sauvola=21, method='norm'):
    '''
    :param image: input image
    :param sauvola: size of the Sauvola kernel
    :param method: method used to find the skeleton; either normal ('norm') or 3D ('3d')
    :return: image of the skeleton of the input image
    example
                    ske = Zkele(image, sauvola=3, method='3d')
        This call finds the skeleton of the input image using Sauvola
    thresholding with a kernel size of 3 and the 3D skeletonization method.
    '''

    data = IP.binarize(image, method=IP.SAUVOLA_THRESHOLDING,
                       value=sauvola) / 255
    data = 1.00 - data
    if method == 'norm':
        skeleton = skeletonize(data)
    elif method == '3d':
        skeleton = skeletonize_3d(data)
    return 255.0 - skeleton
def main(sim_name, domainfile, sourcefile, datafile, method, use_skel):
    domain = np.load(sim_name + domainfile).astype(int)
    source = np.load(sim_name + sourcefile).astype(int)
    vessel = 1 - domain - source  #since what we're reading is the diffusion domain file

    if not os.path.exists(sim_name + '/skeleton_vessel.npy'):
        skel = morph.skeletonize_3d(vessel)
        np.save(sim_name + '/skeleton_vessel.npy', skel)
    if use_skel:
        vessel = np.load(sim_name + '/skeleton_vessel.npy')

    H_score = []

    for i in np.arange(0, len(datafile)):
        data = np.load(sim_name + datafile[i])
        distribution = method(data, vessel)
        H_score.append(heterogen_score(distribution))

    print(H_score)

    return 0
Example #16
    def get_skeletons(self, masks):
        # Combine all the masks into a single binary image
        binary_image = cv2.bitwise_or(masks.white, masks.yellow)
        binary_image = cv2.bitwise_or(binary_image, masks.red)

        # Get the skeleton, based on [Lee94]
        skeleton = skeletonize_3d(binary_image)

        # Get dilated version of the skeleton to find the connected components.
        # This is to perform a poor man's version of DBSCAN.
        neighbors = cv2.dilate(skeleton, self._large_kernel, iterations=1)
        # Get the connected components
        num_components, components = cv2.connectedComponents(neighbors,
                                                             connectivity=4)

        skeletons = Detections(white=cv2.bitwise_and(skeleton, masks.white),
                               yellow=cv2.bitwise_and(skeleton, masks.yellow),
                               red=cv2.bitwise_and(skeleton, masks.red))

        return self.get_connected_skeletons(skeletons, num_components,
                                            components)
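# The dilate-then-label trick above, in isolation (illustrative; assumes
# numpy as np and cv2): nearby foreground pixels merge into a single
# connected component after dilation, which is what groups skeleton
# fragments here.
pts = np.zeros((20, 20), np.uint8)
pts[5, 5] = pts[5, 8] = 1  # two separate but nearby pixels
merged = cv2.dilate(pts, np.ones((5, 5), np.uint8), iterations=1)
n, comps = cv2.connectedComponents(merged, connectivity=4)
print(n - 1)  # -> 1: both pixels end up in the same component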
Example #17
    def __init__(self, bin_image):
        """

        :param bin_image: numpy array of the 3d binary image. Needs to be sufficiently processed. Smoothness of 3d image
        directly affects accuracy of skeleton
        """
        self.res_arr = np.array([96.39/512, 96.39/512, 157.6/197])
        self.raw_image = bin_image
        self.skeleton_image = skeletonize_3d(bin_image.astype(np.uint8)) ## Does the bulk of the 'skeletonizing'
        self.cell_xyz = self.get_cell_xyz()
        self.skel_points_pix = np.asarray(np.where(self.skeleton_image==self.skeleton_image.max())).T
        self.ax_equal_3d()
        self.skel_points = self.skel_points_pix*self.res_arr
        self.endpoints = []
        self.state = 0
        self.paths = []
        self.interp_paths = []
        self.delta = 100
        self.trash0 = None
        self.trash1 = None
        print("Initialized")
def show_center_lines(basepath,
                        filepath_output,
                        idx_start,
                        idx_end,
                        train_test):

    # ==========================================
    # ==========================================
    num_images_to_load = idx_end + 1 - idx_start

    i = 0
    for n in range(idx_start, idx_end + 1):

        print("========================================================================")
        print('Loading subject ' + str(n-idx_start+1) + ' out of ' + str(num_images_to_load) + '...')
        print("========================================================================")

        # load the segmentation that was created with Nicolas's tool
        image = np.load(basepath + '/' + subjects_ordering.SUBJECT_DIRS[n] + '/image.npy')
        segmented = np.load(basepath + '/' + subjects_ordering.SUBJECT_DIRS[n] + '/random_walker_prediction.npy')

        # Average the segmentation over time (the geometry should be the same over time)
        avg = np.average(segmented, axis = 3)

        # Compute the centerline points of the skeleton
        skeleton = skeletonize_3d(avg[:,:,:])

        # Get the points of the centerline as an array
        points = np.array(np.where(skeleton != 0)).transpose([1,0])

        #Load the centerline coordinates for the given subject
        # centerline_coords = centerline_indexes[n]

        # print out the points
        for i in range(len(points)):
            print("Index {}:".format(str(i)) + str(points[i]))


    return 0
def bw_skel_and_analyze(bw):
    if bw.ndim == 3:
        skeleton = skeletonize_3d(bw)
    elif bw.ndim == 2:
        skeleton = skeletonize(bw)
    skeleton[skeleton > 0] = 1

    if skeleton.any() and np.count_nonzero(skeleton) > 1:
        try:
            pixel_graph, coordinates, degrees = skeleton_to_csgraph(skeleton)
            coordinates = coordinates[1:]  # get rid of the zero entry at the beginning
        except Exception:
            pixel_graph = np.zeros(np.shape(skeleton))
            coordinates = []
            degrees = np.zeros(np.shape(skeleton))
            print('error, could not skeletonize')
    else:
        pixel_graph = np.zeros(np.shape(skeleton))
        coordinates = []
        degrees = np.zeros(np.shape(skeleton))

    return pixel_graph, degrees, coordinates
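# Toy 3D check (illustrative; requires skan for skeleton_to_csgraph):
bw = np.zeros((7, 7, 7), dtype=np.uint8)
bw[3, 3, 1:6] = 1  # a straight filament through the volume
pixel_graph, degrees, coordinates = bw_skel_and_analyze(bw)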
Example #20
    def test_skeletons_n5(self):
        config = SkeletonWorkflow.get_config()['skeletonize']
        config.update({'chunk_len': 50})
        with open(os.path.join(self.config_folder, 'skeletonize.config'), 'w') as f:
            json.dump(config, f)

        self._run_skel_wf(format_='n5', max_jobs=8)

        # check output for correctness
        seg, ids = self.ids_and_seg()
        out_key = os.path.join(self.output_prefix, 's0')
        ds = z5py.File(self.output_path)[out_key]
        for seg_id in ids:
            # read the result from file
            coords, edges = su.read_n5(ds, seg_id)

            # compute the expected result
            mask = seg == seg_id
            skel_vol = skeletonize_3d(mask)
            try:
                pix_graph, coords_exp, _ = csr.skeleton_to_csgraph(skel_vol)
            except ValueError:
                continue

            # check coordinates
            coords_exp = coords_exp[1:].astype('uint64')
            self.assertEqual(coords.shape, coords_exp.shape)
            self.assertTrue(np.allclose(coords, coords_exp))

            # check edges
            graph = csr.numba_csgraph(pix_graph)
            n_points = len(coords)
            edges_exp = [[u, v] for u in range(1, n_points + 1)
                         for v in graph.neighbors(u) if u < v]
            edges_exp = np.array(edges_exp)
            edges_exp -= 1
            self.assertEqual(edges.shape, edges_exp.shape)
            self.assertTrue(np.allclose(edges, edges_exp))
def ct(im):
    img = Image.open(im)
    # This is used to save a background and save the corner pixel
    bg = Image.new(img.mode, img.size, img.getpixel((0, 0)))
    # Which is specified by getpixel(0,0)
    diff = ImageChops.difference(img, bg)
    # Diff holds the difference with main 'im' and 'bg'
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()  # the bounding box of the difference
    if bbox:  # if the box exists, crop the image to it and save
        img = img.crop(bbox)
        img.save(im)
    image = cv2.imread(im, 0)
    # The given image should be in Grayscale or Binary format
    # skeletonize_3d assumes White as foreground and black as background.
    # Hence we invert the image (this can be removed depending on what form of input images we decide to provide at a later stage)
    image = color.rgb2gray(invert(image))
    # skeletonize_3d is mainly used for 3D images but can be used for 2D also.
    # Advantage - Removes spurs and provides better output
    skeleton = skeletonize_3d(image)
    # Saving output image
    image = toimage(skeleton) 	# Takes a numpy array and returns a PIL image
    image.save(im)
def skeletonize3D(cube):
    """
    Return the 3D skeleton dkel2 and converted it in form of graph and return
    pixel graph as a SciPy CSR matrix in which entry (i,j) is 0 if pixels 
    i and j are not connected, and otherwise is equal to the distance between
    pixels i and j in the skeleton.

    coordinates (in pixel units) of the points in the pixel graph. Finally, degrees 
    is an image of the skeleton, with each skeleton pixel containing the number of neighboring pixels.

 

    Parameters
    ----------
    cube : 3d numpy array

   
    
    
    Returns
    -------
    dskel2 :3D skeletonized binary image(numpy array)
    Pixel Graph,
    coordinates,
    degree 
       
    """

    selem = ball(3)

    ddilate = dilation(cube, selem)
    dclose = closing(ddilate)
    dskel2 = skeletonize_3d(dclose)

    pixel_graph0, coordinates0, degrees0 = csr.skeleton_to_csgraph(dskel2)

    return dskel2, pixel_graph0, coordinates0, degrees0
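# Illustrative call on a synthetic volume (assumes numpy as np plus the
# ball/dilation/closing/skeletonize_3d and skan.csr imports used above):
cube = np.zeros((24, 24, 24), dtype=np.uint8)
cube[6:18, 12, 12] = 1  # a thin vertical segment
dskel2, pixel_graph0, coordinates0, degrees0 = skeletonize3D(cube)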
def apply_skeleton(df):

    x = df.X.values
    y = df.Y.values
    z = df.Z.values

    xx = np.linspace(x.min() - 0.5, x.max() + 0.5, int(x.max() - x.min() + 2))
    yy = np.linspace(y.min() - 0.5, y.max() + 0.5, int(y.max() - y.min() + 2))
    zz = shift_to_bin_centers(df.Z.unique())

    zz = np.append(zz, zz[-1] + 1)
    zz = np.append(zz[0] - 1, zz)
    values = np.histogramdd((x, y, z), bins=[xx, yy, zz])
    val = values[0].transpose(2, 0, 1).flatten()
    digitize = np.where(values[0] > 0, 1, 0)
    skeleton = skeletonize_3d(digitize)
    #if skeleton.sum() > 0:
    skeleton_mask = np.where(skeleton == 1)
    x_skel = shift_to_bin_centers(xx)[skeleton_mask[0]]
    y_skel = shift_to_bin_centers(yy)[skeleton_mask[1]]
    z_skel = shift_to_bin_centers(zz)[skeleton_mask[2]]
    e_skel = values[0][skeleton_mask]

    return pd.DataFrame({'X': x_skel, 'Y': y_skel, 'Z': z_skel, 'E': e_skel})
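# `shift_to_bin_centers` is not defined in this snippet; one plausible
# definition, matching its use on histogram bin edges above (an assumption):
def shift_to_bin_centers(edges):
    return (edges[:-1] + edges[1:]) / 2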
def postprocess_instances(samples, output_folder, **kwargs):
    comp_thresh = kwargs['remove_small_comps']

    for sample in samples:
        inf = h5py.File(sample, "a")

        inst = np.array(inf[kwargs['res_key']])
        labels, counts = np.unique(inst, return_counts=True)
        small_labels = labels[counts <= comp_thresh]
        inst_cleaned = replace(inst, np.array(small_labels),
                               np.array([0] * len(small_labels)))
        inst_cleaned = relabel(inst_cleaned)
        if np.max(inst_cleaned) < 65535:
            dtype = np.uint16
        else:
            dtype = np.uint32

        new_key = kwargs['res_key'] + ("_rm_%s" % comp_thresh)
        inf.create_dataset(new_key,
                           data=inst_cleaned.astype(dtype),
                           dtype=dtype,
                           compression='gzip')

        if kwargs.get('export_skeleton_nrrds', False):
            sample_name = os.path.basename(sample).split(".")[0]
            labels = np.unique(inst_cleaned)
            labels = labels[labels > 0]

            for label in labels:
                mask = inst_cleaned == label
                mask = (skeletonize_3d(mask) > 0).astype(np.uint8)
                # check if transpose necessary?
                mask = np.transpose(mask, (2, 1, 0))
                nrrd.write(
                    os.path.join(output_folder,
                                 sample_name + ("_%i.nrrd" % label)), mask)
def get3Dskeleton(imgPath=None, thresh='30%', mask=True, verbose=0):
    """
    Gets the 3D skeleton of an image
    imgPath -> Relative or absolute path to a .nii image or nii variable
    thresh (optional, default='30%') -> Threshold value. Either float or string like 'x.xx%'
    mask (optional, default=True) -> Applies a gray matter mask if true
    verbose (optional, default=0) -> int - the higher it is, the more messages are printed to the console
    Returns a Nifti1Image object containing the skeletonized image
    """
    brain = image.load_img(imgPath)
    if (mask):
        imgMask = masking.compute_gray_matter_mask(target_img=brain,
                                                   threshold=0.3,
                                                   connected=True,
                                                   opening=1)
        brain = image.math_img('img1 * img2', img1=brain, img2=imgMask)
    brain = image.threshold_img(brain, thresh)
    affine = brain.affine
    data = brain.get_data()
    data = data / np.amax(data)
    #data = filters.threshold_adaptive(data, 35, 10)
    skeleton = morphology.skeletonize_3d(data)
    skeletonBrain = nifti.Nifti1Image(skeleton, affine)
    return skeletonBrain
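# Hypothetical invocation (the file names are placeholders; requires
# nilearn and nibabel, imported above as `image`, `masking`, and `nifti`):
skel_img = get3Dskeleton('subject01_T1.nii.gz', thresh='30%', mask=True)
skel_img.to_filename('subject01_skeleton.nii.gz')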
def test_skeletonize_1D():
    # a corner case of an image of shape (N, 1)
    im = np.ones((5, 1), dtype=np.uint8)
    res = skeletonize_3d(im)
    assert_equal(res, im)
def check_input(img):
    orig = img.copy()
    skeletonize_3d(img)
    assert_equal(img, orig)

# ##############################################################################
# 3D Skele
# ##############################################################################
import matplotlib.pyplot as plt
from skimage.morphology import skeletonize, skeletonize_3d
from skimage.data import binary_blobs

data = binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1)

skeleton = skeletonize(data)
skeleton3d = skeletonize_3d(data)  # Use this for 3d images

fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('original')
ax[0].axis('off')
ax[1].imshow(skeleton, cmap=plt.cm.gray, interpolation='nearest')
ax[1].set_title('skeletonize')
ax[1].axis('off')
ax[2].imshow(skeleton3d, cmap=plt.cm.gray, interpolation='nearest')
ax[2].set_title('skeletonize_3d')
ax[2].axis('off')
fig.tight_layout()
plt.show()
# ##############################################################################
def process_file(img_id, par, par2, vgg_big_path, vgg_small_path, linknet_small_path, small_res_file_path, inc_file_path, 
                 vgg_smallest_file_path, inc_smallest_file_path, res_smallest_file_path, inc3_520_file_path, inc_v2_520_file_path,
                 linknet_big_file_path, linknet_520_file_path,
                 vgg_big_path_1, vgg_smallest_file_path_1, 
                 inc_smallest_file_path_1, res_smallest_file_path_1, inc3_520_file_path_1, inc_v2_520_file_path_1, 
                  linknet_big_file_path_1, linknet_520_file_path_1, save_to=None):
    res_rows = []
    
    if vgg_small_path is None:
        msk = np.zeros((1300, 1300))
    else:
        msk = cv2.imread(vgg_small_path, cv2.IMREAD_UNCHANGED)
        msk = cv2.resize(msk, (1300, 1300))
    if linknet_small_path is None:
        msk2 = np.zeros((1300, 1300))
    else:
        msk2 = cv2.imread(linknet_small_path, cv2.IMREAD_UNCHANGED)
        msk2 = cv2.resize(msk2, (1300, 1300))
    if vgg_big_path is None:
        msk3 = np.zeros((1300, 1300))
        msk3_1 = np.zeros((1300, 1300))
    else:
        msk3 =  cv2.imread(vgg_big_path, cv2.IMREAD_UNCHANGED)
        msk3_1 =  cv2.imread(vgg_big_path_1, cv2.IMREAD_UNCHANGED)
    if small_res_file_path is None:
        res_msk = np.zeros((1300, 1300))
    else:
        res_msk = cv2.imread(small_res_file_path, cv2.IMREAD_UNCHANGED)
        res_msk = cv2.resize(res_msk, (1300, 1300))
    if inc_file_path is None:
        inc_msk = np.zeros((1300, 1300))
    else:
        inc_msk = cv2.imread(inc_file_path, cv2.IMREAD_UNCHANGED)
        inc_msk = cv2.resize(inc_msk, (1300, 1300))
    if vgg_smallest_file_path is None:
        vgg_smlst_msk = np.zeros((1300, 1300))
        vgg_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        vgg_smlst_msk = cv2.imread(vgg_smallest_file_path, cv2.IMREAD_UNCHANGED)
        vgg_smlst_msk = cv2.resize(vgg_smlst_msk, (1300, 1300))
        vgg_smlst_msk_1 = cv2.imread(vgg_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        vgg_smlst_msk_1 = cv2.resize(vgg_smlst_msk_1, (1300, 1300))
    if inc_smallest_file_path is None:
        inc_smlst_msk = np.zeros((1300, 1300))
        inc_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        inc_smlst_msk = cv2.imread(inc_smallest_file_path, cv2.IMREAD_UNCHANGED)
        inc_smlst_msk = cv2.resize(inc_smlst_msk, (1300, 1300))
        inc_smlst_msk_1 = cv2.imread(inc_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        inc_smlst_msk_1 = cv2.resize(inc_smlst_msk_1, (1300, 1300))
    if res_smallest_file_path is None:
        res_smlst_msk = np.zeros((1300, 1300))
        res_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        res_smlst_msk = cv2.imread(res_smallest_file_path, cv2.IMREAD_UNCHANGED)
        res_smlst_msk = cv2.resize(res_smlst_msk, (1300, 1300))
        res_smlst_msk_1 = cv2.imread(res_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        res_smlst_msk_1 = cv2.resize(res_smlst_msk_1, (1300, 1300))
    if inc3_520_file_path is None:
        inc3_520_msk = np.zeros((1300, 1300))
        inc3_520_msk_1 = np.zeros((1300, 1300))
    else:
        inc3_520_msk = cv2.imread(inc3_520_file_path, cv2.IMREAD_UNCHANGED)
        inc3_520_msk = cv2.resize(inc3_520_msk, (1300, 1300))
        inc3_520_msk_1 = cv2.imread(inc3_520_file_path_1, cv2.IMREAD_UNCHANGED)
        inc3_520_msk_1 = cv2.resize(inc3_520_msk_1, (1300, 1300))
    if inc_v2_520_file_path is None:
        inc_v2_520_msk = np.zeros((1300, 1300))
        inc_v2_520_msk_1 = np.zeros((1300, 1300))
    else:
        inc_v2_520_msk = cv2.imread(inc_v2_520_file_path, cv2.IMREAD_UNCHANGED)
        inc_v2_520_msk = cv2.resize(inc_v2_520_msk, (1300, 1300))
        inc_v2_520_msk_1 = cv2.imread(inc_v2_520_file_path_1, cv2.IMREAD_UNCHANGED)
        inc_v2_520_msk_1 = cv2.resize(inc_v2_520_msk_1, (1300, 1300))
    if linknet_big_file_path is None:
        link_big_msk = np.zeros((1300, 1300))
        link_big_msk_1 = np.zeros((1300, 1300))
    else:
        link_big_msk = cv2.imread(linknet_big_file_path, cv2.IMREAD_UNCHANGED)
        link_big_msk_1 = cv2.imread(linknet_big_file_path_1, cv2.IMREAD_UNCHANGED)
    if linknet_520_file_path is None:
        link_520_msk = np.zeros((1300, 1300))
        link_520_msk_1 = np.zeros((1300, 1300))
    else:
        link_520_msk = cv2.imread(linknet_520_file_path, cv2.IMREAD_UNCHANGED)
        link_520_msk = cv2.resize(link_520_msk, (1300, 1300))
        link_520_msk_1 = cv2.imread(linknet_520_file_path_1, cv2.IMREAD_UNCHANGED)
        link_520_msk_1 = cv2.resize(link_520_msk_1, (1300, 1300))
    
    msk3 = (msk3 * 0.5 + msk3_1 * 0.5)
    inc_smlst_msk = (inc_smlst_msk * 0.5 + inc_smlst_msk_1 * 0.5)
    vgg_smlst_msk = (vgg_smlst_msk * 0.5 + vgg_smlst_msk_1 * 0.5)
    res_smlst_msk = (res_smlst_msk * 0.5 + res_smlst_msk_1 * 0.5)
    inc3_520_msk = (inc3_520_msk * 0.5 + inc3_520_msk_1 * 0.5)
    inc_v2_520_msk = (inc_v2_520_msk * 0.5 + inc_v2_520_msk_1 * 0.5)
    link_big_msk = (link_big_msk * 0.5 + link_big_msk_1 * 0.5)
    link_520_msk = (link_520_msk * 0.5 + link_520_msk_1 * 0.5)
    
    coef = []
    tot_sum = par[:12].sum()
    for i in range(12):
        coef.append(par[i] / tot_sum)
    msk = (msk * coef[0] + msk2 * coef[1] + msk3 * coef[2] + res_msk * coef[3] + inc_msk * coef[4]
             + vgg_smlst_msk * coef[5]  + inc_smlst_msk * coef[6] + res_smlst_msk * coef[7] 
             + inc3_520_msk * coef[8] + inc_v2_520_msk * coef[9] + link_big_msk * coef[10] + link_520_msk * coef[11])
    msk = msk.astype('uint8')
    if save_to is not None:
        cv2.imwrite(save_to, msk, [cv2.IMWRITE_PNG_COMPRESSION, 9])

    msk2 = np.lib.pad(msk, ((22, 22), (22, 22)), 'symmetric')
    
    thr = par[12]
        
    msk2 = 1 * (msk2 > thr)
    msk2 = msk2.astype(np.uint8)
    
    if par2[0] > 0:
        msk2 = dilation(msk2, square(par2[0]))
    if par2[1] > 0:
        msk2 = erosion(msk2, square(par2[1]))
        
    if 'Shanghai' in img_id:
        skeleton = medial_axis(msk2)
    else:
        skeleton = skeletonize_3d(msk2)
    skeleton = skeleton[22:1322, 22:1322]
    
    lbl0 = label(skeleton)
    props0 = regionprops(lbl0)
    
    cnt = 0
    crosses = []
    for x in range(1300):
        for y in range(1300):
            if skeleton[y, x] == 1:
                if skeleton[max(0, y-1):min(1300, y+2), max(0, x-1):min(1300, x+2)].sum() > 3:
                    cnt += 1
                    crss = []
                    crss.append((x, y))
                    for y0 in range(max(0, y-1), min(1300, y+2)):
                        for x0 in range(max(0, x-1), min(1300, x+2)):
                            if x == x0 and y == y0:
                                continue
                            if skeleton[max(0, y0-1):min(1300, y0+2), max(0, x0-1):min(1300, x0+2)].sum() > 3:
                                crss.append((x0, y0))
                    crosses.append(crss)
    cross_hashes = []
    for crss in crosses:
        crss_hash = set([])
        for x0, y0 in crss:
            crss_hash.add(point_hash(x0, y0))
            skeleton[y0, x0] = 0
        cross_hashes.append(crss_hash)
 
    new_crosses = []
    i = 0
    while i < len(crosses):
        new_hashes = set([])
        new_hashes.update(cross_hashes[i])
        new_crss = crosses[i][:]
        fl = True
        while fl:
            fl = False
            j = i + 1
            while j < len(crosses):
                if len(new_hashes.intersection(cross_hashes[j])) > 0:
                    new_hashes.update(cross_hashes[j])
                    new_crss.extend(crosses[j])
                    cross_hashes.pop(j)
                    crosses.pop(j)
                    fl = True
                    break
                j += 1
        mean_p = np.asarray(new_crss).mean(axis=0).astype('int')
        if len(new_crss) > 1:
            t = KDTree(new_crss)
            mean_p = new_crss[t.query(mean_p[np.newaxis, :])[1][0][0]]
        new_crosses.append([(mean_p[0], mean_p[1])] + new_crss)
        i += 1
    crosses = new_crosses
    
    lbl = label(skeleton)
    props = regionprops(lbl)
    
    connected_roads = []
    connected_crosses = [set([]) for p in props]
    for i in range(len(crosses)):
        rds = set([])
        for j in range(len(crosses[i])):
            x, y = crosses[i][j]
            for y0 in range(max(0, y-1), min(1300, y+2)):
                for x0 in range(max(0, x-1), min(1300, x+2)):
                    if lbl[y0, x0] > 0:
                        rds.add(lbl[y0, x0])
                        connected_crosses[lbl[y0, x0]-1].add(i)
        connected_roads.append(rds)
    
    res_roads = []
    
    tot_dist_min = par2[2]
    coords_min = par2[3]
        
    for i in range(len(props)):
        coords = props[i].coords
        crss = list(connected_crosses[i])
        tot_dist = props0[lbl0[coords[0][0], coords[0][1]]-1].area

        if (tot_dist < tot_dist_min) or (coords.shape[0] < coords_min and len(crss) < 2):
            continue
        if coords.shape[0] == 1:
            coords = np.asarray([coords[0], coords[0]])
        else:
            coords = get_ordered_coords(lbl, i+1, coords)
        for j in range(len(crss)):
            x, y = crosses[crss[j]][0]
            d1 = abs(coords[0][0] - y) + abs(coords[0][1] - x)
            d2 = abs(coords[-1][0] - y) + abs(coords[-1][1] - x)
            if d1 < d2:
                coords[0][0] = y
                coords[0][1] = x
            else:
                coords[-1][0] = y
                coords[-1][1] = x
        coords_approx = approximate_polygon(coords, 1.5)
        res_roads.append(coords_approx)
        
    hashes = set([])
    final_res_roads = []
    for r in res_roads:
        if r.shape[0] > 2:
            final_res_roads.append(r)
            for i in range(1, r.shape[0]):
                p1 = r[i-1]
                p2 = r[i]
                h1 = pair_hash(p1, p2)
                h2 = pair_hash(p2, p1)
                hashes.add(h1)
                hashes.add(h2)
                            
    for r in res_roads:
        if r.shape[0] == 2:
            p1 = r[0]
            p2 = r[1]
            h1 = pair_hash(p1, p2)
            h2 = pair_hash(p2, p1)
            if not (h1 in hashes or h2 in hashes):
                final_res_roads.append(r)
                hashes.add(h1)
                hashes.add(h2)
        
    end_points = {}
    for r in res_roads:
        h = point_hash(r[0, 0], r[0, 1])
        if not (h in end_points.keys()):
            end_points[h] = 0
        end_points[h] = end_points[h] + 1
        h = point_hash(r[-1, 0], r[-1, 1])
        if not (h in end_points.keys()):
            end_points[h] = 0
        end_points[h] = end_points[h] + 1
    
    road_msk = np.zeros((1300, 1300), dtype=np.int32)
    road_msk = road_msk.copy()
    thickness = 1
    for j in range(len(final_res_roads)):
        l = final_res_roads[j]
        for i in range(len(l) - 1):
            cv2.line(road_msk, (int(l[i, 1]), int(l[i, 0])), (int(l[i+1, 1]), int(l[i+1, 0])), j+1, thickness)
            
    connect_dist = par2[4]

    min_prob = par2[5]
    angles_to_check = [0, radians(5), radians(-5), radians(10), radians(-10), radians(15), radians(-15)]
    if 'Paris' in img_id or 'Vegas' in img_id:
        angles_to_check += [radians(20), radians(-20), radians(25), radians(-25)]
    
    add_dist = par2[6]
    add_dist2 = par2[7]
    
    con_r = par2[8]

    for i in range(len(final_res_roads)):
        h = point_hash(final_res_roads[i][0, 0], final_res_roads[i][0, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][1]
            p2 = final_res_roads[i][0]            
            p3 = try_connect(p1, p2, 0, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)          
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
        h = point_hash(final_res_roads[i][-1, 0], final_res_roads[i][-1, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][-2]
            p2 = final_res_roads[i][-1]
            p3 = try_connect(p1, p2, 0, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
                        
    for i in range(len(final_res_roads)):
        h = point_hash(final_res_roads[i][0, 0], final_res_roads[i][0, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][1]
            p2 = final_res_roads[i][0]
            p3 = None
            for a in angles_to_check:
                p3 = try_connect(p1, p2, a, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
                if p3 is not None:
                    break
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)          
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
            else:
                p3 = get_next_point(p1, p2, add_dist)
                if not (p3[0] < 2 or p3[1] < 2 or p3[0] > 1297 or p3[1] > 1297):
                    p3 = get_next_point(p1, p2, add_dist2)
                if (p3[0] != p2[0] or p3[1] != p2[1]) and (road_msk[p3[0], p3[1]] == 0):
                    h1 = pair_hash(p2, p3)
                    h2 = pair_hash(p3, p2)
                    if not (h1 in hashes or h2 in hashes):
                        final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                        hashes.add(h1)
                        hashes.add(h2)
                        tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                        tmp_road_msk = tmp_road_msk.copy()
                        cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                        road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                        road_msk = road_msk.copy()
                        end_points[point_hash(p3[0], p3[1])] = 2
                        
        h = point_hash(final_res_roads[i][-1, 0], final_res_roads[i][-1, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][-2]
            p2 = final_res_roads[i][-1]
            p3 = None
            for a in angles_to_check:
                p3 = try_connect(p1, p2, a, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
                if p3 is not None:
                    break
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
            else:
                p3 = get_next_point(p1, p2, add_dist)
                if not (p3[0] < 2 or p3[1] < 2 or p3[0] > 1297 or p3[1] > 1297):
                    p3 = get_next_point(p1, p2, add_dist2)
                if (p3[0] != p2[0] or p3[1] != p2[1]) and (road_msk[p3[0], p3[1]] == 0):
                    h1 = pair_hash(p2, p3)
                    h2 = pair_hash(p3, p2)
                    if not (h1 in hashes or h2 in hashes):
                        final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                        hashes.add(h1)
                        hashes.add(h2)
                        tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                        tmp_road_msk = tmp_road_msk.copy()
                        cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                        road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                        road_msk = road_msk.copy()
                        end_points[point_hash(p3[0], p3[1])] = 2
            
    lines = [LineString(r[:, ::-1]) for r in final_res_roads]

    if len(lines) == 0:
        res_rows.append({'ImageId': img_id, 'WKT_Pix': 'LINESTRING EMPTY'})
    else:
        for l in lines:
            res_rows.append({'ImageId': img_id, 'WKT_Pix': dumps(l, rounding_precision=0)})   
    return res_rows
Example #30
from skimage import data
from skimage.data import binary_blobs
from skimage.morphology import medial_axis, skeletonize, skeletonize_3d


imageInputAddress = '.\\data\\width\\width_image.png'
data = cv2.imread(imageInputAddress, 0)
data = (data == 255)
distance22 = ndi.distance_transform_edt(data)
# data = binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)

# Compare with other skeletonization algorithms
skeleton = skeletonize(data)
skeleton3d = skeletonize_3d(data)

# Distance to the background for pixels of the skeleton
dist_on_skel = distance

fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()

ax[0].imshow(distance22, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('original')
ax[0].axis('off')

ax[1].imshow(dist_on_skel, cmap='magma', interpolation='nearest')
ax[1].contour(data, [0.5], colors='w')
ax[1].set_title('medial_axis')
ax[1].axis('off')
def test_skeletonize_single_point():
    im = np.zeros((5, 5), dtype=np.uint8)
    im[3, 3] = 1
    result = skeletonize_3d(im)
    assert_equal(result, im)
def skeleton(data1):
    """
    Returns the skeleton of the input binary image using
    the function skeletonize_3d (#initially skel.compute_thin_image).
    """
    return morphology.skeletonize_3d(data1)
    def peakmem_skeletonize_3d(self):
        morphology.skeletonize_3d(self.image)
Example #35
def centerline_extraction(mask):
    from skimage import morphology
    out_skeletonize = morphology.skeletonize_3d(mask)
    return out_skeletonize
Example #36
    def erode_label(cls,
                    label_id,
                    filter_size,
                    target_frac=None,
                    min_filter_size=1,
                    use_min_filter=False,
                    skel_eros_filt_size=0):
        """Convert a label to a marker as an eroded version of the label.
        
        By default, labels will be eroded with the given ``filter_size`` 
        as long as their final size is > 20% of the original volume. If 
        the eroded volume is below threshold, ``filter_size`` will be 
        progressively decreased until the filter cannot be reduced further.
        
        Skeletonization of the labels recovers some details by partially
        preserving the original labels' extent, including thin regions that
        would be eroded away, thus serving a similar function as that of
        adaptive morphological filtering. ``skel_eros_filt_size`` allows
        titrating the amount of the labels' extent to be preserved.
        
        If :attr:`wt_dists` is present, the label's distance will be used
        to weight the starting filter size.
        
        Args:
            label_id (int): ID of label to erode.
            filter_size (int): Size of structuring element to start erosion.
            target_frac (float): Target fraction of original label to erode. 
                Erosion will start with ``filter_size`` and use progressively
                smaller filters until remaining above this target. Defaults
                to None to use a fraction of 0.2. Titrates the relative
                amount of erosion allowed.
            min_filter_size (int): Minimum filter size, below which the
                original, uneroded label will be used instead. Defaults to 1.
                Use 0 to erode at size 1 even if below ``target_frac``.
                Titrates the absolute amount of erosion allowed.
            use_min_filter (bool): True to erode at ``min_filter_size`` if
                a smaller filter size would otherwise be required; defaults
                to False to revert to original, uneroded size if a filter
                smaller than ``min_filter_size`` would be needed.
            skel_eros_filt_size (int): Erosion filter size before
                skeletonization to balance how much of the labels' extent will
                be preserved during skeletonization. Increase to reduce the
                skeletonization. Defaults to 0, which will cause
                skeletonization to be skipped.
        
        Returns:
            :obj:`pd.DataFrame`, List[slice], :obj:`np.ndarray`: stats,
            including ``label_id`` for reference and 
            sizes of labels; list of slices denoting where to insert 
            the eroded label; and the eroded label itself.
        """
        if cls.wt_dists is not None:
            # weight the filter size by the fractional distance from median
            # of label distance and max dist
            wt = (np.median(cls.wt_dists[cls.labels_img == label_id]) /
                  np.amax(cls.wt_dists))
            filter_size = int(filter_size * wt)
            print("label {}: distance weight {}, adjusted filter size to {}".
                  format(label_id, wt, filter_size))
            if use_min_filter and filter_size < min_filter_size:
                filter_size = min_filter_size

        # get region as mask; assume that label exists and will yield a
        # bounding box since labels here are generally derived from the
        # labels image itself
        bbox = cv_nd.get_label_bbox(cls.labels_img, label_id)
        _, slices = cv_nd.get_bbox_region(bbox)
        region = cls.labels_img[tuple(slices)]
        label_mask_region = region == label_id
        region_size = np.sum(label_mask_region)
        region_size_filtered = region_size
        fn_selem = cv_nd.get_selem(cls.labels_img.ndim)

        # erode the labels, starting with the given filter size and decreasing
        # if the resulting label size falls below a given size ratio
        chosen_selem_size = np.nan
        filtered = label_mask_region
        size_ratio = 1
        for selem_size in range(filter_size, -1, -1):
            if selem_size < min_filter_size:
                if not use_min_filter:
                    print("label {}: could not erode without dropping below "
                          "minimum filter size of {}, reverting to original "
                          "region size of {}".format(label_id, min_filter_size,
                                                     region_size))
                    filtered = label_mask_region
                    region_size_filtered = region_size
                    chosen_selem_size = np.nan
                break
            # erode check size ratio
            filtered = morphology.binary_erosion(label_mask_region,
                                                 fn_selem(selem_size))
            region_size_filtered = np.sum(filtered)
            size_ratio = region_size_filtered / region_size
            thresh = 0.2 if target_frac is None else target_frac
            chosen_selem_size = selem_size
            if region_size_filtered < region_size and size_ratio > thresh:
                # stop eroding if underwent some erosion but stayed above
                # threshold size; skimage erosion treats border outside image
                # as True, so images may not undergo erosion and should
                # continue until lowest filter size is taken (eg NaN)
                break

        if not np.isnan(chosen_selem_size):
            print("label {}: changed num of pixels from {} to {} "
                  "(size ratio {}), initial filter size {}, chosen {}".format(
                      label_id, region_size, region_size_filtered, size_ratio,
                      filter_size, chosen_selem_size))

        if skel_eros_filt_size and np.sum(filtered) > 0:
            # skeletonize the labels to recover details from erosion;
            # need another labels erosion before skeletonization to avoid
            # preserving too much of the original labels' extent
            label_mask_region = morphology.binary_erosion(
                label_mask_region, fn_selem(skel_eros_filt_size))
            filtered = np.logical_or(
                filtered,
                morphology.skeletonize_3d(label_mask_region).astype(bool))

        stats_eros = (label_id, region_size, region_size_filtered,
                      chosen_selem_size)
        return stats_eros, slices, filtered
    def time_skeletonize_3d(self):
        morphology.skeletonize_3d(self.image)
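# `time_skeletonize_3d` and `peakmem_skeletonize_3d` above look like
# asv-style benchmark methods; a minimal harness they could sit in
# (the setup body is an assumption, not the project's actual benchmark):
class Skeletonize3DSuite:
    def setup(self):
        from skimage.data import binary_blobs
        self.image = binary_blobs(64, n_dim=3, seed=5)

    def time_skeletonize_3d(self):
        morphology.skeletonize_3d(self.image)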
def test_skeletonize_no_foreground():
    im = np.zeros((5, 5), dtype=np.uint8)
    result = skeletonize_3d(im)
    assert_equal(result, im)
Example #39
def Skeletonize3D(directory, crop=None, flip='y', dtype=None):
    """Skeletonize TrailMap results.
    
    Parameters
    ----------
    directory : string
        Path to directory with segmented data.
    crop : dict (optional, default None)
        Dictionary with ImageJ-format cropping coordinates ({width:, height:, x:, y:,})
    flip : string (optional, default 'y')
        Option to flip axis, can be any combination of 'xyz'.
    dtype : numpy dtype (optional, default None results in float32 images)
        Data type for output image. Set dtype=np.uint16 if you are going to combine with autofluo in Imaris.
    """
    #Load Data:
    sample = directory.split('/')[-3]
    print("Started " + time.ctime())
    ims = io.ImageCollection(os.path.join(directory, '*.tif'),
                             load_func=io.imread)
    data = ims.concatenate()
    #Optionally crop:
    if crop:
        rawshape = data.shape
        data = data[:, crop['y']:crop['y'] + crop['height'],
                    crop['x']:crop['x'] + crop['width']]
        print("Cropped data from " + str(rawshape) + " to " + str(data.shape) +
              " at " + time.ctime())
    cat = np.zeros(shape=(data.shape), dtype='float32')  #Create output array
    #Loop through thresholds 0.2 -> 0.9, extract signal, scale, and combine
    for i in range(2, 10, 1):
        print(str(i) + " started at " + time.ctime())
        i = i / 10
        im = (data > i).astype('float32')
        skel = morphology.skeletonize_3d(im).astype('float32') * i
        print(str(i) + " completed at " + time.ctime())
        cat = cat + skel
    #Optionally flip along the x, y, or z axis:
    if flip:
        if 'y' in flip:
            cat = np.flip(cat, axis=1)
        if 'x' in flip:
            cat = np.flip(cat, axis=2)
        if 'z' in flip:
            cat = np.flip(cat, axis=0)
    if dtype:
        cat = cat.astype(
            dtype
        )  #have not tested that this results in same pixel values as changing image type in ImageJ.
    #Save the result image stack:
    try:
        io.imsave(os.path.join(directory,
                               sample + '_ThresholdedSkeleton3D.tif'),
                  cat,
                  check_contrast=False)
    except PermissionError:
        print("You do not have write permissions for " + str(directory) +
              '\n' + "Saving to your home directory instead.")
        homedir = os.path.expanduser('~/')
        io.imsave(os.path.join(homedir, sample + '_ThresholdedSkeleton3D.tif'),
                  cat,
                  check_contrast=False)
    print("Finished " + sample + ' ' + time.ctime())
    return cat
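# Hedged usage sketch for Skeletonize3D() above; the directory and the crop
# coordinates are placeholders, not values from the original pipeline.
# skel = Skeletonize3D('/data/sample_01/seg/ch1',
#                      crop={'width': 500, 'height': 400, 'x': 100, 'y': 50},
#                      flip='y', dtype=np.uint16)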
def test_skeletonize_all_foreground():
    im = np.ones((3, 4), dtype=np.uint8)
    assert_equal(skeletonize_3d(im),
                 np.array([[0, 0, 0, 0],
                           [1, 1, 1, 1],
                           [0, 0, 0, 0]], dtype=np.uint8))
# IPython log file


import numpy as np
import pandas as pd
from skimage.external.tifffile import TiffFile
f = TiffFile('OP_1_Rendered_Paths.tif')
p = f[0]
skel0 = p.asarray()
spacing = [0.3033534 * 3, 0.3033534, 0.3033534]
from skimage import morphology
skel1 = morphology.skeletonize_3d(skel0)
np.allclose(skel0, skel1)
np.sum(skel0 != skel1)
from skimage import io
# io.imsave('OP_1_Rendered_Paths_thinned.tif', skel1)
skel1 = io.imread('OP_1_Rendered_Paths_thinned.tif')
from skan import csr
spacing = 10 * np.array(spacing)
df = csr.summarise(skel1.astype(bool), spacing=spacing)
df2 = pd.read_excel('OP_1-Branch-information.xlsx')
dfs = df.sort_values(by='branch-distance', ascending=False)
df2s = df2.sort_values(by='Branch length', ascending=False)
from scipy.spatial import distance_matrix
coords0 = df[['coord-0-0', 'coord-0-1', 'coord-0-2']].values
coords1 = df[['coord-1-0', 'coord-1-1', 'coord-1-2']].values
dm = distance_matrix(coords0, coords1)
all_points_skan = np.concatenate([coords0, coords1[np.where(np.min(dm, axis=0) > 1e-6)[0]]], axis=0)
coords0fj = df2[['V1 z', 'V1 y', 'V1 x']].values
coords1fj = df2[['V2 z', 'V2 y', 'V2 x']].values
dmfj = distance_matrix(coords0fj, coords1fj)
all_points_fiji = np.concatenate([coords0fj, coords1fj[np.where(np.min(dmfj, axis=0) > 1e-6)[0]]], axis=0)
dmx = distance_matrix(all_points_skan, all_points_fiji)
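# A short follow-up to the comparison above (a sketch, not from the original
# log): dmx holds pairwise distances between skan's and Fiji's branch
# endpoints, so nearest-neighbour distances summarise how well the two
# skeleton analyses agree. The tolerance here is an arbitrary choice.
nearest = np.min(dmx, axis=1)
print('endpoints matched within the coarsest voxel spacing:',
      np.sum(nearest < np.max(spacing)), '/', len(nearest))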
Example #42
def myAnalyzeSkeleton(out=None, maskPath=None, imagePath=None):
    """
	out: numpy array with 1-pixel skeleton
	maskPath : full path to _dvMask.tif file (can include appended _0.tif
	"""

    # load x/y/z voxel size (assumes the .tif was saved with Fiji);
    # we use this to scale length
    xVoxel, yVoxel, zVoxel = readVoxelSize(imagePath)

    # load the mask
    if out is not None:
        maskData = out
    else:
        #maskPath = os.path.splitext(path)[0] + '_dvMask_' + str(saveNumber) + '.tif'
        maskData = tifffile.imread(maskPath)

    # was used by shape_index
    #imageData = tifffile.imread(imagePath)

    print('=== myAnalyzeSkeleton() maskData.shape:', maskData.shape)

    # make a 1-pixel skeleton from volume mask (similar to Fiji Skeletonize)
    mySkeleton = morphology.skeletonize_3d(maskData)
    '''
	# shape_index() does not work for 3D images !!!
	scale = 1
	threshold_radius = 1 # from AICS
	smooth_radius =  0.01 # from AICS
	pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
	pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
	quality = feature.shape_index(imageData, sigma=pixel_smoothing_radius, mode='reflect')
	#skeleton = morphology.skeletonize(thresholded) * quality
	mySkeleton = morphology.skeletonize_3d(maskData) * quality
	'''

    # analyze the skeleton (similar to Fiji Analyze Skeleton)
    mySkanSkel = skan.Skeleton(mySkeleton)

    # look at the results
    # branch_data is a pandas DataFrame
    branch_data = skan.summarize(mySkanSkel)
    nBranches = branch_data.shape[0]
    '''
	print('    number of branches:', branch_data.shape[0])
	display(branch_data.head())
	'''

    #
    # convert everything to numpy arrays
    branchDistance = branch_data['branch-distance'].to_numpy()
    euclideanDistance = branch_data['euclidean-distance'].to_numpy()
    branchType = branch_data['branch-type'].to_numpy()
    #tortuosity = branchDistance / euclideanDistance # this gives divide by 0 warning
    tmpOut = np.full_like(branchDistance, fill_value=np.nan)
    tortuosity = np.divide(branchDistance,
                           euclideanDistance,
                           out=tmpOut,
                           where=euclideanDistance != 0)
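    # Note: with out=tmpOut and where=euclideanDistance != 0, np.divide leaves
    # the pre-filled NaN in place wherever euclideanDistance is 0 (e.g. closed
    # loops), avoiding the divide-by-zero warning of the plain division above.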
    """
	
	Sunday 20200405
	HERE I AM RUNNING CODE TWICE and APPENDING TO summary2.xlsx AFTER RUNNING samiMetaAnalysis.py
	
	https://jni.github.io/skan/_modules/skan/pipe.html#process_images
	in the Skan Pipe code, they multiply the binary skeleton as follows
	maybe I can implement this with scale=1 and threshold_radius taken from AICS Segmentation?
	
		scale = 1
		threshold_radius = 1 # from AICS
		smooth_radius =  0.01 # from AICS
		pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
		pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
		quality = skimage.feature.shape_index(image, sigma=pixel_smoothing_radius,
							  mode='reflect')
		skeleton = morphology.skeletonize(thresholded) * quality
	"""
    '''
	# 20200407, no longer needed as i am now saving 'branch-type', do this in meta analysis
	print('\n\n\t\tREMEMBER, I AM ONLY INCLUDING junction-to-junction !!!!!!!!!!!!!! \n\n')
	#
	# do again just for junction-to-junction
	# 'mean-pixel-value' here is 'mean shape index' in full tutorial/recipe
	# if I use ridges, I end up with almost no branches?
	#ridges = ((branch_data['mean-pixel-value'] < 0.625) & (branch_data['mean-pixel-value'] > 0.125))
	j2j = branch_data['branch-type'] == 2 # returns True/False pandas.core.series.Series
	#datar = branch_data.loc[ridges & j2j].copy()
	datar = branch_data.loc[j2j].copy()

	branchDistance = datar['branch-distance'].to_numpy()
	euclideanDistance = datar['euclidean-distance'].to_numpy()
	#tortuosity = branchDistance / euclideanDistance # this gives divide by 0 warning
	tmpOut = np.full_like(branchDistance, fill_value=np.nan)
	tortuosity = np.divide(branchDistance, euclideanDistance, out=tmpOut, where=euclideanDistance != 0)
	'''

    #
    # organize a return dictionary
    retDict = OrderedDict()

    retDict['data'] = OrderedDict()
    #retDict['data']['nBranches'] = nBranches
    retDict['data']['branchLength'] = branchDistance
    retDict['data']['euclideanDistance'] = euclideanDistance
    retDict['data']['branchType'] = branchType
    #retDict['data']['tortuosity'] = tortuosity

    # todo: search for 0 values in (branchDistance, euclideanDistance)

    # stats
    '''
	print('***** THIS IS NOT SCALED ***')
	print('    branchDistance mean:', np.mean(branchDistance), 'SD:', np.std(branchDistance), 'n:', branchDistance.size)
	#
	decimalPlaces = 2
	retDict['stats'] = OrderedDict()
	retDict['stats']['branchLength_mean'] = round(np.mean(branchDistance),decimalPlaces)
	retDict['stats']['branchLength_std'] = round(np.std(branchDistance),decimalPlaces)
	retDict['stats']['branchLength_n'] = branchDistance.shape[0]
	tmpCount = branchDistance[branchDistance<=2]
	retDict['stats']['branchLength_n_2'] = tmpCount.shape[0]
	#
	retDict['stats']['euclideanDistance_mean'] = round(np.mean(euclideanDistance),decimalPlaces)
	retDict['stats']['euclideanDistance_std'] = round(np.std(euclideanDistance),decimalPlaces)
	retDict['stats']['euclideanDistance_n'] = euclideanDistance.shape[0]
	#
	retDict['stats']['tortuosity_mean'] = round(np.nanmean(tortuosity),decimalPlaces)
	retDict['stats']['tortuosity_std'] = round(np.nanstd(tortuosity),decimalPlaces)
	retDict['stats']['tortuosity_n'] = tortuosity.shape[0]
	'''

    return retDict, mySkeleton  # returning mySkeleton so we can save it
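# Hedged usage sketch for myAnalyzeSkeleton() above; the file names are
# placeholders, not paths from the original project.
# retDict, skel = myAnalyzeSkeleton(maskPath='sample_dvMask_0.tif',
#                                   imagePath='sample.tif')
# print('mean branch length:', np.mean(retDict['data']['branchLength']))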
# ``skeletonize_3d`` iteratively sweeps over the image, removing pixels at
# each iteration until the image stops changing. Each iteration consists of
# two steps: first, a list of candidates for removal is assembled; then
# pixels from this list are rechecked sequentially, to better preserve
# connectivity of the image.
#
# Note that ``skeletonize_3d`` is designed to be used mostly on 3-D images.
# However, for illustrative purposes, we apply this algorithm on a 2-D image.

import matplotlib.pyplot as plt
from skimage.morphology import skeletonize, skeletonize_3d
from skimage.data import binary_blobs


data = binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1)

skeleton = skeletonize(data)
skeleton3d = skeletonize_3d(data)

fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True,
                         subplot_kw={'adjustable': 'box'})
ax = axes.ravel()

ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('original')
ax[0].axis('off')

ax[1].imshow(skeleton, cmap=plt.cm.gray, interpolation='nearest')
ax[1].set_title('skeletonize')
ax[1].axis('off')

ax[2].imshow(skeleton3d, cmap=plt.cm.gray, interpolation='nearest')
ax[2].set_title('skeletonize_3d')
ax[2].axis('off')

fig.tight_layout()
plt.show()
def test_skeletonize_1D():
    # a corner case of an image of shape (N, 1)
    im = np.ones((5, 1), dtype=np.uint8)
    res = skeletonize_3d(im)
    assert_equal(res, im)
Example #46
"""
Created on Thu Jun 14 13:41:14 2018

@author: haswani
"""

import numpy as np
from astropy.io import fits
from skimage.morphology import (skeletonize, skeletonize_3d, binary_opening,
                                binary_dilation, binary_erosion, closing,
                                dilation, opening, ball, remove_small_holes)

cube = fits.getdata('ngc3627_co21_12m+7m+tp_mask.fits')

selem = ball(3)

dskel = skeletonize_3d(cube)
ddilate = dilation(dskel, selem)
dclose = closing(ddilate)
dskel2 = skeletonize_3d(dclose)

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D



xoff, yoff, zoff = np.indices((3, 3, 3)) - 1  #offsets [-1, 0, 1] along each axis, used for rolling
skel = (dskel2 > 0).astype(np.int8)  #normalise the skeleton to 0/1 before counting
count = np.zeros_like(skel)  #zeros with the same dimensions as the input array
for x, y, z in zip(xoff.ravel(), yoff.ravel(), zoff.ravel()):
    if x == y == z == 0:
        continue  #skip the centre offset so only the 26 neighbours are counted
    count += np.roll(skel, (x, y, z), axis=(0, 1, 2))  #roll to count neighbouring skeleton voxels
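# One way to use the neighbour counts above (a sketch, not from the original
# script): on a 1-voxel-wide skeleton, voxels with exactly one skeleton
# neighbour are endpoints, and voxels with three or more are branch points.
endpoints = (skel == 1) & (count == 1)
branchpoints = (skel == 1) & (count >= 3)
print('endpoints:', np.count_nonzero(endpoints),
      'branch points:', np.count_nonzero(branchpoints))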
    