Example No. 1
def skeletoniseSkimg(maskFilePath):
    image = io.imread(maskFilePath)

    image = rgb2gray(image)

    binary = image > 0

    # perform skeletonization
    skeleton = skeletonize(binary)
    summary = summarize(Skeleton(skeleton, spacing=1))

    # starting coordinate in y, x
    startCoord = [
        summary.iloc[0]['image-coord-src-0'],
        summary.iloc[0]['image-coord-src-1']
    ]
    endCoord = [
        summary.iloc[0]['image-coord-dst-0'],
        summary.iloc[0]['image-coord-dst-1']
    ]

    # c0 contains information of all points on the skeleton
    # g0 is the adjacency list
    g0, c0, _ = skeleton_to_csgraph(skeleton, spacing=1)
    all_pts = c0[1:]

    # get points along skeleton in the correct sequence
    pts = traverseSkeleton(startCoord, endCoord, g0.toarray(), all_pts)

    return pts.astype(int)
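
# Note: this snippet assumes imports such as `from skimage import io`,
# `from skimage.color import rgb2gray`, `from skimage.morphology import skeletonize`
# and `from skan import Skeleton, summarize, skeleton_to_csgraph`, plus a
# project-specific traverseSkeleton helper that is not shown. Below is a minimal
# sketch of such a helper (an assumption, not part of skan), valid for a simple,
# unbranched skeleton and assuming row 0 of the csgraph coordinate array is a
# dummy node.
import numpy as np

def traverseSkeleton(startCoord, endCoord, adjacency, all_pts):
    # map the start/end coordinates to graph node indices (offset by 1 for the dummy node)
    start = int(np.argmin(np.linalg.norm(all_pts - np.asarray(startCoord), axis=1))) + 1
    end = int(np.argmin(np.linalg.norm(all_pts - np.asarray(endCoord), axis=1))) + 1

    # depth-first walk over the adjacency matrix until the end node is reached
    stack, visited = [(start, [start])], set()
    while stack:
        node, path = stack.pop()
        if node == end:
            return np.array([all_pts[i - 1] for i in path])
        visited.add(node)
        for nbr in np.nonzero(adjacency[node])[0]:
            if int(nbr) not in visited:
                stack.append((int(nbr), path + [int(nbr)]))
    return np.empty((0, 2))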
Example No. 2
def threshold_branch_length(skeleton_img, distance_threshold):
    skeleton = skan.Skeleton(skeleton_img)
    branch_data = skan.summarize(skeleton)

    tip_junction = branch_data['branch-type'] == 1

    b_remove = (branch_data['branch-distance'] <
                distance_threshold) & tip_junction
    i_remove = b_remove.to_numpy().nonzero()[0]  # np.argwhere(b_remove)[:, 0]

    return update_skeleton(skeleton_img, skeleton, i_remove)
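
# update_skeleton is not shown in this snippet; a minimal sketch of a compatible
# helper (an assumption, not part of skan) that redraws the skeleton image with
# the branches listed in i_remove left out:
import numpy as np

def update_skeleton(skeleton_img, skeleton, i_remove):
    keep = np.setdiff1d(np.arange(skeleton.n_paths), i_remove)
    new_img = np.zeros_like(skeleton_img, dtype=bool)
    for i in keep:
        coords = skeleton.path_coordinates(i).astype(int)
        new_img[tuple(coords.T)] = True  # redraw the kept branch pixel by pixel
    return new_img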
Example No. 3
 def skan(self, n):
     blur = cv2.blur(self.mask, (5, 5))
     binary = blur > filters.threshold_otsu(blur)
     skeleton = morphology.skeletonize(binary)
     ax = plt.subplot(2, 2, 2 * n - 1)
     draw.overlay_skeleton_2d(blur, skeleton, dilate=1, axes=ax)
     branch_data = summarize(Skeleton(skeleton))
     ax = plt.subplot(2, 2, 2 * n)
     draw.overlay_euclidean_skeleton_2d(blur,
                                        branch_data,
                                        skeleton_color_source='branch-type',
                                        axes=ax)
     df1 = branch_data.loc[branch_data['branch-type'] == 1]
     return df1
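
# Hedged usage sketch for the method above: `analyzer_a`/`analyzer_b` and their
# mask attribute are assumed instances of the (unshown) enclosing class, and `n`
# selects which pair of the 2x2 subplot grid is drawn into.
import matplotlib.pyplot as plt

plt.figure(figsize=(12, 8))
df_first = analyzer_a.skan(1)    # fills subplots 1 and 2
df_second = analyzer_b.skan(2)   # fills subplots 3 and 4
plt.show()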
Example No. 4
def test_find_main():
    skeleton = Skeleton(skeleton1)
    summary_df = summarize(skeleton, find_main_branch=True)

    non_main_edge_start = [2, 1]
    non_main_edge_finish = [3, 3]

    non_main_df = summary_df.loc[summary_df['main'] == False]
    assert non_main_df.shape[0] == 1
    coords = non_main_df[[
        'coord-src-0', 'coord-src-1', 'coord-dst-0', 'coord-dst-1'
    ]].to_numpy()
    assert (np.all(coords == non_main_edge_start + non_main_edge_finish)
            or np.all(coords == non_main_edge_finish + non_main_edge_start))
Example No. 5
def get_node_coord(skeleton_img):
    if np.all(skeleton_img == 0):
        return None, None

    skeleton = skan.Skeleton(img2bin(skeleton_img))
    branch_data = skan.summarize(skeleton)

    # get all node IDs
    junc_node_id, end_node_id = get_node_id(branch_data, skeleton)

    # swap cols
    end_node_coord = skeleton.coordinates[end_node_id][:, [1, 0]]
    junc_node_coord = skeleton.coordinates[junc_node_id][:, [1, 0]]

    return junc_node_coord, end_node_coord
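
# img2bin and get_node_id are project helpers not shown here; plausible sketches
# (assumptions), with get_node_id mirroring the inline logic of Example No. 7 below:
import numpy as np

def img2bin(img):
    # treat every non-zero pixel as part of the skeleton
    return img > 0

def get_node_id(branch_data, skeleton):
    # collect all node IDs, then split them into end nodes (degree 1) and junctions
    all_node_id = np.unique(np.append(branch_data['node-id-src'].values,
                                      branch_data['node-id-dst'].values))
    end_node_id = all_node_id[skeleton.degrees[all_node_id] == 1]
    junc_node_id = np.setdiff1d(all_node_id, end_node_id)
    return junc_node_id, end_node_id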
Example No. 6
def filter_branch_length(skeleton_img, branch_data=None, debug=False):
    skeleton = skan.Skeleton(skeleton_img)

    if branch_data is None:
        branch_data = skan.summarize(skeleton)

    junc_node_ids, start_node_ids = get_node_id(branch_data, skeleton)

    max_path = list()
    max_length = 0

    if debug:
        n_start_nodes = len(start_node_ids)
        n_junc_nodes = len(junc_node_ids)
        n_vertices = n_start_nodes + n_junc_nodes
        n_edges = len(branch_data.values)  # 2*(n - 1)
        n_loop = 1 + n_edges - n_vertices

        n_paths_min = n_start_nodes * (
            n_edges + 1
        )  # n_vertices * (1 + 0.5*n_vertices)# (n_vertices)*n_start_nodes
        n_paths_max = n_paths_min * 2**n_loop  # n_vertices * (1 + 2**n_junc_nodes)
        print('|E|: %d' % n_edges)
        print('|V|: %d' % n_vertices)
        print('loops: %d' % n_loop)
        print('Expected function calls between %d and %d' %
              (n_paths_min, n_paths_max))

    for node_id in start_node_ids:

        current_path = list()
        current_length = 0
        current_path, current_length = find_largest_branch(
            branch_data, skeleton, node_id, node_id, current_path,
            current_length)

        if current_length > max_length:
            max_path = current_path
            max_length = current_length

    return generate_skeleton_img(skeleton, max_path,
                                 skeleton_img.shape), max_path
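
# find_largest_branch and generate_skeleton_img come from the surrounding project
# and are not shown. A minimal sketch of the recursive search (an assumption
# consistent with the call above) that returns the longest branch-index path
# reachable from a start node:
def find_largest_branch(branch_data, skeleton, start_node_id, node_id,
                        current_path, current_length):
    best_path, best_length = current_path, current_length
    for idx, row in branch_data.iterrows():
        if idx in current_path:
            continue  # do not reuse a branch already on the path
        if row['node-id-src'] == node_id:
            next_node = row['node-id-dst']
        elif row['node-id-dst'] == node_id:
            next_node = row['node-id-src']
        else:
            continue  # branch does not touch the current node
        path, length = find_largest_branch(
            branch_data, skeleton, start_node_id, next_node,
            current_path + [idx], current_length + row['branch-distance'])
        if length > best_length:
            best_path, best_length = path, length
    return best_path, best_length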
Example No. 7
def get_node_coord(skeleton_img):
    """
    Finds all junction and end nodes on a provided skeleton
    """
    if np.all(skeleton_img == 0):
        return None, None

    skeleton = skan.Skeleton(img2bin(skeleton_img))
    branch_data = skan.summarize(skeleton)

    # get all node IDs
    src_node_id = np.unique(branch_data['node-id-src'].values)
    dst_node_id = np.unique(branch_data['node-id-dst'].values)
    all_node_id = np.unique(np.append(src_node_id, dst_node_id))

    # get end and junc node IDs
    end_node_index = skeleton.degrees[all_node_id] == 1
    end_node_id = all_node_id[end_node_index]
    junc_node_id = np.setdiff1d(all_node_id, end_node_id)

    # get coordinates
    end_node_coord = skeleton.coordinates[end_node_id][:, [1, 0]]
    junc_node_coord = skeleton.coordinates[junc_node_id][:, [1, 0]]
    return junc_node_coord, end_node_coord
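
# Hedged usage sketch on a tiny synthetic skeleton (the array below is purely
# illustrative, and img2bin is the assumed helper sketched after Example No. 5):
import numpy as np

demo = np.zeros((7, 7), dtype=np.uint8)
demo[3, 1:6] = 1   # horizontal bar
demo[4:6, 3] = 1   # short stem branching off the bar

junc_xy, end_xy = get_node_coord(demo)
print(junc_xy)     # one junction, returned as (x, y)
print(end_xy)      # three end points, returned as (x, y)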
Example No. 8
def skeleton2regions(skeleton_network, neighbor_search_algorithm):
    """Determines the regions bounded by a skeleton network.

    This function can be perceived as an intermediate step between a skeleton network and
    completely geometrical representation of the regions. That is, it keeps the key topological
    information required to create a fully geometrical description, but it also contains the
    coordinates of the region boundaries. The outputs of this function can be used to build
    different region representations.

    Parameters
    ----------
    skeleton_network : Skeleton
        Geometrical and topological information about the skeleton network of a label image.
    neighbor_search_algorithm : functools.partial
        Specifies which algorithm to use for constructing the branch-region connectivity.
        The function to be passed (along with its arguments) is :func:`search_neighbor`.
        For further details, see the Notes below.

    Returns
    -------
    region_branches : dict
        For each region it contains the branch indices that bound that region.
    branch_coordinates : list
        Coordinates of the points on each branch.
    branch_regions : dict
        For each branch it contains the neighboring regions.
        This auxiliary data is not essential as it can be restored from :code:`region_branches`.
        However, it is computed as temporary data needed for :code:`region_branches`.

    See Also
    --------
    build_skeleton
    search_neighbor
    overlay_regions

    Notes
    -----
    Although the algorithms were designed to require minimal user intervention, some parameters
    must be fine-tuned to identify the regions well, and visualization plays an important role
    in that tuning. Full automation is either not possible or would come at a huge computational
    cost. The shortcoming of the algorithm in this function is the following. The recognition of
    which branches form a region is based on the premise that a node of a branch belongs to a
    region if its `n`-pixel neighbourhood contains a pixel from that region. Ideally, `n=1`
    would suffice, since the single-pixel-wide skeleton lies at most 1 pixel away from the
    regions it separates. In practice, however, nodes of the skeleton can be farther than
    1 pixel from a region, so `n` has to be a parameter of the model. Increasing `n` helps in
    identifying the regions connected to a node of a branch; on the other hand, if `n` is too
    large, regions that are not actually neighbors of a branch will be included. Currently, we
    recommend trying different values of `n`, plotting the reconstructed regions over the label
    image with the :func:`overlay_regions` function, and inspecting how good the result is. As a
    heuristic, start with `n=2`.

    """
    if not isinstance(skeleton_network, Skeleton):
        raise Exception('Skeleton object is expected.')
    S = skeleton_network
    skeleton_data = summarize(S)
    junction_to_junction = skeleton_data['branch-type'] == 2
    isolated_cycle = skeleton_data['branch-type'] == 3
    mask = junction_to_junction | isolated_cycle
    image_size = np.shape(S.source_image)
    image_index_grid = [(0, image_size[0] - 1), (0, image_size[1] - 1)]

    # Find which two regions are incident to a branch
    branch_coordinates = [
        S.path_coordinates(i) for i in range(S.n_paths) if mask[i]
    ]
    branch_regions = []
    for nodes in branch_coordinates:
        c = Counter()
        internal_nodes = range(1, np.size(nodes, 0) - 1)
        for node in internal_nodes:
            # Snap node to the nearest image coordinate
            node_coord = np.round(nodes[node, :]).astype(np.uint32)
            # Look-around for the neighboring pixels (be careful on the image boundaries)
            neighbors = neighbor_search_algorithm(node_coord,
                                                  bounds=image_index_grid)
            neighbors = S.source_image[neighbors]
            c.update(neighbors)
        neighboring_regions = [pair[0] for pair in c.most_common(2)]
        branch_regions.append(neighboring_regions)
    branch_regions = {key: val for key, val in enumerate(branch_regions)}

    # For each region, find the branches that bound it
    region_branches = {}
    for branch, regions in branch_regions.items():
        for region in regions:
            if region not in region_branches:
                region_branches[region] = [branch]
            else:
                region_branches[region].append(branch)

    return region_branches, branch_coordinates, branch_regions
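
# The neighbor_search_algorithm argument is a functools.partial wrapping
# search_neighbor, which is not shown here. A plausible sketch (an assumption,
# mirroring the inline box-neighbourhood logic of Example No. 12) that returns an
# index expression into the source image:
import numpy as np
from functools import partial

def search_neighbor(node_coord, bounds, look_around=2):
    # clamp a (2*look_around + 1)-pixel box around the node to the image bounds
    (r_min, r_max), (c_min, c_max) = bounds
    r, c = int(node_coord[0]), int(node_coord[1])
    return np.s_[max(r - look_around, r_min):min(r + look_around + 1, r_max + 1),
                 max(c - look_around, c_min):min(c + look_around + 1, c_max + 1)]

# e.g. skeleton2regions(skel, partial(search_neighbor, look_around=2))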
Example No. 9
def summarize_img(skeleton_img):
    # summarize skeleton
    skeleton = skan.Skeleton(img2bin(skeleton_img))
    branch_data = skan.summarize(skeleton)
    return skeleton, branch_data
Example No. 10
def makeSkel(path):
    """
	path: full path to _ch1.tif or _ch2.tif
	"""
    print('=== makeSkel() path:', path)

    # load raw
    print('  loading raw:', path)
    stackData, stackHeader = bimpy.util.bTiffFile.imread(path)
    print('    stackData:', stackData.shape)

    # load mask
    maskPath, tmpExt = os.path.splitext(path)
    maskPath += '_mask.tif'
    print('  loading mask:', maskPath)
    maskData, maskHeader = bimpy.util.bTiffFile.imread(maskPath)
    print('    maskData:', maskData.shape)

    # erode mask before making 1-pixel skeleton
    iterations = 1
    print('  binary_erosion with iterations:', iterations)
    maskData = bimpy.util.morphology.binary_erosion(maskData,
                                                    iterations=iterations)

    baseFileName = getFileNameNoChannels(path)
    uFirstSlice = None
    uLastSlice = None
    try:
        print(
            '  looking in bVascularTracingAics.stackDatabase for baseFileName:',
            baseFileName)
        trimDict = bimpy.bVascularTracingAics.stackDatabase[baseFileName]
        uFirstSlice = trimDict['uFirstSlice']
        uLastSlice = trimDict['uLastSlice']
    except KeyError:
        print(
            '  warning: did not find stack baseFileName:', baseFileName,
            'in bVascularTracingAics.stackDatabase ---->>>> NO PRUNING/BLANKING'
        )

    doFirstLast = False
    if doFirstLast and uFirstSlice is not None and uLastSlice is not None:
        print('  makeSkel() pruning/blanking slices:', uFirstSlice, uLastSlice)
        maskData[0:uFirstSlice - 1, :, :] = 0
        maskData[uLastSlice:-1, :, :] = 0

    #
    print(
        '  - generating 1-pixel skeleton from mask using skimage.morphology.skeletonize_3d ...'
    )
    myTimer = bimpy.util.bTimer('skeletonizeTimer')
    skeletonData = skimage.morphology.skeletonize_3d(maskData)
    print('    skeletonData:', skeletonData.shape, skeletonData.dtype)
    print('  ', myTimer.elapsed())

    # save 1-pixel skel stack
    skelPath, tmpExt = os.path.splitext(path)
    skelPath += '_skel.tif'
    print('  saving 1-pixel skel skelPath:', skelPath)
    bimpy.util.bTiffFile.imsave(skelPath, skeletonData)

    #
    print('  - generating skeleton graph from mask using skan.Skeleton ...')
    myTimer = bimpy.util.bTimer('skan Skeleton')
    #skanSkel = skan.Skeleton(skeletonData, source_image=stackData.astype('float'))
    skanSkel = skan.Skeleton(skeletonData, source_image=stackData)
    print('  ', myTimer.elapsed())

    # not needed but just to remember
    branch_data = skan.summarize(skanSkel)  # branch_data is a pandas dataframe
    print('    branch_data.shape:', branch_data.shape)
    print(branch_data.head())
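
    # A possible follow-up on the dataframe above (not in the original): keep only
    # junction-to-junction branches (branch-type 2) and report their mean length.
    j2j = branch_data[branch_data['branch-type'] == 2]
    print('    junction-to-junction branches:', len(j2j),
          'mean length:', j2j['branch-distance'].mean())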
Example No. 11
def branch_classification(thres):
    """
    Predict the extent of branching
    
    Parameters
    ----------
        thres: array
            thresholded image to be analysed
    
    Returns
    -------
        skel: array
            skeletonised image
        is_main:
            help
        BLF: int/float
            branch length fraction
    """

    skeleton = skeletonize(thres)
    skel = Skeleton(skeleton, source_image=thres)
    summary = summarize(skel)

    is_main = np.zeros(summary.shape[0])
    us = summary['node-id-src']
    vs = summary['node-id-dst']
    ws = summary['branch-distance']

    edge2idx = {(u, v): i for i, (u, v) in enumerate(zip(us, vs))}

    edge2idx.update({(v, u): i for i, (u, v) in enumerate(zip(us, vs))})

    g = nx.Graph()

    g.add_weighted_edges_from(zip(us, vs, ws))

    for conn in nx.connected_components(g):
        curr_val = 0
        curr_pair = None
        h = g.subgraph(conn)
        p = dict(nx.all_pairs_dijkstra_path_length(h))
        for src in p:
            for dst in p[src]:
                val = p[src][dst]
                if (val is not None and np.isfinite(val) and val > curr_val):
                    curr_val = val
                    curr_pair = (src, dst)
        for i, j in tz.sliding_window(
                2,
                nx.shortest_path(h,
                                 source=curr_pair[0],
                                 target=curr_pair[1],
                                 weight='weight')):
            is_main[edge2idx[(i, j)]] = 1

    summary['main'] = is_main

    #Branch Length Fraction

    total_length = np.sum(skeleton)  # total length approximated by the skeleton pixel count
    trunk_length = 0
    for i in range(summary.shape[0]):
        if summary['main'][i]:
            trunk_length += summary['branch-distance'][i]

    branch_length = total_length - trunk_length
    BLF = branch_length / total_length

    return skel, is_main, BLF
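
# The snippet above assumes imports such as numpy as np, networkx as nx,
# toolz as tz, skimage.morphology.skeletonize and skan's Skeleton/summarize.
# Hedged usage sketch on a small synthetic blob (values purely illustrative):
import numpy as np

thres = np.zeros((12, 12), dtype=bool)
thres[6, 1:11] = True   # long trunk
thres[2:6, 5] = True    # short side branch

skel, is_main, BLF = branch_classification(thres)
print('branch length fraction:', BLF)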
Example No. 12
def skeleton2regions(skeleton_network, look_around=2, algorithm=1):
    """Determines the regions bounded by a skeleton network.

    This function can be perceived as an intermediate step between a skeleton network and
    completely geometrical representation of the regions. That is, it keeps the key topological
    information required to create a fully geometrical description, but it also contains the
    coordinates of the region boundaries. The outputs of this function can be used to build
    different region representations.

    Parameters
    ----------
    skeleton_network : Skeleton
        Geometrical and topological information about the skeleton network of a label image.
    look_around : int, optional
        A junction is considered part of a region if it is at most `look_around` pixels away from it.
        The default is 2. For further details, see the Notes below.
    algorithm : int, optional
        Specifies which algorithm to use for constructing the branch-region connectivity.
        For further details, see the Notes below.

    Returns
    -------
    region_branches : dict
        For each region it contains the branch indices that bound that region.
    branch_coordinates : list
        Coordinates of the points on each branch.
    branch_regions : dict
        For each branch it contains the neighboring regions.
        This auxiliary data is not essential as it can be restored from `region_branches`.
        However, it is computed as temporary data needed for `region_branches`.

    See Also
    --------
    build_skeleton
    overlay_regions

    Notes
    -----
    Although the algorithms were designed to require minimal user intervention, some parameters
    must be fine-tuned to identify the regions well, and visualization plays an important role
    in that tuning. Full automation is either not possible or would come at a huge computational
    cost. The shortcomings of the algorithms in this function are the following:

    - It is assumed that only branches that connect two junctions form the boundary of a region.
      This rule ensures that end points are not taken into account. However, it also rules out
      identifying a region contained in another region, as the embedded region would be bounded
      by an isolated cycle.
    - The recognition of which branches form a region is based on the premise that a junction
      belongs to a region if its `n`-pixel neighbourhood contains a pixel from that region.
      Ideally, `n=1` would suffice, since the single-pixel-wide skeleton lies at most 1 pixel
      away from the regions it separates. In practice, however, junctions of the skeleton can be
      farther than 1 pixel from a region, so `n` has to be a parameter of the model.
      Increasing `n` helps include junctions (and hence the branches connecting to them) in the
      regions they actually belong to. On the other hand, if `n` is too large, junctions that do
      not belong to the region are also included. Currently, we recommend trying different
      values of `n`, plotting the reconstructed regions over the label image using the
      `overlay_regions` function, and inspecting how good the result is. As a heuristic, start
      with `n=2`.
    - There are configurations in which two junctions are part of a region but are connected by
      more than one branch (typically two). The question is which branch to choose as part of
      the region boundary; the answer is the one that lies entirely inside the region. Testing
      this directly is difficult or costly, so we rely on a heuristic: the shortest branch is
      chosen.

    """
    if not isinstance(skeleton_network, Skeleton):
        raise Exception('Skeleton object is expected.')
    # Extract branch-junction connectivities and the coordinates of the junctions
    S = skeleton_network
    skeleton_data = summarize(S)
    # only junction-to-junction connections create regions
    mask = skeleton_data['branch-type'] == 2
    endpoints_src = skeleton_data['node-id-src'][mask].to_numpy()
    endpoints_dst = skeleton_data['node-id-dst'][mask].to_numpy()
    image_size = np.shape(S.source_image)
    branch_junctions = np.transpose(np.vstack((endpoints_src, endpoints_dst)))
    junctions = np.unique([endpoints_src, endpoints_dst])
    junction_coordinates = S.coordinates[junctions, :]

    # Find which regions are incident to a junction
    junction_regions = {key: None for key in junctions}
    region_junctions = {}
    # TODO: Simplify the for-loop by using e.g. enumerate or
    #  https://discuss.codecademy.com/t/loop-two-variables-simultaneously-in-python-3/261808/2
    for i in range(len(junctions)):
        # Snap junction to the nearest image coordinate
        junction_coord = np.round(junction_coordinates[i, :]).astype(np.uint32)
        # Look-around for the neighboring pixels (be careful on the image boundaries)
        neighbor_idx = np.s_[
            max(junction_coord[0] -
                look_around, 0):min(junction_coord[0] + look_around +
                                    1, image_size[0]),
            max(junction_coord[1] -
                look_around, 0):min(junction_coord[1] + look_around +
                                    1, image_size[1])]
        neighbors = S.source_image[neighbor_idx]
        neighboring_regions = np.unique(neighbors)
        # Save junction-region and the region-junction connectivities
        # TODO: perhaps no need for the region-junction connectivities
        junction_regions[junctions[i]] = neighboring_regions
        for region in neighboring_regions:
            if region not in region_junctions:
                region_junctions[region] = [junctions[i]]
            else:
                region_junctions[region].append(junctions[i])

    # Determine which regions neighbor a branch
    branch_regions = {}
    for i, branch in enumerate(branch_junctions):
        neighboring_regions = np.intersect1d(junction_regions[branch[0]],
                                             junction_regions[branch[1]])
        branch_regions[i] = neighboring_regions

    branch_coordinates = [
        S.path_coordinates(i) for i in range(S.n_paths) if mask[i]
    ]
    # New implementation
    branch_regions2 = []
    msk = np.array([[True, True, True, True, True],
                    [True, False, False, False, True],
                    [True, False, False, False, True],
                    [True, False, False, False, True],
                    [True, True, True, True, True]])
    for i, branch in enumerate(branch_coordinates):
        c = Counter()
        for node in range(1, np.size(branch, 0) - 1):
            node_coord = np.round(branch[node, :]).astype(np.uint32)
            neighbor_idx = np.s_[
                max(node_coord[0] -
                    look_around, 0):min(node_coord[0] + look_around +
                                        1, image_size[0]),
                max(node_coord[1] -
                    look_around, 0):min(node_coord[1] + look_around +
                                        1, image_size[1])]
            neighbors = S.source_image[neighbor_idx]
            if np.shape(neighbors) == (5, 5):
                neighbors = neighbors[msk]
            c.update(neighbors.flatten())
        neighboring_regions = [pair[0] for pair in c.most_common(2)]
        branch_regions2.append(neighboring_regions)
    branch_regions = {key: val for key, val in enumerate(branch_regions2)}

    # For each region, find the branches that bound it
    region_branches = {}
    for branch, regions in branch_regions.items():
        for region in regions:
            if region not in region_branches:
                region_branches[region] = [branch]
            else:
                region_branches[region].append(branch)

    # More than one branch can connect two junctions. In that case, leave only one.
    # branch_lengths = S.path_lengths()
    # for i in region_branches.keys():
    #     branches = region_branches[i]
    #     junctions = region_junctions[i]
    #     if len(branches) > len(junctions):
    #         # Branches that connect the same two junctions (i.e. multiple edges in the graph)
    #         # branch_junctions[region_branches[i]]
    #         _, id = non_unique(branch_junctions[branches], 0)
    #         # Only the shortest branch bounds this region
    #         edges_to_remove = []
    #         for multiple_edges in id:
    #             global_multiple_edges = index_list(branches, multiple_edges)
    #             lengths = branch_lengths[global_multiple_edges]
    #             idx_min_length = np.argmin(lengths)
    #             other_edges = np.setdiff1d(range(len(multiple_edges)), idx_min_length)
    #             edges_to_remove.append(multiple_edges[other_edges])
    #             # Remove the current region among the regions that connect to the removed edges
    #             for branch in index_list(global_multiple_edges, other_edges):
    #                 regions = branch_regions[branch]
    #                 kept_regions = regions[i != regions]
    #                 branch_regions[branch] = kept_regions
    #         correct_branches = np.delete(branches, edges_to_remove).tolist()
    #         region_branches[i] = correct_branches

    # Return outputs
    return region_branches, branch_coordinates, branch_regions, branch_junctions
Example No. 13
        if len(path) > 2:
            for idx2 in path[1:-2]:
                edges[idx2] = idx
        #print(idx, 'path:', path)
        #print('path', path, skanSkel.path(path)) # Return the pixel indices of path number index.

# works fine
if 1:
    '''
    branch_data['branch-type'] is assigned as follows:
        0: tip-to-tip           kind[(deg_src == 1) & (deg_dst == 1)] = 0
        1: tip-to-junction      kind[(deg_src == 1) | (deg_dst == 1)] = 1
        2: junction-to-junction
        3: cycle                kind[endpoints_src == endpoints_dst] = 3
    '''
    branch_data = skan.summarize(skanSkel)
    print('branch_data.shape:', branch_data.shape)
    print(branch_data.head())
    # works fine
    #branch_data.to_excel('C:/Users/cudmorelab/Box/Sites/summary.xlsx')

#
# plot the results
# this does not work because we are using 3D, would be nice !!!
if 0:
    print('plotting with skan and matplotlib')
    import matplotlib.pyplot as plt
    oneImage = skeleton0[59, :, :]
    pixel_graph0, coordinates0, degrees0 = skan.skeleton_to_csgraph(
        oneImage)  # just one image
    fig, axes = plt.subplots(1, 2)
Example No. 14
def myAnalyzeSkeleton(out=None, maskPath=None, imagePath=None):
    """
	out: numpy array with 1-pixel skeleton
	maskPath : full path to _dvMask.tif file (can include appended _0.tif)
	"""

    # load x/y/z voxel size (assumes the .tif was saved with Fiji)
    # we use this to scale lengths
    xVoxel, yVoxel, zVoxel = readVoxelSize(imagePath)

    # load the mask
    if out is not None:
        maskData = out
    else:
        #maskPath = os.path.splitext(path)[0] + '_dvMask_' + str(saveNumber) + '.tif'
        maskData = tifffile.imread(maskPath)

    # was used by shape_index
    #imageData = tifffile.imread(imagePath)

    print('=== myAnalyzeSkeleton() maskData.shape:', maskData.shape)

    # make a 1-pixel skeleton from volume mask (similar to Fiji Skeletonize)
    mySkeleton = morphology.skeletonize_3d(maskData)
    '''
	# shape_index() does not work for 3D images !!!
	scale = 1
	threshold_radius = 1 # from AICS
	smooth_radius =  0.01 # from AICS
	pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
	pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
	quality = feature.shape_index(imageData, sigma=pixel_smoothing_radius, mode='reflect')
	#skeleton = morphology.skeletonize(thresholded) * quality
	mySkeleton = morphology.skeletonize_3d(maskData) * quality
	'''

    # analyze the skeleton (similar to Fiji Analyze Skeleton)
    mySkanSkel = skan.Skeleton(mySkeleton)

    # look at the results
    branch_data = skan.summarize(
        mySkanSkel)  # branch_data is a pandas dataframe
    nBranches = branch_data.shape[0]
    '''
	print('    number of branches:', branch_data.shape[0])
	display(branch_data.head())
	'''

    #
    # convert everything to numpy arrays
    branchDistance = branch_data['branch-distance'].to_numpy()
    euclideanDistance = branch_data['euclidean-distance'].to_numpy()
    branchType = branch_data['branch-type'].to_numpy()
    #tortuosity = branchDistance / euclideanDistance # this gives divide by 0 warning
    tmpOut = np.full_like(branchDistance, fill_value=np.nan)
    tortuosity = np.divide(branchDistance,
                           euclideanDistance,
                           out=tmpOut,
                           where=euclideanDistance != 0)
    """
	
	Sunday 20200405
	HERE I AM RUNNING CODE TWICE and APPENDING TO summary2.xlsx AFTER RUNNING samiMetaAnalysis.py
	
	https://jni.github.io/skan/_modules/skan/pipe.html#process_images
	in the Skan Pipe code, they multiply the binary skeleton as follows
	maybe I can implement this with scale=1 and threshold_radius taken from AICS Segmentation?
	
		scale = 1
		threshold_radius = 1 # from AICS
		smooth_radius =  0.01 # from AICS
		pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
		pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
		quality = skimage.feature.shape_index(image, sigma=pixel_smoothing_radius,
							  mode='reflect')
		skeleton = morphology.skeletonize(thresholded) * quality
	"""
    '''
	# 20200407, no longer needed as i am now saving 'branch-type', do this in meta analysis
	print('\n\n\t\tREMEMBER, I AM ONLY INCLUDING junction-to-junction !!!!!!!!!!!!!! \n\n')
	#
	# do again just for junction-to-junction
	# 'mean-pixel-value' here is 'mean shape index' in full tutorial/recipe
	# if I use ridges, I end up with almost no branche?
	#ridges = ((branch_data['mean-pixel-value'] < 0.625) & (branch_data['mean-pixel-value'] > 0.125))
	j2j = branch_data['branch-type'] == 2 # returns True/False pandas.core.series.Series
	#datar = branch_data.loc[ridges & j2j].copy()
	datar = branch_data.loc[j2j].copy()

	branchDistance = datar['branch-distance'].to_numpy()
	euclideanDistance = datar['euclidean-distance'].to_numpy()
	#tortuosity = branchDistance / euclideanDistance # this gives divide by 0 warning
	tmpOut = np.full_like(branchDistance, fill_value=np.nan)
	tortuosity = np.divide(branchDistance, euclideanDistance, out=tmpOut, where=euclideanDistance != 0)
	'''

    #
    # organize a return dictionary
    retDict = OrderedDict()

    retDict['data'] = OrderedDict()
    #retDict['data']['nBranches'] = nBranches
    retDict['data']['branchLength'] = branchDistance
    retDict['data']['euclideanDistance'] = euclideanDistance
    retDict['data']['branchType'] = branchType
    #retDict['data']['tortuosity'] = tortuosity

    # todo: search for 0 values in (branchDistance, euclideanDistance)

    # stats
    '''
	print('***** THIS IS NOT SCALED ***')
	print('    branchDistance mean:', np.mean(branchDistance), 'SD:', np.std(branchDistance), 'n:', branchDistance.size)
	#
	decimalPlaces = 2
	retDict['stats'] = OrderedDict()
	retDict['stats']['branchLength_mean'] = round(np.mean(branchDistance),decimalPlaces)
	retDict['stats']['branchLength_std'] = round(np.std(branchDistance),decimalPlaces)
	retDict['stats']['branchLength_n'] = branchDistance.shape[0]
	tmpCount = branchDistance[branchDistance<=2]
	retDict['stats']['branchLength_n_2'] = tmpCount.shape[0]
	#
	retDict['stats']['euclideanDistance_mean'] = round(np.mean(euclideanDistance),decimalPlaces)
	retDict['stats']['euclideanDistance_std'] = round(np.std(euclideanDistance),decimalPlaces)
	retDict['stats']['euclideanDistance_n'] = euclideanDistance.shape[0]
	#
	retDict['stats']['tortuosity_mean'] = round(np.nanmean(tortuosity),decimalPlaces)
	retDict['stats']['tortuosity_std'] = round(np.nanstd(tortuosity),decimalPlaces)
	retDict['stats']['tortuosity_n'] = tortuosity.shape[0]
	'''

    return retDict, mySkeleton  # returning mySkeleton so we can save it
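
# readVoxelSize is a project helper that is not shown; a rough sketch (an
# assumption) of how the voxel size could be read from a Fiji-saved .tif with
# tifffile:
import tifffile

def readVoxelSize(imagePath):
    # read the x/y voxel size from the TIFF resolution tags and the z spacing
    # from the ImageJ metadata written by Fiji
    with tifffile.TiffFile(imagePath) as tif:
        page = tif.pages[0]
        x_num, x_den = page.tags['XResolution'].value
        y_num, y_den = page.tags['YResolution'].value
        xVoxel = x_den / x_num  # pixels-per-unit -> unit-per-pixel
        yVoxel = y_den / y_num
        ij = tif.imagej_metadata or {}
        zVoxel = ij.get('spacing', 1.0)
    return xVoxel, yVoxel, zVoxel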
Example No. 15
area_vasc_region = Y_hat_binary_vasc.sum(-1).sum(-1).sum(-1)

Xskeleton = np.zeros_like(Y_hat_binary_thresholding_sato_vasc)
for i in np.arange(len(X_vasc)):
    Xskeleton[i, :] = skeletonize(Y_hat_binary_thresholding_sato_vasc[i, :])

#calculate skeleton statistics for all images
branch_number = np.zeros(len(X))
branch_j2e_total = np.zeros(len(X))
branch_j2j_total = np.zeros(len(X))

for i in range(len(Y_hat_binary_skin)):  #iterate over images
    print(i)
    skeleton = Xskeleton[i, :, :, 0]
    try:
        branch_data = skan.summarize(skan.Skeleton(skeleton))
        branch_number[i] = len(branch_data['branch-distance'].values)
        branch_j2e_total[i] = np.sum(branch_data['branch-type'].values == 1)
        branch_j2j_total[i] = np.sum(branch_data['branch-type'].values == 2)
    except Exception:
        print('problem with mask')

Xskeleton_vasc = Xskeleton

nbranch_vasc = branch_number
nbranch_j2e_vasc = branch_j2e_total
nbrach_j2j_vasc = branch_j2j_total

#calculate depth
depth_vasc = np.zeros(len(Y_hat_binary_thresholding_sato_vasc))
Example No. 16
#h, w = imgb.shape[:2]
#imgb = cv2.resize(imgb,(h,h), interpolation=cv2.INTER_CUBIC)
pbef = Process(imgb)
pbef.lowp = np.array([150, 60, 100])
pbef.highp = np.array([170, 255, 255])
pbef.loww = np.array([90, 15, 150])
pbef.highw = np.array([115, 255, 255])
maskb = pbef.mask(kernel)
resb = pbef.res(maskb)
maskb = cv2.blur(maskb, (5, 5))

binaryb = maskb > filters.threshold_otsu(maskb)
skeletonb = morphology.skeletonize(binaryb)
fig, ax = plt.subplots()
draw.overlay_skeleton_2d(maskb, skeletonb, dilate=1, axes=ax)

#graphb = csgraph_from_masked(binaryb)
#plt.imshow(graphb)
gb, cb, db = skeleton_to_csgraph(skeletonb)
draw.overlay_skeleton_networkx(gb, cb, image=maskb)
branch_datab = summarize(Skeleton(skeletonb))
dfb = branch_datab.loc[branch_datab['branch-type'] == 1]

#dfb.to_csv(r'./before.csv')
draw.overlay_euclidean_skeleton_2d(maskb,
                                   branch_datab,
                                   skeleton_color_source='branch-type')

plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
Example No. 17
    save_path = mkpath + '/'
    print("results_folder: " + save_path)

    source_image = cv2.cvtColor(cv2.imread(imgList[0]), cv2.COLOR_BGR2RGB)

    (image_mask, filename, abs_path) = load_image(imgList[0])

    image_skeleton = skeleton_bw(image_mask)

    result_file = (save_path + base_name + '_skeleton.' + ext)
    print(result_file)
    cv2.imwrite(result_file, img_as_ubyte(image_skeleton))

    fig = plt.figure()

    branch_data = summarize(Skeleton(image_skeleton))

    print(branch_data.head())

    fig = plt.figure()

    branch_data.hist(column='branch-distance', by='branch-type', bins=100)

    result_file = (save_path + base_name + '_hist.' + ext)

    plt.savefig(result_file,
                transparent=True,
                bbox_inches='tight',
                pad_inches=0)

    fig = plt.figure()
Example No. 18
def myAnalyzeSkeleton(out=None,
                      maskPath=None,
                      imagePath=None,
                      saveBase=None,
                      verbose=False):
    """
	out: numpy array with 1-pixel skeleton
	maskPath : full path to _dvMask.tif file (can include appended _0.tif)
	
	returns:
	    dict of results, 3d skeleton
	"""

    # load x/y/z voxel size (assumes the .tif was saved with Fiji)
    # we use this to scale lengths
    xVoxel, yVoxel, zVoxel = readVoxelSize(imagePath)

    # load the mask
    if out is not None:
        maskData = out
    else:
        #maskPath = os.path.splitext(path)[0] + '_dvMask_' + str(saveNumber) + '.tif'
        #maskData = tifffile.imread(maskPath)
        maskData, maskHeader = bimpy.util.bTiffFile.imread(maskPath)

    # was used by shape_index
    #imageData = tifffile.imread(imagePath)

    if verbose:
        print('    === myAnalyzeSkeleton() maskData.shape:', maskData.shape)

    ##
    ##
    # make a 1-pixel skeleton from volume mask (similar to Fiji Skeletonize)
    mySkeleton = morphology.skeletonize_3d(maskData)
    ##
    ##

    ##
    ##
    # analyze the skeleton (similar to Fiji Analyze Skeleton)
    ## BE SURE TO INCLUDE VOXEL SIZE HERE !!!!!! 20200503
    mySpacing_ = (zVoxel, xVoxel, yVoxel)
    mySkanSkel = skan.Skeleton(mySkeleton, spacing=mySpacing_)
    ##
    ##

    # look at the results
    branch_data = skan.summarize(
        mySkanSkel)  # branch_data is a pandas dataframe
    nBranches = branch_data.shape[0]

    # working on eroded/ring density 20200501
    # save entire skan analysis as csv
    # 20200713, was this
    '''
	tmpFolder, tmpFileName = os.path.split(imagePath)
	tmpFileNameNoExtension, tmpExtension = tmpFileName.split('.')
	saveSkelPath = os.path.join(tmpFolder, tmpFileNameNoExtension + '_skel.csv')
	if verbose: print('saving skan results to saveSkelPath:', saveSkelPath)
	'''

    saveSkelPath = saveBase + '_skel.csv'
    print('    myAnalyzeSkeleton() saving saveSkelPath:', saveSkelPath)

    branch_data.to_csv(saveSkelPath)

    #
    # convert everything to numpy arrays
    branchType = branch_data['branch-type'].to_numpy()
    branchDistance = branch_data['branch-distance'].to_numpy()
    euclideanDistance = branch_data['euclidean-distance'].to_numpy()
    # don't do tortuosity here, we need to scale to um/pixel in x/y/z

    #
    # scale
    # 20200503 TRYING TO DO THIS WHEN CALLING skan.Skeleton(mySkeleton, spacing=mySpacing_) !!!!!!!!!!!!!!!!!!!!!!!
    '''
	branchDistance = np.multiply(branchDistance, xVoxel)
	euclideanDistance = np.multiply(euclideanDistance, xVoxel)
	'''
    # this will print 'divide by zero encountered in true_divide' and value will become inf
    tortuosity = np.divide(branchDistance,
                           euclideanDistance)  # might fail on divide by 0

    #
    # organize a return dictionary
    retDict = OrderedDict()

    retDict['data'] = OrderedDict()
    retDict['data']['branchType'] = branchType
    retDict['data']['branchLength'] = branchDistance
    retDict['data']['euclideanDistance'] = euclideanDistance
    retDict['data']['tortuosity'] = tortuosity

    # todo: search for 0 values in (branchDistance, euclideanDistance)

    # 20200503 working on samiPostAnalysis density
    # we need all the src/dst point so we can quickly determine if they are in mask (full, eroded, ring)
    # 'image-coord-src-0', 'image-coord-src-1', 'image-coord-src-2', 'image-coord-dst-0', 'image-coord-dst-1', 'image-coord-dst-2'
    image_coord_src_0 = branch_data['image-coord-src-0'].to_numpy()
    image_coord_src_1 = branch_data['image-coord-src-1'].to_numpy()
    image_coord_src_2 = branch_data['image-coord-src-2'].to_numpy()
    image_coord_dst_0 = branch_data['image-coord-dst-0'].to_numpy()
    image_coord_dst_1 = branch_data['image-coord-dst-1'].to_numpy()
    image_coord_dst_2 = branch_data['image-coord-dst-2'].to_numpy()
    retDict['data']['image_coord_src_0'] = image_coord_src_0
    retDict['data']['image_coord_src_1'] = image_coord_src_1
    retDict['data']['image_coord_src_2'] = image_coord_src_2
    retDict['data']['image_coord_dst_0'] = image_coord_dst_0
    retDict['data']['image_coord_dst_1'] = image_coord_dst_1
    retDict['data']['image_coord_dst_2'] = image_coord_dst_2

    return retDict, mySkeleton  # returning mySkeleton so we can save it
Example No. 19
def to_graph(skeletony, img_):
    print("Creating graph", skeletony.dtype)

    w, h = skeletony.shape

    new_img = np.empty((w, h, 3), dtype=np.uint8)
    new_img[:, :, 0] = img_.astype(np.uint8) * 255
    new_img[:, :, 1] = img_.astype(np.uint8) * 255
    new_img[:, :, 2] = img_.astype(np.uint8) * 0

    previous_one_branches = 9999999

    for i in range(7):

        skeleton_obj = Skeleton(skeletony,
                                source_image=new_img,
                                keep_images=True,
                                unique_junctions=True)

        # https://github.com/jni/skan/issues/92
        branch_data = summarize(skeleton_obj)

        thres = 100

        bt = branch_data['branch-type'].value_counts()

        if 1 in bt.keys():
            num_ones = bt[1]
            if num_ones < previous_one_branches:
                previous_one_branches = bt[1]
            elif num_ones == previous_one_branches:
                print("Cleaned all 1 branches")
                break
        else:
            print("No 1 branches")

        nodes = {}
        outls = {}

        for ii in range(branch_data.shape[0]):
            branch_obj = branch_data.loc[ii]
            node_src = branch_obj.loc['node-id-src']
            node_dst = branch_obj.loc['node-id-dst']
            if node_src == node_dst:
                check_increment_dict(outls, branch_obj, node_dst)
            else:
                check_increment_dict(nodes, branch_obj, node_src)
                check_increment_dict(nodes, branch_obj, node_dst)

            if (branch_data.loc[ii, 'branch-distance'] < thres
                    and branch_data.loc[ii, 'branch-type'] == 1):

                integer_coords = tuple(
                    skeleton_obj.path_coordinates(ii)[1:-1].T.astype(int))
                skeletony[integer_coords] = 0

                # Filter pixels with only 1 neighbor
                # integer_coords_all = tuple(skeleton_obj.path_coordinates(ii).T.astype(int))
                # degrees = skeleton_obj.degrees_image[integer_coords_all]

                # for px in range(len(integer_coords_all[0])):
                #     if degrees[px] == 1:
                #         px_tuple = (integer_coords_all[0][px], integer_coords_all[1][px])
                #         skeletony[px_tuple] = 0

                # Filter pixels in branches with 2 pixels
                # pt_idx = skeleton_obj.path(ii)
                # if len(pt_idx) == 2:
                #     for pt in range(len(pt_idx)):
                #         a = sum(x.count(pt_idx[pt]) for x in p_list)
                #         if a == 1:
                #             px_tuple = (integer_coords_all[0][pt], integer_coords_all[1][pt])
                #             skeletony[px_tuple] = 0

        # zas_img = np.zeros((w, h), dtype=np.uint8)
        # # print("Node dict", nodes)
        # single_keys = []
        # # lengs = [single_keys.append(key) if len(nodes[key]) == 3 else '' for key in nodes.keys()]
        # lengs = [single_keys.append(key) for key in outls.keys()]
        # for kk in single_keys:
        #     for nn in nodes[kk]:
        #         if nn['branch-type'] == 3:
        #             print("Branch ", nn['branch-distance'], " ", nn['branch-type'])
        #             print(nn)
        #             integer_coords = tuple(skeleton_obj.path_coordinates(nn.name)[1:-1].T.astype(int))
        #             zas_img[integer_coords] = 255
        #
        # plt.figure()
        # plt.title("Branch type 2")
        # plt.imshow(zas_img)
        # plt.show()

        skeletony = morphology.remove_small_objects(skeletony,
                                                    min_size=2,
                                                    connectivity=2)

        print("New skeletonize")
        skeletony = morphology.binary_dilation(skeletony)
        skeletony = morphology.skeletonize(skeletony)
        print("New skeletonize done")

        # plt.figure()
        # plt.title("Remove branches")
        # plt.imshow(skeletony)
        # plt.show()

    # print("Branch stats", bs)
    # branch_data = summarize(Skeleton(skeletony, unique_junctions=True))
    # print(branch_data.loc[0])
    # draw.overlay_euclidean_skeleton_2d(img_, branch_data)

    return skeletony
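
# check_increment_dict is not shown in this snippet; a minimal sketch of a
# compatible helper (an assumption) that groups branch rows by node id:
def check_increment_dict(d, branch_obj, node_id):
    d.setdefault(node_id, []).append(branch_obj)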
Example No. 20
import numpy as np
from skimage.morphology import skeletonize_3d

from skan import skeleton_to_csgraph
from skan import Skeleton, summarize

ipath = "your_ct_img_path"  #this is the path to your microCT image files
imgdir = "your_ct_img_folder"
outpath = "your_file_outpath"  #this is the path to where your output files go
"""
Processing
"""
Mint = np.load(outpath + imgdir + "Mint_r" + ".npy")
Mint_uint8 = Mint.astype('uint8')
skel3 = skeletonize_3d(Mint_uint8)
"""
Note:
#Mint is the 3d binary output of the internal void shape
#skel stands for skeleton
#needs a 0 or 1 binary array
"""

#section using skan package, see reference in manuscript for more information
pixel_graph, coordinates, degrees = skeleton_to_csgraph(skel3)
branch_data = summarize(Skeleton(skel3))
branch_data.head()

tot_blength = np.sum(branch_data["branch-distance"])

#output
np.save(outpath + imgdir + "3Dslen", tot_blength)
np.save(outpath + imgdir + "skel3_", skel3)

print("script complete")