def main():

    device = "cuda"

    np.random.seed(0)
    torch.manual_seed(0)

    spatial_shape = (10, 10)

    skan_skeletons_batch = []
    skeleton_image = np.zeros(spatial_shape, dtype=bool)
    skeleton_image[2, :] = True
    skeleton_image[:, 2] = True
    skeleton_image[7, :] = True
    skeleton_image[:, 7] = True
    skan_skeleton = skan.Skeleton(skeleton_image, keep_images=False)
    skan_skeletons_batch.append(skan_skeleton)
    # plt.imshow(skeleton_image)
    plot_skeleton(skan_skeleton)
    plt.show()

    skeleton_image = np.zeros(spatial_shape, dtype=bool)
    skeleton_image[5, :] = True
    skeleton_image[:, 5] = True
    skan_skeleton = skan.Skeleton(skeleton_image, keep_images=False)
    skan_skeletons_batch.append(skan_skeleton)
    # plt.imshow(skeleton_image)
    plot_skeleton(skan_skeleton)
    plt.show()

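    # skan exposes each skeleton's paths in CSR form (paths.indices / paths.indptr
    # index into coordinates), which is what the Paths wrapper here appears to expect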
    skeletons_batch = [
        Skeleton(skan_skeleton.coordinates,
                 Paths(skan_skeleton.paths.indices, skan_skeleton.paths.indptr))
        for skan_skeleton in skan_skeletons_batch
    ]

    print("# --- skeletons_to_tensorskeleton() --- #")
    tensorskeleton = skeletons_to_tensorskeleton(skeletons_batch, device=device)
    print("# --- --- #")
    # print("batch:")
    # print(tensorskeleton.batch)
    # print("pos:")
    # print(tensorskeleton.pos.shape)
    # print(tensorskeleton.pos)
    print("path_index:")
    print(tensorskeleton.path_index.shape)
    print(tensorskeleton.path_index)
    print("path_delim:")
    print(tensorskeleton.path_delim.shape)
    print(tensorskeleton.path_delim)
    print("batch_delim:")
    print(tensorskeleton.batch_delim.shape)
    print(tensorskeleton.batch_delim)

    print("# --- tensorskeleton_to_skeletons() --- #")
    skeletons_batch = tensorskeleton_to_skeletons(tensorskeleton)

    # Plot
    for skeleton in skeletons_batch:
        plot_skeleton(skeleton)
        plt.show()

    print("# --- --- #")
Example #2
def threshold_branch_length(skeleton_img, distance_threshold):
    skeleton = skan.Skeleton(skeleton_img)
    branch_data = skan.summarize(skeleton)

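    # in skan's branch summary, branch-type 1 marks junction-to-endpoint ("tip") branches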
    tip_junction = branch_data['branch-type'] == 1

    b_remove = (branch_data['branch-distance'] <
                distance_threshold) & tip_junction
    i_remove = b_remove.to_numpy().nonzero()[0]  # np.argwhere(b_remove)[:, 0]

    return update_skeleton(skeleton_img, skeleton, i_remove)
Example #3
def get_node_coord(skeleton_img):
    if np.all(skeleton_img == 0):
        return None, None

    skeleton = skan.Skeleton(img2bin(skeleton_img))
    branch_data = skan.summarize(skeleton)

    # get all node IDs
    junc_node_id, end_node_id = get_node_id(branch_data, skeleton)

    # swap cols
    end_node_coord = skeleton.coordinates[end_node_id][:, [1, 0]]
    junc_node_coord = skeleton.coordinates[junc_node_id][:, [1, 0]]

    return junc_node_coord, end_node_coord
Example #4
def filter_branch_length(skeleton_img, branch_data=None, debug=False):
    skeleton = skan.Skeleton(skeleton_img)

    if branch_data is None:
        branch_data = skan.summarize(skeleton)

    junc_node_ids, start_node_ids = get_node_id(branch_data, skeleton)

    max_path = list()
    max_length = 0

    if debug:
        n_start_nodes = len(start_node_ids)
        n_junc_nodes = len(junc_node_ids)
        n_vertices = n_start_nodes + n_junc_nodes
        n_edges = len(branch_data.values)  # 2*(n - 1)
        n_loop = 1 + n_edges - n_vertices
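        # for a connected graph this is the cycle rank: |E| - |V| + 1 independent loops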

        n_paths_min = n_start_nodes * (
            n_edges + 1
        )  # n_vertices * (1 + 0.5*n_vertices)# (n_vertices)*n_start_nodes
        n_paths_max = n_paths_min * 2**n_loop  # n_vertices * (1 + 2**n_junc_nodes)
        print('|E|: %d' % n_edges)
        print('|V|: %d' % n_vertices)
        print('loops: %d' % n_loop)
        print('Expected function calls between %d and %d' % (n_paths_min,
                                                             n_paths_max))

    for node_id in start_node_ids:

        current_path = list()
        current_length = 0
        current_path, current_length = find_largest_branch(
            branch_data, skeleton, node_id, node_id, current_path,
            current_length)

        if current_length > max_length:
            max_path = current_path
            max_length = current_length

    return generate_skeleton_img(skeleton, max_path,
                                 skeleton_img.shape), max_path
Example #5
def get_node_coord(skeleton_img):
    """
    Finds all junction and end nodes on a provided skeleton
    """
    if np.all(skeleton_img == 0):
        return None, None

    skeleton = skan.Skeleton(img2bin(skeleton_img))
    branch_data = skan.summarize(skeleton)

    # get all node IDs
    src_node_id = np.unique(branch_data['node-id-src'].values)
    dst_node_id = np.unique(branch_data['node-id-dst'].values)
    all_node_id = np.unique(np.append(src_node_id, dst_node_id))

    # get end and junc node IDs
    end_node_index = skeleton.degrees[all_node_id] == 1
    end_node_id = all_node_id[end_node_index]
    junc_node_id = np.setdiff1d(all_node_id, end_node_id)

    # get coordinates
    end_node_coord = skeleton.coordinates[end_node_id][:, [1, 0]]
    junc_node_coord = skeleton.coordinates[junc_node_id][:, [1, 0]]
    return junc_node_coord, end_node_coord
Example #6
def myAnalyzeSkeleton(out=None, maskPath=None, imagePath=None):
    """
	out: numpy array with 1-pixel skeleton
	maskPath : full path to _dvMask.tif file (can include appended _0.tif
	"""

    # load x/y/z voxel size (assumes the .tif was saved with Fiji);
    # we use this to scale length
    xVoxel, yVoxel, zVoxel = readVoxelSize(imagePath)

    # load the mask
    if out is not None:
        maskData = out
    else:
        #maskPath = os.path.splitext(path)[0] + '_dvMask_' + str(saveNumber) + '.tif'
        maskData = tifffile.imread(maskPath)

    # was used by shape_index
    #imageData = tifffile.imread(imagePath)

    print('=== myAnalyzeSkeleton() maskData.shape:', maskData.shape)

    # make a 1-pixel skeleton from volume mask (similar to Fiji Skeletonize)
    mySkeleton = morphology.skeletonize_3d(maskData)
    '''
	# shape_index() does not work for 3D images !!!
	scale = 1
	threshold_radius = 1 # from AICS
	smooth_radius =  0.01 # from AICS
	pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
	pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
	quality = feature.shape_index(imageData, sigma=pixel_smoothing_radius, mode='reflect')
	#skeleton = morphology.skeletonize(thresholded) * quality
	mySkeleton = morphology.skeletonize_3d(maskData) * quality
	'''

    # analyze the skeleton (similar to Fiji Analyze Skeleton)
    mySkanSkel = skan.Skeleton(mySkeleton)

    # look at the results
    branch_data = skan.summarize(
        mySkanSkel)  # branch_data is a pandas dataframe
    nBranches = branch_data.shape[0]
    '''
	print('    number of branches:', branch_data.shape[0])
	display(branch_data.head())
	'''

    #
    # convert everything to numpy arrays
    branchDistance = branch_data['branch-distance'].to_numpy()
    euclideanDistance = branch_data['euclidean-distance'].to_numpy()
    branchType = branch_data['branch-type'].to_numpy()
    #tortuosity = branchDistance / euclideanDistance # this gives divide by 0 warning
    tmpOut = np.full_like(branchDistance, fill_value=np.nan)
    tortuosity = np.divide(branchDistance,
                           euclideanDistance,
                           out=tmpOut,
                           where=euclideanDistance != 0)
    """
	
	Sunday 20200405
	HERE I AM RUNNING CODE TWICE and APPENDING TO summary2.xlsx AFTER RUNNING samiMetaAnalysis.py
	
	https://jni.github.io/skan/_modules/skan/pipe.html#process_images
	in the Skan Pipe code, they multiply the binary skeleton as follows
	maybe I can implement this with scale=1 and threshold_radius taken from AICS Segmentation?
	
		scale = 1
		threshold_radius = 1 # from AICS
		smooth_radius =  0.01 # from AICS
		pixel_threshold_radius = int(np.ceil(threshold_radius / scale))
		pixel_smoothing_radius = smooth_radius * pixel_threshold_radius
		quality = skimage.feature.shape_index(image, sigma=pixel_smoothing_radius,
							  mode='reflect')
		skeleton = morphology.skeletonize(thresholded) * quality
	"""
    '''
	# 20200407, no longer needed as i am now saving 'branch-type', do this in meta analysis
	print('\n\n\t\tREMEMBER, I AM ONLY INCLUDING junction-to-junction !!!!!!!!!!!!!! \n\n')
	#
	# do again just for junction-to-junction
	# 'mean-pixel-value' here is 'mean shape index' in full tutorial/recipe
	# if I use ridges, I end up with almost no branche?
	#ridges = ((branch_data['mean-pixel-value'] < 0.625) & (branch_data['mean-pixel-value'] > 0.125))
	j2j = branch_data['branch-type'] == 2 # returns True/False pandas.core.series.Series
	#datar = branch_data.loc[ridges & j2j].copy()
	datar = branch_data.loc[j2j].copy()

	branchDistance = datar['branch-distance'].to_numpy()
	euclideanDistance = datar['euclidean-distance'].to_numpy()
	#tortuosity = branchDistance / euclideanDistance # this gives divide by 0 warning
	tmpOut = np.full_like(branchDistance, fill_value=np.nan)
	tortuosity = np.divide(branchDistance, euclideanDistance, out=tmpOut, where=euclideanDistance != 0)
	'''

    #
    # organize a return dictionary
    retDict = OrderedDict()

    retDict['data'] = OrderedDict()
    #retDict['data']['nBranches'] = nBranches
    retDict['data']['branchLength'] = branchDistance
    retDict['data']['euclideanDistance'] = euclideanDistance
    retDict['data']['branchType'] = branchType
    #retDict['data']['tortuosity'] = tortuosity

    # todo: search for 0 values in (branchDistance, euclideanDistance)

    # stats
    '''
	print('***** THIS IS NOT SCALED ***')
	print('    branchDistance mean:', np.mean(branchDistance), 'SD:', np.std(branchDistance), 'n:', branchDistance.size)
	#
	decimalPlaces = 2
	retDict['stats'] = OrderedDict()
	retDict['stats']['branchLength_mean'] = round(np.mean(branchDistance),decimalPlaces)
	retDict['stats']['branchLength_std'] = round(np.std(branchDistance),decimalPlaces)
	retDict['stats']['branchLength_n'] = branchDistance.shape[0]
	tmpCount = branchDistance[branchDistance<=2]
	retDict['stats']['branchLength_n_2'] = tmpCount.shape[0]
	#
	retDict['stats']['euclideanDistance_mean'] = round(np.mean(euclideanDistance),decimalPlaces)
	retDict['stats']['euclideanDistance_std'] = round(np.std(euclideanDistance),decimalPlaces)
	retDict['stats']['euclideanDistance_n'] = euclideanDistance.shape[0]
	#
	retDict['stats']['tortuosity_mean'] = round(np.nanmean(tortuosity),decimalPlaces)
	retDict['stats']['tortuosity_std'] = round(np.nanstd(tortuosity),decimalPlaces)
	retDict['stats']['tortuosity_n'] = tortuosity.shape[0]
	'''

    return retDict, mySkeleton  # returning mySkeleton so we can save it
Example #7
def summarize_img(skeleton_img):
    # summarize skeleton
    skeleton = skan.Skeleton(img2bin(skeleton_img))
    branch_data = skan.summarize(skeleton)
    return skeleton, branch_data
Example #8
area_vasc_region = Y_hat_binary_vasc.sum(-1).sum(-1).sum(-1)

Xskeleton = np.zeros_like(Y_hat_binary_thresholding_sato_vasc)
for i in np.arange(len(X_vasc)):
    Xskeleton[i, :] = skeletonize(Y_hat_binary_thresholding_sato_vasc[i, :])

#calculate skeleton statistics for all images
branch_number = np.zeros(len(X))
branch_j2e_total = np.zeros(len(X))
branch_j2j_total = np.zeros(len(X))

for i in range(len(Y_hat_binary_skin)):  #iterate over images
    print(i)
    skeleton = Xskeleton[i, :, :, 0]
    try:
        branch_data = skan.summarize(skan.Skeleton(skeleton))
        branch_number[i] = len(branch_data['branch-distance'].values)
        branch_j2e_total[i] = np.sum(branch_data['branch-type'].values == 1)
        branch_j2j_total[i] = np.sum(branch_data['branch-type'].values == 2)
    except Exception:
        print('problem with mask')

Xskeleton_vasc = Xskeleton

nbranch_vasc = branch_number
nbranch_j2e_vasc = branch_j2e_total
nbrach_j2j_vasc = branch_j2j_total

#calculate depth
depth_vasc = np.zeros(len(Y_hat_binary_thresholding_sato_vasc))
Example #9
def myAnalyzeSkeleton(out=None,
                      maskPath=None,
                      imagePath=None,
                      saveBase=None,
                      verbose=False):
    """
	out: numpy array with 1-pixel skeleton
	maskPath : full path to _dvMask.tif file (can include appended _0.tif
	
	returns:
	    dict of results, 3d skeleton
	"""

    # load x/y/z voxel size (assumes the .tif was saved with Fiji);
    # we use this to scale length
    xVoxel, yVoxel, zVoxel = readVoxelSize(imagePath)

    # load the mask
    if out is not None:
        maskData = out
    else:
        #maskPath = os.path.splitext(path)[0] + '_dvMask_' + str(saveNumber) + '.tif'
        #maskData = tifffile.imread(maskPath)
        maskData, maskHeader = bimpy.util.bTiffFile.imread(maskPath)

    # was used by shape_index
    #imageData = tifffile.imread(imagePath)

    if verbose:
        print('    === myAnalyzeSkeleton() maskData.shape:', maskData.shape)

    ##
    ##
    # make a 1-pixel skeleton from volume mask (similar to Fiji Skeletonize)
    mySkeleton = morphology.skeletonize_3d(maskData)
    ##
    ##

    ##
    ##
    # analyze the skeleton (similar to Fiji Analyze Skeleton)
    ## BE SURE TO INCLUDE VOXEL SIZE HERE !!!!!! 20200503
    mySpacing_ = (zVoxel, xVoxel, yVoxel)
    mySkanSkel = skan.Skeleton(mySkeleton, spacing=mySpacing_)
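    # with spacing supplied, skan reports branch-distance / euclidean-distance in
    # physical units (voxel size) rather than pixels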
    ##
    ##

    # look at the results
    branch_data = skan.summarize(
        mySkanSkel)  # branch_data is a pandas dataframe
    nBranches = branch_data.shape[0]

    # working on eroded/ring density 20200501
    # save entire skan analysis as csv
    # 20200713, was this
    '''
	tmpFolder, tmpFileName = os.path.split(imagePath)
	tmpFileNameNoExtension, tmpExtension = tmpFileName.split('.')
	saveSkelPath = os.path.join(tmpFolder, tmpFileNameNoExtension + '_skel.csv')
	if verbose: print('saving skan results to saveSkelPath:', saveSkelPath)
	'''

    saveSkelPath = saveBase + '_skel.csv'
    print('    myAnalyzeSkeleton() saving saveSkelPath:', saveSkelPath)

    branch_data.to_csv(saveSkelPath)

    #
    # convert everything to numpy arrays
    branchType = branch_data['branch-type'].to_numpy()
    branchDistance = branch_data['branch-distance'].to_numpy()
    euclideanDistance = branch_data['euclidean-distance'].to_numpy()
    # don't do tortuosity here, we need to scale to um/pixel in x/y/z

    #
    # scale
    # 20200503 TRYING TO DO THIS WHEN CALLING skan.Skeleton(mySkeleton, spacing=mySpacing_) !!!!!!!!!!!!!!!!!!!!!!!
    '''
	branchDistance = np.multiply(branchDistance, xVoxel)
	euclideanDistance = np.multiply(euclideanDistance, xVoxel)
	'''
    # this will print 'divide by zero encountered in true_divide' and value will become inf
    tortuosity = np.divide(branchDistance,
                           euclideanDistance)  # might fail on divide by 0
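    # a guarded alternative (as used in the earlier myAnalyzeSkeleton example) would be:
    #   tortuosity = np.divide(branchDistance, euclideanDistance,
    #                          out=np.full_like(branchDistance, np.nan),
    #                          where=euclideanDistance != 0)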

    #
    # organize a return dictionary
    retDict = OrderedDict()

    retDict['data'] = OrderedDict()
    retDict['data']['branchType'] = branchType
    retDict['data']['branchLength'] = branchDistance
    retDict['data']['euclideanDistance'] = euclideanDistance
    retDict['data']['tortuosity'] = tortuosity

    # todo: search for 0 values in (branchDistance, euclideanDistance)

    # 20200503 working on samiPostAnalysis density
    # we need all the src/dst point so we can quickly determine if they are in mask (full, eroded, ring)
    # 'image-coord-src-0', 'image-coord-src-1', 'image-coord-src-2', 'image-coord-dst-0', 'image-coord-dst-1', 'image-coord-dst-2'
    image_coord_src_0 = branch_data['image-coord-src-0'].to_numpy()
    image_coord_src_1 = branch_data['image-coord-src-1'].to_numpy()
    image_coord_src_2 = branch_data['image-coord-src-2'].to_numpy()
    image_coord_dst_0 = branch_data['image-coord-dst-0'].to_numpy()
    image_coord_dst_1 = branch_data['image-coord-dst-1'].to_numpy()
    image_coord_dst_2 = branch_data['image-coord-dst-2'].to_numpy()
    retDict['data']['image_coord_src_0'] = image_coord_src_0
    retDict['data']['image_coord_src_1'] = image_coord_src_1
    retDict['data']['image_coord_src_2'] = image_coord_src_2
    retDict['data']['image_coord_dst_0'] = image_coord_dst_0
    retDict['data']['image_coord_dst_1'] = image_coord_dst_1
    retDict['data']['image_coord_dst_2'] = image_coord_dst_2
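    # hypothetical sketch of the mask-membership check described above (not part of
    # the original code): a src point lies inside the full mask when its voxel is
    # non-zero, e.g.
    #   in_full_mask = maskData[image_coord_src_0.astype(int),
    #                           image_coord_src_1.astype(int),
    #                           image_coord_src_2.astype(int)] > 0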

    return retDict, mySkeleton  # returning mySkeleton so we can save it
Example #10
File: skelUtils.py Project: cudmore/bImPy
def makeSkel(path):
    """
	path: full path to _ch1.tif or _ch2.tif
	"""
    print('=== makeSkel() path:', path)

    # load raw
    print('  loading raw:', path)
    stackData, stackHeader = bimpy.util.bTiffFile.imread(path)
    print('    stackData:', stackData.shape)

    # load mask
    maskPath, tmpExt = os.path.splitext(path)
    maskPath += '_mask.tif'
    print('  loading mask:', maskPath)
    maskData, maskHeader = bimpy.util.bTiffFile.imread(maskPath)
    print('    maskData:', maskData.shape)

    # erode mask before making 1-pixel skeleton
    iterations = 1
    print('  binary_erosion with iterations:', iterations)
    maskData = bimpy.util.morphology.binary_erosion(maskData,
                                                    iterations=iterations)

    baseFileName = getFileNameNoChannels(path)
    uFirstSlice = None
    uLastSlice = None
    try:
        print(
            '  looking in bVascularTracingAics.stackDatabase for baseFileName:',
            baseFileName)
        trimDict = bimpy.bVascularTracingAics.stackDatabase[baseFileName]
        uFirstSlice = trimDict['uFirstSlice']
        uLastSlice = trimDict['uLastSlice']
    except KeyError:
        print(
            '  warning: did not find stack baseFileName:', baseFileName,
            'in bVascularTracingAics.stackDatabase ---->>>> NO PRUNING/BLANKING'
        )

    doFirstLast = False
    if doFirstLast and uFirstSlice is not None and uLastSlice is not None:
        print('  makeSkel() pruning/blanking slices:', uFirstSlice, uLastSlice)
        maskData[0:uFirstSlice - 1, :, :] = 0
        maskData[uLastSlice:-1, :, :] = 0

    #
    print(
        '  - generating 1-pixel skeleton from mask using skimage.morphology.skeletonize_3d ...'
    )
    myTimer = bimpy.util.bTimer('skeletonizeTimer')
    skeletonData = skimage.morphology.skeletonize_3d(maskData)
    print('    skeletonData:', skeletonData.shape, skeletonData.dtype)
    print('  ', myTimer.elapsed())

    # save 1-pixel skel stack
    skelPath, tmpExt = os.path.splitext(path)
    skelPath += '_skel.tif'
    print('  saving 1-pixel skel skelPath:', skelPath)
    bimpy.util.bTiffFile.imsave(skelPath, skeletonData)

    #
    print('  - generating skeleton graph from mask using skan.Skeleton ...')
    myTimer = bimpy.util.bTimer('skan Skeleton')
    #skanSkel = skan.Skeleton(skeletonData, source_image=stackData.astype('float'))
    skanSkel = skan.Skeleton(skeletonData, source_image=stackData)
    print('  ', myTimer.elapsed())

    # not needed but just to remember
    branch_data = skan.summarize(skanSkel)  # branch_data is a pandas dataframe
    print('    branch_data.shape:', branch_data.shape)
    print(branch_data.head())
Example #11
File: mySkel.py Project: cudmore/bImPy
print('degrees==0:', degrees[degrees == 0].shape)
print('degrees==1:', degrees[degrees == 1].shape)
print('degrees==2:', degrees[degrees == 2].shape)
print('degrees>=3:', degrees[degrees >= 3].shape)
'''
degrees is an image of the skeleton, with each skeleton pixel containing the
number of neighbouring pixels. This enables us to distinguish between
junctions (where three or more skeleton branches meet),
endpoints (where a skeleton ends),
and paths (pixels on the inside of a skeleton branch).
'''
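
# a minimal sketch of using the degrees image described above (assumes `degrees`
# was computed as in the prints above); each mask selects one pixel class
endPoints = degrees == 1        # skeleton tips
pathPixels = degrees == 2       # interior pixels of a branch
junctionPixels = degrees >= 3   # three or more branches meet
print('junction pixels:', junctionPixels.sum(), 'end points:', endPoints.sum())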

#
# alternate way to use skan (todo: look into this)
print('    === running skan.Skeleton(skeleton0)')
skanSkel = skan.Skeleton(skeleton0, source_image=rawStack)

pathIdx = 2
print('skanSkel.paths_list():', len(skanSkel.paths_list()))
thisPath = skanSkel.paths_list()[pathIdx]  # list of indices into coordinates[i,3]
print('    pathIdx:', pathIdx, 'thisPath:', thisPath)
# works
#print('    coordinates[thisPath]:', coordinates[thisPath])
if True:
    for idx, path in enumerate(skanSkel.paths_list()):
        srcPnt = path[0]
        dstPnt = path[-1]
        '''
		slabs[srcPnt] = coordinates[srcPnt]
		slabs[dstPnt] = coordinates[dstPnt]