def testGridGraphSegmentationFelzenszwalbSegmentation():
    """Felzenszwalb segmentation on a grid graph stops at the requested
    number of segments (checked for a 2D and a 3D grid).
    """
    # NOTE(review): dataRGB is never used afterwards; it is kept so the
    # random-number stream matches the original test exactly.
    dataRGB = numpy.random.random([3, 3, 3]).astype(numpy.float32)
    dataRGB = taggedView(dataRGB, 'xyc')

    # 2D grid: expect 5 segments.
    nodeData = numpy.random.random([3, 3]).astype(numpy.float32)
    interpolated = numpy.random.random([3 * 2 - 1, 3 * 2 - 1]).astype(numpy.float32)
    grid = graphs.gridGraph(nodeData.shape)
    weights = graphs.edgeFeaturesFromInterpolatedImage(grid, interpolated)
    segmentation = graphs.felzenszwalbSegmentation(graph=grid, edgeWeights=weights,
                                                   k=1.0, nodeNumStop=5)
    rag = graphs.regionAdjacencyGraph(graph=grid, labels=segmentation)
    assert rag.nodeNum == 5

    # 3D grid: expect 15 segments.
    nodeData = numpy.random.random([3, 3, 3]).astype(numpy.float32)
    interpolated = numpy.random.random([3 * 2 - 1, 3 * 2 - 1, 3 * 2 - 1]).astype(numpy.float32)
    grid = graphs.gridGraph(nodeData.shape)
    weights = graphs.edgeFeaturesFromInterpolatedImage(grid, interpolated)
    segmentation = graphs.felzenszwalbSegmentation(graph=grid, edgeWeights=weights,
                                                   k=1.0, nodeNumStop=15)
    rag = graphs.regionAdjacencyGraph(graph=grid, labels=segmentation)
    assert rag.nodeNum == 15
def testGridGraphSegmentationFelzenszwalbSegmentation():
    """Felzenszwalb segmentation stops at the requested segment count.

    NOTE(review): this redefines the identically named test above, so only
    this later definition is actually collected by the test runner.
    """
    # dataRGB is unused afterwards but kept so the RNG stream matches the
    # original test exactly.
    dataRGB = numpy.random.random([3, 3, 3]).astype(numpy.float32)
    dataRGB = taggedView(dataRGB, 'xyc')

    def check(shape, nodeNumStop):
        # Build a grid graph over `shape`, segment it, and verify that the
        # resulting region adjacency graph has `nodeNumStop` nodes.
        data = numpy.random.random(shape).astype(numpy.float32)
        edata = numpy.random.random([2 * s - 1 for s in shape]).astype(numpy.float32)
        g = graphs.gridGraph(data.shape)
        w = graphs.edgeFeaturesFromInterpolatedImage(g, edata)
        lab = graphs.felzenszwalbSegmentation(graph=g, edgeWeights=w,
                                              k=1.0, nodeNumStop=nodeNumStop)
        rag = graphs.regionAdjacencyGraph(graph=g, labels=lab)
        assert rag.nodeNum == nodeNumStop

    check([3, 3], 5)
    check([3, 3, 3], 15)
def testGridGraphWatersheds():
    """Node-weighted watersheds with explicit seeds match the variant that
    computes seeds internally, on 3D and 2D grid graphs."""
    # ---- 3D ----
    volume = numpy.random.random([10, 10, 10]).astype(numpy.float32)
    interp = numpy.random.random([10 * 2 - 1, 10 * 2 - 1, 10 * 2 - 1]).astype(numpy.float32)
    g3 = graphs.gridGraph(volume.shape)
    ew3 = graphs.edgeFeaturesFromInterpolatedImage(graph=g3, image=interp)
    # generate seeds
    seeds3 = graphs.nodeWeightedWatershedsSeeds(graph=g3, nodeWeights=volume)
    seededLabels = graphs.nodeWeightedWatersheds(graph=g3, nodeWeights=volume, seeds=seeds3)
    autoLabels = graphs.nodeWeightedWatersheds(graph=g3, nodeWeights=volume)
    # edge-weighted watershed is exercised but its output is not validated
    seeds3 = graphs.nodeWeightedWatershedsSeeds(graph=g3, nodeWeights=volume)
    graphs.edgeWeightedWatersheds(graph=g3, edgeWeights=ew3, seeds=seeds3)
    assert numpy.array_equal(seededLabels, autoLabels)

    # ---- 2D ----
    image = numpy.random.random([10, 10]).astype(numpy.float32)
    interp = numpy.random.random([10 * 2 - 1, 10 * 2 - 1]).astype(numpy.float32)
    g2 = graphs.gridGraph(image.shape)
    ew2 = graphs.edgeFeaturesFromInterpolatedImage(graph=g2, image=interp)
    seeds2 = graphs.nodeWeightedWatershedsSeeds(graph=g2, nodeWeights=image)
    seededLabels = graphs.nodeWeightedWatersheds(graph=g2, nodeWeights=image, seeds=seeds2)
    autoLabels = graphs.nodeWeightedWatersheds(graph=g2, nodeWeights=image)
    # 2D half reuses the same seeds for the edge-weighted run (as original)
    graphs.edgeWeightedWatersheds(graph=g2, edgeWeights=ew2, seeds=seeds2)
    assert numpy.array_equal(seededLabels, autoLabels)
def testGridGraphAgglomerativeClustering():
    """Agglomerative clustering on 2D and 3D grid graphs stops at the
    requested node count, with and without RGB node features."""

    def check(shape, tags):
        # One grid shape: cluster twice (with/without node features) and
        # verify the region adjacency graph has exactly 5 nodes each time.
        features = numpy.random.random(list(shape) + [3]).astype(numpy.float32)
        features = vigra.taggedView(features, tags)
        data = numpy.random.random(shape).astype(numpy.float32)
        edata = numpy.random.random([2 * s - 1 for s in shape]).astype(numpy.float32)
        g = graphs.gridGraph(data.shape)
        ew = graphs.edgeFeaturesFromInterpolatedImage(graph=g, image=edata)
        # with node features
        labels = graphs.agglomerativeClustering(graph=g, edgeWeights=ew,
                                                nodeFeatures=features, nodeNumStop=5)
        assert graphs.regionAdjacencyGraph(graph=g, labels=labels).nodeNum == 5
        # without node features
        labels = graphs.agglomerativeClustering(graph=g, edgeWeights=ew, nodeNumStop=5)
        assert graphs.regionAdjacencyGraph(graph=g, labels=labels).nodeNum == 5

    check([10, 10], 'xyc')
    check([10, 10, 10], 'xyzc')
def testGridGraphAgglomerativeClustering():
    """Agglomerative clustering stops at nodeNumStop=5 segments, with and
    without RGB node features, on 2D and 3D grid graphs.

    NOTE(review): this redefines the identically named test above; only
    this later definition is collected.
    """
    # ---- 2D ----
    rgb2d = numpy.random.random([10, 10, 3]).astype(numpy.float32)
    rgb2d = vigra.taggedView(rgb2d, 'xyc')
    image = numpy.random.random([10, 10]).astype(numpy.float32)
    interp = numpy.random.random([10 * 2 - 1, 10 * 2 - 1]).astype(numpy.float32)
    g2 = graphs.gridGraph(image.shape)
    ew2 = graphs.edgeFeaturesFromInterpolatedImage(graph=g2, image=interp)
    clustering = graphs.agglomerativeClustering(graph=g2, edgeWeights=ew2,
                                                nodeFeatures=rgb2d, nodeNumStop=5)
    assert graphs.regionAdjacencyGraph(graph=g2, labels=clustering).nodeNum == 5
    clustering = graphs.agglomerativeClustering(graph=g2, edgeWeights=ew2, nodeNumStop=5)
    assert graphs.regionAdjacencyGraph(graph=g2, labels=clustering).nodeNum == 5

    # ---- 3D ----
    rgb3d = numpy.random.random([10, 10, 10, 3]).astype(numpy.float32)
    rgb3d = vigra.taggedView(rgb3d, 'xyzc')
    volume = numpy.random.random([10, 10, 10]).astype(numpy.float32)
    interp = numpy.random.random([10 * 2 - 1, 10 * 2 - 1, 10 * 2 - 1]).astype(numpy.float32)
    g3 = graphs.gridGraph(volume.shape)
    ew3 = graphs.edgeFeaturesFromInterpolatedImage(graph=g3, image=interp)
    clustering = graphs.agglomerativeClustering(graph=g3, edgeWeights=ew3,
                                                nodeFeatures=rgb3d, nodeNumStop=5)
    assert graphs.regionAdjacencyGraph(graph=g3, labels=clustering).nodeNum == 5
    clustering = graphs.agglomerativeClustering(graph=g3, edgeWeights=ew3, nodeNumStop=5)
    assert graphs.regionAdjacencyGraph(graph=g3, labels=clustering).nodeNum == 5
def testGridGraphWatersheds():
    """Seeded and unseeded node-weighted watersheds agree on grid graphs.

    NOTE(review): this redefines the identically named test above; only
    this later definition is collected.
    """

    def check(shape, reseedForEdgeWatershed):
        # Check one grid shape; `reseedForEdgeWatershed` mirrors the original
        # test, which recomputes seeds before the edge-weighted run only in 3D.
        nodeWeights = numpy.random.random(shape).astype(numpy.float32)
        interpolated = numpy.random.random([2 * s - 1 for s in shape]).astype(numpy.float32)
        g = graphs.gridGraph(nodeWeights.shape)
        edgeWeights = graphs.edgeFeaturesFromInterpolatedImage(graph=g, image=interpolated)
        seeds = graphs.nodeWeightedWatershedsSeeds(graph=g, nodeWeights=nodeWeights)
        seeded = graphs.nodeWeightedWatersheds(graph=g, nodeWeights=nodeWeights, seeds=seeds)
        unseeded = graphs.nodeWeightedWatersheds(graph=g, nodeWeights=nodeWeights)
        if reseedForEdgeWatershed:
            seeds = graphs.nodeWeightedWatershedsSeeds(graph=g, nodeWeights=nodeWeights)
        # edge-weighted watershed is exercised but its output is not validated
        graphs.edgeWeightedWatersheds(graph=g, edgeWeights=edgeWeights, seeds=seeds)
        assert numpy.array_equal(seeded, unseeded)

    check([10, 10, 10], True)
    check([10, 10], False)
img = vigra.impex.readImage(filepath) # get super-pixels with slic on LAB image imgLab = vigra.colors.transform_RGB2Lab(img) labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight, superpixelDiameter) labels = vigra.analysis.labelImage(labels) # compute gradient on interpolated image imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1]) gradMag = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag) # get 2D grid graph and edgeMap for grid graph # from gradMag of interpolated image gridGraph = graphs.gridGraph(img.shape[0:2]) gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGraph, gradMag) # get region adjacency graph from super-pixel labels rag = graphs.regionAdjacencyGraph(gridGraph, labels) # accumulate edge weights from gradient magnitude edgeWeights = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator) # accumulate node features from grid graph node map # which is just a plain image (with channels) nodeFeatures = rag.accumulateNodeFeatures(imgLab) # do agglomerativeClustering labels = graphs.agglomerativeClustering(graph=rag, edgeWeights=edgeWeights, beta=beta, nodeFeatures=nodeFeatures, nodeNumStop=nodeNumStop)
# get super-pixels with slic on LAB image imgLab = vigra.colors.transform_RGB2Lab(img) labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight, superpixelDiameter) labels = vigra.analysis.labelImage(labels) # compute gradient on interpolated image imgLabBig = vigra.resize(imgLab, [imgLab.shape[0] * 2 - 1, imgLab.shape[1] * 2 - 1]) gradMag = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag) # get 2D grid graph and edgeMap for grid graph # from gradMag of interpolated image gridGraph = graphs.gridGraph(img.shape[0:2]) gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage( gridGraph, gradMag) # get region adjacency graph from super-pixel labels rag = graphs.regionAdjacencyGraph(gridGraph, labels) # accumulate edge weights from gradient magnitude edgeIndicator = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator) # accumulate node features from grid graph node map # which is just a plain image (with channels) nodeFeatures = rag.accumulateNodeFeatures(imgLab) resultFeatures = graphs.recursiveGraphSmoothing(rag, nodeFeatures, edgeIndicator, gamma=gamma, edgeThreshold=edgeThreshold,
def calculate_distances():
    """Compute pairwise shortest-path distances between color markers.

    Markers of the same color are expected to lie in the same neuron, so
    distances between same-color markers should be smaller than distances
    between markers of different colors.  For every input stack the function
    builds a grid graph on the combined membrane/synapse prediction, runs
    Dijkstra from each marked point for every edge indicator, tallies
    same/different-superpixel statistics and finally hands the collected
    distances to ``analyze_distances``.

    NOTE(review): the indentation of this function was reconstructed from a
    whitespace-mangled source; confirm the nesting against version history.
    """
    # Collect the input file lists; identical sorting keeps the five lists
    # aligned so they can be zipped below.
    files_2d = glob.glob(inputdir + d2_pattern)
    files_2d = sorted(files_2d, key=str.lower)
    files_3d = glob.glob(inputdir + d3_pattern)
    files_3d = sorted(files_3d, key=str.lower)
    files_markers = glob.glob(inputdir + marker_pattern)
    files_markers = sorted(files_markers, key=str.lower)
    debug_dirs = glob.glob(debugdir)
    debug_dirs = sorted(debug_dirs, key=str.lower)
    files_raw = glob.glob(inputdir + raw_pattern)
    files_raw = sorted(files_raw, key=str.lower)
    #print files_2d, files_3d, files_markers, debug_dirs, files_raw

    # Only datasets [first, last) are processed.
    first = 0
    last = 4

    # Per-edge-indicator accumulators, filled across all processed stacks.
    all_distances_same = []
    all_distances_diff = []
    # Superpixel co-occurrence counters: (same/diff color) x (same/diff superpixel).
    nsamesame = 0
    nsamediff = 0
    ndiffdiff = 0
    ndiffsame = 0

    for f2name, f3name, mname, ddir, rawname in zip(files_2d[first:last],
                                                    files_3d[first:last],
                                                    files_markers[first:last],
                                                    debug_dirs[first:last],
                                                    files_raw[first:last]):
        print "processing files:"
        print f2name
        print f3name
        print mname
        print ddir
        print rawname

        tempGraph = Graph()
        edgeIndicators = []
        instances = []
        if debug_images:
            rawim = vigra.readImage(rawname)
            vigra.impex.writeImage(rawim, ddir + "/raw.tiff")
        #print "processing files:", f2name, f3name, mname

        f2 = h5py.File(f2name)
        f3 = h5py.File(f3name)
        if use_2d_only:
            # Both channels come from the 2D prediction file.
            d2 = f2["exported_data"][5, :, :, 0]
            d3 = f2["exported_data"][5, :, :, 2]
        else:
            d2 = f2["exported_data"][..., 0]
            # 5 because we only want the central slice, there are 11 in total
            d3 = f3["exported_data"][5, :, :, 2]
            d3 = d3.swapaxes(0, 1)
        # print d2.shape, d3.shape

        # Sum of membrane and synapse channels is the image to segment.
        combined = d2 + d3
        if use_2d_only:
            #convert to float
            combined = combined.astype(numpy.float32)
            combined = combined/255.

        markedNodes = extractMarkedNodes(mname)
        # print

        # Upsample the combined image by 2x (minus one pixel per axis).
        opUpsample = OpUpsampleByTwo(graph=tempGraph)
        combined = numpy.reshape(combined, combined.shape + (1,) + (1,))
        combined = combined.view(vigra.VigraArray)
        combined.axistags = vigra.defaultAxistags('xytc')
        opUpsample.Input.setValue(combined)
        upsampledMembraneProbs = opUpsample.Output[:].wait()
        # get rid of t
        upsampledMembraneProbs = upsampledMembraneProbs[:, :, 0, :]
        upsampledMembraneProbs = upsampledMembraneProbs.view(vigra.VigraArray)
        upsampledMembraneProbs.axistags = vigra.defaultAxistags('xyc')

        # try to filter
        upsampledSmoothedMembraneProbs = computeDistanceRaw(upsampledMembraneProbs, 1.6, ddir)
        upsampledMembraneProbs = filter_by_size(upsampledSmoothedMembraneProbs, ddir)
        upsampledMembraneProbs = upsampledMembraneProbs.view(vigra.VigraArray)
        upsampledMembraneProbs.axistags = vigra.defaultAxistags('xyc')
        # Two edge indicators are compared: Hessian-based and smoothed raw.
        edgeIndicators.append(computeDistanceHessian(upsampledMembraneProbs, 5.0, ddir))
        edgeIndicators.append(upsampledSmoothedMembraneProbs)

        segm = superpixels(combined.squeeze())
        if debug_images:
            vigra.impex.writeImage(segm, ddir + "/superpixels.tiff")

        gridGr = graphs.gridGraph((d2.shape[0], d2.shape[1]))  # !on original pixels
        for iind, indicator in enumerate(edgeIndicators):
            gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGr, indicator)
            instance = vigra.graphs.ShortestPathPathDijkstra(gridGr)
            instances.append(instance)
            distances_same = []
            distances_diff = []
            for color, points in markedNodes.iteritems():
                #going over points of *same* color
                if len(points)>1:
                    print "Processing color", color
                    for i in range(len(points)):
                        # Python 2: map() returns a list of longs here.
                        node = map(long, points[i])
                        sourceNode = gridGr.coordinateToNode(node)
                        # Full distance field from this marker.
                        instance.run(gridGraphEdgeIndicator, sourceNode, target=None)
                        distances_all = instance.distances()
                        sp_this = segm[node[0], node[1]]
                        for j in range(i + 1, len(points)):
                            # go over points of the same color
                            other_node = map(long, points[j])
                            distances_same.append(distances_all[other_node[0], other_node[1]])
                            sp_other = segm[other_node[0], other_node[1]]
                            if sp_this==sp_other:
                                nsamesame = nsamesame + 1
                                #print "same color in the same superpixel!"
                            else:
                                nsamediff += 1
                            #targetNode = gridGr.coordinateToNode(other_node)
                            #path = instance.run(gridGraphEdgeIndicator, sourceNode).path(pathType='coordinates',
                            #                    target=targetNode)
                            #max_on_path = numpy.max(distances_all[path])
                            #min_on_path = numpy.min(distances_all[path])
                            # print max_on_path, min_on_path
                            # print path.shape
                            #print "distance b/w", node, other_node, " = ", distances_all[other_node[0], other_node[1]]
                        for newcolor, newpoints in markedNodes.iteritems():
                            # go over points of other colors
                            if color == newcolor:
                                continue
                            for newi in range(len(newpoints)):
                                other_node = map(long, newpoints[newi])
                                sp_other = segm[other_node[0], other_node[1]]
                                if sp_this==sp_other:
                                    ndiffsame += 1
                                else:
                                    ndiffdiff += 1
                                distances_diff.append(distances_all[other_node[0], other_node[1]])
                        # highlight the source point in image
                        distances_all[node[0], node[1]] = numpy.max(distances_all)
                        outfile = ddir + "/" + str(node[0]) + "_" + str(node[1]) + "_" + str(iind) + ".tiff"
                        vigra.impex.writeImage(distances_all, outfile)
            # Grow the accumulators lazily to one slot per edge indicator.
            while len(all_distances_diff)<len(edgeIndicators):
                all_distances_diff.append([])
                all_distances_same.append([])
            all_distances_diff[iind].extend(distances_diff)
            all_distances_same[iind].extend(distances_same)
            #print "summary for edge indicator:", iind
            #print "points of same color:", distances_same
            #print "points of other colors:", distances_diff
            # print distances_same
        # vigra.impex.writeImage(combined, f2name+"_combined.tiff")
        # vigra.impex.writeImage(d3, f2name+"_synapse.tiff")
        # vigra.impex.writeImage(d2, f2name+"_membrane.tiff")

    print "same color in the same superpixels:", nsamesame
    print "same color, different superpixels:", nsamediff
    print "diff color, same superpixel:", ndiffsame
    print "diff color, diff superpixels", ndiffdiff
    analyze_distances(all_distances_same, all_distances_diff)
def process_branch( branch_index, branch_rois ):
    """Process every (node_info, roi) pair of one skeleton branch.

    For each skeleton node: predicts synapse/membrane probabilities inside
    the ROI, thresholds the synapse channel into connected components,
    computes Dijkstra distance fields from the skeleton node (over a
    Hessian-eigenvalue edge indicator and over raw membrane probabilities)
    and, when available, from the node's nearest connector, then writes one
    CSV row per detected synapse and reports progress.

    Relies on many names from the enclosing scope (tempGraph, opUpsample,
    prediction caches, node_to_connector, locks, loggers, csv_writer, ...).

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; confirm the exact nesting against version history.
    """
    # opFeatures and opThreshold are declared locally so this whole block can be parallelized!
    # (We use Input.setValue() instead of Input.connect() here.)
    opFeatures = OpPixelFeaturesPresmoothed(graph=tempGraph)

    # Compute the Hessian slicewise and create gridGraphs
    standard_scales = [0.3, 0.7, 1.0, 1.6, 3.5, 5.0, 10.0]
    standard_feature_ids = ['GaussianSmoothing', 'LaplacianOfGaussian', \
                            'GaussianGradientMagnitude', 'DifferenceOfGaussians', \
                            'StructureTensorEigenvalues', 'HessianOfGaussianEigenvalues']
    opFeatures.Scales.setValue(standard_scales)
    opFeatures.FeatureIds.setValue(standard_feature_ids)

    # Select Hessian Eigenvalues at scale 5.0
    scale_index = standard_scales.index(5.0)
    feature_index = standard_feature_ids.index('HessianOfGaussianEigenvalues')
    selection_matrix = numpy.zeros( (6,7), dtype=bool )  # all False
    selection_matrix[feature_index][scale_index] = True
    opFeatures.Matrix.setValue(selection_matrix)

    # opFeatures and opThreshold are declared locally so this whole block can be parallelized!
    # (We use Input.setValue() instead of Input.connect() here.)
    opThreshold = OpThresholdTwoLevels(graph=tempGraph)
    opThreshold.Channel.setValue(0)  # We select SYNAPSE_CHANNEL before the data is given to opThreshold
    opThreshold.SmootherSigma.setValue({'x': 2.0, 'y': 2.0, 'z': 1.0})
    #NOTE: two-level is much better. Maybe we can afford it?
    #opThreshold.CurOperator.setValue(0) # 0==one-level
    #opThreshold.SingleThreshold.setValue(0.4)
    #FIXME: solve the mess with uint8/float in predictions
    opThreshold.CurOperator.setValue(1) # 1==two-level
    opThreshold.HighThreshold.setValue(0.4)
    opThreshold.LowThreshold.setValue(0.2)

    # State for matching synapse ids between consecutive slices.
    previous_slice_objects = None
    previous_slice_roi = None

    conn_ids = [x.id for x in connector_infos]
    connector_infos_dict = dict(zip(conn_ids, connector_infos))

    branch_node_count = len(branch_rois)
    for node_index_in_branch, (node_info, roi) in enumerate(branch_rois):
        with Timer() as timer:
            skeletonCoord = (node_info.x_px, node_info.y_px, node_info.z_px)
            logger.debug("skeleton point: {}".format( skeletonCoord ))

            #Add channel dimension
            roi_with_channel = numpy.zeros((2, roi.shape[1]+1), dtype=numpy.uint32)
            roi_with_channel[:, :-1] = roi[:]
            roi_with_channel[0, -1] = 0
            roi_with_channel[1, -1] = 1
            iz = roi[0][2]

            # ROI for the upsampled (2x-1) Hessian computation.
            roi_hessian = (roi_with_channel[0]*2, roi_with_channel[1]*2-1)
            for x in range(roi.shape[1]):
                if roi[0][x] == 0:
                    roi_hessian[0][x] = 0
            # z extent stays a single slice
            roi_hessian[0][2] = iz
            roi_hessian[1][2] = iz+1
            #we need the second eigenvalue
            roi_hessian[0][-1] = 1
            roi_hessian[1][-1] = 2

            WITH_CONNECTORS_ONLY = True
            if WITH_CONNECTORS_ONLY:
                # Skip skeleton nodes that have no connector attached.
                if not node_info.id in node_to_connector.keys():
                    continue

            if debug_images:
                outdir1 = outdir+"raw/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".png"
                data = opPixelClassification3d.InputImages[-1](roi_with_channel[0], roi_with_channel[1]).wait()
                vigra.impex.writeImage(data.squeeze().astype(numpy.uint8), outfile)
            '''
            outdir2 = outdir + "synapse_pred/"
            outfile = outdir2+"%.02d"%iz + ".png"
            data = opThreshold.InputImage(roi_with_channel[0], roi_with_channel[1]).wait()
            vigra.impex.writeImage(data.squeeze().astype(numpy.uint8), outfile)
            '''
            start_pred = time.time()
            # Full prediction ROI (4 channels), then per-channel sub-ROIs.
            prediction_roi = numpy.append( roi_with_channel[:,:-1], [[0],[4]], axis=1 )
            synapse_prediction_roi = numpy.append( prediction_roi[:,:-1], [[SYNAPSE_CHANNEL],[SYNAPSE_CHANNEL+1]], axis=1 )
            membrane_prediction_roi = numpy.append( prediction_roi[:,:-1], [[MEMBRANE_CHANNEL],[MEMBRANE_CHANNEL+1]], axis=1 )
            #synapse_predictions = opPixelClassification3d.PredictionProbabilities[-1](*prediction_roi).wait()
            synapse_predictions = opSynapsePredictionCache.Output(*synapse_prediction_roi).wait()
            synapse_predictions = vigra.taggedView( synapse_predictions, "xytc" )

            if debug_images:
                outdir1 = outdir+"membrane/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".png"
                #membrane_predictions = opPixelClassification2d.HeadlessPredictionProbabilities[-1](*prediction_roi).wait()
                membrane_predictions = opMembranePredictionCache.Output(*membrane_prediction_roi).wait()
                vigra.impex.writeImage(membrane_predictions[..., 0].squeeze(), outfile)
            stop_pred = time.time()
            timing_logger.debug( "spent in first 3d prediction: {}".format( stop_pred-start_pred ) )

            # Threshold the synapse channel into connected components.
            opThreshold.InputImage.setValue(synapse_predictions)
            opThreshold.InputImage.meta.drange = opPixelClassification3d.PredictionProbabilities[-1].meta.drange
            synapse_cc = opThreshold.Output[:].wait()

            if debug_images:
                outdir1 = outdir+"predictions_roi/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".tiff"
                #norm = numpy.where(synapse_cc[:, :, 0, 0]>0, 255, 0)
                vigra.impex.writeImage(synapse_predictions[...,0,0], outfile)
            if debug_images:
                outdir1 = outdir+"synapses_roi/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".tiff"
                norm = numpy.where(synapse_cc[:, :, 0, 0]>0, 255, 0)
                vigra.impex.writeImage(norm.astype(numpy.uint8), outfile)

            if numpy.sum(synapse_cc)==0:
                print "NO SYNAPSES IN THIS SLICE:", iz
                timing_logger.debug( "ROI TIMER: {}".format( timer.seconds() ) )
                continue

            # Distances over Hessian
            start_hess = time.time()
            roi_hessian = ( tuple(map(long, roi_hessian[0])), tuple(map(long, roi_hessian[1])) )
            upsampled_combined_membranes = opUpsample.Output(*roi_hessian).wait()
            upsampled_combined_membranes = vigra.taggedView(upsampled_combined_membranes, opUpsample.Output.meta.axistags )
            opFeatures.Input.setValue(upsampled_combined_membranes)
            eigenValues = opFeatures.Output[...,1:2].wait()  #we need the second eigenvalue
            eigenValues = numpy.abs(eigenValues[:, :, 0, 0])
            stop_hess = time.time()
            timing_logger.debug( "spent for hessian: {}".format( stop_hess-start_hess ) )

            shape_x = roi[1][0]-roi[0][0]
            shape_y = roi[1][1]-roi[0][1]
            shape_x = long(shape_x)
            shape_y = long(shape_y)
            start_gr = time.time()
            gridGr = graphs.gridGraph((shape_x, shape_y ))  # !on original pixels
            gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGr, eigenValues)
            #gridGraphs.append(gridGr)
            #graphEdges.append(gridGraphEdgeIndicator)
            stop_gr = time.time()
            timing_logger.debug( "creating graph: {}".format( stop_gr - start_gr ) )

            if debug_images:
                outdir1 = outdir+"hessianUp/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".tiff"
                logger.debug( "saving hessian to file: {}".format( outfile ) )
                vigra.impex.writeImage(eigenValues, outfile )

            # Dijkstra from the skeleton node over the Hessian edge indicator.
            instance = vigra.graphs.ShortestPathPathDijkstra(gridGr)
            relative_coord = [skeletonCoord[0]-roi[0][0], skeletonCoord[1]-roi[0][1]]
            relative_coord = map(long, relative_coord)
            sourceNode = gridGr.coordinateToNode(relative_coord)
            start_dij = time.time()
            instance.run(gridGraphEdgeIndicator, sourceNode, target=None)
            distances = instance.distances()
            stop_dij = time.time()
            timing_logger.debug( "spent in dijkstra {}".format( stop_dij - start_dij ) )

            if debug_images:
                outdir1 = outdir+"distances/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".tiff"
                logger.debug( "saving distances to file:".format( outfile ) )
                # Create a "white" pixel at the source node
                distances[skeletonCoord[0]-roi[0][0], skeletonCoord[1]-roi[0][1]] = numpy.max(distances)
                vigra.impex.writeImage(distances, outfile )

            # Distances over raw membrane probabilities
            roi_upsampled_membrane = numpy.asarray( roi_hessian )
            roi_upsampled_membrane[:, -1] = [0,1]
            roi_upsampled_membrane = (map(long, roi_upsampled_membrane[0]), map(long, roi_upsampled_membrane[1]))

            connector_distances = None
            connector_coords = None
            if node_info.id in node_to_connector.keys():
                connectors = node_to_connector[node_info.id]
                connector_info = connector_infos_dict[connectors[0]]
                #Convert to pixels
                con_x_px = int(connector_info.x_nm / float(X_RES))
                con_y_px = int(connector_info.y_nm / float(Y_RES))
                con_z_px = int(connector_info.z_nm / float(Z_RES))
                connector_coords = (con_x_px-roi[0][0], con_y_px-roi[0][1])
                if con_x_px>roi[0][0] and con_x_px<roi[1][0] and con_y_px>roi[0][1] and con_y_px<roi[1][1]:
                    #this connector is inside our prediction roi, compute the distance field "
                    con_relative = [long(con_x_px-roi[0][0]), long(con_y_px-roi[0][1])]
                    sourceNode = gridGr.coordinateToNode(con_relative)
                    instance.run(gridGraphEdgeIndicator, sourceNode, target=None)
                    connector_distances = instance.distances()
                else:
                    connector_distances = None

            upsampled_membrane_probabilities = opUpsample.Output(*roi_upsampled_membrane).wait().squeeze()
            upsampled_membrane_probabilities = vigra.filters.gaussianSmoothing(upsampled_membrane_probabilities, sigma=1.0)
            #print "UPSAMPLED MEMBRANE SHAPE: {} MAX: {} MIN: {}".format( upsampled_membrane_probabilities.shape, upsampled_membrane_probabilities.max(), upsampled_membrane_probabilities.min() )
            gridGrRaw = graphs.gridGraph((shape_x, shape_y ))  # !on original pixels
            gridGraphRawEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGrRaw, upsampled_membrane_probabilities)
            #gridGraphs.append(gridGrRaw)
            #graphEdges.append(gridGraphRawEdgeIndicator)
            instance_raw = vigra.graphs.ShortestPathPathDijkstra(gridGrRaw)
            sourceNode = gridGrRaw.coordinateToNode(relative_coord)
            instance_raw.run(gridGraphRawEdgeIndicator, sourceNode, target=None)
            distances_raw = instance_raw.distances()
            stop_dij = time.time()
            timing_logger.debug( "spent in dijkstra (raw probs) {}".format( stop_dij - start_dij ) )

            if debug_images:
                outdir1 = outdir+"distances_raw/"
                try:
                    os.makedirs(outdir1)
                except os.error:
                    pass
                outfile = outdir1+"/{}-{}".format( iz, node_info.id ) + ".tiff"
                logger.debug( "saving distances (raw probs) to file:".format( outfile ) )
                # Create a "white" pixel at the source node
                distances_raw[skeletonCoord[0]-roi[0][0], skeletonCoord[1]-roi[0][1]] = numpy.max(distances_raw)
                vigra.impex.writeImage(distances_raw, outfile )

            if numpy.sum(synapse_cc)==0:
                continue

            # Make synapse ids consistent across consecutive slices.
            with max_label_lock:
                synapse_objects_4d, maxLabelCurrent = normalize_synapse_ids( synapse_cc, roi, previous_slice_objects, previous_slice_roi, maxLabelSoFar[0] )
                maxLabelSoFar[0] = maxLabelCurrent
                synapse_objects = synapse_objects_4d.squeeze()
                #add this synapse to the exported list
                previous_slice_objects = synapse_objects
                previous_slice_roi = roi
            '''
            if numpy.sum(synapse_cc)==0:
                print "NO SYNAPSES IN THIS SLICE:", iz
                timing_logger.debug( "ROI TIMER: {}".format( timer.seconds() ) )
                continue
            '''
            synapseIds = set(synapse_objects.flat)
            synapseIds.remove(0)
            for sid in synapseIds:
                #find the pixel positions of this synapse
                syn_pixel_coords = numpy.where(synapse_objects == sid)
                synapse_size = len( syn_pixel_coords[0] )
                #syn_pixel_coords = numpy.unravel_index(syn_pixels, distances.shape)
                #FIXME: offset by roi
                syn_average_x = numpy.average(syn_pixel_coords[0])+roi[0][0]
                syn_average_y = numpy.average(syn_pixel_coords[1])+roi[0][1]

                # Minimal distance from the skeleton node to this synapse,
                # over both edge indicators.
                syn_distances = distances[syn_pixel_coords]
                mindist = numpy.min(syn_distances)
                syn_distances_raw = distances_raw[syn_pixel_coords]
                mindist_raw = numpy.min(syn_distances_raw)

                if connector_distances is not None:
                    syn_distances_connector = connector_distances[syn_pixel_coords]
                    min_conn_distance = numpy.min(syn_distances_connector)
                elif connector_coords is not None:
                    # Connector outside the ROI: fall back to euclidean distance.
                    euclidean_dists = [scipy.spatial.distance.euclidean(connector_coords, xy) for xy in zip(syn_pixel_coords[0], syn_pixel_coords[1])]
                    min_conn_distance = numpy.min(euclidean_dists)
                else:
                    min_conn_distance = 99999.0  # sentinel: no connector available

                # Determine average uncertainty
                # Get probabilities for this synapse's pixels
                flat_predictions = synapse_predictions.view(numpy.ndarray)[synapse_objects_4d[...,0] == sid]
                # If we pulled the data from cache, there may be only one channel.
                # In that case, we can't quite compute a proper uncertainty,
                # so we'll just pretend there were only two prediction channels to begin with.
                if flat_predictions.shape[-1] > 1:
                    # Sort along channel axis
                    flat_predictions.sort(axis=-1)
                    # What's the difference between the highest and second-highest class?
                    certainties = flat_predictions[:,-1] - flat_predictions[:,-2]
                else:
                    # Pretend there were only two channels
                    certainties = flat_predictions[:,0] - (1 - flat_predictions[:,0])
                avg_certainty = numpy.average(certainties)
                avg_uncertainty = 1.0 - avg_certainty

                # One CSV row per synapse.
                fields = {}
                fields["synapse_id"] = int(sid)
                fields["x_px"] = int(syn_average_x + 0.5)
                fields["y_px"] = int(syn_average_y + 0.5)
                fields["z_px"] = iz
                fields["size_px"] = synapse_size
                fields["distance_hessian"] = mindist
                fields["distance_raw_probs"] = mindist_raw
                fields["detection_uncertainty"] = avg_uncertainty
                fields["node_id"] = node_info.id
                fields["node_x_px"] = node_info.x_px
                fields["node_y_px"] = node_info.y_px
                fields["node_z_px"] = node_info.z_px
                if min_conn_distance!=99999.0:
                    connectors = node_to_connector[node_info.id]
                    connector_info = connector_infos_dict[connectors[0]]
                    fields["nearest_connector_id"] = connector_info.id
                    fields["nearest_connector_distance_nm"] = min_conn_distance
                    fields["nearest_connector_x_nm"] = connector_info.x_nm
                    fields["nearest_connector_y_nm"] = connector_info.y_nm
                    fields["nearest_connector_z_nm"] = connector_info.z_nm
                else:
                    fields["nearest_connector_id"] = -1
                    fields["nearest_connector_distance_nm"] = min_conn_distance
                    fields["nearest_connector_x_nm"] = -1
                    fields["nearest_connector_y_nm"] = -1
                    fields["nearest_connector_z_nm"] = -1

                with f_out_lock:
                    csv_writer.writerow( fields )
                    fout.flush()

            with f_out_lock:
                node_overall_index[0] += 1
                progress_callback( ProgressInfo( node_overall_index[0],
                                                 skeleton_node_count,
                                                 branch_index,
                                                 skeleton_branch_count,
                                                 node_index_in_branch,
                                                 branch_node_count,
                                                 maxLabelCurrent ) )
            #Sanity check
            #outfile = outdir+"hessianUp/"+ "%.02d"%iz + ".tiff"
            #vigra.impex.writeImage(eigenValues, outfile)
            #outfile = outdir+"distances/"+ "%.02d"%iz + ".tiff"
            #vigra.impex.writeImage(distances, outfile)
            timing_logger.debug( "ROI TIMER: {}".format( timer.seconds() ) )