def execute(self, slot, subindex, roi, result):
    assert slot == self.MST, "Invalid output slot: {}".format(slot.name)

    # First thing, show the user that we are waiting for computations to finish.
    self.applet.progressSignal.emit(0)

    volume_feat = self.Image(*roiFromShape(self.Image.meta.shape)).wait()
    labelVolume = self.LabelImage(*roiFromShape(self.LabelImage.meta.shape)).wait()

    self.applet.progress = 0

    def updateProgressBar(x):
        # Send the signal iff the progress is significant.
        if x - self.applet.progress > 1 or x == 100:
            self.applet.progressSignal.emit(x)
            self.applet.progress = x

    mst = MSTSegmentor(labelVolume[0, ..., 0],
                       numpy.asarray(volume_feat[0, ..., 0], numpy.float32),
                       edgeWeightFunctor="minimum",
                       progressCallback=updateProgressBar)
    # mst.raw is not set here in order to avoid redundant data storage.
    mst.raw = None

    # Output is of shape 1.
    result[0] = mst

    return result
def preprocess(inputf, outputf, sigma=1.6):
    print "preprocessing file %s to outputfile %s" % (inputf, outputf)

    h5f = h5py.File(inputf, "r")
    volume = h5f["raw"][:35, :35, :35]
    print "input volume shape: ", volume.shape
    print "input volume size: ", volume.nbytes / 1024**2, "MB"
    fvol = volume.astype(numpy.float32)

    #volume_feat = vigra.filters.gaussianGradientMagnitude(fvol, sigma)
    volume_feat = vigra.filters.hessianOfGaussianEigenvalues(fvol, sigma)[:, :, :, 0]

    # Rescale the feature volume to the range [0, 255].
    volume_ma = numpy.max(volume_feat)
    volume_mi = numpy.min(volume_feat)
    volume_feat = (volume_feat - volume_mi) * 255.0 / (volume_ma - volume_mi)

    print "Watershed..."
    labelVolume = vigra.analysis.watersheds(volume_feat)[0].astype(numpy.int32)
    print labelVolume
    print labelVolume.shape, labelVolume.dtype

    mst = MSTSegmentor(labelVolume, volume_feat.astype(numpy.float32), edgeWeightFunctor="minimum")
    mst.raw = volume
    mst.saveH5(outputf, "graph")
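# Example call for the preprocessing helper above. This is only a sketch:
# the file names are placeholders, and it assumes the input HDF5 file
# contains a "raw" dataset as expected by preprocess().
if __name__ == "__main__":
    preprocess("raw_volume.h5", "preprocessed_graph.h5", sigma=1.6)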
def _deserializeFromHdf5(self, topGroup, groupVersion, hdf5File, projectFilePath, headless=False):
    assert "sigma" in topGroup.keys()
    assert "filter" in topGroup.keys()
    sigma = topGroup["sigma"].value
    sfilter = topGroup["filter"].value

    if "graph" in topGroup.keys():
        graphgroup = topGroup["graph"]
    else:
        assert "graphfile" in topGroup.keys()
        # Feature: load preprocessed graph from file.
        filePath = topGroup["graphfile"].value
        if not os.path.exists(filePath):
            if headless:
                raise RuntimeError("Could not find data at " + filePath)
            filePath = self.repairFile(filePath, "*.h5")
        graphgroup = h5py.File(filePath, "r")["graph"]

    for opPre in self._o.innerOperators:
        opPre.initialSigma = sigma
        opPre.Sigma.setValue(sigma)
        opPre.initialFilter = sfilter
        opPre.Filter.setValue(sfilter)
        mst = MSTSegmentor.loadH5G(graphgroup)
        opPre._prepData = numpy.array([mst])
        opPre._dirty = False
        opPre.applet.writeprotected = True
        opPre.PreprocessedData.setDirty()
        opPre.enableDownstream(True)
def __init__(self, carvingGraphFilename, *args, **kwargs):
    super(OpCarving, self).__init__(*args, **kwargs)
    print "[Carving id=%d] CONSTRUCTOR" % id(self)
    self._mst = MSTSegmentor.loadH5(carvingGraphFilename, "graph")

    # Supervoxels of finished and saved objects.
    self._done_lut = None
    self._done_seg_lut = None

    self._setCurrObjectName("")
    self.HasSegmentation.setValue(False)
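# Minimal construction sketch for OpCarving, assuming the usual lazyflow
# pattern of passing a Graph instance to the operator; the graph file name
# below is a placeholder, not a file referenced elsewhere in this code.
from lazyflow.graph import Graph

op_carving = OpCarving("my_carving_graph.h5", graph=Graph())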
def _deserializeFromHdf5(self, topGroup, groupVersion, hdf5File, projectFilePath, headless=False):
    assert "sigma" in topGroup.keys()
    assert "filter" in topGroup.keys()
    sigma = topGroup["sigma"].value
    sfilter = topGroup["filter"].value

    try:
        watershed_source = str(topGroup["watershed_source"].value)
        invert_watershed_source = bool(topGroup["invert_watershed_source"].value)
    except KeyError:
        watershed_source = None
        invert_watershed_source = False

    if "graph" in topGroup.keys():
        graphgroup = topGroup["graph"]
    else:
        assert "graphfile" in topGroup.keys()
        # Feature: load preprocessed graph from file.
        filePath = topGroup["graphfile"].value
        if not os.path.exists(filePath):
            if headless:
                raise RuntimeError("Could not find data at " + filePath)
            filePath = self.repairFile(filePath, "*.h5")
        graphgroup = h5py.File(filePath, "r")["graph"]

    for opPre in self._o.innerOperators:
        opPre.initialSigma = sigma
        opPre.Sigma.setValue(sigma)
        if watershed_source:
            opPre.WatershedSource.setValue(watershed_source)
            opPre.InvertWatershedSource.setValue(invert_watershed_source)
        opPre.initialFilter = sfilter
        opPre.Filter.setValue(sfilter)
        mst = MSTSegmentor.loadH5G(graphgroup)
        opPre._prepData = numpy.array([mst])
        opPre._dirty = False
        opPre.applet.writeprotected = True
        opPre.PreprocessedData.setDirty()
        opPre.enableDownstream(True)
def propagateDirty(self, slot, subindex, roi):
    key = roi.toSlice()
    if slot == self.Trigger or slot == self.BackgroundPriority or slot == self.NoBiasBelow:
        if self._mst is None:
            return
        if not self.BackgroundPriority.ready():
            return
        if not self.NoBiasBelow.ready():
            return

        bgPrio = self.BackgroundPriority.value
        noBiasBelow = self.NoBiasBelow.value
        print "compute new carving results with bg priority = %f, no bias below %d" % (bgPrio, noBiasBelow)

        labelCount = 2

        params = dict()
        params["prios"] = [1.0, bgPrio, 1.0]
        params["uncertainty"] = "none"
        params["noBiasBelow"] = noBiasBelow

        unaries = numpy.zeros((self._mst.numNodes, labelCount + 1)).astype(numpy.float32)
        #assert numpy.sum(self._mst.seeds > 2) == 0, "seeds > 2 at %r" % numpy.where(self._mst.seeds > 2)
        self._mst.run(unaries, **params)

        self.Segmentation.setDirty(slice(None))
        self.HasSegmentation.setValue(True)
    elif slot == self.CarvingGraphFile:
        if self._mst is not None:
            # If the carving graph file is not valid, all outputs must be invalid.
            for output in self.outputs.values():
                output.setDirty(slice(0, None))

        fname = self.CarvingGraphFile.value
        self._mst = MSTSegmentor.loadH5(fname, "graph")
        print "[Carving id=%d] loading graph file %s (mst=%d)" % (id(self), fname, id(self._mst))

        self.Segmentation.setDirty(slice(None))
    else:
        super(OpCarving, self).notifyDirty(slot, key)
def execute(self, slot, subindex, roi, result):
    if self._prepData[0] is not None and not self._dirty:
        return self._prepData

    # First thing, show the user that we are waiting for computations to finish.
    self.applet.progressSignal.emit(0)

    # Make sure the raw data is 5D: t, {x,y,z}, c
    ax = self.RawData.meta.axistags
    sh = self.RawData.meta.shape
    assert len(ax) == 5
    assert ax[0].key == "t" and sh[0] == 1
    for i in range(1, 4):
        assert ax[i].isSpatial()
    assert ax[4].key == "c" and sh[4] == 1

    volume5d = self.RawData.value
    sigma = self.Sigma.value
    volume = volume5d[0, :, :, :, 0]

    print "input volume shape: ", volume.shape
    print "input volume size: ", volume.nbytes / 1024**2, "MB"
    fvol = volume.astype(numpy.float32)

    # Choose the filter selected by the user.
    volume_filter = self.Filter.value

    self.applet.progressSignal.emit(0)
    print "applying filter",
    if volume_filter == 0:
        print "lowest eigenvalue of Hessian of Gaussian"
        volume_feat = vigra.filters.hessianOfGaussianEigenvalues(fvol, sigma)[:, :, :, 0]
    elif volume_filter == 1:
        print "greatest eigenvalue of Hessian of Gaussian"
        volume_feat = vigra.filters.hessianOfGaussianEigenvalues(fvol, sigma)[:, :, :, 2]
    elif volume_filter == 2:
        print "Gaussian Gradient Magnitude"
        volume_feat = vigra.filters.gaussianGradientMagnitude(fvol, sigma)
    elif volume_filter == 3:
        print "Gaussian Smoothing"
        volume_feat = vigra.filters.gaussianSmoothing(fvol, sigma)
    elif volume_filter == 4:
        print "negative Gaussian Smoothing"
        volume_feat = vigra.filters.gaussianSmoothing(-fvol, sigma)

    # Rescale the feature volume to the range [0, 255].
    volume_ma = numpy.max(volume_feat)
    volume_mi = numpy.min(volume_feat)
    volume_feat = (volume_feat - volume_mi) * 255.0 / (volume_ma - volume_mi)

    sys.stdout.write("Watershed...")
    sys.stdout.flush()
    labelVolume = vigra.analysis.watersheds(volume_feat)[0].astype(numpy.int32)
    sys.stdout.write("done")
    sys.stdout.flush()

    self.applet.progress = 0

    def updateProgressBar(x):
        # Send the signal iff the progress is significant.
        if x - self.applet.progress > 1 or x == 100:
            self.applet.progressSignal.emit(x)
            self.applet.progress = x

    mst = MSTSegmentor(labelVolume,
                       volume_feat.astype(numpy.float32),
                       edgeWeightFunctor="minimum",
                       progressCallback=updateProgressBar)
    # mst.raw is not set here in order to avoid redundant data storage.
    mst.raw = None

    # Output is of shape 1.
    result[0] = mst

    # Save the settings so they can be reloaded if the user asks for them.
    self.initialSigma = sigma
    self.initialFilter = volume_filter

    self.enableReset(False)
    self._unsavedData = True
    self._dirty = False
    self.enableDownstream(True)

    # Cache the result.
    self._prepData = result

    # Why set the output dirty here?
    # Preprocessing is only triggered by the "Run" button.
    # Setting the output dirty propagates the event to the carving operator,
    # which copies the result that was just computed.
    # This gives explicit control over when the preprocessing is executed.
    self.PreprocessedData.setDirty()

    return result
if len(sys.argv) > 2:  # assumed guard around the optional output-file argument
    outputf = sys.argv[2]
else:
    outputf = "test.graph5"

print "preprocessing file %s to outputfile %s" % (inputf, outputf)

sigma = 1.6

h5f = h5py.File(inputf, "r")
#volume = h5f["volume/data"][0,:,:,:,0]
volume = h5f["sbfsem"][:, :450, :450]
print "input volume shape: ", volume.shape
print "input volume size: ", volume.nbytes / 1024**2, "MB"
fvol = volume.astype(numpy.float32)

volume_feat = vigra.filters.hessianOfGaussianEigenvalues(fvol, sigma)[:, :, :, 0]

# Rescale the feature volume to the range [0, 255].
volume_ma = numpy.max(volume_feat)
volume_mi = numpy.min(volume_feat)
volume_feat = (volume_feat - volume_mi) * 255.0 / (volume_ma - volume_mi)

print "Watershed..."
labelVolume = vigra.analysis.watersheds(volume_feat)[0].astype(numpy.int32)
print labelVolume.shape, labelVolume.dtype

mst = MSTSegmentor(labelVolume, volume_feat.astype(numpy.float32), edgeWeightFunctor="minimum")
mst.raw = volume
mst.saveH5(outputf, "graph")
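# Example invocation of the script above (the script name and input file are
# placeholders; the second argument is optional and defaults to "test.graph5"):
#
#   python make_carving_graph.py sbfsem_volume.h5 test.graph5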