for vectors in nVectors:
    for ratio in skipRatios:
        for iterations in maxIterations:
            for filterB in filters:
                for contact in contacts:
                    for step in steps:
                        f = open(logfilename, 'ab')
                        sourceName = workingDir + localDir + image
                        print(sourceName)
                        print(rep)
                        i = ImgOpener()
                        sourceImg = i.openImgs(sourceName).get(0)
                        axes = [Axes.X, Axes.Y, Axes.Z]
                        calibration = [1, 1, 1]
                        units = ["", "", ""]
                        source = ImgPlus(sourceImg, image, axes, calibration, units)
                        name = source.getName()
                        print("name: " + name)
                        imp = IJ.openImage(sourceName)  # need for skeletonization
                        imp.show()
                        name = (name + "-runs-" + str(rep) + "-weighted-" + str(wa)
                                + "-maxShift-" + str(maxShift) + "-vectors-" + str(vectors)
                                + "-ratio-" + str(ratio) + "-maxIts-" + str(iterations)
                                + "-filter-" + str(filterB) + "-contact-" + str(contact)
                                + "-step-" + str(step * 1.0 / 2.3))
                        startTime = time.time()
                        wrapper = cs.run("org.bonej.wrapperPlugins.EllipsoidFactorWrapper", False,
                                         ["inputImgPlus", source,
                                          "nVectors", vectors,
                                          "vectorIncrement", step * 1.0 / 2.3,
                                          "skipRatio", ratio,
                                          "contactSensitivity", contact,
                                          "maxIterations", iterations,
                                          "maxDrift", maxShift * 1.73,
                                          "minimumSemiAxis", filterB,
                                          "seedOnDistanceRidge", True,
                                          "seedOnTopologyPreserving", True,
                                          "showSecondaryImages", True,
                                          "runs", rep,
                                          "weightedAverageN", wa,
                                          "distanceThreshold", distanceThreshold])
                        wrapperInstance = wrapper.get()
                        outputs = wrapperInstance.getOutput("ellipsoidFactorOutputImages")
                        seeds = wrapperInstance.getOutput("seedPointImage")
                        parameters = [image, rep, wa, maxShift, vectors, ratio, iterations,
                                      filterB, contact, distanceThreshold, time.time() - startTime]
                        print(len(outputs))
                        EF = outputs.get(0)  # 0 item is EF image
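                        # A minimal sketch of an assumed continuation: append this run's
                        # parameters to the log file opened above. Assumes the csv module is
                        # imported in the full script, as in the other sweep scripts here.
                        log_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                        log_writer.writerow(parameters)
                        f.close()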
from net.imglib2.type.logic import BitType
from net.imagej.axis import Axes
from net.imagej import ImgPlus
from net.imagej.ops import Ops

# first take a look at the size and type of each dimension
for d in range(data.numDimensions()):
    print "axis " + str(d) + ": type: " + str(data.axis(d).type()) + " length: " + str(data.dimension(d))

xDim = data.dimensionIndex(Axes.X)
yDim = data.dimensionIndex(Axes.Y)
zDim = data.dimensionIndex(Axes.Z)
cDim = data.dimensionIndex(Axes.CHANNEL)  # Not used, just for reference

# create the Otsu op
otsu = ops.op(Ops.Threshold.Otsu, data.getImgPlus())

# create memory for the thresholded image
thresholded = ops.create().img(data.getImgPlus(), BitType())

# call the threshold op slice-wise for the defined axes; in this case [xDim, yDim] means
# process the first two axes (x and y)
ops.slicewise(thresholded, data.getImgPlus(), otsu, [xDim, yDim])
# try again with [xDim, yDim, zDim]: is the result different? Why?

# create an ImgPlus using the thresholded img, copying metadata from the input
thresholdedPlus = ImgPlus(thresholded, data.getImgPlus(), True)

# set a new name to avoid confusion
thresholdedPlus.setName("Thresholded")
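# A minimal sketch for comparing with the [xDim, yDim, zDim] variant suggested above:
# threshold across all three axes in one call and display both results.
# Assumes a "#@ UIService ui" script parameter; the call pattern mirrors the slice-wise call above.
ui.show(thresholdedPlus)
thresholded3d = ops.create().img(data.getImgPlus(), BitType())
ops.slicewise(thresholded3d, data.getImgPlus(), otsu, [xDim, yDim, zDim])
ui.show(ImgPlus(thresholded3d, data.getImgPlus(), True))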
#@ UIService ui

# Run an H-Watershed filter on all the frames along the TIME axis.
# After the filtering step the image is clipped to match the input type.
# Varun and Claudia's H-Watershed segmentation macro

from net.imagej.axis import Axes
from net.imglib2.algorithm.labeling.ConnectedComponents import StructuringElement
from net.imglib2.roi.labeling import LabelRegions
from net.imglib2.img.display.imagej import ImageJFunctions as IJF
from net.imglib2.roi import Regions
from net.imglib2.algorithm.neighborhood import HyperSphereShape
from net.imagej.axis import CalibratedAxis
from net.imglib2.view import Views
import os
from net.imagej import ImgPlus

name = os.path.basename(os.path.splitext(data.getImgPlus().name)[0])
axes = [Axes.X, Axes.Y, Axes.TIME]
dataImg = ImgPlus(data.getImgPlus().copy(), "Result", axes)
original = ops.convert().float32(dataImg)
converted = ops.filter().gauss(original, blurradius)
imp = IJF.show(converted)

# H-Watershed returns a label map as an ImagePlus
labelimage = ops.run("H_Watershed", imp, hMin, thresh, peakFlooding, outputMask, allowSplit)
ui.show(labelimage)
import os

from java.io import File

from ij import IJ
from net.imagej import ImgPlus
from net.imagej.axis import Axes
from net.imglib2.img.display.imagej import ImageJFunctions


def batch_open_images(pathImage, file_typeImage, name_filterImage=None):
    if isinstance(pathImage, File):
        pathImage = pathImage.getAbsolutePath()

    def check_filter(string):
        '''This function is used to check for a given filter.
        It is possible to use a single string or a list/tuple of strings as filter.
        This function can access the variables of the surrounding function.
        :param string: The filename to perform the filtering on.
        '''
        if name_filterImage:
            # The first branch is used if name_filter is a list or a tuple.
            if isinstance(name_filterImage, (list, tuple)):
                for name_filter_ in name_filterImage:
                    if name_filter_ in string:
                        # Exit the function with True.
                        return True
                    else:
                        # Next iteration of the for loop.
                        continue
            # The second branch is used if name_filter is a string.
            elif isinstance(name_filterImage, basestring):
                if name_filterImage in string:
                    return True
                else:
                    return False
            return False
        else:
            # Accept all files if name_filter is None.
            return True

    def check_type(string):
        '''This function is used to check the file type.
        It is possible to use a single string or a list/tuple of strings as filter.
        This function can access the variables of the surrounding function.
        :param string: The filename to perform the check on.
        '''
        if file_typeImage:
            # The first branch is used if file_type is a list or a tuple.
            if isinstance(file_typeImage, (list, tuple)):
                for file_type_ in file_typeImage:
                    if string.endswith(file_type_):
                        # Exit the function with True.
                        return True
                    else:
                        # Next iteration of the for loop.
                        continue
            # The second branch is used if file_type is a string.
            elif isinstance(file_typeImage, basestring):
                if string.endswith(file_typeImage):
                    return True
                else:
                    return False
            return False
        # Accept all files if file_type is None.
        else:
            return True

    # We collect all files to open in a list.
    path_to_Image = []
    # Replacing some abbreviations (e.g. $HOME on Linux).
    path = os.path.expanduser(pathImage)
    path = os.path.expandvars(path)
    # If we don't want a recursive search, we can use os.listdir().
    for directory, dir_names, file_names in os.walk(path):
        # We are only interested in files.
        for file_name in file_names:
            # The list contains only the file names.
            # The full path needs to be reconstructed.
            full_path = os.path.join(directory, file_name)
            # Both checks are performed to filter the files.
            if check_type(file_name):
                if check_filter(file_name):
                    # Add the file to the list of images to open.
                    path_to_Image.append([
                        full_path,
                        os.path.basename(os.path.splitext(full_path)[0])
                    ])

    Images = []
    for img_path, file_name in path_to_Image:
        image = IJ.openImage(img_path)
        dataset = ds.create(ImageJFunctions.convertFloat(image))
        axes = [Axes.X, Axes.Y, Axes.Z, Axes.TIME, Axes.CHANNEL]
        dataImg = ds.create(ImgPlus(dataset.getImgPlus().copy(), file_name, axes))
        Images.append(dataImg)
    return Images
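# A minimal usage sketch: open every .tif under a folder (the path below is a placeholder)
# with no name filter. Assumes "ds" is a "#@ DatasetService ds" script parameter, which the
# function above requires.
images = batch_open_images("/path/to/images", ".tif")
for img in images:
    print(img.getName())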
# NOTE: the writer and file variable names here are assumed from the parallel sweep scripts
# in this collection; the original excerpt starts mid-call with the keyword arguments.
parameter_writer = csv.writer(parameter_file,
                              delimiter=',',
                              quotechar='"',
                              quoting=csv.QUOTE_MINIMAL)

for image in files:
    for points in maxSeedPoints:
        for threshold in thresholds:
            for nDirections in nSearchDirections:
                sourceName = workingDir + localDir + image
                print(sourceName)
                startTime = time.time()
                i = ImgOpener()
                sourceImg = i.openImgs(sourceName).get(0)
                axes = [Axes.X, Axes.Y, Axes.Z]
                calibration = [1, 1, 1]
                units = ["", "", ""]
                source = ImgPlus(sourceImg, image, axes, calibration, units)
                name = source.getName()
                name = (name + "_" + str(points) + "_points_"
                        + str(threshold) + "_threshold_"
                        + str(nDirections) + "_directions_")
                wrapper = cs.run(
                    "org.bonej.wrapperPlugins.EllipsoidFactorWrapper", True,
                    ["inputImage", source,
                     "nSphere", nDirections,
                     "approximateMaximumNumberOfSeeds", points,
                     "thresholdForBeingARidgePoint", threshold,
                     "showSecondaryImages", True])
                wrapperInstance = wrapper.get()
                EF = wrapperInstance.getOutput("efImage")
from net.imglib2.type.logic import BitType
from net.imagej.axis import Axes
from net.imagej import ImgPlus
from net.imagej.ops import Ops

# first take a look at the size and type of each dimension
for d in range(data.numDimensions()):
    print "axis " + str(d) + ": type: " + str(data.axis(d).type()) + " length: " + str(data.dimension(d))

xDim = data.dimensionIndex(Axes.X)
yDim = data.dimensionIndex(Axes.Y)
zDim = data.dimensionIndex(Axes.Z)
cDim = data.dimensionIndex(Axes.CHANNEL)  # Not used, just for reference

# create the Otsu op
otsu = ops.op(Ops.Threshold.Otsu, data.getImgPlus())

# create memory for the thresholded image
thresholded = ops.create().img(data.getImgPlus(), BitType())

# call the threshold op slice-wise for the defined axes; in this case [xDim, yDim] means
# process the first two axes (x and y)
ops.slice(thresholded, data.getImgPlus(), otsu, [xDim, yDim])
# try again with [xDim, yDim, zDim]: is the result different? Why?

# create an ImgPlus using the thresholded img, copying metadata from the input
thresholdedPlus = ImgPlus(thresholded, data.getImgPlus(), True)

# set a new name to avoid confusion
thresholdedPlus.setName("Thresholded")
parameter_writer.writerow(parameterlist)

for image in files:
    for directions in nDirections:
        for skipRatio in skipRatios:
            try:
                sourceName = workingDir + localDir + image
                i = ImgOpener()
                print(i)
                print(sourceName)
                print("directions = ", directions, " skipRatio = ", skipRatio)
                sourceImg = i.openImgs(sourceName).get(0)
                print(sourceImg)
                axes = [Axes.X, Axes.Y, Axes.Z]
                calibration = [1, 1, 1]
                units = ["", "", ""]
                source = ImgPlus(sourceImg, image, axes, calibration, units)
                #ds.createDisplay(source)
                name = source.getName()
                name = name + "_dir_" + str(directions) + "_sr_" + str(skipRatio)
                startTime = time.time()
                wrapper = cs.run("org.bonej.wrapperPlugins.EllipsoidFactorWrapper", False,
                                 ["inputImgPlus", source,
                                  "nVectors", directions,
                                  "vectorIncrement", 1 / 2.3,
                                  "skipRatio", skipRatio,
                                  "contactSensitivity", 1,
                                  "maxIterations", 100,
                                  "maxDrift", 1.73,
                                  "sigma", 0,
                                  "showSecondaryImages", True])
                wrapperInstance = wrapper.get()
                EF = wrapperInstance.getOutput("efImage")
                timeTaken = time.time() - startTime
                print("done")
                print(timeTaken)
                table = wrapperInstance.getOutput("resultsTable")
                print(table)
                print(table[0])
                print(len(table))
                d = DefaultDataset(ctxt, EF)
parameter_writer = csv.writer(parameter_file,
                              delimiter=',',
                              quotechar='"',
                              quoting=csv.QUOTE_MINIMAL)
parameter_writer.writerow(["name", histoBins])

for image in files:
    sourceName = (workingDir + "EF/" + image
                  + "-runs-6-weighted-1-maxShift-1-vectors-100-ratio-1-maxIts-50-filter-0-contact-5-step-0.217391304348"
                  + "-EF.tif")
    print(sourceName)
    startTime = time.time()
    i = ImgOpener()
    sourceImg = i.openImgs(sourceName).get(0)
    axes = [Axes.X, Axes.Y, Axes.Z]
    calibration = [1, 1, 1]
    units = ["", "", ""]
    source = ImgPlus(sourceImg, image, axes, calibration, units)
    name = source.getName()

    histoMapper = Real1dBinMapper(-1, 1, 50, False)
    histo = Histogram1d(histoMapper)
    histo.countData(source)
    print(histoBins)
    histoBinFreqs = histo.toLongArray()
    print("freq: ", histoBinFreqs)
    histoMults = [abs(histoBinFreqs[i] * histoBins[i] / 100.0)
                  for i in range(len(histoBins))]
    print(histoMults)
    total = histo.distributionCount()
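    # Sketch of an assumed continuation: write one row per image with its name and raw bin
    # counts through the writer opened above.
    parameter_writer.writerow([name] + list(histoBinFreqs))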
# NOTE: the writer and file variable names here are assumed from the parallel scripts in
# this collection; the original excerpt starts mid-call with the keyword arguments.
parameter_writer = csv.writer(parameter_file,
                              delimiter=',',
                              quotechar='"',
                              quoting=csv.QUOTE_MINIMAL)

for image in files:
    for nDirections in nSearchDirections:
        sourceName = (workingDir + localDir + "../EF-ported/" + image
                      + "_" + str(nDirections) + "-EF.tif")
        print(sourceName)
        startTime = time.time()
        i = ImgOpener()
        sourceImg = i.openImgs(sourceName).get(0)
        axes = [Axes.X, Axes.Y, Axes.Z]
        calibration = [1, 1, 1]
        units = ["", "", ""]
        source = ImgPlus(sourceImg, image, axes, calibration, units)
        name = source.getName()
        name = name + "_" + str(nDirections)

        histoMapper = Real1dBinMapper(-1, 1, 50, False)
        histo = Histogram1d(histoMapper)
        histo.countData(source)
        print(histoBins)
        histoBinFreqs = histo.toLongArray()
        print(histoBinFreqs)
        histoMults = [histoBinFreqs[i] * histoBins[i] / 100.0
                      for i in range(len(histoBins))]
        print(histoMults)
from net.imglib2.roi.labeling import LabelRegions
from net.imglib2.img.display.imagej import ImageJFunctions as IJF
from net.imglib2.roi import Regions
from net.imglib2.algorithm.neighborhood import HyperSphereShape
from net.imagej.axis import CalibratedAxis
from net.imglib2.view import Views
import os
from net.imagej.axis import Axes
from net.imagej import ImgPlus

# Convert data to float 32
name = os.path.basename(os.path.splitext(data.getImgPlus().name)[0])
axes = [Axes.X, Axes.Y, Axes.TIME]
dataImg = ImgPlus(data.getImgPlus().copy(), "Result", axes)
original = ops.convert().float32(dataImg)
converted = ops.filter().gauss(original, blurradius)

# Allocate output memory (wait for hybrid CF version of slice)
dog = ops.create().img(converted)

# Create the op
dog_op = ops.op("filter.dog", converted, sigma1, sigma2)

# Setup the fixed axis
t_dim = dataImg.dimensionIndex(Axes.TIME)
fixed_axis = [d for d in range(0, data.numDimensions()) if d != t_dim]
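# A minimal sketch of the next step, assuming the same slice-wise pattern as the
# thresholding scripts in this collection (the exact op name varies between imagej-ops
# versions) and a "#@ UIService ui" script parameter for display.
ops.slicewise(dog, converted, dog_op, fixed_axis)
ui.show("DoG_" + name, dog)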
# @OpService ops
# @UIService ui
# @ImgPlus source

from net.imagej.ops import Ops
from net.imagej.ops.image.distancetransform import DistanceTransform3D
from net.imagej import ImgPlus

sourceBitType = ops.convert().bit(source)
d = ops.image().distancetransform(sourceBitType)
print(d)
distanceTransformedPlus = ImgPlus(d, source, True)
print("almost done")
ui.show("EDT", distanceTransformedPlus)