def __init__(
        self,
        imp,
        classifier="/Users/juliansegert/repos/Bio119_root_tracking/bright.model"
    ):
        """Store the image and classifier path, then load the Weka model.

        NOTE(review): this ``__init__`` sits at module level, detached from
        any class -- presumably a stray copy of ``Weka_segmentor.__init__``
        defined below, with a hard-coded absolute model path as default.
        """
        self.imp = imp
        self.classifier = classifier

        # Build the segmentor around the stored image and load the trained
        # classifier from disk.
        self.segmentor = WekaSegmentation(self.imp)
        self.segmentor.loadClassifier(self.classifier)
class Weka_segmentor(object):
    """Thin wrapper around trainable Weka segmentation.

    Holds an input image plus the path to a trained model, and exposes a
    helper that applies the loaded classifier to an arbitrary image.
    """

    def __init__(self, imp, classifier="bright.model"):
        """Remember *imp* and the model path; build and load the segmentor."""
        self.imp = imp
        self.classifier = classifier
        self.segmentor = WekaSegmentation(imp)
        self.segmentor.loadClassifier(classifier)

    def getThreshold(self, imp):
        """Apply the loaded classifier to *imp* and return the result as a
        new ImagePlus carrying the same title."""
        classified = self.segmentor.applyClassifier(imp)
        return ImagePlus(imp.title, classified.getProcessor())
class Weka_segmentor(object):
    """ Wrapper class for trainable Weka segmentation. """
    # NOTE(review): duplicate of the Weka_segmentor class defined earlier in
    # this file; at import time the later definition wins.
    def __init__(self, imp, classifier="bright.model"):
        """Store the image and model path, then load the Weka classifier."""
        self.imp = imp
        self.classifier = classifier

        self.segmentor = WekaSegmentation(self.imp)
        self.segmentor.loadClassifier(self.classifier)

    def getThreshold(self, imp):
        """Apply the loaded classifier to *imp*; return the classified image
        wrapped in an ImagePlus that keeps the input's title."""
        result = ImagePlus(imp.title,
                           self.segmentor.applyClassifier(imp).getProcessor())
        return result
def segmentChannel_Weka(image, **kwargs):
	""" SegmentChannel using a Weka Classification"""
	ch = kwargs['channel']
	if ch > len(image): 
		raise Exception('Expecting at least ' + str(ch) + ' channels. Image has only ' + str(len(imageC)) + ' channel(s)')
	imp = image[ch-1].duplicate()
	ws = WekaSegmentation(imp)	# create an instance
	ws.loadClassifier(kwargs['clspath']) # load classifier
	impProb  = ws.applyClassifier(imp, 0, True)
	impMetaProb = extractChannel(impProb,1,1)
	impMetaProb.setTitle("MetaProb")
	# segmentation
	impBW = threshold(impMetaProb, kwargs['probThr'])
	impMetaProb.show()
	IJ.run("Set Measurements...", " mean center shape area redirect=MetaProb decimal=2");
	
	# particle analysis
	IJ.run(impBW, "Analyze Particles...", "size=10-10000 pixel area circularity=0.00-1.00 display exclude clear stack add");
	rt = Analyzer.getResultsTable()
	validParticles = []
	roim = RoiManager.getInstance()
	if roim == None:
		raise Exception('Fiji error segmentNuclei.py: no RoiManager!')
	if roim.getCount() > 0:
		rois = roim.getRoisAsArray()
	else:
		IJ.log("# particles = 0")
		return impMetaProb, impBW, None, None
	X = rt.getColumn(rt.getColumnIndex("XM"))
	Y = rt.getColumn(rt.getColumnIndex("YM"))
	Mean = rt.getColumn(rt.getColumnIndex("Mean"))
	Circ = rt.getColumn(rt.getColumnIndex("Circ."))
	Area = rt.getColumn(rt.getColumnIndex("Area"))
	print "# particles = " + str(len(Mean))
	nValid = 0
	for i in range(len(Mean)):
		valid = (Mean[i]>kwargs['minProb']) and (Circ[i]<kwargs['maxCirc']) # filter particles post detection
		if(valid):
			validParticles.append([i, X[i], Y[i], Mean[i]])
	print validParticles
	IJ.log("# valid particles = %d " % len(validParticles))
	# sort particles according to Mean
	validParticlesSorted = sorted(validParticles, key=itemgetter(3))
	# only keep the three with the largest Mean
	validParticles = validParticlesSorted[-int(kwargs["nrPart"]):]
    #random.shuffle(validParticles)
	IJ.log("# valid particles = %d " % len(validParticles))
	if len(validParticles) == 0:
		validParticles = None
	return impMetaProb, impBW, validParticles, rois
Example #5
0
def process(srcDir, dstDir, currentDir, fileName):
    print "Processing:"

    # Opening the image
    print "Open image file", fileName
    image = IJ.openImage(os.path.join(srcDir, fileName))

    weka = WekaSegmentation()
    weka.setTrainingImage(image)

    # Manually loads trained classifier
    weka.loadClassifier(r'C:\Users\Angu312\Scripts\VectorClassifier.model')
    # Apply classifier and get results
    segmented_image = weka.applyClassifier(image, 0, False)
    # assign same LUT as in GUI. Within WEKA GUI, right-click on classified image and use Command Finder to save the "LUT" within Fiji.app\luts
    lut = LutLoader.openLut(
        r'C:\Users\Angu312\Documents\Fiji.app\luts\Golden ARC Lut.lut')
    segmented_image.getProcessor().setLut(lut)

    # Saving the image as a .tif file
    saveDir = dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    print "Saving to", saveDir
    IJ.saveAs(segmented_image, "Tif", os.path.join(saveDir, fileName))
    image.close()
Example #6
0
def analyzeImage(passImage, passModel, passChannel, passProbability,
                 passPixels, passOutput):
    """Max-project *passImage*, classify it with a Weka model, threshold the
    probability map and run particle analysis; interim images are saved to
    *passOutput*.

    Each returned row is the tokenised measurement row prefixed with the
    current image's calibration unit and suffixed with the ratio of its
    column-4 to column-3 value (0-based, after the prefix insert).

    NOTE(review): ``passChannel`` is accepted but never used in this body.
    """
    retResults = list()
    windows = set()

    # Register current window (so it can be closed at the end)
    registerWindow(passImage.title, windows)

    # Extract the requested channel
    IJ.run("Z Project...", "projection=[Max Intensity]")
    registerWindow("MAX_" + passImage.title, windows)
    IJ.run("Duplicate...", "title=temp")
    registerWindow("temp", windows)

    # Apply WEKA training model to image
    wekaSeg = WekaSegmentation(WindowManager.getCurrentImage())
    wekaSeg.loadClassifier(passModel)
    wekaSeg.applyClassifier(True)

    # Extract first slice of probability map
    wekaImg = wekaSeg.getClassifiedImage()
    wekaImg.show()
    registerWindow("Probability maps", windows)
    IJ.setSlice(1)
    IJ.run("Duplicate...", "title=temp2")
    registerWindow("temp2", windows)

    # Apply threshold and save
    IJ.setThreshold(passProbability, 1, "Black & White")
    fileParts = passImage.getTitle().split(".")
    # NOTE(review): the second .format() argument is unused -- the saved
    # name is always "<basename>-probmap.png".
    IJ.save(
        os.path.join(
            passOutput, "{0}-probmap.png".format(fileParts[0],
                                                 '.'.join(fileParts[1:]))))

    # Perform particle analysis and save
    IJ.run("Analyze Particles...",
           "size={0}-Infinity show=Outlines pixel clear".format(passPixels))
    registerWindow("Drawing of temp2", windows)
    IJ.save(
        os.path.join(
            passOutput, "{0}-particles.png".format(fileParts[0],
                                                   '.'.join(fileParts[1:]))))

    # Get measurements
    tableResults = ResultsTable.getResultsTable()
    for rowIdx in range(tableResults.size()):
        retResults.append(tableResults.getRowAsString(rowIdx).split())
        # Prefix the row with the calibration unit of the current image
        retResults[-1].insert(
            0,
            WindowManager.getCurrentImage().getCalibration().unit)
        # Append ratio of measurement columns 4/3 (after the unit insert)
        retResults[-1].append(
            float(retResults[-1][4]) / float(retResults[-1][3]))

    # Close windows
    closeWindows(windows)

    return retResults
def Weka_Segm(dirs):
	""" Loads trained classifier and segments cells """ 
	"""	in aligned images according to training.    """
	
	# Define reference image for segmentation (default is timepoint000).
	w_train = os.path.join(dirs["Composites_Aligned"], "Timepoint000.tif")
	trainer = IJ.openImage(w_train)
	weka = WekaSegmentation()
	weka.setTrainingImage(trainer)
	
	# Select classifier model.
	weka.loadClassifier(str(classifier))
     
	weka.applyClassifier(False)
	segmentation = weka.getClassifiedImage()
	segmentation.show()

	# Convert image to 8bit
	ImageConverter(segmentation).convertToRGB()
	ImageConverter(segmentation).convertToGray8()
		
	# Threshold segmentation to soma only.
	hist = segmentation.getProcessor().getHistogram()
	lowth = Auto_Threshold.IJDefault(hist)
	segmentation.getProcessor().threshold(lowth)
	segmentation.getProcessor().setThreshold(0, 0, ImageProcessor.NO_LUT_UPDATE)
	segmentation.getProcessor().invert()
	segmentation.show()
	
	# Run Watershed Irregular Features plugin, with parameters.
	IJ.run(segmentation, "Watershed Irregular Features",
	      "erosion=20 convexity_treshold=0 separator_size=0-Infinity")

	# Make selection and add to RoiManager.	
	RoiManager()
	rm = RoiManager.getInstance()
	rm.runCommand("reset")
	roi = ThresholdToSelection.run(segmentation)
	segmentation.setRoi(roi)
	rm.addRoi(roi)
	rm.runCommand("Split")
def analyse(imp, root, filename):
	"""Classify *imp* with a fixed Weka model, threshold the probability
	map and skeletonize the resulting mask.

	NOTE(review): ``root`` and ``filename`` are accepted but never used in
	the visible body; ``extractChannel`` and ``threshold`` are helpers
	defined elsewhere in the original script.
	"""
	#
	# classification
	#
	classifierPath = "//almf/almf/software/scripts/imagej/2015-10-14--Tischi--ShaneMorley--MTnetworkAnalysis/classifier.model"
	ws = WekaSegmentation(imp)  # create an instance
	ws.loadClassifier(classifierPath) # load classifier
	impProb = ws.applyClassifier(imp, 0, True)
	#impProb.show()
	impMetaProb = extractChannel(impProb,1,1)
	impMetaProb.setTitle("MetaProb")

	#
	# segmentation
	#
	impBW = threshold(impMetaProb,0.75)
	impMetaProb.show()
	impBW.show()

	#
	# skeletonization
	# NOTE(review): bare ``run(...)`` (not ``IJ.run``) -- this only works if
	# a macro-style ``run`` helper is imported/defined elsewhere; verify.
	#
	run("Skeletonize (2D/3D)");
	run("Analyze Skeleton (2D/3D)", "prune=[shortest branch] show display");
def analyzeImage(passImage, passModel, passProbability, passPixels,
                 passOutput):
    """Classify *passImage* with a Weka model, threshold the first slice of
    the probability map and run particle analysis, saving interim images
    to *passOutput*.

    Returns the tokenised particle-measurement rows, skipping the table's
    final row (the legend).
    """
    retResults = list()

    # Apply WEKA training model to image
    wekaSeg = WekaSegmentation(passImage)
    wekaSeg.loadClassifier(passModel)
    wekaSeg.applyClassifier(True)

    # Extract first slice of probability map
    wekaImg = wekaSeg.getClassifiedImage()
    wekaImg.show()

    IJ.selectWindow("Probability maps")
    IJ.setSlice(1)
    IJ.run("Duplicate...", "title=temp")

    # Apply threshold and save
    IJ.setThreshold(passProbability, 1, "Black & White")
    fileParts = passImage.getTitle().split(".")
    # NOTE(review): the second .format() argument is unused -- the saved
    # name is always "<basename>-probmap.png".
    IJ.save(
        os.path.join(
            passOutput, "{0}-probmap.png".format(fileParts[0],
                                                 '.'.join(fileParts[1:]))))

    # Perform particle analysis and save
    IJ.run("Analyze Particles...",
           "size={0}-Infinity show=Outlines pixel clear".format(passPixels))
    IJ.selectWindow("Drawing of temp")
    IJ.save(
        os.path.join(
            passOutput, "{0}-particles.png".format(fileParts[0],
                                                   '.'.join(fileParts[1:]))))

    # Get measurements (skip final row, this will correspond to legend)
    tableResults = ResultsTable.getResultsTable()
    for rowIdx in range(tableResults.size() - 1):
        retResults.append(tableResults.getRowAsString(rowIdx).split())

    # Close interim windows
    IJ.run("Close")
    IJ.selectWindow("temp")
    IJ.run("Close")
    IJ.selectWindow("Probability maps")
    IJ.run("Close")

    return retResults
Example #10
0
def training_Classifier():
    """Train a two-class Weka segmentator on a labelled sample image and
    display the classification of a separate test image.

    For all slices of the label image, white pixels serve as examples of
    "class 2" and black pixels as examples of "class 1".
    """
    training_image = IJ.openImage(
        "/home/aik19/Achintha/Sintering analysis/Scripts/Sintering_Analysis_The_New_Approach/training_sample_1.tif"
    )
    label_image = IJ.openImage(
        "/home/aik19/Achintha/Sintering analysis/Scripts/Sintering_Analysis_The_New_Approach/Filtered_training_Sample.labels.tif"
    )
    # Build the segmentator, feed it the binary labels and train.
    weka = WekaSegmentation(training_image)
    weka.addBinaryData(training_image, label_image, "class 2", "class 1")
    weka.trainClassifier()
    # Classify an independent test image and show the result.
    test_image = IJ.openImage(
        "/home/aik19/Achintha/Sintering analysis/Scripts/Sintering_Analysis_The_New_Approach/74122_micro_test_3.tif"
    )
    weka.applyClassifier(test_image).show()
import os
from timeit import default_timer as timer
from trainableSegmentation import WekaSegmentation
from ij import IJ

# Interactive Jython session: load a 3-class Weka model and classify a test
# image. Lines starting with "###" are captured console/REPL output.
indir = "/scratch/data/__TESTFILES/weka"
infile = os.path.join(indir, "1462_mko_ctx_1.tif")
modelfile = os.path.join(indir, "tissue_fibrotic_bg.model")

input_image = IJ.openImage(infile)
segmentator = WekaSegmentation(input_image)
segmentator.loadClassifier(modelfile)
### Field of view: max sigma = 16.0, min sigma = 0.0
### Membrane thickness: 1, patch size: 19
### Read class name: tissue
### Read class name: fibrotic
### Read class name: bg

# Bare attribute access: inspects the enabled-feature flags (no effect).
segmentator.enabledFeatures
### array('z', [True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False])

segmentator.getClassLabel(0)
### u'tissue'

segmentator.getClassLabel(1)
### u'fibrotic'

segmentator.getClassLabel(2)
### u'bg'

# Apply the classifier (0 = auto thread count, True = probability maps).
result = segmentator.applyClassifier(input_image, 0, True)
    def __init__(self, imp, classifier = "/Users/juliansegert/repos/Bio119_root_tracking/bright.model"):
        """Keep the image and model path; create and load the Weka segmentor."""
        self.imp = imp
        self.classifier = classifier
        self.segmentor = WekaSegmentation(imp)
        self.segmentor.loadClassifier(classifier)
Example #13
0
# NOTE(review): fragment -- ``channels``, ``stack``, ``featuresArray``,
# ``image``, ``posroi``, ``negroi`` and ``folder`` are defined earlier,
# outside this excerpt.
del channels

# create empty feature stack
features = FeatureStack(stack.getWidth(), stack.getHeight(), False)

# set my features to the feature stack
features.setStack(stack)
# put my feature stack into the array
featuresArray.set(features, 0)
featuresArray.setEnabledFeatures(features.getEnabledFeatures())

# Configure a Weka multilayer perceptron as the backing classifier.
mp = MultilayerPerceptron()
hidden_layers = "%i,%i,%i" % (20, 14, 8)
mp.setHiddenLayers(hidden_layers)
mp.setLearningRate(0.7)
mp.setDecay(True)
mp.setTrainingTime(200)
mp.setMomentum(0.3)

wekaSegmentation = WekaSegmentation(image)
wekaSegmentation.setFeatureStackArray(featuresArray)
wekaSegmentation.setClassifier(mp)

# Train from the positive/negative ROIs, save the model, then classify the
# image (0 = auto threads, True = probability maps).
wekaSegmentation.addExample(0, posroi, 1)
wekaSegmentation.addExample(1, negroi, 1)
wekaSegmentation.trainClassifier()
wekaSegmentation.saveClassifier(folder + "\\vessel-classifier_big.model")
output = wekaSegmentation.applyClassifier(image, featuresArray, 0, True)

output.show()
def analyzeImage(passImage, passModel, passProbability, passOutput):
    """Classify *passImage* with a Weka model and measure the thresholded
    area of each class slice of the probability map.

    Relies on module-level constants defined elsewhere: ROW_MALE,
    ROW_FEMALE, ROW_UNDIF, ROW_EXTRA, ROW_BACKGROUND (slice indices),
    FIELD_AREA (measurement column index) and FILE_TYPE (per-slice
    file-name tags).

    Returns [M/(M+F), F/(M+F), M/total, F/total, U/total, E/total, M+F,
    total], where each value is derived from the measured areas.
    """
    retResults = list()

    # Apply weka training model to image
    wekaSeg = WekaSegmentation(passImage)
    wekaSeg.loadClassifier(passModel)
    wekaSeg.applyClassifier(True)

    # Extract probability map
    wekaImg = wekaSeg.getClassifiedImage()
    wekaImg.show()
    IJ.run("Clear Results")

    # Process each slice (one per class, background last)
    for sliceIdx in range(ROW_BACKGROUND + 1):
        # Select slice and duplicate
        IJ.selectWindow("Probability maps")
        IJ.setSlice(sliceIdx + 1)
        IJ.run("Duplicate...", "title=temp")

        # Apply threshold to probability
        IJ.setThreshold(passProbability, 1, "Black & White")

        # For background, take inverse
        if sliceIdx == ROW_BACKGROUND: IJ.run("Invert")

        # Change background to NaN for area, then measure
        IJ.run("NaN Background", ".")
        IJ.run("Measure")

        # Save image to output directory
        fileParts = passImage.getTitle().split(".")
        IJ.save(
            os.path.join(
                passOutput,
                "{0}-{1}.{2}".format(fileParts[0], FILE_TYPE[sliceIdx],
                                     '.'.join(fileParts[1:]))))

        IJ.selectWindow("temp")
        IJ.run("Close")

    # Close probability maps
    IJ.selectWindow("Probability maps")
    IJ.run("Close")

    # Obtain results
    tempResults = list()
    tableResults = ResultsTable.getResultsTable()
    for rowIdx in range(tableResults.size()):
        tempResults.append(
            [float(x) for x in tableResults.getRowAsString(rowIdx).split()])

    # Compile image statistics as M/(M+F), F/(M+F), M/total, F/total, U/total, E/total, M+F, total
    mfTotal = tempResults[ROW_MALE][FIELD_AREA] + tempResults[ROW_FEMALE][
        FIELD_AREA]
    total = tempResults[ROW_BACKGROUND][FIELD_AREA]

    retResults.append(tempResults[ROW_MALE][FIELD_AREA] / mfTotal)
    retResults.append(tempResults[ROW_FEMALE][FIELD_AREA] / mfTotal)
    retResults.append(tempResults[ROW_MALE][FIELD_AREA] / total)
    retResults.append(tempResults[ROW_FEMALE][FIELD_AREA] / total)
    retResults.append(tempResults[ROW_UNDIF][FIELD_AREA] / total)
    retResults.append(tempResults[ROW_EXTRA][FIELD_AREA] / total)
    retResults.append(mfTotal)
    retResults.append(total)

    return retResults
Example #15
0
    os.mkdir(sub)

scaled = "C123_wo_scaled_bg.tif"
imp = IJ.openImage(os.path.join(folder + scaled))
channels = ChannelSplitter.split(imp)

dimentions = imp.getDimensions()
imp.close()
d1 = dimentions[0]
lim1 = dimentions[1] // 4
lim2 = lim1 * 2
lim3 = lim1 * 3
s = 300

pathh = "C:\\Users\\USER\\Documents\\studia\\zaklad\\EC_rainbow\\"
segmentator = WekaSegmentation()
segmentator.loadClassifier(pathh)

for i in range(len(channels)):
    channel = channels[i]
    pix = channel.getProcessor().getPixels()
    cm = channel.getProcessor().getColorModel()
    pix1 = pix[:((lim1 + s) * d1)]
    pix2 = pix[((lim1 - s) * d1):((lim2 + s) * d1)]
    pix3 = pix[((lim2 - s) * d1):((lim3 + s) * d1)]
    pix4 = pix[((lim3 - s) * d1):]
    imps = [
        ImagePlus("Random", ShortProcessor(d1, (lim1 + s), pix1, cm)),
        ImagePlus("Random", ShortProcessor(d1, (lim2 - lim1 + 2 * s), pix2,
                                           cm)),
        ImagePlus("Random", ShortProcessor(d1, (lim3 - lim2 + 2 * s), pix3,
    def __init__(self, imp, classifier = "bright.model"):
        """Store the image and classifier path, then load the Weka model."""
        self.imp = imp
        self.classifier = classifier

        # Build the segmentor around the stored image and load the trained
        # classifier from disk.
        self.segmentor = WekaSegmentation(self.imp)
        self.segmentor.loadClassifier(self.classifier)
  
######################
# get image from ZEN #
######################

# Stand-alone Jython snippet: open a test image, classify it with a trained
# Weka model, threshold the probability map and run particle analysis.
# NOTE(review): ``extractChannel`` and ``threshold`` are helper functions
# defined elsewhere in the original script.

# get image
imagePath = "Z:/Tischi/projects/KatharinaSonnen/20150412--Automation_H2B-GFP/Automation_H2B-GFP/TestImage_1024.tif"
imp = IJ.openImage(imagePath)
imp.show()

# classification
classifierPath = "Z:/Tischi/projects/KatharinaSonnen/20150412--Automation_H2B-GFP/Automation_H2B-GFP/classifier.model"
ws = WekaSegmentation(imp)  # create an instance
ws.loadClassifier(classifierPath) # load classifier
impProb = ws.applyClassifier(imp, 0, True)
#impProb.show()
impMetaProb = extractChannel(impProb,1,1)
impMetaProb.setTitle("MetaProb")

# segmentation: binarise the probability map at 0.6
impBW = threshold(impMetaProb,0.6)
impMetaProb.show()
IJ.run("Set Measurements...", " mean center shape area redirect=MetaProb decimal=2");
impBW.show()

# particle analysis
IJ.run("Analyze Particles...", "size=10-10000 pixel area circularity=0.00-1.00 display exclude clear stack add");
rt = Analyzer.getResultsTable()
Example #18
0
def run():
	"""Micropilot-style online-analysis loop for microscope feedback.

	Repeatedly reads an image file name from the Windows registry (written
	there by the microscope control software), opens the image, detects
	particles (cells/nuclei) by thresholding or Weka classification,
	selects the "best" particle according to a project-specific criterion,
	and writes its offset/orientation plus a status code back to the
	registry for the microscope to act on.

	NOTE(review): depends on many module-level names defined elsewhere in
	the file (LSM780, LSM510, LSM*_CODE_* constants, txt_out,
	get_imageFile_from_microscope, WindowsRegistry, test_filename, ...).
	"""

	# project = "andrea_boni_ellenberg"
	project = "julia_roberti_ellenberg"

	# running modalities
	max_images_to_analyse = 10000 #10000
	communicateWithMicroscope = True
	#winReg_separator = "    "
	winReg_separator = "REG_SZ"
	winReg_waitIntervall = 1
	microscope = LSM780 # LSM510
	#winReg_location = "HKLM\SOFTWARE\Test"
	winReg_location = "HKCU\SOFTWARE\VB and VBA Program Settings\OnlineImageAnalysis\macro"

	# Per-project particle-detection parameters.
	if project == "andrea_boni_ellenberg":
		particles_area_min = 3750    # divide by 4 for 512x512
		particles_area_max = 12500    # divide by 4 for 512x512
		particles_minCirc = 0.5
		particles_maxCirc = 1.0
		particles_threshold_method = "globalOtsuThreshold"
		particle_selectionMode = "nuclear_rim_intensity"
		iSlice_particles = 2
		iSlice_measurement = 1
		meanIntensThreshold = 30

		nPixShrink = 2
		nPixDilate = 10
		minCellsPerImage = 1 # required cells per image
		minCellsPerWell = 4 # required cells per well
		drawRings = False # True or False !!needs to be False for the real measurement !!

	elif project == "julia_roberti_ellenberg":#  !!!!!!!!!!!CHANGE PARAMETERS HERE!!!!!!!!!!!!!!!!!
		particles_area_min = 1001  #  depends whether you use 512x512 or something else
		particles_area_max = 5000
		particles_minCirc = 0.0 # reject non-round particles
		particles_maxCirc = 1.0
		particles_threshold_method = "globalOtsuThreshold"
		particle_selectionMode = "nuclear_intensity"
		iSlice_particles = 2 # channel containing the particle marker
		iSlice_measurement = 1  # channel to be measured
		intensityThreshold = 300   #  if depends on microscope settings
		criterium = "mean_intensity" # !!! intensityThreshold is the mean
		# criterium = "total_intensity" # !!! intensityThreshold is the total


	else:
		particles_area_min = 15000
		particles_area_max = 50000
		# NOTE(review): this overrides the 15000 assigned two lines above --
		# probably a leftover; confirm which value is intended.
		particles_area_min = 50
		particles_minCirc = 0.0
		particles_maxCirc = 1.0
		particles_threshold_method = "autoLocalThreshold"
		particle_selectionMode = "classification"


	if particle_selectionMode == "classification":
		# select classifier file
		classifier_file = "/Users/tischi/Documents/40x_widefield_cherry_bg_interphase_mitosis.model.model"
		# NOTE(review): the line below overrides the path above.
		classifier_file = "Z:/software/micropilot/Tischi-FIJI/testimages/3metaphase.model"

		#classifier_file = "D:/data/iliana/training_06_liveh2bcherry_tubgfp/classifier.model"
		ws = WekaSegmentation()  # create an instance
		ws.loadClassifier(classifier_file) # load classifier


	# Main acquisition/analysis loop.
	for iImageAnalyzed in range(max_images_to_analyse):

		# txt_out(str(iImageAnalyzed))

		if communicateWithMicroscope:
			code, image_file = get_imageFile_from_microscope(winReg_location, winReg_separator, winReg_waitIntervall)
		else:
			code  = LSM510_CODE_READIMAGE
			image_file = test_filename

		# prepare output
		sX = "0"
		sY = "0"
		sOrientation = "0"


		if project == "andrea_boni_ellenberg":
			pass  # parameters have been set above
		elif code == LSM510_CODE_READIMAGE:
			particles_radius = 50 # ask the user
			particles_threshold = 2 # ask the user (pixels > mean + threshold*sd)
		elif code == LSM510_CODE_READIMAGE_HIGHRES:
			particles_radius = 4*50 # ask the user
			particles_threshold = 2 # ask the user (pixels > mean + threshold*sd)


		# clean up for new stuff **********************

		# close all open windows
		if WindowManager.getIDList() != None:
			#print "WindowManager.getIDList :"
			#print WindowManager.getIDList()
			for iImage in WindowManager.getIDList():
				imp = WindowManager.getImage(iImage)
				imp.changes = False
				#imp.close()
				imp.getWindow().close()


		# clean up old ROIs
		if RoiManager.getInstance() != None:
			RoiManager.getInstance().runCommand("reset")

		# end of clean up *****************************

		# run("Bio-Formats Importer", "  open=D:\\data\\tischi\\_R1.lsm color_mode=Default view=Hyperstack stack_order=XYCZT");
		#imp_image_tmp = ImagePlus(image_file)
		#imp_image_tmp.show()

		# OPEN THE IMAGE
		ImagePlus(image_file).show() # todo: check for errors while opening!!
		imp_image = IJ.getImage()
		IJ.run(imp_image, "Properties...", "unit=pix pixel_width=1 pixel_height=1 voxel_depth=1 origin=0,0")



		# find particles
		txt_out("finding particles...")

		# get/generate/extract the image containing the particles
		if project == "andrea_boni_ellenberg":
			imp_image.setSlice(iSlice_particles)
			#imp_particles = imp_image.duplicate()
			# todo; how to only duplicate the current slice??
			IJ.run(imp_image, "Duplicate...", "title=im_particles");
			imp_particles = IJ.getImage()
		elif project == "julia_roberti_ellenberg":
			IJ.run(imp_image, "Z Project...", "projection=[Max Intensity]");
			imp_zMax = IJ.getImage()
			imp_zMax.setSlice(iSlice_particles)
			IJ.run(imp_zMax, "Duplicate...", "title=im_particles");
			imp_particles = IJ.getImage()
		else:
			imp_particles = None


		# smooth
		IJ.run(imp_particles, "Median...", "radius=5 slice");

		# threshold
		txt_out("thresholding...")
		imp_mask = imp_particles.duplicate()
		IJ.run(imp_mask, "8-bit", "")

		if particles_threshold_method == "globalFixedThreshold":
			ip = imp_mask.getProcessor()
			ip.setThreshold(particles_threshold, 255, ImageProcessor.NO_LUT_UPDATE)
			IJ.run(imp_mask, "Convert to Mask", "")
		if particles_threshold_method == "globalOtsuThreshold":
			IJ.run(imp_mask, "Auto Threshold", "method=Otsu white")
		elif particles_threshold_method == "autoLocalThreshold":
			alt = Auto_Local_Threshold()
			alt.exec(imp_mask,"Niblack",particles_radius,particles_threshold,0.0,True)


		# post-processing of the binary image (e.g. watershed touching objects)


		imp_mask.show()


		# Create a ParticleAnalyzer, with arguments:
		# 1. options (could be SHOW_ROI_MASKS, SHOW_OUTLINES, SHOW_MASKS, SHOW_NONE, ADD_TO_MANAGER, and others; combined with bitwise-or)
		# 2. measurement options (see [http://rsb.info.nih.gov/ij/developer/api/ij/measure/Measurements.html Measurements])
		# 3. a ResultsTable to store the measurements
		# 4. The minimum size of a particle to consider for measurement
		# 5. The maximum size (idem)
		# 6. The minimum circularity of a particle
		# 7. The maximum circularity
		table = ResultsTable()
		pa = ParticleAnalyzer(ParticleAnalyzer.ADD_TO_MANAGER | ParticleAnalyzer.EXCLUDE_EDGE_PARTICLES, Measurements.CENTROID | Measurements.CENTER_OF_MASS | Measurements.ELLIPSE | Measurements.AREA, table, particles_area_min, Double.POSITIVE_INFINITY, particles_minCirc, particles_maxCirc)
		pa.setHideOutputImage(False)


		if pa.analyze(imp_mask):
			pass
		else:
			print "There was a problem in analyzing the particles"
			return()


		# initialise
		nGoodParticles = 0
		iBestParticle = 0

		if (RoiManager.getInstance() != None): # found a particle

			if (len(RoiManager.getInstance().getRoisAsArray()) > 0):  # found a particle

				# todo: show the outlines on the original grayscale image
				#imp = pa.getOutputImage()

				#table.show("")

				# find "the best" particle
				if particle_selectionMode == "classification":
					ws.setTrainingImage(imp_image)
					#ws.loadClassifier(classifier_file)

					myClassNumber = 0 # ask the user

					#/**     * Apply current classifier to a given image.     *
					#* @param imp image (2D single image or stack)
					#* @param numThreads The number of threads to use. Set to zero for
					#* auto-detection.
					#* @param probabilityMaps create probability maps for each class instead of
					#* a classification
					#* @return result image
					#*/    public ImagePlus applyClassifier(final ImagePlus imp, int numThreads, final boolean probabilityMaps)
					IJ.log("Searching for class "+str(ws.getClassLabel(myClassNumber)))
					imp_probabilities = ws.applyClassifier(imp_image,0,True)

					# show the classification results and rename the images in the stack to show the class names
					stack = imp_probabilities.getStack()
					for iClass in range(0,imp_probabilities.getImageStackSize()):
						stack.setSliceLabel(ws.getClassLabel(iClass), iClass+1)
					imp_probabilities.setStack(stack)
					imp_probabilities.setSlice(myClassNumber+1)
					imp_probabilities.show()


					# Create a new list to store the mean intensity values of each class:
					means = []
					classification_max = 0
					imp_myClass = ImagePlus("myClass",imp_probabilities.getProcessor().duplicate())
					for i, roi in enumerate(RoiManager.getInstance().getRoisAsArray()):
						imp_myClass.setRoi(roi)
						stats = imp_myClass.getStatistics(Measurements.MEAN)
						if stats.mean > classification_max:
							classification_max = stats.mean
							iBestParticle = i

						table.setValue("classification", i, stats.mean)


				elif particle_selectionMode == "nuclear_rim_intensity":

					if project == "julia_roberti_ellenberg":
						pass
					else:
						imp= imp_image

					imp.setSlice(iSlice_measurement)
					IJ.run(imp, "Duplicate...", "title=signal");
					imp_measure = IJ.getImage()


					# loop through the particles
					for i, roi in enumerate(RoiManager.getInstance().getRoisAsArray()):

						imp_measure.setRoi(roi)

						# Shrink the ROI, measure the nucleus interior...
						IJ.run(imp_measure, "Enlarge...", "enlarge=-"+str(nPixShrink));
						if drawRings:
							IJ.run(imp_measure, "Draw", "");
						stats = imp_measure.getStatistics(Measurements.MEAN + Measurements.AREA)
						areaSmall = stats.area
						meanIntensSmall = stats.mean

						# ...then dilate and measure the enlarged region; the
						# rim intensity is derived from the difference.
						IJ.run(imp_measure, "Enlarge...", "enlarge="+str(nPixDilate+nPixShrink));
						if drawRings:
							IJ.run(imp_measure, "Draw", "");
						stats = imp_measure.getStatistics(Measurements.MEAN + Measurements.AREA)
						areaLarge = stats.area
						meanIntensLarge = stats.mean

						meanIntensRing = (meanIntensLarge*areaLarge - meanIntensSmall*areaSmall ) / (areaLarge - areaSmall)

						txt_out("mean intens ring: " + str(int(meanIntensRing)) + "/" +str(meanIntensThreshold) )

						# only count valid cells
						if (meanIntensRing > meanIntensThreshold) & (areaSmall < particles_area_max):
							nGoodParticles = nGoodParticles + 1


				elif particle_selectionMode == "nuclear_intensity":

					if project == "julia_roberti_ellenberg":
						imp = imp_zMax
					else:
						imp = imp_image

					imp.setSlice(iSlice_measurement)
					IJ.run(imp, "Duplicate...", "title=signal");
					imp_measure = IJ.getImage()

					# loop through the cells
					intens_current_max = -1
					for i, roi in enumerate(RoiManager.getInstance().getRoisAsArray()):

						imp_measure.setRoi(roi)
						stats = imp_measure.getStatistics(Measurements.MEAN + Measurements.AREA)
						txt_out("particle " + str(i) + " mean: " + str(int(stats.mean)) + " area: " + str(int(stats.area)))

						# only count valid cells
						if (criterium == "mean_intensity"):
							intensity_measure = stats.mean
						elif (criterium == "total_intensity"):
							intensity_measure = stats.mean*stats.area

						if (intensity_measure > intensityThreshold) & (stats.area < particles_area_max):
							nGoodParticles = nGoodParticles + 1  # count valid
							if (intensity_measure > intens_current_max):
								iBestParticle = i
								intens_current_max = intensity_measure

				# get location and orientation of the best particle
				x = table.getColumn(table.getColumnIndex("X"))
				y = table.getColumn(table.getColumnIndex("Y"))
				angle = table.getColumn(table.getColumnIndex("Angle"))

				# NOTE(review): dy is computed from getWidth() -- possibly
				# intended getHeight(); confirm (harmless for square images).
				dx = int(x[iBestParticle]-imp_image.getWidth()/2)
				dy = int(y[iBestParticle]-imp_image.getWidth()/2)
				angle = int(angle[iBestParticle])


		txt_out("number of good particles: " + str(nGoodParticles))

		if project == "andrea_boni_ellenberg":
			code = "validCells_"+str(int(nGoodParticles))+"_"+str(int(minCellsPerImage))+"_"+str(int(minCellsPerWell))
			txt_out(code)

		elif project == "julia_roberti_ellenberg":
			# NOTE(review): dx/dy/angle are only assigned when a particle was
			# found above; this branch can raise if nGoodParticles >= 1 was
			# reached without that assignment -- verify.
			if nGoodParticles >= 1:
				sX = str(-dx) #.replace(".",",")
				sY = str(-dy) #.replace(".",",")
				sOrientation = str(angle) #.replace(".",",")
				code = LSM780_CODE_OBJECT_FOUND_AND_START_IMAGING
				txt_out("index of best good particle: " + str(iBestParticle))

			else:
				sX = "0"
				sY = "0"
				sOrientation = "0"
				code = LSM780_CODE_NO_OBJECT_FOUND

		elif microscope == LSM510:
			sX = str(dx) #.replace(".",",")
			sY = str(dy) #.replace(".",",")
			sOrientation = str(angle) #.replace(".",",")
			code = LSM510_CODE_OBJECT_FOUND

		elif microscope == LSM780:
			sX = str(-dx) #.replace(".",",")
			sY = str(-dy) #.replace(".",",")
			sOrientation = str(angle) #.replace(".",",")
			code = LSM780_CODE_OBJECT_FOUND_AND_START_IMAGING


		#table.show("")

		# write results to windows registry
		txt_out("sX "+ sX)
		txt_out("sY "+ sY)
		txt_out("sOrientation "+ sOrientation)

		wr = WindowsRegistry()
		reg = wr.writeRegistry(winReg_location,"offsetx",sX)
		reg = wr.writeRegistry(winReg_location,"offsety",sY)
		reg = wr.writeRegistry(winReg_location,"orientation",sOrientation)
		reg = wr.writeRegistry(winReg_location,"Code",code)

		# clean up to save memory
		wr = None
		pa = None
		ws = None


	return()
    def __init__(self, imp, classifier="bright.model"):
        """Remember the image and model path; build the loaded segmentor."""
        self.imp = imp
        self.classifier = classifier
        segmentor = WekaSegmentation(imp)
        segmentor.loadClassifier(classifier)
        self.segmentor = segmentor
Example #20
0
def VesselFinder(channel_array, classifier_path):
    """Classify vessels in a multi-channel image with a pre-trained Weka model.

    channel_array   -- list of 4 ImagePlus objects; index 3 is the image to
                       classify, indices 0-2 are appended as extra feature
                       channels.
    classifier_path -- directory containing vessel-classifier_big.model.

    Builds a feature stack (directional filter, Gaussian blurs, multi-scale
    tubeness computed in parallel threads, variance filters, raw channels),
    feeds it to WekaSegmentation and returns the probability-map image.
    """
    channels = channel_array
    image = channels[3]
    channels = channels[0:3]
    proc = image.getProcessor()
    directional_op = ImagePlus("directional_op", proc)

    # Tubeness scales (pixels).
    tubes = range(5, 130, 12)

    # Pre-blur the source on the GPU (CLIJ2) before tubeness filtering.
    img_source = ImagePlus("image", proc)
    src = clij2.push(img_source)
    dst = clij2.create(src)
    sigma = 2
    clij2.gaussianBlur2D(src, dst, sigma, sigma)
    img_blur2 = clij2.pull(dst)
    src.close()
    dst.close()

    # Compute tubeness at every scale in parallel; run_tube writes its
    # result into tubenesses[i].
    print("Tubeness mt start")
    tubenesses = [None] * len(tubes)
    rang = range(len(tubes))
    threads = []
    for i in rang:
        threads.append(
            threading.Thread(target=run_tube,
                             args=(img_blur2, tubes[i], i, tubenesses)))
        threads[i].start()

    [x.join() for x in threads]
    print("Tubeness all done")
    print(tubenesses)

    # Gaussian-blur features at two scales.
    src = clij2.push(img_source)
    dst = clij2.create(src)
    sigmas = [5, 20]
    imgsigmas = []
    for sigma in sigmas:
        clij2.gaussianBlur2D(src, dst, sigma, sigma)
        img = clij2.pull(dst)
        imgsigmas.append(img)
    print("Gaussian Blur done")
    src.close()
    dst.close()

    # Variance features at two radii.
    variances = [5, 20]
    imgvars = []
    for variance in variances:
        img = ImagePlus("image", proc)
        IJ.run(img, "Variance...", "radius=" + str(variance))
        imgvars.append(img)
    # BUGFIX: this message previously read "Gaussian Blur done" (copy-paste
    # from the loop above); the loop here computes variance filters.
    print("Variance done")

    featuresArray = FeatureStackArray(image.getStackSize())
    stack = ImageStack(image.getWidth(), image.getHeight())
    # add new feature here (2/2) and do not forget to add it with a
    # unique slice label!
    stack.addSlice("directional_op", directional_op.getProcessor())
    for i in range(len(sigmas)):
        stack.addSlice("sigma" + str(sigmas[i]), imgsigmas[i].getProcessor())

    for i in range(len(tubes)):
        stack.addSlice("Tubeness" + str(tubes[i]),
                       tubenesses[i].getProcessor())

    for i in range(len(variances)):
        stack.addSlice("Variance" + str(variances[i]),
                       imgvars[i].getProcessor())

    for i in range(len(channels)):
        stack.addSlice("channel" + str(i + 1), channels[i].getProcessor())

    del sigmas
    del tubes
    del variances
    del channels

    # create empty feature stack
    features = FeatureStack(stack.getWidth(), stack.getHeight(), False)

    # set my features to the feature stack
    features.setStack(stack)
    # put my feature stack into the array
    featuresArray.set(features, 0)
    featuresArray.setEnabledFeatures(features.getEnabledFeatures())
    del stack

    # Load the trained model and classify (0 = auto threads, True =
    # probability maps).
    wekaSegmentation = WekaSegmentation(image)
    wekaSegmentation.setFeatureStackArray(featuresArray)
    wekaSegmentation.loadClassifier(classifier_path +
                                    "\\vessel-classifier_big.model")
    output = wekaSegmentation.applyClassifier(image, featuresArray, 0, True)
    System.gc()
    return output