Example #1
def writeCSV(filePath, results, header):
    """ Write a table as a CSV file """
    rt = ResultsTable()
    for i in range(len(results[0])):  # number of rows; all columns are assumed to have equal length
        rt.incrementCounter()
        for j in range(len(results)):
            rt.addValue(str(header[j]), results[j][i])
    rt.show("Results")
    rt.saveAs(filePath)
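
# A hedged usage sketch with made-up data: 'results' holds one list per column
# and 'header' gives one name per column; the output path below is hypothetical.
header = ["Area", "Mean"]
results = [[10.0, 12.5, 9.8], [200.1, 180.4, 210.7]]
writeCSV("/tmp/measurements.csv", results, header)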
Example #3
def show_as_table(title, data, order=[]):
    """Helper function to display group and data information as a ResultsTable"""
    table = ResultsTable()
    for d in data:
        table.incrementCounter()
        order = [k for k in order]
        order.extend([k for k in d.keys() if k not in order])
        for k in order:
            table.addValue(k, d[k])
    table.show(title)
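
# A hedged usage sketch with made-up data: each row is a dict, and 'order'
# pins the listed keys to the first columns.
example_rows = [{"group": 1, "count": 12}, {"group": 2, "count": 7}]
show_as_table("Group summary", example_rows, order=["group", "count"])
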
def reportClustersAsTable(clusters,
                          allPoints,
                          XColumn='X',
                          YColumn='Y',
                          ZColumn='Z',
                          NRColumn='NR'):
    '''
    Report the clustered and unclustered points in the tables 'clusters' and 'unclustered'.
    '''
    rt = ResultsTable()
    counter = 1
    clusterCounter = 1
    clusteredPoints = []
    for c in clusters:
        for dp in c.getPoints():
            rt.incrementCounter()
            p = dp.getPoint()
            rt.addValue(NRColumn, counter)
            rt.addValue(XColumn, p[0])
            rt.addValue(YColumn, p[1])
            rt.addValue(ZColumn, p[2])
            rt.addValue("C", clusterCounter)
            counter = counter + 1
            clusteredPoints.append([p[0], p[1], p[2]])
        clusterCounter = clusterCounter + 1
    rt.show("clusters")
    win = WindowManager.getWindow("Results")
    rt = win.getResultsTable()
    X, Y, Z = getColumns(XColumn, YColumn, ZColumn)
    if not rt.columnExists(NRColumn):
        for i in range(0, len(X)):
            rt.setValue(NRColumn, i, i + 1)
        rt.updateResults()
    NR = getColumn(NRColumn)
    unclusteredPoints = [
        [point.getPoint()[0],
         point.getPoint()[1],
         point.getPoint()[2]] for point in allPoints
        if [point.getPoint()[0],
            point.getPoint()[1],
            point.getPoint()[2]] not in clusteredPoints
    ]
    counter = 1
    rt = ResultsTable()
    for p in unclusteredPoints:
        rt.incrementCounter()
        rt.addValue(NRColumn, counter)
        rt.addValue(XColumn, p[0])
        rt.addValue(YColumn, p[1])
        rt.addValue(ZColumn, p[2])
        counter = counter + 1
    rt.show("unclustered")
    WindowManager.setWindow(win)
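
# getColumns and getColumn used above are helpers defined elsewhere in the
# original script; a minimal sketch (an assumption, not the original code)
# that reads columns from the frontmost "Results" table:
def getColumn(header):
    rt = ResultsTable.getResultsTable()
    return [rt.getValue(header, i) for i in range(rt.getCounter())]

def getColumns(*headers):
    return [getColumn(h) for h in headers]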
Example #5
def myResults(results):
    myResultsTable = ResultsTable()
    for idx, graph in enumerate(results.getGraph()):
        for edge in graph.getEdges():
            edgeLength = edge.getLength()
            v1 = edge.getV1()
            v2 = edge.getV2()
            dist = euclideanDistance(v1, v2)
            #print('v1:', type(v1), v1.getPoints())
            #
            myResultsTable.incrementCounter()  # add a row to results table
            myResultsTable.addValue('graphID', idx)
            myResultsTable.addValue('length_3d', edgeLength)
            myResultsTable.addValue('dist', dist)
            if dist > 0:
                myResultsTable.addValue('tort', edgeLength / dist)
            else:
                myResultsTable.addValue('tort', 'inf')

    myResultsTable.setPrecision(6)
    myResultsTable.show('samiSkel_results')
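
# euclideanDistance used above is a helper defined elsewhere in the original
# script; one possible sketch, assuming AnalyzeSkeleton vertices whose
# getPoints() returns points with integer x, y, z fields, compares the first
# point of each vertex:
import math

def euclideanDistance(v1, v2):
    p1 = v1.getPoints()[0]
    p2 = v2.getPoints()[0]
    return math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2 + (p1.z - p2.z) ** 2)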
Example #6
def process(srcDir, dstDir, currentDir, fileName, keepDirectories, Channel_1, Channel_2, radius_background, sigmaSmaller, sigmaLarger, minPeakValue, min_dist):
 	IJ.run("Close All", "")

 	# Opening the image
 	IJ.log("Open image file:" + fileName)
 	#imp = IJ.openImage(os.path.join(currentDir, fileName))
	#imp = IJ.getImage()
	imp = BF.openImagePlus(os.path.join(currentDir, fileName))
	imp = imp[0]

	# getDimensions(width, height, channels, slices, frames)

	IJ.log("Computing Max Intensity Projection")

	if imp.getDimensions()[3] > 1:
		imp_max = ZProjector.run(imp,"max")
	else:
		imp_max = imp

	ip1, ip2 = extract_channel(imp_max, Channel_1, Channel_2)

	IJ.log("Substract background")

	imp1, imp2 = back_substraction(ip1, ip2, radius_background)

	IJ.log("Finding Peaks")

	ip1_1, ip2_1, peaks_1, peaks_2 = find_peaks(imp1, imp2, sigmaSmaller, sigmaLarger, minPeakValue)

	# Create a PointRoi from the DoG peaks, for visualization
	roi_1 = PointRoi(0, 0)
	roi_2 = PointRoi(0, 0)
	roi_3 = PointRoi(0, 0)
	roi_4 = PointRoi(0, 0)

	# A temporary array of integers, one per dimension the image has
	p_1 = zeros(ip1_1.numDimensions(), 'i')
	p_2 = zeros(ip2_1.numDimensions(), 'i')


	# Load every peak as a point in the PointRoi
	for peak in peaks_1:
	  # Read peak coordinates into an array of integers
	  peak.localize(p_1)
	  roi_1.addPoint(imp1, p_1[0], p_1[1])

	for peak in peaks_2:
	  # Read peak coordinates into an array of integers
	  peak.localize(p_2)
	  roi_2.addPoint(imp2, p_2[0], p_2[1])

	# Choose minimum distance in pixels
	#min_dist = 20

	for peak_1 in peaks_1:
		peak_1.localize(p_1)
		for peak_2 in peaks_2:
			peak_2.localize(p_2)
			d1 = distance(p_1, p_2)
			if  d1 < min_dist:
				roi_3.addPoint(imp1, p_2[0], p_2[1])
				break

	for peak_2 in peaks_2:
		peak_2.localize(p_2)
		for peak_1 in peaks_1:
			peak_1.localize(p_1)
			d2 = distance(p_2, p_1)
			if  d2 < min_dist:
				roi_4.addPoint(imp1, p_2[0], p_2[1])
				break

	cal = imp.getCalibration()
	min_distance = str(round((cal.pixelWidth * min_dist),1))

	table = ResultsTable()
	table.incrementCounter()
	table.addValue("Numbers of Neuron Markers", roi_1.getCount(0))
	table.addValue("Numbers of Glioma Markers", roi_2.getCount(0))
	table.addValue("Numbers of Glioma within %s um of Neurons" %(min_distance), roi_3.getCount(0))
	table.addValue("Numbers of Neurons within %s um of Glioma" %(min_distance), roi_4.getCount(0))
	#table.show("Results Analysis")
	saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
	if not os.path.exists(saveDir):
		os.makedirs(saveDir)
	IJ.log("Saving to" + saveDir)
	table.save(os.path.join(saveDir, fileName + ".csv"))
	IJ.selectWindow("Log")
	IJ.saveAs("Text", os.path.join(saveDir, fileName + ".csv"));
Example #7
class MandersPlugin(ImageListener, WindowAdapter):

	def __init__(self):
		self.imp = None
		self.preview = None
		self.createMainWindow()
		self.cells = None
		self.files = []
		self.results = ResultsTable()
		ImagePlus.addImageListener(self)
		self.selectInputDir()
		self.selectOutputDir()
		self.pairs = []
		self.methods = []
		self.processNextFile()

	def selectInputDir(self):
		inputDialog = DirectoryChooser("Please select a directory containing your images")
		inputDir = inputDialog.getDirectory()
		for imageFile in os.listdir(inputDir):
			self.files.append(inputDir + imageFile)

	def selectOutputDir(self):
		outputDialog = DirectoryChooser("Please select a directory to save your results")
		self.outputDir = outputDialog.getDirectory()
		
	def closeImage(self):
		if self.imp is not None:
			self.imp.close()
			self.imp = None
		if self.preview is not None:
			self.preview.close()
			self.preview = None

	def openImage(self, imageFile):
		try:
			images = BF.openImagePlus(imageFile)
			self.imp = images[0]
		except UnknownFormatException:
			return None
		if self.imp.getNChannels() < 2:
			IJ.error("Bad image format", "Image must contain at lease 2 channels!")
			return None
		if not self.pairs or \
			not self.methods:
			self.getOptionsDialog(self.imp)
		title = self.imp.title
		self.imp.title = title[:title.rfind('.')]
		return self.imp

	def getOptionsDialog(self, imp):
		thr_methods = ["None", "Default", "Huang", "Intermodes", "IsoData",  "Li", "MaxEntropy","Mean", "MinError(I)", "Minimum", "Moments", "Otsu", "Percentile", "RenyiEntropy", "Shanbhag" , "Triangle", "Yen"]
		gd = GenericDialog("Please select channels to collocalize")
		for i in range(1, imp.getNChannels() + 1):
			gd.addChoice("Threshold method for channel %i" % i, thr_methods, "None")
		gd.showDialog()
		if gd.wasCanceled():
			self.exit()
		channels = []
		for i in range(1, imp.getNChannels() + 1):
			method = gd.getNextChoice()
			self.methods.append(method)
			if method != "None":
				channels.append(i)
		for x in channels:
			for y in channels:
				if x < y:
					self.pairs.append((x, y))

	def processNextFile(self):
		if self.files:
			imageFile = self.files.pop(0)
			return self.processFile(imageFile)
		else:
			return False
			
	def processFile(self, imageFile):
		imp = self.openImage(imageFile)
		if imp is not None:
			cell = Cell(imp.NSlices, 1)
			self.cells = DelegateListModel([])
			self.cells.append(cell)
			self.showMainWindow(self.cells)
			if self.checkbox3D.isSelected():
				self.displayImage(imp)
			else:
				self.displayImage(imp, False)
				self.preview = self.previewImage(imp)
				self.displayImage(self.preview)
			return True
		else:
			return self.processNextFile()
	
	def displayImage(self, imp, show = True):
		imp.setDisplayMode(IJ.COMPOSITE)
		enhancer = ContrastEnhancer()
		enhancer.setUseStackHistogram(True)
		splitter = ChannelSplitter()
		for c in range(1, imp.getNChannels() + 1):
			imp.c = c
			enhancer.stretchHistogram(imp, 0.35)
		if show:
			imp.show()

	def previewImage(self, imp):
		roi = imp.getRoi()
		splitter = ChannelSplitter()
		channels = []
		for c in range(1, imp.getNChannels() + 1):
			channel = ImagePlus("Channel %i" % c, splitter.getChannel(imp, c))
			projector = ZProjector(channel)
			projector.setMethod(ZProjector.MAX_METHOD)
			projector.doProjection()
			channels.append(projector.getProjection())
		image = RGBStackMerge.mergeChannels(channels, False)
		image.title = imp.title + " MAX Intensity"
		image.luts = imp.luts
		imp.setRoi(roi)
		return image

	def getCroppedChannels(self, imp, cell):
		splitter = ChannelSplitter()
		imp.setRoi(None)
		if cell.mode3D:
			cropRoi = cell.getCropRoi()
		else:
			cropRoi = cell.roi
		if cropRoi is None:
			return None
		crop = cropRoi.getBounds()
		channels = []
		for c in range(1, imp.getNChannels() + 1):
			slices = ImageStack(crop.width, crop.height)
			channel = splitter.getChannel(imp, c)
			for z in range(1, channel.getSize() + 1):
				zslice = channel.getProcessor(z)
				zslice.setRoi(cropRoi)
				nslice = zslice.crop()
				if cell.mode3D:
					oroi = cell.slices[z - 1].roi	
				else:
					oroi = cell.roi
				if oroi is not None:
					roi = oroi.clone()
					bounds = roi.getBounds()
					roi.setLocation(bounds.x - crop.x, bounds.y - crop.y)
					nslice.setColor(Color.black)
					nslice.fillOutside(roi)
					slices.addSlice(nslice)
			channels.append(ImagePlus("Channel %i" % c, slices))
		return channels

	def getThreshold(self, imp, method):
		thresholder = Auto_Threshold()
		duplicator = Duplicator()
		tmp = duplicator.run(imp)
		return thresholder.exec(tmp, method, False, False, True, False, False, True)

	def getContainer(self, impA, impB):
		imgA = ImagePlusAdapter.wrap(impA)
		imgB = ImagePlusAdapter.wrap(impB)
		return DataContainer(imgA, imgB, 1, 1, "imageA", "imageB")

	def getManders(self, imp, cell):
	
		### Crop channels according to cell mask
		channels = self.getCroppedChannels(imp, cell)
		if channels is None:
			return None
			
		### Calculate channel thresholds
		thrs = []
		thrimps = []
		for c, method in enumerate(self.methods):
			if method != "None":
				thr, thrimp = self.getThreshold(channels[c], method)
			else:
				thr, thrimp = None, None
			thrs.append(thr)
			thrimps.append(thrimp)
		
		### Calculate manders colocalization
		manders = MandersColocalization()
		raws = []
		thrds = []
		for chA, chB in self.pairs:
			container = self.getContainer(channels[chA - 1], channels[chB - 1])
			img1 = container.getSourceImage1()
			img2 = container.getSourceImage2()
			mask = container.getMask()
			cursor = TwinCursor(img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor())
			rtype = img1.randomAccess().get().createVariable()
			raw = manders.calculateMandersCorrelation(cursor, rtype)
			rthr1 = rtype.copy()
			rthr2 = rtype.copy()
			rthr1.set(thrs[chA - 1])
			rthr2.set(thrs[chB - 1])
			cursor.reset()
			thrd = manders.calculateMandersCorrelation(cursor, rthr1, rthr2, ThresholdMode.Above)
			raws.append(raw)
			thrds.append(thrd)
		
		return (channels, thrimps, thrs, raws, thrds)

	def saveMultichannelImage(self, title, channels, luts):
		tmp = RGBStackMerge.mergeChannels(channels, False)
		tmp.luts = luts
		saver = FileSaver(tmp)
		saver.saveAsTiffStack(self.outputDir + title + ".tif")
		tmp.close()

	def createMainWindow(self):
		self.frame = JFrame('Select cells and ROIs',
			defaultCloseOperation = JFrame.DISPOSE_ON_CLOSE
		)
		self.frame.setLayout(GridBagLayout())
		self.frame.addWindowListener(self)

		self.frame.add(JLabel("Cells"),
			GridBagConstraints(0, 0, 1, 1, 0, 0,
				GridBagConstraints.CENTER, GridBagConstraints.NONE,
				Insets(5, 2, 2, 0), 0, 0
		))
		
		self.cellList = JList(DelegateListModel([]),
			selectionMode = ListSelectionModel.SINGLE_SELECTION,
			cellRenderer = MyRenderer(),
			selectedIndex = 0,
			valueChanged = self.selectCell
		)
		self.frame.add(JScrollPane(self.cellList),
			GridBagConstraints(0, 1, 1, 5, .5, 1,
				GridBagConstraints.CENTER, GridBagConstraints.BOTH,
				Insets(0, 2, 2, 0), 0, 0
		))

		self.frame.add(JButton('Add cell', actionPerformed = self.addCell),
			GridBagConstraints(1, 2, 1, 2, 0, .25,
				GridBagConstraints.CENTER, GridBagConstraints.NONE,
				Insets(0, 0, 0, 0), 0, 0
		))
    	
		self.frame.add(JButton('Remove cell', actionPerformed = self.removeCell),
			GridBagConstraints(1, 4, 1, 2, 0, .25,
				GridBagConstraints.CENTER, GridBagConstraints.NONE,
				Insets(0, 5, 0, 5), 0, 0
		))
		
		self.frame.add(JLabel("Slices"),
			GridBagConstraints(0, 6, 1, 1, 0, 0,
				GridBagConstraints.CENTER, GridBagConstraints.NONE,
				Insets(5, 2, 2, 0), 0, 0
		))
		
		self.sliceList = JList(DelegateListModel([]),
			selectionMode = ListSelectionModel.SINGLE_SELECTION,
			cellRenderer = MyRenderer(),
			selectedIndex = 0,
			valueChanged = self.selectSlice
		)
		self.frame.add(JScrollPane(self.sliceList),
			GridBagConstraints(0, 7, 1, 5, .5, 1,
				GridBagConstraints.CENTER, GridBagConstraints.BOTH,
				Insets(0, 2, 2, 0), 0, 0
		))

		self.frame.add(JButton('Update ROI', actionPerformed = self.updateSlice),
			GridBagConstraints(1, 8, 1, 2, 0, .25,
				GridBagConstraints.CENTER, GridBagConstraints.NONE,
				Insets(0, 0, 0, 0), 0, 0
		))

		self.frame.add(JButton('Done', actionPerformed = self.doneSelecting),
			GridBagConstraints(1, 10, 1, 2, 0, .25,
				GridBagConstraints.CENTER, GridBagConstraints.NONE,
				Insets(0, 0, 0, 0), 0, 0
		))

		self.checkbox3D = JCheckBox('3D selection mode', True, actionPerformed=self.toggle3D)
		self.frame.add(self.checkbox3D,
			GridBagConstraints(0, 13, 2, 1, 0, 1,
				GridBagConstraints.WEST, GridBagConstraints.NONE,
				Insets(0, 0, 0, 0), 0, 0
		))

	def showMainWindow(self, cells = None):
		if cells is not None:
			self.cellList.model = cells
			if cells:
				self.cellList.selectedIndex = 0
		self.frame.pack()
		self.frame.visible = True

	def hideMainWindow(self):
		self.frame.visible = False

	def closeMainWindow(self):
		self.frame.dispose()

	def toggle3D(self, event):
		mode3D = self.checkbox3D.isSelected()
		if mode3D:
			self.sliceList.enabled = True
			if self.imp is not None:
				self.imp.show()
			if self.preview is not None:
				self.preview.hide()
		else:
			self.sliceList.enabled = False
			if self.preview is None:
				self.preview = self.previewImage(self.imp)
				self.displayImage(self.preview)
			else:
				self.preview.show()
			if self.imp is not None:
				self.imp.hide()
		selectedCell = self.cellList.selectedIndex
		if selectedCell >= 0:
			cell = self.cells[selectedCell]
			self.sliceList.model = cell.slices
			self.sliceList.selectedIndex = 0
		
	def addCell(self, event):
		size = len(self.cells)
		if (size > 0):
			last = self.cells[size - 1]
			n = last.n + 1
		else:
			n = 1
		self.cells.append(Cell(self.imp.NSlices, n))
		self.cellList.selectedIndex = size

	def removeCell(self, event):
		selected = self.cellList.selectedIndex
		if selected >= 0:
			self.cells.remove(self.cells[selected])
			if (selected >= 1):
				self.cellList.selectedIndex = selected - 1
			else:
				self.cellList.selectedIndex = 0

	def selectCell(self, event):
		selected = self.cellList.selectedIndex
		if selected >= 0:
			cell = self.cells[selected]
			self.sliceList.model = cell.slices
			self.sliceList.selectedIndex = 0
			if self.preview is not None:
				self.preview.setRoi(cell.roi)
		else:
			self.sliceList.model = DelegateListModel([])

	def selectSlice(self, event):
		selectedCell = self.cellList.selectedIndex
		selectedSlice = self.sliceList.selectedIndex
		if selectedCell >= 0 and selectedSlice >= 0:
			cell = self.cells[selectedCell]
			image = self.imp
			mode3D = self.checkbox3D.isSelected()
			if image is not None and cell is not None and mode3D:
				roi = cell.slices[selectedSlice].roi
				if (image.z - 1 != selectedSlice):
					image.z = selectedSlice + 1				
				image.setRoi(roi, True)
			if self.preview is not None and not mode3D:
				self.preview.setRoi(cell.roi, True)

	def updateSlice(self, event):
		if self.checkbox3D.isSelected():
			self.updateSlice3D(self.imp)
		else:
			self.updateSlice2D(self.preview)

	def updateSlice3D(self, imp):
		selectedCell = self.cellList.selectedIndex
		selectedSlice = self.sliceList.selectedIndex
		if selectedCell >= 0 and selectedSlice >= 0 and imp is not None:
			cell = self.cells[selectedCell]
			impRoi = imp.getRoi()
			if cell is not None and impRoi is not None:
				index = selectedSlice + 1
				roi = ShapeRoi(impRoi, position = index)
				cell.mode3D = True
				cell.name = "Cell %i (3D)" % cell.n
				cell.slices[selectedSlice].roi = roi
				if (index + 1 <= len(cell.slices)):
					imp.z = index + 1			
			self.cellList.repaint(self.cellList.getCellBounds(selectedCell, selectedCell))
			self.sliceList.repaint(self.sliceList.getCellBounds(selectedSlice, selectedSlice))

	def updateSlice2D(self, imp):
		selectedCell = self.cellList.selectedIndex
		if selectedCell >= 0 and imp is not None:
			cell = self.cells[selectedCell]
			impRoi = imp.getRoi()
			if cell is not None and impRoi is not None:
				roi = ShapeRoi(impRoi, position = 1)
				cell.mode3D = False
				cell.name = "Cell %i (2D)" % cell.n
				cell.roi = roi	
			self.cellList.repaint(self.cellList.getCellBounds(selectedCell, selectedCell))
	
	def imageOpened(self, imp):
		pass

	def imageClosed(self, imp):
		pass

	def imageUpdated(self, imp):
		if self.checkbox3D.isSelected():
			if imp is not None and imp == self.imp:
				selectedSlice = imp.z - 1
				if selectedSlice != self.sliceList.selectedIndex:
					self.sliceList.selectedIndex = selectedSlice

	def doneSelecting(self, event):
		oluts = self.imp.luts
		luts = []
		channels = []
		for c, method in enumerate(self.methods):
			if method != "None":
				luts.append(oluts[c])
				channels.append(c)
		for cell in self.cells:
			manders = self.getManders(self.imp, cell)
			if manders is not None:
				chimps, thrimps, thrs, raws, thrds = manders
				index = self.cells.index(cell) + 1
				title = "Cell_%i-" % index + self.imp.title
				self.saveMultichannelImage(title, chimps, oluts)
				title = "Cell_%i_thrd-" % index + self.imp.title
				self.saveMultichannelImage(title, thrimps, luts)
				self.results.incrementCounter()
				row = self.results.getCounter() - 1
				for i, thr in enumerate(thrs):
					if thr is not None:
						self.results.setValue("Threshold %i" % (i + 1), row, int(thr))
				for i, pair in enumerate(self.pairs):
					self.results.setValue("%i-%i M1 raw" % pair, row, float(raws[i].m1))
					self.results.setValue("%i-%i M2 raw" % pair, row, float(raws[i].m2))
					self.results.setValue("%i-%i M1 thrd" % pair, row, float(thrds[i].m1))
					self.results.setValue("%i-%i M2 thrd" % pair, row, float(thrds[i].m2))
		self.closeImage()
		if not self.processNextFile():
			print "All done - happy analysis!"
			self.results.show("Manders collocalization results")
			self.exit()

	def windowClosing(self, e):
		print "Closing plugin - BYE!!!"
		self.exit()

	def exit(self):
		ImagePlus.removeImageListener(self)
		self.closeImage()
		self.closeMainWindow()
Example #8
def batch_open_Rois(pathRoi,
                    file_typeRoi=None,
                    name_filterRoi=None,
                    recursive=False):
    '''Open all ROI files in the given folder.
    :param pathRoi: The path from where to open the ROIs. String and java.io.File are allowed.
    :param file_typeRoi: Only accept files with the given extension (default: None).
    :param name_filterRoi: Reject files that contain the given string (default: None, i.e. accept all names).
    :param recursive: Process directories recursively (default: False).
    '''
    # Converting a File object to a string.
    if isinstance(pathRoi, File):
        pathRoi = pathRoi.getAbsolutePath()

    def check_type(string):
        '''This function is used to check the file type.
        It is possible to use a single string or a list/tuple of strings as filter.
        This function can access the variables of the surrounding function.
        :param string: The filename to perform the check on.
        '''
        if file_typeRoi:
            # The first branch is used if file_type is a list or a tuple.
            if isinstance(file_typeRoi, (list, tuple)):
                for file_type_ in file_typeRoi:
                    if string.endswith(file_type_):
                        # Exit the function with True.
                        return True
                    else:
                        # Next iteration of the for loop.
                        continue
            # The second branch is used if file_type is a string.
            elif isinstance(file_typeRoi, str):
                if string.endswith(file_typeRoi):
                    return True
                else:
                    return False
            return False
        # Accept all files if file_type is None.
        else:
            return True

    # We collect all files to open in a list.
    path_to_Roi = []
    # Replacing some abbreviations (e.g. $HOME on Linux).
    pathRoi = os.path.expanduser(pathRoi)
    # If we don't want a recursive search, we can use os.listdir().
    if not recursive:
        for file_name in os.listdir(pathRoi):
            full_path = os.path.join(pathRoi, file_name)
            if os.path.isfile(full_path):
                if check_type(file_name):
                    path_to_Roi.append(full_path)
    # For a recursive search os.walk() is used.
    else:
        # os.walk() is iterable.
        # Each iteration of the for loop processes a different directory.
        # the first return value represents the current directory.
        # The second return value is a list of included directories.
        # The third return value is a list of included files.
        for directory, dir_names, file_names in os.walk(pathRoi):
            # We are only interested in files.
            for file_name in file_names:
                # The list contains only the file names.
                # The full path needs to be reconstructed.
                full_path = os.path.join(directory, file_name)
                # Both checks are performed to filter the files.
                if check_type(file_name):
                    # Add the file to the list of Rois to open.
                    path_to_Roi.append([
                        full_path,
                        os.path.basename(os.path.splitext(full_path)[0])
                    ])

    # Create the list that will be returned by this function.
    RoisX = []
    RoisY = []
    print('path', path_to_Roi)
    for roi_path in path_to_Roi:

        print('path', roi_path)
        # An object equals True and None equals False.
        rm = RoiManager.getInstance()
        if (rm == None):
            rm = RoiManager()
        Roi = IJ.open(roi_path)
        roi_points = rm.getRoisAsArray()

    table = ResultsTable()

    for Roi in roi_points:
        xpoints = Roi.getPolygon().xpoints
        ypoints = Roi.getPolygon().ypoints
        for i in range(len(xpoints)):
            table.incrementCounter()
            table.addValue("Index", i)
            table.addValue("X", xpoints[i])
            table.addValue("Y", ypoints[i])
    table.show("XY-Coordinates")

    return roi_points
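
# A hedged usage sketch (the directory below is hypothetical): open every .roi
# file in a folder, non-recursively, and tabulate the XY coordinates of the ROIs.
rois = batch_open_Rois("/path/to/rois", file_typeRoi=".roi", recursive=False)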
Example #9
		
		thr1, thrimp1 = calculateThreshold(imp1, roi, methods[0])
		thr2, thrimp2 = calculateThreshold(imp2, roi, methods[1])
		
		cursor = TwinCursor(img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor())
		rtype = img1.randomAccess().get().createVariable()
		raw = manders.calculateMandersCorrelation(cursor, rtype)
		rthr1 = rtype.copy()
		rthr2 = rtype.copy()
		rthr1.set(thr1)
		rthr2.set(thr2)
		cursor.reset()
		thrd = manders.calculateMandersCorrelation(cursor, rthr1, rthr2, ThresholdMode.Above)
		print "Results are: %f %f %f %f" % (raw.m1, raw.m2, thrd.m1, thrd.m2)

		results.incrementCounter()
		rowno = results.getCounter() - 1
		results.setValue("Cell", rowno, int(rowno))
		results.setValue("Threshold 1", rowno, int(thr1))
		results.setValue("Threshold 2", rowno, int(thr2))
		results.setValue("M1 raw", rowno, float(raw.m1))
		results.setValue("M2 raw", rowno, float(raw.m2))
		results.setValue("M1 thrd", rowno, float(thrd.m1))
		results.setValue("M2 thrd", rowno, float(thrd.m2))
		
		thrimp = RGBStackMerge.mergeChannels([thrimp1, thrimp2], False)
		saver = FileSaver(thrimp)
		saver.saveAsTiffStack(outputDir + "Cell_%i-" % results.getCounter() + title + ".tif")
		thrimp.close()

results.show("Colocalization results")
Example #10
                    # we get XY coordinates of the foci
                    x = int(rt.getValue("X", count))
                    y = int(rt.getValue("Y", count))
                    # if that cell contains these coordinates, add the cell
                    # number as "cell" value for that foci
                    if roi.contains(x, y):
                        rt.setValue("cell", count, cell)
                cell = cell + 1

            # save this results table
            rt.save(directory + "/" + filename + "_GFP.csv")
            print("saving at ", directory + "/" + filename + "_GFP.csv")

            # create summary results table, with "cell" and "foci_count" columns
            consol = ResultsTable()
            consol.incrementCounter()
            consol.addValue("cell", 0)
            consol.addValue("foci_count", 0)
            rowcount = 1

            # loop over all cells, add cell number to the "cell" column
            for count in range(cell):
                consol.setValue("cell", count, count)

            # loop over all foci
            for count in range(rt.size()):
                # get in which cell that foci is and increase the
                # counter on the summary results table
                currcell = int(rt.getValue("cell", count))

                consol.setValue(
Example #11
def main():
    rt = RT.open2(table_file.getAbsolutePath())
    if not rt: return

    log(" --- --- --- ")
    log("Loaded %s" % table_file.getAbsolutePath())
    log("Loading column lists...")

    # Get column indices from imported file
    headings = getColumnHeadings(rt)
    id_col = getColumnIndex(headings, "TID")
    t_col = getColumnIndex(headings, "t [")
    d2p_col = getColumnIndex(headings, "D2P [")
    angle_col = getColumnIndex(headings, u'\u03B1 [deg]')
    delta_col = getColumnIndex(headings, u'\u0394\u03B1 [deg]')
    if angle_col == RT.COLUMN_NOT_FOUND:
        log("Failed to detect index for angle column. Re-trying...")
        angle_col = getColumnIndex(headings, u'? [deg]')
    if delta_col == RT.COLUMN_NOT_FOUND:
        log("Failed to detect index for delta angle column. Re-trying...")
        delta_col = getColumnIndex(headings, u'?? [deg]')
    log("Last column index is %s" % rt.getLastColumn())

    if RT.COLUMN_NOT_FOUND in (id_col, d2p_col, delta_col, angle_col):
        uiservice.showDialog("Error: Some key columns were not found!",
                             "Invalid Table?")
        return

    log("Settings: BOUT_WINDOW= %s, MIN_D2P= %s, DEF_FRAME_INTERVAL= %s" %
        (BOUT_WINDOW, '{0:.4f}'.format(MIN_D2P), DEF_FRAME_INTERVAL))

    # Store all data on dedicated lists
    track_id_rows = rt.getColumnAsDoubles(id_col)
    d2p_rows = rt.getColumnAsDoubles(d2p_col)
    angle_rows = rt.getColumnAsDoubles(angle_col)
    delta_rows = rt.getColumnAsDoubles(delta_col)
    t_rows = rt.getColumnAsDoubles(t_col)

    # Assess n of data points and extract unique path ids
    n_rows = len(track_id_rows)
    row_indices = range(n_rows)
    track_ids = set(track_id_rows)
    n_tracks = len(track_ids)
    log("Table has %g rows" % n_rows)
    log("Table has %g tracks" % n_tracks)

    log("Parsing tracks...")
    for track_id in track_ids:

        for row, next_row in zip(row_indices, row_indices[1:]):

            if track_id_rows[row] != track_id:
                continue

            if not isNumber(angle_rows[row]):
                rt.setValue("FLAG", row, "NA")
                continue

            lower_bound = max(0, row - BOUT_WINDOW + 1)
            upper_bound = min(n_rows - 1, row + BOUT_WINDOW)
            win_d2p = []
            for _ in range(lower_bound, upper_bound):
                win_d2p.append(d2p_rows[row])

            if sum(win_d2p) <= MIN_D2P * len(win_d2p):
                rt.setValue("FLAG", row, 0)

            else:
                current_angle = angle_rows[row]
                next_angle = angle_rows[next_row]
                current_delta = delta_rows[row]

                flag = -1 if current_angle < 0 else 1
                delta_change = (abs(current_delta) > 90)
                same_sign = ((current_angle < 0) == (next_angle < 0))
                if delta_change and not same_sign:
                    flag *= -1

                rt.setValue("FLAG", row, flag)
                if next_row == n_rows - 1:
                    rt.setValue("FLAG", next_row, flag)

    if rt.save(table_file.getAbsolutePath()):
        log("Processed table successfully saved (file overwritten)")
    else:
        log("Could not override input file. Displaying it...")
        rt.show(table_file.name)

    log("Creating onset table...")
    onset_rt = RT()
    onset_rt.showRowNumbers(False)

    frame_int = DEF_FRAME_INTERVAL
    if "table" in frame_rate_detection:
        frame_int = getFrameIntervalFromTable(row_indices, track_id_rows,
                                              t_rows)
    elif "image" in frame_rate_detection:
        frame_int = getFrameIntervalFromImage(image_file.getAbsolutePath())
    else:
        log("Using default frame rate")

    for track_id in track_ids:

        for prev_row, row in zip(row_indices, row_indices[1:]):

            if not track_id in (track_id_rows[prev_row], track_id_rows[row]):
                continue

            flag = rt.getValue("FLAG", row)
            if not isNumber(flag):
                continue

            flag = int(flag)
            if flag == 0:
                continue

            if flag == 1 or flag == -1:
                srow = onset_rt.getCounter()
                onset_rt.incrementCounter()
                onset_rt.setValue("TID", srow, track_id)
                from_frame = int(t_rows[prev_row] / frame_int) + 1
                to_frame = int(t_rows[row] / frame_int) + 1
                onset_rt.setValue("First disp. [t]", srow,
                                  "%s to %s" % (t_rows[prev_row], t_rows[row]))
                onset_rt.setValue("First disp. [frames]", srow,
                                  "%s to %s" % (from_frame, to_frame))
                onset_rt.setValue("ManualTag", srow, "")
                break

    out_path = suffixed_path(table_file.getAbsolutePath(), "ManualTagging")
    if onset_rt.save(out_path):
        log("Summary table successfully saved: %s" % out_path)
    else:
        log("File not saved... Displaying onset table")
        onset_rt.show("Onsets %s" % table_file.name)
maximaip = findmaximashow.getProcessor()
maximahist = maximaip.getHistogram()
CountMethod2 = maximahist[255]
print "Using the findMaxima method with a threshold of " + str(
    THRESH) + ", I found " + str(CountMethod2) + " maxima."

IJ.setRawThreshold(findmaximashow, 255, 255, "red")
IJ.run(findmaximashow, "Create Selection", "")
rm.addRoi(findmaximashow.getRoi())
rm.rename(1, "Maxima with threshold")

# ---- METHOD 3-4: API getMaxima WITH THE POINTS COUNTED OR ADDED TO THE RESULTS TABLE

maxima = mf.getMaxima(ip, 100.0, False)
CountMethod3 = maxima.npoints
print "Using the getMaxima method with no threshold , I found " + str(
    CountMethod3) + " maxima."

rt = ResultsTable()
for i in range(maxima.npoints):
    rt.incrementCounter()
    rt.addValue("X", maxima.xpoints[i])
    rt.addValue("Y", maxima.ypoints[i])

rt.show("Results")
CountMethod4 = rt.getCounter()
print "By counting points generated by getMaxima, I found " + str(
    CountMethod4) + " maxima."
# TODO: add these to the ROI mgr

# TODO: as alternative, check out HMaxima transform from landini, basically subtracting the threshold before running the find maxima
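
# A hedged sketch for the first TODO above (an assumption, not part of the
# original script): the getMaxima points could be added to the ROI Manager as
# a single multi-point ROI.
from ij.gui import PointRoi
maxima_roi = PointRoi(maxima.xpoints, maxima.ypoints, maxima.npoints)
rm.addRoi(maxima_roi)
rm.rename(rm.getCount() - 1, "Maxima from getMaxima")
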
 def setUp(self):
     unittest.TestCase.setUp(self)
     rt = ResultsTable()
     rt.incrementCounter()
     rt.addLabel('LABEL', 'ID00002')
     rt.addValue('ID', 2)
     rt.addValue('TRACK_ID', 2)
     rt.addValue('QUALITY', 1)
     rt.addValue('POSITION_X', 738.9)
     rt.addValue('POSITION_Y', 670.0)
     rt.addValue('POSITION_Z', 0)
     rt.addValue('POSITION_T', 0)
     rt.incrementCounter()
     rt.addLabel('LABEL', 'ID00003')
     rt.addValue('ID', 3)
     rt.addValue('TRACK_ID', 3)
     rt.addValue('QUALITY', 1)
     rt.addValue('POSITION_X', 672.1)
     rt.addValue('POSITION_Y', 729.3)
     rt.addValue('POSITION_Z', 0)
     rt.addValue('POSITION_T', 0)
     rt.incrementCounter()
     rt.addLabel('LABEL', 'ID00001')
     rt.addValue('ID', 31)
     rt.addValue('TRACK_ID', 1)
     rt.addValue('QUALITY', 1)
     rt.addValue('POSITION_X', 953.2)
     rt.addValue('POSITION_Y', 803.5)
     rt.addValue('POSITION_Z', 0)
     rt.addValue('POSITION_T', 1)
     rt.incrementCounter()
     rt.addLabel('LABEL', 'ID000032')
     rt.addValue('ID', 32)
     rt.addValue('TRACK_ID', 2)
     rt.addValue('QUALITY', 1)
     rt.addValue('POSITION_X', 739.5)
     rt.addValue('POSITION_Y', 665.0)
     rt.addValue('POSITION_Z', 0)
     rt.addValue('POSITION_T', 1)
     rt.incrementCounter()
     rt.addLabel('LABEL', 'ID000033')
     rt.addValue('ID', 33)
     rt.addValue('TRACK_ID', 3)
     rt.addValue('QUALITY', 1)
     rt.addValue('POSITION_X', 667.0)
     rt.addValue('POSITION_Y', 729.8)
     rt.addValue('POSITION_Z', 0)
     rt.addValue('POSITION_T', 1)
     rt.incrementCounter()
     rt.addLabel('LABEL', 'ID000061')
     rt.addValue('ID', 61)
     rt.addValue('TRACK_ID', 1)
     rt.addValue('QUALITY', 1)
     rt.addValue('POSITION_X', 959.0)
     rt.addValue('POSITION_Y', 805.5)
     rt.addValue('POSITION_Z', 0)
     rt.addValue('POSITION_T', 2)
     self.table = rt
Example #14
def main():
    rt = RT.open2(table_file.getAbsolutePath())
    if not rt: return

    log(" --- --- --- ")
    log("Loaded %s" % table_file.getAbsolutePath())
    log("Loading column lists...")

    # Get column indices from imported file
    headings = getColumnHeadings(rt)
    id_col = getColumnIndex(headings, "TID")
    t_col = getColumnIndex(headings, "t [")
    d2p_col = getColumnIndex(headings, "D2P [")
    angle_col = getColumnIndex(headings, u'\u03B1 [deg]')
    delta_col = getColumnIndex(headings, u'\u0394\u03B1 [deg]')
    if angle_col == RT.COLUMN_NOT_FOUND:
        log("Failed to detect index for angle column. Re-trying...")
        angle_col = getColumnIndex(headings, u'? [deg]')
    if delta_col == RT.COLUMN_NOT_FOUND:
        log("Failed to detect index for delta angle column. Re-trying...")
        delta_col = getColumnIndex(headings, u'?? [deg]')
    log("Last column index is %s" % rt.getLastColumn())

    if RT.COLUMN_NOT_FOUND in (id_col, d2p_col, delta_col, angle_col):
        uiservice.showDialog("Error: Some key columns were not found!", "Invalid Table?")
        return

    log("Settings: BOUT_WINDOW= %s, MIN_D2P= %s, DEF_FRAME_INTERVAL= %s"
            % (BOUT_WINDOW, '{0:.4f}'.format(MIN_D2P), DEF_FRAME_INTERVAL))

    # Store all data on dedicated lists
    track_id_rows = rt.getColumnAsDoubles(id_col)
    d2p_rows = rt.getColumnAsDoubles(d2p_col)
    angle_rows = rt.getColumnAsDoubles(angle_col)
    delta_rows = rt.getColumnAsDoubles(delta_col)
    t_rows = rt.getColumnAsDoubles(t_col)

    # Assess n of data points and extract unique path ids
    n_rows = len(track_id_rows)
    row_indices = range(n_rows)
    track_ids = set(track_id_rows)
    n_tracks = len(track_ids)
    log("Table has %g rows" % n_rows)
    log("Table has %g tracks" % n_tracks)

    log("Parsing tracks...")
    for track_id in track_ids:


        for row, next_row in zip(row_indices, row_indices[1:]):

            if track_id_rows[row] != track_id:
                continue

            if not isNumber(angle_rows[row]):
                 rt.setValue("FLAG", row, "NA")
                 continue

            lower_bound = max(0, row - BOUT_WINDOW + 1)
            upper_bound = min(n_rows-1, row + BOUT_WINDOW)
            win_d2p = []
            for _ in range(lower_bound, upper_bound):
                win_d2p.append(d2p_rows[row])

            if sum(win_d2p) <= MIN_D2P * len(win_d2p):
                rt.setValue("FLAG", row, 0)

            else:
                current_angle = angle_rows[row]
                next_angle = angle_rows[next_row]
                current_delta = delta_rows[row]

                flag = -1 if current_angle < 0 else 1
                delta_change = (abs(current_delta) > 90)
                same_sign = ((current_angle<0) == (next_angle<0))
                if delta_change and not same_sign:
                    flag *= -1

                rt.setValue("FLAG", row, flag)
                if next_row == n_rows - 1:
                    rt.setValue("FLAG", next_row, flag)

    if rt.save(table_file.getAbsolutePath()):
        log("Processed table successfully saved (file overwritten)")
    else:
        log("Could not override input file. Displaying it...")
        rt.show(table_file.name)


    log("Creating onset table...")
    onset_rt = RT()
    onset_rt.showRowNumbers(False)

    frame_int = DEF_FRAME_INTERVAL
    if "table" in frame_rate_detection:
        frame_int = getFrameIntervalFromTable(row_indices, track_id_rows, t_rows)
    elif "image" in frame_rate_detection:
        frame_int = getFrameIntervalFromImage(image_file.getAbsolutePath())
    else:
        log("Using default frame rate")

    for track_id in track_ids:

        for prev_row, row in zip(row_indices, row_indices[1:]):

            if not track_id in (track_id_rows[prev_row], track_id_rows[row]):
                continue

            flag = rt.getValue("FLAG", row)
            if not isNumber(flag):
                continue

            flag = int(flag)
            if flag == 0:
                continue

            if flag == 1 or flag == -1:
                srow = onset_rt.getCounter()
                onset_rt.incrementCounter()
                onset_rt.setValue("TID", srow, track_id)
                from_frame = int(t_rows[prev_row]/frame_int) + 1
                to_frame = int(t_rows[row]/frame_int) + 1
                onset_rt.setValue("First disp. [t]", srow,
                    "%s to %s" % (t_rows[prev_row], t_rows[row]))
                onset_rt.setValue("First disp. [frames]", srow,
                    "%s to %s" % (from_frame, to_frame))
                onset_rt.setValue("ManualTag", srow, "")
                break

    out_path = suffixed_path(table_file.getAbsolutePath(), "ManualTagging")
    if onset_rt.save(out_path):
        log("Summary table successfully saved: %s" % out_path)
    else:
        log("File not saved... Displaying onset table")
        onset_rt.show("Onsets %s" % table_file.name)
CELL_LABEL_COLUMN = "cell"
FEATURES = ["POSITION_X", "POSITION_Y", "FRAME"]

trackIDs = model.getTrackModel().trackIDs(True)

results = ResultsTable()

# Parse spots to insert values as objects
for trackID in trackIDs:
    track = model.getTrackModel().trackSpots(trackID)
    # Sort by frame
    sortedTrack = list(track)
    sortedTrack.sort(key=lambda s: s.getFeature("FRAME"))

    for spot in sortedTrack:
        results.incrementCounter()
        results.addValue(ID_COLUMN, "" + str(spot.ID()))
        # results.addValue(CELL_LABEL_COLUMN,str(int(spot.getFeature("MAX_INTENSITY"))))
        results.addValue(CELL_LABEL_COLUMN, spot.getName())
        results.addValue(TRACK_ID_COLUMN, "" + str(trackID))
        for feature in FEATURES:
            val = spot.getFeature(feature)
            if math.isnan(val):
                results.addValue(feature.lower(), "None")
            else:
                results.addValue(feature.lower(), "" + str(int(val)))

        parents = []
        children = []
        for edge in model.getTrackModel().edgesOf(spot):
            source, target = model.getTrackModel().getEdgeSource(
def main(imp,options):
	from ij.plugin import ChannelSplitter
	from ij.gui import Roi,PointRoi, PolygonRoi, Overlay, Line
	from java.awt import Color
	from ij import WindowManager
	from ij.measure import ResultsTable
	from ij.text import TextWindow
	active_z=imp.getZ()
	imps = ChannelSplitter.split(imp)
	imp.setZ(active_z)
	roi_int = imp.getRoi()


	comp_imp=Zproj(imps[options["comp_ch"]],
		"SUM",
		active_z,
		options["z_range"])
	comp_imp=mode_subtract(comp_imp,roi_int)

	loci_imp=Zproj(imps[options["loci_ch"]],
		"SUM",
		imp.getZ(),
		options["z_range"])
	loci_imp=mode_subtract(loci_imp,roi_int)

	#Finding the boundaries of compartment and loci
	comp_roi=thresh(sum_prj=comp_imp,thresh=options["comp_T"],roi=roi_int,method="boundary")
	print "ok"
	if (options["loci_method"]== "locus center"):
		loci_roi=thresh(sum_prj=loci_imp,
			thresh=options["loci_T"],
			roi=roi_int,
			method="point")
	elif options["loci_method"]== "locus boundary":
		loci_roi=thresh(sum_prj=loci_imp,
			thresh=options["loci_T"],
			roi=roi_int,
			method="boundary")
		
	
	if options["loci_method"]== "locus center":
		dist,xc,yc,xl,yl=get_center_edge_dist(imp,comp_roi, loci_roi)
	elif options["loci_method"]== "locus boundary":
		dist,xc,yc,xl,yl=get_closest_points(imp,comp_roi,loci_roi)


	rt_exist = WindowManager.getWindow("Loci distance to compartment")
	
	if rt_exist==None or not isinstance(rt_exist, TextWindow):
		table= ResultsTable()
	else:
		table = rt_exist.getTextPanel().getOrCreateResultsTable()
	table.incrementCounter()
	table.addValue("Label", imp.title)
	table.addValue("Distance(micron)", dist)
	
	if options['measure_feret']:
		feret_roi,loci_feret,loci_area= feret(sum_prj=loci_imp,thresh=options["loci_T"],
		roi=roi_int,pixel_size=imp.getCalibration().pixelWidth)
		table.addValue("Loci feret", loci_feret)
		table.addValue("Loci area", loci_area)
		
		
	table.show("Loci distance to compartment")

	## Adding loci overlay
	ov=imp.getOverlay()
	if ov==None:
		ov=Overlay()
	line = Line(xc,yc, xl,yl)
	line.setStrokeWidth(0.2)
	line.setStrokeColor(Color.PINK)
	ov.add(line)

	
	if options["loci_method"]== "locus center":
		ov.add(PointRoi(loci_roi["x"],loci_roi["y"]))
	elif options["loci_method"]== "locus boundary":
		ov.add(loci_roi)
	if options['measure_feret']:
		ov.add(feret_roi)
	ov.add(comp_roi)
	imp.setOverlay(ov)	
Example #17
def runScript():

    # find table with trajectories
    rt = findResultsTable(inputTableName)
    if rt == None:
        print("Results table window titled [" + inputTableName +
              "] not found!")
        return

    # get input image and its properties
    img = WindowManager.getCurrentImage()
    if img == None:
        print("Could not access input image!")
        return
    print("Processing image:", img)
    xLen = img.getWidth()
    yLen = img.getHeight()
    zLen = img.getNSlices()
    noOfFrames = img.getNFrames()
    noOfChannels = img.getNChannels()
    stack = img.getStack()
    if (noOfChannels > 1):
        print(
            "Cannot process images with channels. Convert image to single channel first!"
        )
        return

    # Start processing data row by row...
    numOfRows = rt.getCounter()

    if numOfRows > 1:

        #create output tableName
        if showOutputTable: outputTable = ResultsTable()

        # if output in csv format requested print header
        if printOutputData:
            print("trajectory;frame;m0;sizeInPixels;avgIntensity")

        for idx in range(0, numOfRows):
            trajectoryId = rt.getValue("Trajectory", idx)
            x = rt.getValue("x", idx)
            y = rt.getValue("y", idx)
            z = rt.getValue("z", idx)
            frame = rt.getValue("Frame", idx)

            m0, size, avgInt = getIntensityData(stack, radius, frame, x, y, z,
                                                xLen, yLen, zLen)

            # if output in csv format requested print it
            if printOutputData:
                print(
                    str(int(trajectoryId)) + ";" + str(frame) + ";" + str(m0) +
                    ";" + str(size) + ";" + str(avgInt))

            # if output table requested update it with data
            if showOutputTable:
                outputTable.incrementCounter()
                outputTable.addValue("", idx + 1)
                outputTable.addValue("trajectory", int(trajectoryId))
                outputTable.addValue("frame", frame)
                outputTable.addValue("m0", m0)
                outputTable.addValue("sizeInPixels", size)
                outputTable.addValue("avgIntensity", avgInt)

        if showOutputTable: outputTable.show(outputTableName)
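
# findResultsTable and getIntensityData used above are helpers defined
# elsewhere in the original script; a minimal sketch (an assumption) of
# findResultsTable that looks up a table window by its title:
from ij.text import TextWindow

def findResultsTable(title):
    win = WindowManager.getWindow(title)
    if isinstance(win, TextWindow):
        return win.getTextPanel().getOrCreateResultsTable()
    return None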
Example #18
	resultsTable = ResultsTable()
	resultsTable.showRowNumbers(False)

	for i in range(0, len(results)):
		if options['oneShot']:
			localBackground = options['localBackground']
			seedRadius = options['seedRadius']
			gaussXY = options['gaussXY']
			gaussZ = options['gaussZ']
		else:
			localBackground = parameters[i]['localBackground']
			seedRadius = parameters[i]['seedRadius']
			gaussXY = parameters[i]['gaussXY']
			gaussZ = parameters[i]['gaussZ']
		
		resultsTable.incrementCounter()
		resultsTable.addValue("Threshold", localBackground)
		resultsTable.addValue("Seed radius", seedRadius)
		resultsTable.addValue("GXY", gaussXY)
		resultsTable.addValue("GZ", gaussZ)
		resultsTable.addValue("TOTAL", results[i]['all'])
		resultsTable.addValue("0-250", results[i]['0'])
		resultsTable.addValue("251-500", results[i]['250'])
		resultsTable.addValue("501-750", results[i]['500'])
		resultsTable.addValue("751-1000", results[i]['750'])
		resultsTable.addValue("1001-1500", results[i]['1000'])
		resultsTable.addValue(">1501", results[i]['1500'])
		resultsTable.addValue("Skipped", results[i]['edge'])

	resultsTable.save(options['outputDir'] + options['outputFile'])
Example #19
def open_Octopus_file():

	# set up a file info structure
	fi = FileInfo()
	fi.fileFormat = fi.RAW
	fi.fileType=FileInfo.GRAY16_UNSIGNED
	fi.intelByteOrder = True
	fi.nImages = 1

	op = OpenDialog("Choose Octopus .dth file...", "")
	if not op.getDirectory(): return False

	# get the file extension
	file_extension = re.search('(\.[a-z][a-z][a-z])', op.getFileName()).group(1)
	
	if file_extension != ".dth":
		dlg = GenericDialog("Warning")
		dlg.addMessage("Please select an octopus .dth file")
		dlg.showDialog()
		return False

	# now strip the filename into a stem and index
	file_parse = re.match('([a-zA-Z0-9_]*_)([0-9]+)\.dth', op.getFileName())
	file_stem = file_parse.group(1)
	file_index = int( file_parse.group(2) )

	# ok now we need to parse the header info
	header = get_Octopus_header(op.getDirectory(), file_stem, file_index)
	fi.nImages  = len(header['N'])

	# check to see whether we have a bit depth, if not, assume 16-bit
	if 'Bit_Depth' in header:
		print header['Bit_Depth']
		bit_depth = int(header['Bit_Depth'][0])
		if bit_depth == 8: fi.fileType = FileInfo.GRAY8
	else:
		bit_depth = 16

	# will assume that all files have the same size
	fi.width = int( header['W'][0] )
	fi.height = int( header['H'][0] )
	file_timestamp = strftime("%a, %d %b %Y %H:%M:%S", gmtime(float(header['Time'][0])) )
	

	# make a new imagestack to store the data
	stack = ImageStack(fi.width, fi.height)

	# finally, we need to make a list of files to import as sometimes we have
	# non contiguous file numbers
	try:
		files = os.listdir(op.getDirectory())
	except IOError:
		raise IOError( "No files exist in directory: " + op.getDirectory())

	filenums = []
	for f in files:
		# strip off the stem, and get the number
		targetfile = re.match(file_stem+'([0-9]+)\.dth', f)
		# only take those files which match the formatting requirements
		if targetfile:
			filenums.append( int(targetfile.group(1)) )

	# sort the file numbers
	sorted_filenums = sorted(filenums)

	# make a file stats string
	file_stats_str = file_stem + '\n' + str(fi.width) +'x' + str(fi.height) + 'x' + \
		str(len(sorted_filenums)) +' ('+str(bit_depth)+'-bit)\n' + file_timestamp


	# now open a dialog to let the user set options
	dlg = GenericDialog("Load Octopus Stream (v"+__version__+")")
	dlg.addMessage(file_stats_str)
	dlg.addStringField("Title: ", file_stem)
	dlg.addNumericField("Start: ", 1, 0);
	dlg.addNumericField("End: ", len(sorted_filenums), 0)
	dlg.addCheckbox("Open headers", True)
	dlg.addCheckbox("Contiguous stream?", False)
	dlg.addCheckbox("8-bit unsigned", bit_depth==8)
	dlg.showDialog()

	# if we cancel the dialog, exit here
	if dlg.wasCanceled():
		return

	# set some params
	file_title = dlg.getNextString()
	file_start = dlg.getNextNumber()
	file_end = dlg.getNextNumber()
	DISPLAY_HEADER = bool( dlg.getNextBoolean() )

	# check the ranges
	if file_start > file_end: 
		file_start, file_end = file_end, file_start
	if file_start < 1: 
		file_start = 1
	if file_end > len(sorted_filenums): 
		file_end = len(sorted_filenums) 

	# now set these to the actual file numbers in the stream
	file_start = sorted_filenums[int(file_start)-1]
	file_end = sorted_filenums[int(file_end)-1]

	files_to_open = [n for n in sorted_filenums if n>=file_start and n<=file_end]

	# if we've got too many, truncate the list
	if (len(files_to_open) * fi.nImages * fi.width * fi.height) > (MAX_FRAMES_TO_IMPORT*512*512):
		dlg = GenericDialog("Warning")
		dlg.addMessage("This may use a lot of memory. Continue?")
		dlg.showDialog()
		if dlg.wasCanceled(): return False

	IJ.log( "Opening file: " + op.getDirectory() + op.getFileName() )
	IJ.log( file_stats_str + "\nFile range: " + str(files_to_open[0]) + \
		"-" + str(files_to_open[-1]) +"\n" )

	# make a results table for the metadata
	# NOTE: horrible looping at the moment, but works
	if DISPLAY_HEADER:
		rt = ResultsTable()

	# ok now we can put the files together into the stack
	for i in files_to_open:

		# open the original .dat file and get the stack
		fi.fileName = get_Octopus_filename( op.getDirectory(), file_stem, i)
		
		if os.path.isfile( fi.fileName ):
			fo = FileOpener(fi)
			imp = fo.open(False).getStack() 
	
			# put the slices into the stack
			for im_slice in xrange( imp.getSize() ):
				ip = imp.getProcessor( im_slice+1 )
				if bit_depth == 8:
					bi = ip.getBufferedImage()
				else:
					bi = ip.get16BitBufferedImage() 
				stack.addSlice( file_title,  ip )


			if DISPLAY_HEADER:
				header = get_Octopus_header(op.getDirectory(), file_stem, i)
				for n in xrange(len(header['N'])):
					rt.incrementCounter()
					for k in header.keys():
						rt.addValue(k, parse_header( header[k][n] ) )

		else:
			break

	# done!
	output = ImagePlus('Octopus ('+file_stem+')', stack)
	output.show()

	if DISPLAY_HEADER:
		rt.show("Octopus header metadata")

	return True
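
# get_Octopus_filename, get_Octopus_header and parse_header used above are
# helpers defined elsewhere in the original script; a minimal sketch (an
# assumption) of get_Octopus_filename, pairing each .dth header with its .dat
# data file of the same stem and index:
def get_Octopus_filename(directory, file_stem, index):
    return os.path.join(directory, "%s%d.dat" % (file_stem, index))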
Example #20
Prefs.blackBackground = False
IJ.run(mask, "Make Binary", "")
IJ.run(mask, "Invert", "")
#Colocalization Test
IJ.run(
    red, "Colocalization Test",
    "channel_1=[" + red.getTitle() + "] channel_2=[" + green.getTitle() +
    "] roi=mask randomization=[Fay (x,y,z translation)]")
red.close()
green.close()
mask.changes = False
mask.close()
#get ResultsTable object and save it
textpanel = WindowManager.getFrame("Results").getTextPanel()
resultline = textpanel.getLine(textpanel.getLineCount() - 1)
value = float(resultline.split('\t')[1])
print value
allResultsTitle = "Summary"
frame = WindowManager.getFrame(allResultsTitle)
summaryrt = None
if frame is not None:
    summaryrt = frame.getTextPanel().getResultsTable()
else:
    summaryrt = ResultsTable()
summaryrt.incrementCounter()
summaryrt.addValue("Image", imp.getTitle())
summaryrt.addValue("R(obs)", value)
summaryrt.show(allResultsTitle)
#imagetitle = image.getTitle().split(".")[0]
#rtfile = path.join(outputdir, str(size)+"Summary_" + imagetitle + ".csv")
Example #21
def run(imp, preprocessor_path, postprocessor_path, threshold_method,
        user_comment):

    output_parameters = {
        "image title": "",
        "preprocessor path": float,
        "post processor path": float,
        "thresholding op": float,
        "use ridge detection": bool,
        "high contrast": int,
        "low contrast": int,
        "line width": int,
        "minimum line length": int,
        "mitochondrial footprint": float,
        "branch length mean": float,
        "branch length median": float,
        "branch length stdevp": float,
        "summed branch lengths mean": float,
        "summed branch lengths median": float,
        "summed branch lengths stdevp": float,
        "network branches mean": float,
        "network branches median": float,
        "network branches stdevp": float
    }

    output_order = [
        "image title", "preprocessor path", "post processor path",
        "thresholding op", "use ridge detection", "high contrast",
        "low contrast", "line width", "minimum line length",
        "mitochondrial footprint", "branch length mean",
        "branch length median", "branch length stdevp",
        "summed branch lengths mean", "summed branch lengths median",
        "summed branch lengths stdevp", "network branches mean",
        "network branches median", "network branches stdevp"
    ]

    # Perform any preprocessing steps...
    status.showStatus("Preprocessing image...")
    if preprocessor_path != None:
        if preprocessor_path.exists():
            preprocessor_thread = scripts.run(preprocessor_path, True)
            preprocessor_thread.get()
            imp = WindowManager.getCurrentImage()
    else:
        pass

    # Store all of the analysis parameters in the table
    if preprocessor_path == None:
        preprocessor_str = ""
    else:
        preprocessor_str = preprocessor_path.getCanonicalPath()
    if postprocessor_path == None:
        postprocessor_str = ""
    else:
        postprocessor_str = postprocessor_path.getCanonicalPath()

    output_parameters["preprocessor path"] = preprocessor_str
    output_parameters["post processor path"] = postprocessor_str
    output_parameters["thresholding op"] = threshold_method
    output_parameters["use ridge detection"] = str(use_ridge_detection)
    output_parameters["high contrast"] = rd_max
    output_parameters["low contrast"] = rd_min
    output_parameters["line width"] = rd_width
    output_parameters["minimum line length"] = rd_length

    # Create an ImgPlus copy of the ImagePlus for thresholding with ops...
    status.showStatus("Determining threshold level...")
    imp_title = imp.getTitle()
    slices = imp.getNSlices()
    frames = imp.getNFrames()
    output_parameters["image title"] = imp_title
    imp_calibration = imp.getCalibration()
    imp_channel = Duplicator().run(imp, imp.getChannel(), imp.getChannel(), 1,
                                   slices, 1, frames)
    img = ImageJFunctions.wrap(imp_channel)

    # Determine the threshold value if not manual...
    binary_img = ops.run("threshold.%s" % threshold_method, img)
    binary = ImageJFunctions.wrap(binary_img, 'binary')
    binary.setCalibration(imp_calibration)
    binary.setDimensions(1, slices, 1)

    # Get the total_area
    if binary.getNSlices() == 1:
        area = binary.getStatistics(Measurements.AREA).area
        area_fraction = binary.getStatistics(
            Measurements.AREA_FRACTION).areaFraction
        output_parameters[
            "mitochondrial footprint"] = area * area_fraction / 100.0
    else:
        mito_footprint = 0.0
        for slice in range(1, binary.getNSlices() + 1):  # stack slices are 1-indexed
            binary.setSliceWithoutUpdate(slice)
            area = binary.getStatistics(Measurements.AREA).area
            area_fraction = binary.getStatistics(
                Measurements.AREA_FRACTION).areaFraction
            mito_footprint += area * area_fraction / 100.0
        output_parameters[
            "mitochondrial footprint"] = mito_footprint * imp_calibration.pixelDepth

    # Generate skeleton from masked binary ...
    # Generate ridges first if using Ridge Detection
    if use_ridge_detection and (imp.getNSlices() == 1):
        skeleton = ridge_detect(imp, rd_max, rd_min, rd_width, rd_length)
    else:
        skeleton = Duplicator().run(binary)
        IJ.run(skeleton, "Skeletonize (2D/3D)", "")

    # Analyze the skeleton...
    status.showStatus("Setting up skeleton analysis...")
    skel = AnalyzeSkeleton_()
    skel.setup("", skeleton)
    status.showStatus("Analyzing skeleton...")
    skel_result = skel.run()

    status.showStatus("Computing graph based parameters...")
    branch_lengths = []
    summed_lengths = []
    graphs = skel_result.getGraph()

    for graph in graphs:
        summed_length = 0.0
        edges = graph.getEdges()
        for edge in edges:
            length = edge.getLength()
            branch_lengths.append(length)
            summed_length += length
        summed_lengths.append(summed_length)

    output_parameters["branch length mean"] = eztables.statistical.average(
        branch_lengths)
    output_parameters["branch length median"] = eztables.statistical.median(
        branch_lengths)
    output_parameters["branch length stdevp"] = eztables.statistical.stdevp(
        branch_lengths)

    output_parameters[
        "summed branch lengths mean"] = eztables.statistical.average(
            summed_lengths)
    output_parameters[
        "summed branch lengths median"] = eztables.statistical.median(
            summed_lengths)
    output_parameters[
        "summed branch lengths stdevp"] = eztables.statistical.stdevp(
            summed_lengths)

    branches = list(skel_result.getBranches())
    output_parameters["network branches mean"] = eztables.statistical.average(
        branches)
    output_parameters["network branches median"] = eztables.statistical.median(
        branches)
    output_parameters["network branches stdevp"] = eztables.statistical.stdevp(
        branches)

    # Create/append results to a ResultsTable...
    status.showStatus("Display results...")
    if "Mito Morphology" in list(WindowManager.getNonImageTitles()):
        rt = WindowManager.getWindow(
            "Mito Morphology").getTextPanel().getOrCreateResultsTable()
    else:
        rt = ResultsTable()

    rt.incrementCounter()
    for key in output_order:
        rt.addValue(key, str(output_parameters[key]))

    # Add user comments; comma-separated key=value pairs become their own columns
    if user_comment != None and user_comment != "":
        if "=" in user_comment:
            comments = user_comment.split(",")
            for comment in comments:
                rt.addValue(comment.split("=")[0], comment.split("=")[1])
        else:
            rt.addValue("Comment", user_comment)

    rt.show("Mito Morphology")

    # Create overlays on the original ImagePlus and display them if 2D...
    if imp.getNSlices() == 1:
        status.showStatus("Generate overlays...")
        IJ.run(skeleton, "Green", "")
        IJ.run(binary, "Magenta", "")

        skeleton_ROI = ImageRoi(0, 0, skeleton.getProcessor())
        skeleton_ROI.setZeroTransparent(True)
        skeleton_ROI.setOpacity(1.0)
        binary_ROI = ImageRoi(0, 0, binary.getProcessor())
        binary_ROI.setZeroTransparent(True)
        binary_ROI.setOpacity(0.25)

        overlay = Overlay()
        overlay.add(binary_ROI)
        overlay.add(skeleton_ROI)

        imp.setOverlay(overlay)
        imp.updateAndDraw()

    # Generate a 3D model if a stack
    if imp.getNSlices() > 1:

        univ = Image3DUniverse()
        univ.show()

        pixelWidth = imp_calibration.pixelWidth
        pixelHeight = imp_calibration.pixelHeight
        pixelDepth = imp_calibration.pixelDepth

        # Add end points in yellow
        end_points = skel_result.getListOfEndPoints()
        end_point_list = []
        for p in end_points:
            end_point_list.append(
                Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(end_point_list, Color3f(255.0, 255.0, 0.0), 2,
                           1 * pixelDepth, "endpoints")

        # Add junctions in magenta
        junctions = skel_result.getListOfJunctionVoxels()
        junction_list = []
        for p in junctions:
            junction_list.append(
                Point3f(p.x * pixelWidth, p.y * pixelHeight, p.z * pixelDepth))
        univ.addIcospheres(junction_list, Color3f(255.0, 0.0, 255.0), 2,
                           1 * pixelDepth, "junctions")

        # Add the lines in green
        graphs = skel_result.getGraph()
        for graph in range(len(graphs)):
            edges = graphs[graph].getEdges()
            for edge in range(len(edges)):
                branch_points = []
                for p in edges[edge].getSlabs():
                    branch_points.append(
                        Point3f(p.x * pixelWidth, p.y * pixelHeight,
                                p.z * pixelDepth))
                univ.addLineMesh(branch_points, Color3f(0.0, 255.0, 0.0),
                                 "branch-%s-%s" % (graph, edge), True)

        # Add the surface
        univ.addMesh(binary)
        univ.getContent("binary").setTransparency(0.5)

    # Perform any postprocessing steps...
    status.showStatus("Running postprocessing...")
    if postprocessor_path != None:
        if postprocessor_path.exists():
            postprocessor_thread = scripts.run(postprocessor_path, True)
            postprocessor_thread.get()

    else:
        pass

    status.showStatus("Done analysis!")
Exemplo n.º 22
0
def runScript():

    # find table with trajectories
    rt = findResultsTable(inputTableName)
    if rt == None:
        print("Results table window titled [" + inputTableName +
              "] not found!")
        return

    numOfRows = rt.getCounter()

    if numOfRows > 1:

        # create the output table if requested
        if showOutputTable: velocityRT = ResultsTable()

        # if output in csv format requested print header
        if printOutputData:
            print("trajectory;pathLen;noOfFrames;velocity;velocity stddev")

        # read first data point
        trajectoryId = rt.getValue("Trajectory", 0)
        posX = rt.getValue("x", 0)
        posY = rt.getValue("y", 0)
        posZ = rt.getValue("z", 0)
        startFrame = rt.getValue("Frame", 0)
        frame = startFrame
        trajDistances = []  # distances per frame measured in pixels

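        # Walk the table row by row: rows sharing a Trajectory id contribute per-frame
        # displacements, and when the id changes (or the last row is reached) the finished
        # trajectory's path length, frame count, velocity and velocity stddev are reported.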
        for idx in range(1, numOfRows):
            currTrajectoryId = rt.getValue("Trajectory", idx)
            currPosX = rt.getValue("x", idx)
            currPosY = rt.getValue("y", idx)
            currPosZ = rt.getValue("z", idx)
            currFrame = rt.getValue("Frame", idx)

            # still reading points belonging to the current trajectory
            if trajectoryId == currTrajectoryId:
                distance = ((currPosX - posX)**2 + (currPosY - posY)**2 +
                            (currPosZ - posZ)**2)**(0.5)
                # A trajectory point can skip frames (depending on the link range setting),
                # so the distance is split into an average per frame and appended once per
                # frame spanned, which keeps the standard deviation correct.
                noOfFrames = currFrame - frame
                distancePerFrame = distance / (currFrame - frame)
                for i in range(int(noOfFrames)):
                    trajDistances.append(distancePerFrame)

            # the current row belongs to the next trajectory, or it is the very last row:
            # compute and report the finished trajectory's statistics
            if trajectoryId != currTrajectoryId or idx == numOfRows - 1:

                # calculate trajectory length
                trajDistancesTrueLen = [
                    pixelResolution * d for d in trajDistances
                ]
                pathLen = sum(trajDistancesTrueLen)

                # calculate length of trajectory in frames
                if idx == numOfRows - 1:
                    stopFrame = currFrame
                else:
                    stopFrame = frame
                numOfFrames = stopFrame - startFrame

                # calculate velocity and std dev
                velocity = pathLen / (numOfFrames * frameInterval)
                standardDev = stddev(
                    [d / frameInterval for d in trajDistancesTrueLen])

                # if output in csv format requested print it
                if printOutputData:
                    print(
                        str(int(trajectoryId)) + ";" + str(pathLen) + ";" +
                        str(numOfFrames) + ";" + str(velocity) + ";" +
                        str(standardDev))

                # if output table requested update it with data
                if showOutputTable:
                    velocityRT.incrementCounter()
                    velocityRT.addValue("trajectory", int(trajectoryId))
                    velocityRT.addValue("pathLen", pathLen)
                    velocityRT.addValue("noOfFrames", numOfFrames)
                    velocityRT.addValue("velocity", velocity)
                    velocityRT.addValue("velocity stddev", standardDev)

                # beginning of new trajectory
                trajDistances = []
                startFrame = rt.getValue("Frame", idx)

            trajectoryId = currTrajectoryId
            posX = currPosX
            posY = currPosY
            posZ = currPosZ
            frame = currFrame

    if numOfRows > 1 and showOutputTable:
        velocityRT.show(outputTableName)
Exemplo n.º 23
0
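        # Fragment: channel 4 is segmented, its particles are added to the ROI Manager and
        # counted, and the per-channel counts (Channel1-3 are computed earlier, not shown
        # here) are written as a single ResultsTable row and optionally saved as CSV.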
        Channel4 = Generate_segmented_manual(imp1, 4, "Moments", 2)
        # # # # Generate ROIs by "Analyse Particles"
        IJ.run(Channel4, "Analyze Particles...",
               "size=5-Infinity pixel add exclude stack")
        IJ.run("Clear Results", "")
        Channel4_count = RoiManager.getInstance().getCount()
        print Channel4_count
        time.sleep(0.5)

        rm.runCommand("reset")

        time.sleep(0.5)

        ort = ResultsTable()
        ort.setPrecision(2)
        ort.incrementCounter()
        ort.addValue("Channel1", Channel1_count)
        ort.addValue("Channel2", Channel2_count)
        ort.addValue("Channel3", Channel3_count)
        ort.addValue("Channel4", Channel4_count)
        ort.show("Results")
        if automatic_save_results:
            dataname = imp1.getTitle()

            filename = dataname + ".csv"
            #files = glob.glob(savepath+"/"+dataname+"*.csv")
            savename = savepath + "/" + filename
            ort.saveAs(savename)

        Channel1.changes = False
        Channel1.close()
Exemplo n.º 24
0
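# br_corr and po_corr combine an in-plane (x, y) distance with the z separation scaled
# by the slice spacing z_thick, via Pythagoras; MBL, SL and SW are plain in-plane
# distances between further point pairs. What the BR/PO/MBL/SL/SW labels denote is
# specific to the measurement protocol this fragment belongs to.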
br_uncorr = math.sqrt((x_col[1] - x_col[0])**2 + (y_col[1] - y_col[0])**2)
br_thick = (z_col[1] - z_col[0]) * z_thick
br_corr = math.sqrt(br_uncorr**2 + br_thick**2)

po_uncorr = math.sqrt((x_col[2] - x_col[1])**2 + (y_col[2] - y_col[1])**2)
po_thick = (z_col[2] - z_col[1]) * z_thick
po_corr = math.sqrt(po_uncorr**2 + po_thick**2)

mbl = math.sqrt((x_col[4] - x_col[3])**2 + (y_col[4] - y_col[3])**2)

sl = math.sqrt((x_col[6] - x_col[5])**2 + (y_col[6] - y_col[5])**2)
sw = math.sqrt((x_col[8] - x_col[7])**2 + (y_col[8] - y_col[7])**2)

IJ.run("Clear Results")

# Append to the custom Measurements table, or create it if it does not exist
MeasureTable = WindowManager.getWindow("Measurements")
if MeasureTable == None:
    MeasureTable = ResultsTable()
else:
    MeasureTable = MeasureTable.getTextPanel().getOrCreateResultsTable()

MeasureTable.incrementCounter()
MeasureTable.addValue("BR", br_corr)
MeasureTable.addValue("PO", po_corr)
MeasureTable.addValue("MBL", mbl)
MeasureTable.addValue("SL", sl)
MeasureTable.addValue("SW", sw)
MeasureTable.show("Measurements")
Exemplo n.º 25
0
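		# Fragment: for each detected dot i, an oval ROI of diameter thdist is placed at the
		# dot position in channels 2 and 3, area and mean intensity are measured, and the
		# per-dot values are written to ResultsTable rt (row index ct).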
		ipch3 = impch3.getImageStack().getProcessor(int(zA[i]) + 1)
		dotRoi = OvalRoi(int(yA[i] - xyoffset), int(xA[i] - xyoffset), thdist, thdist)
		ipch2.setRoi(dotRoi)
		#stats = IS.getStatistics(ip, options, imp.getCalibration())
		stats = IS.getStatistics(ipch2, options, cal)
		ipch3.setRoi(dotRoi)
		statsch3 = IS.getStatistics(ipch3, options, cal)
		print "dot", i
		print "...ch2 TotalInt ", stats.area * stats.mean
		print "...ch2 Area     ", stats.area
		print "...ch2 mean     ", stats.mean
		print ".."
		print "...ch3 TotalInt ", statsch3.area * statsch3.mean
		print "...ch3 Area     ", statsch3.area
		print "...ch3 mean     ", statsch3.mean
		rt.incrementCounter()
		rt.setValue("DotID", ct, i)
		rt.setValue("DotX", ct, yA[i])
		rt.setValue("DotY", ct, xA[i])
		rt.setValue("DotZ", ct, zA[i])
		rt.setValue("Ch2_TotalIntensity", ct, stats.area * stats.mean)
		rt.setValue("Ch2_MeanIntensity", ct, stats.mean)
		rt.setValue("Ch3_TotalIntensity", ct, statsch3.area * statsch3.mean)
		rt.setValue("Ch3_meanIntensity", ct, statsch3.mean)
		ct += 1
rt.show("Dot Intensity")


#AREA, AREA_FRACTION, CENTER_OF_MASS, CENTROID, CIRCULARITY, ELLIPSE, FERET, 
#INTEGRATED_DENSITY, INVERT_Y, KURTOSIS, LABELS, LIMIT, MAX_STANDARDS, MEAN, 
#MEDIAN, MIN_MAX, MODE, PERIMETER, RECT, SCIENTIFIC_NOTATION, SHAPE_DESCRIPTORS, 
Exemplo n.º 27
0
for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    roi.addPoint(imp, p[0], p[1])

imp.setRoi(roi)

# Now, iterate each peak, defining a small interval centered at each peak,
# and measure the sum of total pixel intensity,
# and display the results in an ImageJ ResultTable.
table = ResultsTable()

for peak in peaks:
    # Read peak coordinates into an array of integers
    peak.localize(p)
    # Define limits of the interval around the peak:
    # (sigmaSmaller is half the radius of the embryo)
    minC = [p[i] - sigmaSmaller for i in range(img.numDimensions())]
    maxC = [p[i] + sigmaSmaller for i in range(img.numDimensions())]
    # View the interval around the peak, as a flat iterable (like an array)
    fov = Views.interval(img, minC, maxC)
    # Compute sum of pixel intensity values of the interval
    # (The t is the Type that mediates access to the pixels, via its get* methods)
    s = sum(t.getInteger() for t in fov)
    # Add to results table
    table.incrementCounter()
    table.addValue("x", p[0])
    table.addValue("y", p[1])
    table.addValue("sum", s)

table.show("Embryo intensities at peaks")
Exemplo n.º 28
0
def run():

	IJ.run("Close All", "")
	IJ.log("\\Clear")

	IJ.log("Find_close_peaks")

	imp = IJ.run("Bio-Formats Importer")
	imp = IJ.getImage()


	Channel_1, Channel_2, radius_background, sigmaSmaller, sigmaLarger, minPeakValue, min_dist = getOptions()

	IJ.log("option used:" \
    		+ "\n" + "channel 1:" + str(Channel_1) \
    		+ "\n" + "channel 2:"+ str(Channel_2) \
    		+ "\n" + "Radius Background:"+ str(radius_background) \
    		+ "\n" + "Smaller Sigma:"+ str(sigmaSmaller) \
    		+ "\n" + "Larger Sigma:"+str(sigmaLarger) \
    		+ "\n" + "Min Peak Value:"+str(minPeakValue) \
    		+ "\n" + "Min dist between peaks:"+str(min_dist))

	IJ.log("Computing Max Intensity Projection")

	if imp.getDimensions()[3] > 1:
		imp_max = ZProjector.run(imp,"max")
		#imp_max = IJ.run("Z Project...", "projection=[Max Intensity]")
		#imp_max = IJ.getImage()
	else:
		imp_max = imp

	ip1, ip2 = extract_channel(imp_max, Channel_1, Channel_2)
	imp1, imp2 = back_substraction(ip1, ip2, radius_background)
	imp1.show()
	imp2.show()

	IJ.log("Finding Peaks")

	ip1_1, ip2_1, peaks_1, peaks_2 = find_peaks(imp1, imp2, sigmaSmaller, sigmaLarger, minPeakValue)

	# Create a PointRoi from the DoG peaks, for visualization
	roi_1 = PointRoi(0, 0)
	roi_2 = PointRoi(0, 0)
	roi_3 = PointRoi(0, 0)
	roi_4 = PointRoi(0, 0)

	# A temporary array of integers, one per dimension the image has
	p_1 = zeros(ip1_1.numDimensions(), 'i')
	p_2 = zeros(ip2_1.numDimensions(), 'i')

	# Load every peak as a point in the PointRoi
	for peak in peaks_1:
	  # Read peak coordinates into an array of integers
	  peak.localize(p_1)
	  roi_1.addPoint(imp1, p_1[0], p_1[1])

	for peak in peaks_2:
	  # Read peak coordinates into an array of integers
	  peak.localize(p_2)
	  roi_2.addPoint(imp2, p_2[0], p_2[1])

	# Choose the minimum distance in pixels
	#min_dist = 20

	for peak_1 in peaks_1:
		peak_1.localize(p_1)
		for peak_2 in peaks_2:
			peak_2.localize(p_2)
			d1 = distance(p_1, p_2)
			if  d1 < min_dist:
				roi_3.addPoint(imp1, p_2[0], p_2[1])
				break

	for peak_2 in peaks_2:
		peak_2.localize(p_2)
		for peak_1 in peaks_1:
			peak_1.localize(p_1)
			d2 = distance(p_2, p_1)
			if  d2 < min_dist:
				roi_4.addPoint(imp1, p_2[0], p_2[1])
				break

	rm = RoiManager.getInstance()
	if not rm:
	  rm = RoiManager()
	rm.reset()

	rm.addRoi(roi_1)
	rm.addRoi(roi_2)
	rm.addRoi(roi_3)
	rm.addRoi(roi_4)

	rm.select(0)
	rm.rename(0, "ROI neuron")
	rm.runCommand("Set Color", "yellow")

	rm.select(1)
	rm.rename(1, "ROI glioma")
	rm.runCommand("Set Color", "blue")

	rm.select(2)
	rm.rename(2, "ROI glioma touching neurons")
	rm.runCommand("Set Color", "red")

	rm.select(3)
	rm.rename(3, "ROI neurons touching glioma")
	rm.runCommand("Set Color", "green")

	rm.runCommand(imp1, "Show All")

	#Change distance to be in um
	cal = imp.getCalibration()
	min_distance = str(round((cal.pixelWidth * min_dist),1))

	table = ResultsTable()
	table.incrementCounter()
	table.addValue("Numbers of Neuron Markers", roi_1.getCount(0))
	table.addValue("Numbers of Glioma Markers", roi_2.getCount(0))
	table.addValue("Numbers of Glioma within %s um of Neurons" %(min_distance), roi_3.getCount(0))
	table.addValue("Numbers of Neurons within %s um of Glioma" %(min_distance), roi_4.getCount(0))

	table.show("Results Analysis")