Example #1
def SaveCoverFromFs(tiles, newwidth, newheight, cols, rows):

	tilewidth = int(newwidth/cols)
	tileheight = int(newheight/rows)

	newwidth = int(newwidth/tilewidth) * tilewidth
	newheight = int(newheight/tileheight) * tileheight

	hiresoutip = ColorProcessor(newwidth, newheight)
	hiresout = ImagePlus("hi res output", hiresoutip)
	hiresout.show()

	x = 0
	y = -1

	plane = []

	# scale the images
	for i in sorted(tiles.iterkeys()):
		if y < rows-1:
			y += 1
		else:
			y = 0
			x += 1
		imp = IJ.openImage(str(tiles[i]))
		scale = Scale(imp.getProcessor())
		ipscaled = ScaleImageToSize(scale, tilewidth, tileheight)
		hiresoutip.copyBits(ipscaled, x*tilewidth, y*tileheight, 0)
		hiresout.draw()
def scale(ip, s):
	""" Scale the image with the parameter scale = s """
	imp = ImagePlus('scale',ip)
	IJ.run(imp, "Scale...", "x="+str(s)+" y="+str(s)+" interpolation=Bilinear average");
	ip = imp.getProcessor()
	w = ip.width
	h = ip.height
	cd = CanvasResizer()
	ip = cd.expandImage(ip, int(round(s*w)), int(round(s*h)), -int(round((1-s)/2*w)), -int(round((1-s)/2*h)) )
	return ip
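A minimal usage sketch for the scale() helper above (not part of the original snippet; it assumes IJ, ImagePlus and CanvasResizer are imported as in the function, and the file path is hypothetical):

imp = IJ.openImage("/path/to/image.tif")  # hypothetical input path
scaled_ip = scale(imp.getProcessor(), 0.5)  # shrink to half size
ImagePlus("scaled", scaled_ip).show()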
def draw_bounding_boxes(objects,title,templateImage):
	drawnIp = ByteProcessor(templateImage.getWidth(),templateImage.getHeight())
	drawnImage = ImagePlus(title,drawnIp)
	drawnImage.show()
	IJ.selectWindow(title)
	for j in range(len(objects)):
		IJ.makeRectangle(objects[j][42],objects[j][43],objects[j][45]-objects[j][42],objects[j][46]-objects[j][43])
		IJ.run("Draw","stack")
	drawnImage.hide()
	return(drawnImage)
def generateOverlay(project, patch, shape):
    oWidth  = patch.getOWidth()
    oHeight = patch.getOHeight()

    overlayp = ByteProcessor(oWidth, oHeight)
    # TODO: Use ShortProcessor instead of ByteProcessor
    imp      = ImagePlus("Patch %s" % patch, overlayp)
    stepX    = oWidth/shape[0]
    stepY    = oHeight/shape[1]
    color    = 1

    for x in xrange(shape[0]):
        offsetX = x * stepX
        for y in xrange(shape[1]):
            offsetY = y * stepY
            imp.setRoi(offsetX, offsetY, stepX, stepY)
            imp.getProcessor().setValue(color)
            imp.getProcessor().fill()
            color += 1
    imp.setRoi(None)
    
    overlayPatch = Patch(project, "%s_overlay" % patch, 0.0, 0.0, imp)
    
    overlayPatch.setAffineTransform(patch.getAffineTransform())
    overlayPatch.setCoordinateTransform(patch.getCoordinateTransform())
    return overlayPatch
Example #5
		def __init__(self, listpaths):
			self.__listpaths = listpaths
			self.__listimp = []
			for v in listpaths :
				print v
				imp = ImagePlus(v)
				print imp.getTitle()
				self.__listimp.append(imp)
			swing.JFrame.__init__(self, title="Images")
			self.setDefaultCloseOperation(swing.JFrame.DISPOSE_ON_CLOSE)
			self.run()
def scaleLongSide(ip, longSide):
	""" Scale the image with respect to the longSide parameter (new size along the long side of the image should equal the longSide parameter) """
	w = ip.width
	h = ip.height
	l = max(w,h)
	s = float(longSide)/l
	imp = ImagePlus('scaleLongSide',ip)
	IJ.run(imp, "Scale...", "x="+str(s)+" y="+str(s)+" interpolation=Bilinear average");
	ip = imp.getProcessor()
	cd = CanvasResizer()
	ip = cd.expandImage(ip, int(round(s*w)), int(round(s*h)), -int(round((1-s)/2*w)), -int(round((1-s)/2*h)) )
	return ip, s
def siftSingle(ref, target):
	""" perform SIFT registration for one image """
	impRef = ImagePlus('Sift_source', ref)
	impTarget = ImagePlus('Sift_target', target)
	expected_transformation = 'Similarity'
	initial_gaussian_blur = 2
	feature_descriptor_size = 8
	imp = ImagePlus('siftRefTest', ref)
	t = SIFT_ExtractPointRoi()
	t.exec( impRef, impTarget )
	roiRef = impRef.getRoi()
	roiTarget = impTarget.getRoi()
	return roiRef, roiTarget
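A hedged usage sketch for siftSingle() (assumed names, not from the original script): impA and impB stand for two already opened, roughly overlapping ImagePlus objects, and the returned point ROIs hold the matched SIFT landmarks.

roiRef, roiTarget = siftSingle(impA.getProcessor(), impB.getProcessor())
print roiRef, roiTarget  # PointRois with matched landmark pairs (may be None if nothing matched)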
Example #8
	def __init__(self):
		self.imp = None
		self.preview = None
		self.createMainWindow()
		self.cells = None
		self.files = []
		self.results = ResultsTable()
		ImagePlus.addImageListener(self)
		self.selectInputDir()
		self.selectOutputDir()
		self.pairs = []
		self.methods = []
		self.processNextFile()
def extract_frame_process_roi(imp, frame, channel, process, roi):
  # extract frame and channel 
  imp_frame = ImagePlus("", extract_frame(imp, frame, channel)).duplicate()
  # check for roi and crop
  if roi != None:
    #print roi.getBounds()
    imp_frame.setRoi(roi)
    IJ.run(imp_frame, "Crop", "")
  # process  
  if process:
    IJ.run(imp_frame, "Mean 3D...", "x=1 y=1 z=0");
    IJ.run(imp_frame, "Find Edges", "stack");
  # return
  return imp_frame
def test():
	newImg = ImagePlus("GrayScaled",imp)
	newip = newImg.getProcessor()

	hist = newip.getHistogram()
	lowTH = Auto_Threshold.IsoData(hist)
	newip.setThreshold(lowTH, max(hist),ImageProcessor.BLACK_AND_WHITE_LUT)


	rt = ResultsTable()
	pa = ParticleAnalyzer(ParticleAnalyzer.SHOW_RESULTS | ParticleAnalyzer.SHOW_OVERLAY_OUTLINES, Measurements.AREA |Measurements.MEAN |\
		Measurements.MEDIAN | Measurements.STD_DEV | Measurements.MIN_MAX | Measurements.RECT, rt,50, 200000, 0.5, 1  )
	pa.setResultsTable(rt)
	pa.analyze(newImg)
	rt.show("Results")
def extract_stack_under_arealist():
	# Check that a Display is open
	display = Display.getFront()
	if display is None:
		IJ.log("Open a TrakEM2 Display first!")
		return
	# Check that an AreaList is selected and active:
	ali = display.getActive()
	if ali is None or not isinstance(ali, AreaList):
		IJ.log("Please select an AreaList first!")
		return

	# Get the range of layers to which ali paints:
	ls = display.getLayerSet()
	ifirst = ls.indexOf(ali.getFirstLayer())
	ilast = ls.indexOf(ali.getLastLayer())
	layers = display.getLayerSet().getLayers().subList(ifirst, ilast +1)

	# Create a stack with the dimensions of ali
	bounds = ali.getBoundingBox()
	stack = ImageStack(bounds.width, bounds.height)

	# Using 16-bit. To change to 8-bit, use GRAY8 and ByteProcessor in the two lines below:
	type = ImagePlus.GRAY16
	ref_ip = ShortProcessor(bounds.width, bounds.height)

	for layer in layers:
		area = ali.getArea(layer)
		z = layer.getZ()
		ip = ref_ip.createProcessor(bounds.width, bounds.height)
		if area is None:
			stack.addSlice(str(z), ip)
			continue

		# Create a ROI from the area of ali at layer:
		aff = ali.getAffineTransformCopy()
		aff.translate(-bounds.x, -bounds.y)
		roi = ShapeRoi(area.createTransformedArea(aff))

		# Create a cropped snapshot of the images at layer under ali:
		flat = Patch.makeFlatImage(type, layer, bounds, 1.0, layer.getDisplayables(Patch), Color.black)
		b = roi.getBounds()
		flat.setRoi(roi)
		ip.insert(flat.crop(), b.x, b.y)

		# Clear the outside of ROI (ShapeRoi is a non-rectangular ROI type)
		bimp = ImagePlus("", ip)
		bimp.setRoi(roi)
		ip.setValue(0)
		ip.setBackgroundValue(0)
		IJ.run(bimp, "Clear Outside", "")

		# Accumulate slices
		stack.addSlice(str(z), ip)

	imp = ImagePlus("AreaList stack", stack)
	imp.setCalibration(ls.getCalibrationCopy())
	imp.show()
def run_script():
    # We can use import inside of code blocks to limit the scope.
    import math
    from ij import IJ, ImagePlus
    from ij.process import FloatProcessor
    blank = IJ.createImage("Blank", "32-bit black", img_size, img_size, 1)
    # This creates a list of lists. Each inner list represents a line.
    # pixel_matrix[0] is the first line where y=0.
    pixel_matrix = split_list(blank.getProcessor().getPixels(), wanted_parts=img_size)
    # This swaps x and y coordinates.
    # http://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python
    # As zip() creates tuples, we have to convert each one by using list().
    pixel_matrix = [list(x) for x in zip(*pixel_matrix)]
    for y in range(img_size):
        for x in range(img_size):
            # This function oscillates between 0 and 1.
            # The distance of 2 maxima in a row/column is given by spacing.
            val = (0.5 * (math.cos(2*math.pi/spacing*x) + math.sin(2*math.pi/spacing*y)))**2
            # When assigning, we multiply the value by the amplitude.
            pixel_matrix[x][y] = amplitude * val
    # The constructor of FloatProcessor works fine with a 2D Python list.
    crystal = ImagePlus("Crystal", FloatProcessor(pixel_matrix))
    # Crop without selection is used to duplicate an image.
    crystal_with_noise = crystal.crop()
    crystal_with_noise.setTitle("Crystal with noise")
    IJ.run(crystal_with_noise, "Add Specified Noise...", "standard=%d" % int(amplitude/math.sqrt(2)))
    # As this is a demo, we don't want to be asked to save an image on closing it.
    # In Python True and False start with capital letters.
    crystal_with_noise.changes = False
    crystal.show()
    crystal_with_noise.show()
    filtered = fft_filter(crystal_with_noise)
    # We create a lambda function to be used as a parameter of img_calc().
    subtract = lambda values: values[0] - values[1]
    """ This is a short form for:
    def subtract(values):
        return values[0] - values[1]
    """
    # The first time we call img_calc with 2 images.
    difference = img_calc(subtract, crystal, filtered, title="Difference of 2")
    difference.show()
    # The second time we call img_calc with 3 images.
    minimum = img_calc(min, crystal, filtered, crystal_with_noise, title="Minimum of 3")
    minimum.show()
    for imp in (crystal, crystal_with_noise, filtered, difference, minimum):
        IJ.run(imp, "Measure", "")
Example #13
	def __falign(self) :
		#self.__impRes=IJ.getImage()
		stack = self.__impRes.getStack() # get the stack within the ImagePlus
		n_slices = stack.getSize() # get the number of slices
		ic = ImageCalculator()
		w = self.__impRes.getWidth()
		h = self.__impRes.getHeight()
		self.__sens[:] = []
		self.__listrois[:] = []

		
		
		for index in range(1, n_slices+1):	
			self.__impRes.setSlice(index)
			ip1 = stack.getProcessor(index)
			imp1 = ImagePlus("imp1-"+str(index), ip1)
			#imp1sqr = ic.run("Multiply create 32-bit", imp1, imp1)			

			#IJ.setThreshold(imp1sqr, 1, 4294967296)
			#IJ.run(imp1sqr, "Create Selection", "")
			#IJ.run(imp1sqr, "Select All", "")
			#roi = imp1sqr.getRoi()
			#rect=roi.getBounds()
			#roi = Roi(rect)

			#self.__listrois.append(roi)
			#ipsqr = imp1sqr.getProcessor()
			#is1 = ipsqr.getStatistics()
			#self.__impRes.killRoi()

			IJ.run(imp1, "Select All", "")
			roi = imp1.getRoi()
			self.__listrois.append(roi)			
			ipsqr = imp1.getProcessor()
			is1 = ipsqr.getStatistics()
			self.__impRes.killRoi()
			
			if is1.xCenterOfMass > w/2.00 : 
				self.__impRes.setRoi(roi)
				ip1 = self.__impRes.getProcessor()
				ip1.flipHorizontal()
				self.__impRes.killRoi()
				self.__sens.append(-1)
			else : self.__sens.append(1)
				
			self.__impRes.updateAndDraw()
def extract_frame_process_roi(imp, frame, channel, process, roi, roiz):
  
  if( imp.getStack().getClass().getName() == "ct.vss.VirtualStackOfStacks"):
    imp_frame = extract_cropped_frame_from_VirtualStackOfStacks(imp, frame-1, channel, roi, roiz)
  else:
    # extract frame and channel 
    imp_frame = ImagePlus("", extract_frame(imp, frame, channel, roiz)).duplicate()
    # check for roi and crop
    if roi != None:
      #print roi.getBounds()
      imp_frame.setRoi(roi)
      IJ.run(imp_frame, "Crop", "")
  # process  
  if process:
    IJ.run(imp_frame, "Mean 3D...", "x=1 y=1 z=0");
    IJ.run(imp_frame, "Find Edges", "stack");
  # return
  return imp_frame
Example #15
def SaveCoverFromZip(tileIndex, newwidth, newheight, cols, rows, originalspath):
	baseDir = re.sub(r'\/originals.zip', "", originalspath)

	#print baseDir

	zf = zipfile.ZipFile(originalspath, mode='r')

	tilewidth = int(newwidth/cols)
	tileheight = int(newheight/rows)

	newwidth = int(newwidth/tilewidth) * tilewidth
	newheight = int(newheight/tileheight) * tileheight

	hiresoutip = ColorProcessor(newwidth, newheight)
	hiresout = ImagePlus("hi res output", hiresoutip)
	hiresout.show()

	x = 0
	y = -1

	plane = []

	# scale the images
	for i in sorted(tileIndex.iterkeys()):
		if y < rows-1:
			y += 1
		else:
			y = 0
			x += 1
		#bi = bir.openImage(tileIndex[i]);
		#ip = ColorProcessor(bi)
		image = zf.read(str(tileIndex[i]) + ".jpeg")
		#IJ.log("Placing image :" + str(tileIndex[i]) + ".jpeg")
		my_file = open(baseDir + '/temporary.jpeg','wb')
		my_file.write(image)
		my_file.close()
		imp = IJ.openImage(baseDir + "/temporary.jpeg")
		ip = imp.getProcessor()
		scale = Scale(ip)
		ipscaled = ScaleImageToSize(scale, tilewidth, tileheight)
		hiresoutip.copyBits(ipscaled, x*tilewidth, y*tileheight, 0)
		hiresout.draw()
Example #16
def run():

    d = str(input_dir)
    files = get_swc_files(d, filenameFilter);
    if not files or len(files) == 0:
        uiservice.showDialog("No files matched the specified criteria", "Error")

    IT = ImportTracings()
    IT.applyScalingFactor(scale_factor, scale_factor, scale_factor)
    # IT.applyCalibration(1, 1, 1, "um")
    proj_rendering = "Projected" in display_choice
    for (counter, f) in enumerate(files):
        basename = os.path.basename(f)
        status.showStatus('Loading file %s: %s...' % (counter + 1, basename))
        try:
            IT.autoLoadSWC(f, proj_rendering)
            if proj_rendering:
                imp = ImagePlus("file: " + basename, IT.getSkeletonizedProjection())
                imp.show()
        except Exception, msg:  # Python 3: except Exception as msg:
            log.error("An error occurred when loading %s. Details:\n%s" % (f, msg))
            break
Example #17
	def __end(self, event): 
		if len(self.__iplist)==0 : 
			IJ.showMessage("", "Stack is empty")
			return

		self.__iplist.sort(key = lambda ip : ip.width)
		
		self.__ipw=[ ip.getWidth() for ip in self.__iplist ]
		self.__iph=[ ip.getHeight() for ip in self.__iplist ]
		maxw=max(self.__ipw)
		maxh=max(self.__iph)
		if self.__enlarge : 
			resizelist = [ ip.resize(maxw, maxh, True) for ip in self.__iplist ]
			
		else : 
			resizelist = []
			for ip in self.__iplist :
				tempip = ShortProcessor(maxw, maxh)
				xloc = int(math.floor((maxw/2.00) - (ip.width/2.00)))
				yloc = int(math.floor((maxh/2.00) - (ip.height/2.00)))
				tempip.copyBits(ip, xloc, yloc, Blitter.COPY)
				resizelist.append(tempip)
		ims = ImageStack(maxw, maxh) 	

		#for ip in resizelist : ims.addSlice("", ip)
		for i in range(len(resizelist)) : 
			ims.addSlice(self.__labels[i], resizelist[i])
		
		
		self.__impRes = ImagePlus(self.__name, ims)
		self.__impRes.show()

		self.__sens = [1 for i in range(len(self.__iplist)) ]
		
		if self.__appmedian : IJ.run(self.__impRes, "Median...", "radius=1 stack")
		if self.__align : self.__falign()
		if self.__avg : self.__favg()
		if self.__mosa : self.__fmosa()
		if self.__maxfinder : self.__fmaxfinder()
		if self.__fire : IJ.run(self.__impRes, "Fire", "")
		if self.__measures : self.__fmeasures()
		
		self.__sens[:] = []
		
		self.__listrois[:] = []
		self.__iplist[:] = []
		self.__cellsrois[:] = []
		self.__ipw[:] = []
		self.__iph[:] = []

		self.__init = False
Example #18
def Extract_Red_Channel(color):
	imp = IJ.getImage()
	stack = imp.getImageStack()  
	print "number of slices:", imp.getNSlices()  
	# A list of red slices  
	reds = []  
	# Iterate each slice in the stack  
	for i in xrange(1, imp.getNSlices()+1):  
	  # Get the ColorProcessor slice at index i  
	  cp = stack.getProcessor(i)  
	  # Get its red channel as a FloatProcessor
	  fp = cp.toFloat(0, None)  
	  # ... and store it in a list  
	  reds.append(fp)  
	# Create a new stack with only the red channel
	stack2 = ImageStack(imp.width, imp.height)  
	for fp in reds:  
	  stack2.addSlice(None, fp)  
	# Create a new image with the stack of red channel slices
	imp2 = ImagePlus("Red channel", stack2)  
	# Set a red look-up table:
	IJ.run(imp2, "Red", "")  
	imp2.show()  	
Example #19
	def load(self, event): #choose a folder to load images
		self.imdir = DirectoryChooser("Select a dir, dude").getDirectory()

		self.pictureList = [path.join(self.imdir, f) for f in listdir(self.imdir) if path.splitext(f)[1]==".tiff" and 'AVG' not in f and 'Avg' not in f]  #list of pictures (not averages) with .tiff extension
		print self.pictureList #list of pictures
		self.imLeft = ImagePlus(self.pictureList[self.imageCount]) #read the image
		self.imageCount =self.imageCount+1 #increase counter
		self.imRight = ImagePlus(self.pictureList[self.imageCount]) #read the image
		self.imageCount =self.imageCount+1
		
		self.imLeft.show() #show image on the left
		self.imLeft.getWindow().setLocation(self.coordx,self.coordy) #reposition image
		
       
		self.imRight.show() #show image on the right
		self.rightImLocx = self.coordx+self.imLeft.getWindow().getWidth() #set a variable with the x position for right image
		self.imRight.getWindow().setLocation(self.rightImLocx,self.coordy) #reposition image

		#WindowOrganizer("Tile")
		
		#SyncWindows(self.imLeft.getTitle() + " " + self.imRight.getTitle())
		#IJ.run("Sync Windows")

		print len(self.pictureList)
Example #20
	def ChooseLeft(self, event): #remove right image and load another
		if self.listendFlag==0: #if is not the end of the list
			print "You chose left, which is of course right"
			self.imRight.close()
		if self.imageCount>=len(self.pictureList): #if is the end of the list
			print "YOU HAVE A WINNER!!!"
			self.listendFlag = 1	#flag
			if self.imageCount==len(self.pictureList):
				self.chosenOne = 'L'	#a variable to know the position of the chosen one
			self.imageCount = self.imageCount+1	#this is to avoid changing the chosen one
		else:
			self.imRight = ImagePlus(self.pictureList[self.imageCount]) #read next image
			self.imageCount =self.imageCount+1	#increase counter
			self.imRight.show() #show image on the right
			self.imRight.getWindow().setLocation(self.rightImLocx,self.coordy) #reposition image
Example #21
	def ChooseRight(self, event): #same as above but for the right image
		if self.listendFlag==0:
			print "You chose right, :)"
			self.imLeft.close()
		if self.imageCount>=len(self.pictureList):
			print "YOU HAVE A WINNER!!!"
			self.listendFlag = 1
			if self.imageCount==len(self.pictureList):
				self.chosenOne = 'R'
			self.imageCount = self.imageCount+1
		else:
			self.imLeft = ImagePlus(self.pictureList[self.imageCount])
			self.imageCount =self.imageCount+1
			self.imLeft.show() #show image on the left
			self.imLeft.getWindow().setLocation(self.coordx,self.coordy) #reposition image
 def openImp(self, path):
    imp = ImagePlus(path)  # open associated tif file
    imp.show()
    imp.getWindow().setLocationAndSize(280, 120, imp.getWidth()*4, imp.getHeight()*4) # int x, int y, int width, int height
    return imp
#!/bin/sh
''''exec "$(dirname "$0")"/ImageJ.sh --jython "$0" "$@" # (call again with fiji)'''

from org.jpedal import PdfDecoder
from ij import ImageJ, ImagePlus
import sys

if len(sys.argv) != 2:
    print 'Usage:', sys.argv[0], 'source.pdf'
    sys.exit(1)

ij = None

decoder = PdfDecoder(False)
decoder.setExtractionMode(PdfDecoder.RAWIMAGES | PdfDecoder.FINALIMAGES)
decoder.openPdfFile(sys.argv[1])

for page in range(0, decoder.getPageCount()):
    decoder.decodePage(page + 1)
    images = decoder.getPdfImageData()
    image_count = images.getImageCount()
    for i in range(0, image_count):
        name = images.getImageName(i)
        image = decoder.getObjectStore().loadStoredImage('R' + name)
        if ij == None:
            ij = ImageJ()
            ij.exitWhenQuitting(True)
        ImagePlus(name, image).show()
    decoder.flushObjectValues(True)
decoder.closePdfFile()
def test(red, green, blue, easy=True):
    saturation = let(
        "red", red, "green", green, "blue", blue, "max",
        maximum("red", "green", "blue"), "min",
        minimum("red", "green", "blue"),
        IF(EQ(0, "max"), THEN(0), ELSE(div(sub("max", "min"), "max"))))

    brightness = div(maximum(red, green, blue), 255.0)

    hue = IF(
        EQ(0, saturation), THEN(0),
        ELSE(
            let(
                "red", red, "green", green, "blue", blue, "max",
                maximum("red", "green", "blue"), "min",
                minimum("red", "green", "blue"), "range", sub("max", "min"),
                "redc", div(sub("max", "red"), "range"), "greenc",
                div(sub("max", "green"), "range"), "bluec",
                div(sub("max", "blue"), "range"), "hue",
                div(
                    IF(
                        EQ("red", "max"), THEN(sub("bluec", "greenc")),
                        ELSE(
                            IF(EQ("green", "max"),
                               THEN(sub(add(2, "redc"), "bluec")),
                               ELSE(sub(add(4, "greenc"), "redc"))))), 6),
                IF(LT("hue", 0), THEN(add("hue", 1)), ELSE("hue")))))

    #print hierarchy(hue)

    #print "hue view:", hue.view( FloatType() ).iterationOrder()

    if easy:
        # About 26 ms
        """
    hsb = Views.stack( hue.view( FloatType() ),
                       saturation.view( FloatType() ),
                       brightness.view( FloatType() ) )
    """

        # About 13 ms: half! Still much worse than plain ImageJ,
        # but the source images are iterated 4 times, rather than just once,
        # and the saturation is computed twice,
        # and the min, max is computed 3 and 4 times, respectively.
        hsb = Views.stack(hue.viewDouble(FloatType()),
                          saturation.viewDouble(FloatType()),
                          brightness.viewDouble(FloatType()))
        """
    # Even worse: ~37 ms
    width, height = rgb.dimension(0), rgb.dimension(1)
    h = compute(hue).into(ArrayImgs.floats([width, height]))
    s = compute(saturation).into(ArrayImgs.floats([width, height]))
    b = compute(brightness).into(ArrayImgs.floats([width, height]))
    hsb = Views.stack( h, s, b )
    """

        imp = IL.wrap(hsb, "HSB view")
    else:
        # Tested it: takes more time (~40 ms vs 26 ms above)
        width, height = rgb.dimension(0), rgb.dimension(1)
        hb = zeros(width * height, 'f')
        sb = zeros(width * height, 'f')
        bb = zeros(width * height, 'f')
        h = ArrayImgs.floats(hb, [width, height])
        s = ArrayImgs.floats(sb, [width, height])
        b = ArrayImgs.floats(bb, [width, height])
        #print "ArrayImg:", b.iterationOrder()
        ImgUtil.copy(ImgView.wrap(hue.view(FloatType()), None), h)
        ImgUtil.copy(ImgView.wrap(saturation.view(FloatType()), None), s)
        ImgUtil.copy(ImgView.wrap(brightness.view(FloatType()), None), b)
        stack = ImageStack(width, height)
        stack.addSlice(FloatProcessor(width, height, hb, None))
        stack.addSlice(FloatProcessor(width, height, sb, None))
        stack.addSlice(FloatProcessor(width, height, bb, None))
        imp = ImagePlus("hsb", stack)
    return imp
Example #25
		nearZImp.close()
		
	#add the images to concatenated stacks
	conThresholdStack = concatStacks(conThresholdStack, thresholdImp)
	conFRETImp2Stack=concatStacks(conFRETImp2Stack, FRETimp2)
	conFRETProjImpStack=concatStacks(conFRETProjImpStack, FRETProjImp)
	conlabelImpStack=concatStacks(conlabelImpStack, labelImp)
	
	thresholdImp.close()
	FRETimp2.close()
	FRETProjImp.close()
	labelImp.close()

#Show the images and make the images pretty... I should have put this in a function

conThresholdImp= ImagePlus( "Threshold image for "+ originalTitle, conThresholdStack)
conThresholdImp.setDimensions(1,  imp1.getNSlices(), imp1.getNFrames())
IJ.setMinAndMax(conThresholdImp, 0,1)
conThresholdImp.setCalibration(cal)
conThresholdImp = CompositeImage(conThresholdImp, CompositeImage.COMPOSITE)
conThresholdImp.show()


conFRETImp2 = ImagePlus( "Emission ratios X1000 of "+ originalTitle, conFRETImp2Stack)
conFRETImp2.setDimensions(1, imp1.getNSlices(), imp1.getNFrames())
conFRETImp2.setCalibration(cal)
stats=StackStatistics(conFRETImp2)
conFRETImp2 = CompositeImage(conFRETImp2, CompositeImage.COMPOSITE)  
IJ.setMinAndMax(conFRETImp2, 500, 3500)
conFRETImp2.show()
IJ.run("16_colors")
    # Convert method string to the opencv corresponding index
    Dico_Method = {
        "Square difference": 0,
        "Normalised Square Difference": 1,
        "Cross-Correlation": 2,
        "Normalised cross-correlation": 3,
        "0-mean cross-correlation": 4,
        "Normalised 0-mean cross-correlation": 5
    }
    Method = Dico_Method[method]

    if show_images:
        from ij import ImagePlus, ImageStack
        Stack_Image = ImageStack()
        Stack_Image_ImP = ImagePlus()

    if add_roi:
        from ij.plugin.frame import RoiManager
        from ij.gui import Roi
        RM = RoiManager()
        rm = RM.getInstance()

    if show_table:
        from ij.measure import ResultsTable
        from utils import AddToTable
        Table = ResultsTable().getResultsTable()  # allows appending to an existing table

    ## Check if input are valid
    if n_hit <= 0:
 def getThreshold(self, imp):
     result = ImagePlus(imp.title,
                        self.segmentor.applyClassifier(imp).getProcessor())
     return result
Example #28
def crop(im,roi):
	ip = im.getProcessor()
	ip.setRoi(roi)
	im = ImagePlus(im.getTitle() + '_Cropped', ip.crop())
	return im
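A short, illustrative call to crop() above (assumes an open ImagePlus named imp; the rectangle values are made up):

from ij.gui import Roi
roi = Roi(10, 10, 100, 100)  # x, y, width, height (hypothetical values)
cropped = crop(imp, roi)
cropped.show()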
def scaleandfilter(infile,outfile,scalex,scaley,scalez,anisofilter,runtube):
	
	print ("infile is: "+infile)
	imp = Opener().openImage(infile)
	print imp
	print "scalex = %f; scaley = %f ; scalez = %f" % (scalex,scaley,scalez)
	
	# Rescale
	cal = imp.getCalibration()
	iml = ImgLib.wrap(imp)
	scaledimg = Scale3D(iml, scalex, scaley, scalez)
	imp2=ImgLib.wrap(scaledimg)
	
	# find range of pixel values for scaled image
	from mpicbg.imglib.algorithm.math import ComputeMinMax
	# (for imglib2 will be: net.imglib2.algorithm.stats)
	minmax=ComputeMinMax(scaledimg)
	minmax.process()
	(min,max)=(minmax.getMin().get(),minmax.getMax().get())
	# Make a copy of the stack (converting to 8 bit as we go)
	stack = ImageStack(imp2.width, imp2.height)
	print "min = %e, max =%e" % (min,max)
	for i in xrange(1, imp2.getNSlices()+1):
		imp2.setSliceWithoutUpdate(i)
		ip=imp2.getProcessor()
		# set range
		ip.setMinAndMax(min,max)
		stack.addSlice(str(i), ip.convertToByte(True))
	
	# save copy of calibration info
	cal=imp.getCalibration()
	# close original image
	imp.close()
	# make an image plus with the copy
	scaled = ImagePlus(imp2.title, stack)
	
	# Deal with calibration info which didn't seem to come along for the ride
	cal.pixelWidth/=scalex
	cal.pixelHeight/=scaley
	cal.pixelDepth/=scalez
	scaled.setCalibration(cal)
	print "dx = %f; dy=%f; dz=%f" % (cal.pixelWidth,cal.pixelHeight,cal.pixelDepth)
	
	intif=infile+".tif"
	outtif=infile+"-filtered.tif"
	if anisofilter.upper() != 'FALSE':
		print("saving input file as "+intif)
		f=FileSaver(scaled)
		f.saveAsTiffStack(intif)
		scaled.close()
		# anisotropic filtering
		anisopts="-scanrange:10 -tau:2 -nsteps:2 -lambda:0.1 -ipflag:0 -anicoeff1:1 -anicoeff2:0 -anicoeff3:0"
		anisopts=anisopts+" -dx:%f -dy:%f -dz:%f" % (cal.pixelWidth,cal.pixelHeight,cal.pixelDepth)

		if sys.version_info > (2, 4):
			#for testing
			# subprocess.check_call(["cp",intif,outtif])
			subprocess.check_call([anisofilter]+anisopts.split(' ')+[intif,outtif])
		else:
			os.system(" ".join([anisofilter]+anisopts.split(' ')+[intif,outtif]))
		# Open anisofilter output back into Fiji
		print("Opening output tif: "+outtif)
		scaled = Opener().openImage(outtif)
		scaled.setCalibration(cal)
	
	# Hessian (tubeness)
	print("Running tubeness")
	if(runtube):
		tp=TubenessProcessor(1.0,False)
		result = tp.generateImage(scaled)
		IJ.run(result, "8-bit","")
	else:
		result=scaled
	# Save out file
	fileName, fileExtension = os.path.splitext(outfile)
	print("Saving as "+fileExtension+": "+outfile)
	if fileExtension.lower()=='.nrrd':
		nw=Nrrd_Writer()
		nw.setNrrdEncoding("gzip")
		nw.save(result,outfile)
	else:
		# Save to PIC
		IJ.run(result,"Biorad ...", "biorad=["+outfile+"]")
	scaled.close()
	result.close()
Example #30
	def exit(self):
		ImagePlus.removeImageListener(self)
		self.closeImage()
		self.closeMainWindow()
Example #31
redgreen = ops.add(red32, green32)
display.createDisplay("redgreen", data.create(redgreen))

# make a copy of the red + green image
copy = redgreen.copy()
# wrap as ImagePlus
imp = ImageJFunctions.wrap(copy, "wrapped")

# create and call background subtractor
bgs = BackgroundSubtracter()
bgs.rollingBallBackground(imp.getProcessor(), 50.0, False, False, True, True,
                          True)

# wrap as Img and display
iplus = ImagePlus("bgs", imp.getProcessor())
print type(imp)
imgBgs = ImageJFunctions.wrapFloat(iplus)
display.createDisplay("back_sub", data.create(ImgPlus(imgBgs)))

kernel = DetectionUtils.createLoGKernel(3.0, 2, array([1.0, 1.0], 'd'))

print type(kernel)
print type(imgBgs)
print type(red32.getImg())

log = ops.convolve(ops.create(dimensions2D, FloatType()), imgBgs, kernel)
display.createDisplay("log", data.create(ImgPlus(log)))

otsu = ops.run("threshold", ops.create(dimensions2D, BitType()), log, Otsu())
Example #32
        os.mkdir(z_proj_dir+'/green')
if red:
    if not os.path.exists(z_proj_dir+'/red'):
        os.mkdir(z_proj_dir+'/red')
if yellow:
    if not os.path.exists(z_proj_dir+'/yellow'):
        os.mkdir(z_proj_dir+'/yellow')
if not os.path.exists(merge_z_dir):
    os.mkdir(merge_z_dir)
for stage_pos in im_series:
    print 'current stage position ID: ' + stage_pos
    imps_for_comp = [None, None, None, None, None, None, None]
    if blue:
        cyan_id = color_sublists['bfp'][stage_pos]
        print 'blue image file: ' + cyan_id
        c_imp = ImagePlus(img_dir+'/'+cyan_id)
        imps_for_comp[4] = c_imp
    if cyan:
        cyan_id = color_sublists['cfp'][stage_pos]
        print 'cyan image file: ' + cyan_id
        c_imp = ImagePlus(img_dir+'/'+cyan_id)
        imps_for_comp[4] = c_imp
    if green:
        green_id = color_sublists['gfp'][stage_pos]
        print 'green image file: ' + green_id
        g_imp = ImagePlus(img_dir+'/'+green_id)
        imps_for_comp[1] = g_imp
    if yellow:
        green_id = color_sublists['yfp'][stage_pos]
        print 'yellow image file: ' + green_id
        g_imp = ImagePlus(img_dir+'/'+green_id)
    stack2 = imp2.getImageStack()

    #this is the destination (we will display as composite)
    newStack = ImageStack(imp1.width, imp1.height)

    #fuse the 2 stacks into one interwoven stack
    for i in xrange(1, imp1.getNSlices() + 1):
        # Get the ColorProcessor slice at index i
        cp1 = stack1.getProcessor(i)
        cp2 = stack2.getProcessor(i)
        # Add both to the new stack
        newStack.addSlice(None, cp1)
        newStack.addSlice(None, cp2)

    # Create a new ImagePlus with the new stack newStack
    newImp = ImagePlus("my composite", newStack)
    newImp.setCalibration(imp1.getCalibration().copy())

    # Tell the ImagePlus to represent the slices in its stack
    # in hyperstack form, and open it as a CompositeImage:
    nChannels = 2  # two color channels
    nSlices = stack1.getSize()  # the number of slices of the original stack
    nFrames = 1  # only one time point
    newImp.setDimensions(nChannels, nSlices, nFrames)
    # CompositeImage comes from the ij package (from ij import CompositeImage)
    comp = CompositeImage(newImp, CompositeImage.COMPOSITE)
    comp.show()

    imp1 = comp
else:
    print "Opening single channel:"
def snapshot(imp, c, z, f, x, y, L):
	'''Take a L x L snapshot centered at (x,y) at channel c, slice z and frame f

	Parameters:
		imp: an ImagePlus object for processing
		c, z, f: specified channel, slice (z-dimension) and frame coordinate
		x, y: targeted center position to take the snapshot
		L: desired edge lengths of the snapshot in pixels

	Returns:
		cropped: an ImagePlus object of the snapshot image
	'''
	# Import inside function to limit scope
	from ij import ImagePlus
	
	# Get the dimensions of the images
	cN = imp.getNChannels()
	zN = imp.getNSlices()
#	fN = imp.getNFrames()

	# Convert specified position (c,z,f) to index assuming hyperstack order is czt (default)
	sliceIndex = int(cN * zN * (f-1) + cN * (z-1) + c)
	imp.setSlice(sliceIndex)
	ipTemp = imp.getProcessor()
	impTemp = ImagePlus('temp', ipTemp)

	# Expand the image by half of L to make sure final duplicate is the same size
	# when requested (x,y) could be within L pixels of the edge
	wOld = ipTemp.getWidth()
	hOld = ipTemp.getHeight()
	wNew = wOld + int(L)
	hNew = hOld + int(L)
	ipNew = ipTemp.createProcessor(wNew, hNew)
	ipNew.setColor(0) # 0 = black color
	ipNew.insert(ipTemp, int(L/2), int(L/2))
	imgNew = ImagePlus('new', ipNew)
	
	# Use the specified center coordinate x, y and the target side length to create ROI
	imgNew.setRoi(int(x), int(y), int(L), int(L))
#	cropped = ImagePlus('cropped', ipNew.crop())
	cropped = imgNew.crop()
	
	impTemp.close()
	imgNew.close()
	
	return cropped
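A hedged example of calling snapshot() (all values are made up): take a 64 x 64 pixel crop around (x=200, y=150) at channel 1, slice 5, frame 1 of an already opened hyperstack imp.

snap = snapshot(imp, 1, 5, 1, 200, 150, 64)
snap.setTitle("snapshot_c1_z5_t1")  # hypothetical title
snap.show()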
#DRAW_DOT = False
DRAW_DOT = True # use if drawing a dot at the center of tracking

#DRAW_BOX = False
DRAW_BOX = True # use if drawing a box around the center of tracking

ANNOTATE_Z = False
#ANNOTATE_Z = True # use if drawing a string to annotate the z position

TEST_RUN = False
#TEST_RUN = True # use for testing run (2 tracks)

# Reading in the image file for taking snapshots
tifFile = input_folder + tif_filename
imp = ImagePlus(tifFile)

#----------------------------------------------------------
# For running all ZOOM_BY_Z_DEPTH and DRAW_DOT combinations
#----------------------------------------------------------

#boolList = [False, True]
#for ZOOM_BY_Z_DEPTH in boolList:
#	for DRAW_DOT in boolList:
#		# Running the heavy-lifting function to save all snapshot series
#		output_folder = input_folder + outputPrefix + '-zZoom_' + str(ZOOM_BY_Z_DEPTH) + '-drawDot_' + str(DRAW_DOT) + '-' + time_stamp + '/'
#		if not os.path.exists(output_folder):
#			os.makedirs(output_folder)
#		save_snap_shot_seq(imp, xml_filename, input_folder, output_folder, ZOOM_BY_Z_DEPTH, Z1_CLOSE_TO_COVERSLIP, DRAW_DOT, TEST_RUN, z_number_to_project, L)

#----------------------------------------------------------
Example #36
            for ch in range(0, 2):  #Loop over channels
                norm = 1 / (apicalimps[ch].getProcessor().getStats().mean
                            ) * 1000  #normalize to mean intensity of image
                apicalimps[ch].setRoi(
                    int(centroidX[cell]) - 30,
                    int(centroidY[cell]) - 30, 60, 60)
                toAdd = apicalimps[ch].crop().getProcessor()
                toAdd.multiply(norm)
                stacks[ch].addSlice(toAdd)
    except:
        print('There was a problem with sample ' + sample_label)
        pass

#Merge channels and save image stacks

measureAll = ImagePlus('measure_all', stacks[0])
detectAll = ImagePlus('detect_all', stacks[1])

measureAvg = zp.run(measureAll, 'avg')
detectAvg = zp.run(detectAll, 'avg')

compositeAvg = cm.mergeChannels([measureAvg, detectAvg],
                                False)  #this we'll save

compositeAll = cm.mergeChannels([measureAll, detectAll], False)

IJ.saveAs(compositeAll, "Tiff", os.path.join(outputdir, 'Composite_All.tif'))
print('Saved ' + os.path.join(outputdir, 'Composite_All.tif'))
IJ.saveAs(compositeAvg, "Tiff", os.path.join(outputdir, 'Composite_AVG.tif'))
print('Saved ' + os.path.join(outputdir, 'Composite_AVG.tif'))
def main():

    Interpreter.batchMode = True

    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"

    #import pdb; pdb.set_trace()
    print "Loading images..."
    filenames = enumerate_filenames(pattern)
    num_channels = len(filenames)
    num_images = len(filenames[0])
    image = Opener().openImage(filenames[0][0])
    width = image.width
    height = image.height
    image.close()

    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)

    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32);
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32);

    print("\n\n")

    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="

        stack = ImageStack(width, height, num_images)
        opener = Opener()
        for i, filename in enumerate(filenames[channel]):
            print "Loading image %d/%d" % (i + 1, num_images)
            image = opener.openImage(filename)
            stack.setProcessor(image.getProcessor(), i + 1)
        input_image = ImagePlus("input", stack)

        # BaSiC seems to require the input image is actually the ImageJ
        # "current" image, otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()

        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()

        print("\n\n")

    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()

    print "Done!"
Example #38
imp = IJ.getImage()
stkA = ArrayList()
for i in range(1, 4):
#for i in range(1, imp.getNFrames()):
   e4d = Extractfrom4D()
   e4d.setGstarttimepoint(i)
   IJ.log("current time point" + str(i))
   aframe = e4d.coreheadless(imp, 3)
   ortho = XYZMaxProject(aframe)
   orthoimp = ortho.getXYZProject()
   stkA.add(orthoimp)
   #orthoimp.show()
stk = ImageStack(stkA.get(0).getWidth(), stkA.get(0).getHeight())
for item in stkA:
   stk.addSlice("slcie", item.getProcessor())
out = ImagePlus("out", stk)
#out.setCalibration(imp.getCalibration().copy())

IJ.run(out, "Grays", "");
IJ.run(out, "RGB Color", "");

# load data from file
filepath = '/Users/miura/Dropbox/Mette/20_23h/20_23hrfull_corrected_1_6_6_netdispZ40.csv'
filename = os.path.basename(filepath)
newfilename = os.path.join(os.path.splitext(filename)[0], '_plotStack.tif')
out.setTitle(os.path.basename(filename)+'_OutStack.tif')

PLOT_ONLY_IN_FRAME1 = False
data = readCSV(filepath)
calib = imp.getCalibration()
xscale = calib.pixelWidth
Example #39
finally:
  # This 'finally' block executes even in the event of an error
  # guaranteeing that the executing threads will be shut down no matter what.
  exe.shutdown()


# ISSUE: Does not work with IntervalView from View.rotate,
# so img1 and img2 were copied into ArrayImg
# (The error would occur when iterating vol4d pixels beyond the first element in the 4th dimension.)
vol4d = Views.stack([img1] + steps + [img2])

# Convert 1 -> 255 for easier volume rendering in 3D Viewer
#compute(mul(vol4d, 255)).into(vol4d)
for t in Views.iterable(vol4d):
  if 0 != t.getByte():
    t.setReal(255)

# Construct an ij.VirtualStack from vol4d
virtualstack = IL.wrap(vol4d, "interpolations").getStack()
imp = ImagePlus("interpolations", virtualstack)
imp.setDimensions(1, vol4d.dimension(2), vol4d.dimension(3))
imp.setDisplayRange(0, 255)
# Show as a hyperstack with 4 dimensions, 1 channel
com = CompositeImage(imp, CompositeImage.GRAYSCALE)
com.show()

# Show rendered volumes in 4D viewer: 3D + time axis
univ = Image3DUniverse()
univ.show()
univ.addVoltex(com)
Example #40
def register_hyperstack_subpixel(imp, channel, shifts, target_folder, virtual):
  """ Takes the imp, determines the x,y,z drift for each pair of time points, using the preferred given channel,
  and outputs as a hyperstack.
  The shifted image is computed using TransformJ allowing for sub-pixel shifts using interpolation.
  This is quite a bit slower than just shifting the image by full pixels, as done in the register_hyperstack() function above.
  However, it significantly improves the result by removing pixel jitter.
  """
  # Compute bounds of the new volume,
  # which accounts for all translations:
  minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
  # Make shifts relative to new canvas dimensions
  # so that the min values become 0,0,0
  for shift in shifts:
    shift.x -= minx
    shift.y -= miny
    shift.z -= minz
  # new canvas dimensions:
  width = int(imp.width + maxx - minx)
  height = int(maxy - miny + imp.height)
  slices = int(maxz - minz + imp.getNSlices())

  print "New dimensions:", width, height, slices
  
  # prepare stack for final results
  stack = imp.getStack()
  if virtual is True: 
    names = []
  else:
    registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
  
  # prepare empty slice for padding
  empty = imp.getProcessor().createProcessor(width, height)

  IJ.showProgress(0)

  # get raw data as stack
  stack = imp.getStack()

  # loop across frames
  for frame in range(1, imp.getNFrames()+1):
      
    IJ.showProgress(frame / float(imp.getNFrames()+1))
    fr = "t" + zero_pad(frame, len(str(imp.getNFrames()))) # for saving files in a virtual stack
    
    # get and report current shift
    shift = shifts[frame-1]
    print "frame",frame,"correcting drift",-shift.x-minx,-shift.y-miny,-shift.z-minz
    IJ.log("    frame "+str(frame)+" correcting drift "+str(round(-shift.x-minx,2))+","+str(round(-shift.y-miny,2))+","+str(round(-shift.z-minz,2)))

    # loop across channels
    for ch in range(1, imp.getNChannels()+1):      
      
      tmpstack = ImageStack(width, height, imp.getProcessor().getColorModel())

      # get all slices of this channel and frame
      for s in range(1, imp.getNSlices()+1):
        ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
        ip2 = ip.createProcessor(width, height) # potentially larger
        ip2.insert(ip, 0, 0)
        tmpstack.addSlice("", ip2)

      # Pad the end (in z) of this channel and frame
      for s in range(imp.getNSlices(), slices):
        tmpstack.addSlice("", empty)

      # subpixel translation
      imp_tmpstack = ImagePlus("", tmpstack)
      imp_translated = translate_single_stack_using_imglib2(imp_tmpstack, shift.x, shift.y, shift.z)
      
      # Add translated frame to final time-series
      translated_stack = imp_translated.getStack()
      for s in range(1, translated_stack.getSize()+1):
        ss = "_z" + zero_pad(s, len(str(slices)))
        ip = translated_stack.getProcessor(s).duplicate() # duplicate is important as otherwise it will only be a reference that can change its content  
        if virtual is True:
          name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
          names.append(name)
          currentslice = ImagePlus("", ip)
          currentslice.setCalibration(imp.getCalibration().copy())
          currentslice.setProperty("Info", imp.getProperty("Info"));
          FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
        else:
          registeredstack.addSlice("", ip)    
          
  IJ.showProgress(1)

  if virtual is True:
    # Create virtual hyper stack with the result
    registeredstack = VirtualStack(width, height, None, target_folder)
    for name in names:
      registeredstack.addSlice(name)
    registeredstack_imp = ImagePlus("registered time points", registeredstack)
    registeredstack_imp.setDimensions(imp.getNChannels(), slices, imp.getNFrames())
    registeredstack_imp.setCalibration(imp.getCalibration().copy())
    registeredstack_imp.setOpenAsHyperStack(True)
  else:
    registeredstack_imp = ImagePlus("registered time points", registeredstack)
    registeredstack_imp.setCalibration(imp.getCalibration().copy())
    registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
    registeredstack_imp.setDimensions(imp.getNChannels(), slices, imp.getNFrames())
    registeredstack_imp.setOpenAsHyperStack(True)
    if 1 == registeredstack_imp.getNChannels():
      return registeredstack_imp
     
  #IJ.log("\nHyperstack dimensions: time frames:" + str(registeredstack_imp.getNFrames()) + ", slices: " + str(registeredstack_imp.getNSlices()) + ", channels: " + str(registeredstack_imp.getNChannels()))

  # Else, as composite
  mode = CompositeImage.COLOR;
  if isinstance(imp, CompositeImage):
    mode = imp.getMode()
  else:
    return registeredstack_imp
  return CompositeImage(registeredstack_imp, mode)
Example #41
a different LUT to each slice in a stack...

"""

from ij import IJ, ImagePlus, ImageStack

# start clean
IJ.run("Close All")

# returned is an array of ImagePlus, in many cases just one imp.
src = img_dir + "/VS_demo.tif"
stack = IJ.openImage(src)
stack.show()

ip_red = stack.getProcessor(1)
imp_red = ImagePlus('red', ip_red)
imp_red.show()
imp_r = imp_red.duplicate()
# IJ.saveAs(imp_red, "Tiff", img_dir + "/R.tif")

ip_green = stack.getProcessor(2)
imp_green = ImagePlus('green', ip_green)
imp_green.show()
imp_g = imp_green.duplicate()

ip_blue = stack.getProcessor(3)
imp_blue = ImagePlus('blue', ip_blue)
imp_blue.show()
imp_b = imp_blue.duplicate()

ip_ir = stack.getProcessor(4)
    print("STICS completed for ROI " + str(i + 1) + " of " +
          str(num_window_x * num_window_y))

IJ.log("2D STICS completed")
IJ.log("Window size: " + str(width_sub) + " pixels")
IJ.log("Maximum time lag analyzed: " + str(max_cycle_length) + " frames")

#savepath = IJ.getDirectory("")
#imp = IJ.getImage()
#ssize = imp.getStackSize()
#titleext = imp.getTitle()
title = os.path.splitext(
    filename)[0] + "_STICS_map"  #os.path.splitext(titleext)[0]

save_imp = ImagePlus(title, result_stack)
save_imp.show()

dimA = save_imp.getDimensions()
for c in range(dimA[2]):
    for z in range(dimA[3]):
        for t in range(dimA[4]):
            save_imp.setPosition(c + 1, z + 1, t + 1)
            #print c, z, t
            numberedtitle = \
            title + "_c" + IJ.pad(c, 2) + \
            "_z" + IJ.pad(z, 4) + \
            "_t" + IJ.pad(t, 4) + ".tif"
            stackindex = save_imp.getStackIndex(c + 1, z + 1, t + 1)
            aframe = ImagePlus(numberedtitle,
                               save_imp.getStack().getProcessor(stackindex))
Example #43
def fretCalculations(imp1, nFrame, donorChannel, acceptorChannel, acceptorChannel2, table, gfx1, gfx2, gfx3, gfx4, gfx5, originalTitle):
	donorImp=extractChannel(imp1, donorChannel, nFrame)
	acceptorImp=extractChannel(imp1, acceptorChannel, nFrame)
	acceptorImp2=extractChannel(imp1, acceptorChannel2, nFrame)
	
	#push donor and acceptor channels to gpu and threshold them both to remove saturated pixels
	
	gfx4=clij2.push(donorImp)
	gfx5=clij2.push(acceptorImp)
	gfx6=clij2.create(gfx5)
	
	clij2.threshold(gfx4,gfx2, maxIntensity)
	clij2.binarySubtract(gfx3, gfx2, gfx6)
	
	clij2.threshold(gfx5,gfx2, maxIntensity)
	clij2.binarySubtract(gfx6, gfx2, gfx3)
	
	clij2.threshold(gfx3,gfx6, 0.5)
	clij2.multiplyImages(gfx6, gfx4, gfx2)
	clij2.multiplyImages(gfx6, gfx5, gfx4)
	
	
	
	
	gfx6=clij2.push(acceptorImp2)
	
	#donor is gfx2, acceptor FRET is gfx4, segment channel (acceptor normal) is gfx6
	
	results=ResultsTable()
	clij2.statisticsOfBackgroundAndLabelledPixels(gfx2, gfx1, results)
	
	donorChIntensity=results.getColumn(13)
	results2=ResultsTable()
	clij2.statisticsOfBackgroundAndLabelledPixels(gfx4, gfx1, results2)
	acceptorChIntensity=results2.getColumn(13)
	
	results3=ResultsTable()
	clij2.statisticsOfBackgroundAndLabelledPixels(gfx6, gfx1, results3)
	
	#calculate the fret ratios, removing any ROI with intensity of zero
	FRET =[]
	
	for i in xrange(len(acceptorChIntensity)):
		if (acceptorChIntensity[i]>0) and (donorChIntensity[i]>0):
			#don't write in the zeros to the results
			FRET.append((1000*acceptorChIntensity[i]/donorChIntensity[i]))
	
			table.incrementCounter()
			table.addValue("Frame (Time)", nFrame)
			table.addValue("Label", i)
			table.addValue("Emission ratio", acceptorChIntensity[i]/donorChIntensity[i])

			table.addValue("Mean donor emission", results.getValue("MEAN_INTENSITY",i))
			table.addValue("Mean acceptor emission (FRET)", results2.getValue("MEAN_INTENSITY",i))
			table.addValue("Mean acceptor emission", results3.getValue("MEAN_INTENSITY",i))


			
			table.addValue("Sum donor emission", donorChIntensity[i])
			table.addValue("Sum acceptor emission (FRET)", acceptorChIntensity[i])
			table.addValue("Sum acceptor emission", results3.getValue("SUM_INTENSITY",i))

			
			table.addValue("Volume", cal.pixelWidth * cal.pixelHeight * cal.pixelDepth * results.getValue("PIXEL_COUNT",i))
			table.addValue("Pixel count", results.getValue("PIXEL_COUNT",i))
			table.addValue("x", cal.pixelWidth*results.getValue("CENTROID_X",i))
			table.addValue("y", cal.pixelHeight*results.getValue("CENTROID_Y",i))
			table.addValue("z", cal.pixelDepth*results.getValue("CENTROID_Z",i))
			table.addValue("File name", originalTitle)
		else:
			#must write in the zeros as this array is used to generate the map of emission ratios
			FRET.append(0)
			
	
	
	
	table.show("Results of " + originalTitle)
	
	FRET[0]=0
	FRETarray= array( "f", FRET)
	fp= FloatProcessor(len(FRET), 1, FRETarray, None)
	FRETImp= ImagePlus("FRETImp", fp)
	gfx4=clij2.push(FRETImp)
	clij2.replaceIntensities(gfx1, gfx4, gfx5)
	maxProj=clij2.create(gfx5.getWidth(), gfx5.getHeight(), 1)
	clij2.maximumZProjection(gfx5, maxProj)
	
	
	#pull the images
	
	FRETimp2=clij2.pull(gfx5)
	FRETProjImp=clij2.pull(maxProj)
	labelImp = clij2.pull(gfx1)

	clij2.clear()
	donorImp.close()
	acceptorImp.close()
	acceptorImp2.close()
	
	return table, FRETimp2, FRETProjImp, labelImp
Example #44
# Capture RGB movie from stack, including ROIs and overlays
	
from ij import IJ, ImageStack, ImagePlus
from ij.process import ColorProcessor
from java.awt.image import BufferedImage as BI
from java.lang import Thread

f = ImagePlus.getDeclaredField("listeners")
f.setAccessible(True)
listeners = f.get(None)

imp = IJ.getImage()
canvas = imp.getWindow().getCanvas()

# Define range of slices to capture
slices = xrange(1, imp.getNSlices() + 1)

w, h = canvas.getWidth(), canvas.getHeight()

capture = ImageStack(w, h)

for i in slices:
  imp.setSlice(i)
  for l in listeners:
    l.imageUpdated(imp)
  Thread.sleep(50) # wait for repaints to happen, triggered by listeners, if any.
  bi = BI(w, h, BI.TYPE_INT_ARGB)
  g = bi.createGraphics()
  canvas.paint(g)
  g.dispose()
  capture.addSlice(ColorProcessor(bi))
Example #45
    # (watershed segmentation), some segmentation lines
    # may be improperly placed if local maxima are suppressed
    # by the tolerance.
    mf = MaximumFinder()
    if output_type == 'Single_Points':
        output_type_param = mf.SINGLE_POINTS
    elif output_type == 'Maxima_Within_Tolerance':
        output_type_param = mf.IN_TOLERANCE
    elif output_type == 'Segmented_Particles':
        output_type_param = mf.SEGMENTED
    elif output_type == 'List':
        output_type_param = mf.LIST
    elif output_type == 'Count':
        output_type_param = mf.COUNT
    # Get a new byteProcessor with a normal (uninverted) LUT where
    # the marked points are set to 255 (Background 0). Pixels outside
    # of the roi of the input image_processor_copy are not set. No
    # output image is created for output types POINT_SELECTION, LIST
    # and COUNT.  In these cases findMaxima returns null.
    byte_processor = mf.findMaxima(image_processor_copy, noise_tolerance,
                                   ImageProcessor.NO_THRESHOLD,
                                   output_type_param, exclude_edge_maxima,
                                   False)
    # Invert the image or ROI.
    byte_processor.invert()
    if output_type == 'Segmented_Particles' and not light_background:
        # Invert the values in this image's LUT (indexed color model).
        byte_processor.invertLut()
    image_plus = ImagePlus("output", byte_processor)
    IJ.saveAs(image_plus, output_datatype, tmp_output_path)
Example #46
A = SimpleMatrix(s)  #ejml matrix
B = SimpleMatrix.transpose(A.divide(A.normF()))  # normalize source

R = SimpleMatrix.transpose(B.svd().getU().negative())  # Rotation matrix from SVD decomposition of source color
c_suv = R.mult(D).getMatrix().data  # Rotate RGB

#Fiddly stuff to divide data array into SUV components
L1 = [
    c_suv[i:i + imp2.height * imp2.width]
    for i in range(0, len(c_suv), imp2.height * imp2.width)
]

# ||J||, two-channel diffuse color vector. See Mallick et al., Beyond Lambert: Reconstructing Specular Surfaces Using Color
J1 = [val * val for val in L1[1]]
J2 = [val * val for val in L1[2]]
J = [math.sqrt(x + y) for x, y in zip(J1, J2)]

# arrays into images
S = ImagePlus("S", FloatProcessor(imp2.height, imp2.width, L1[0]))
U = ImagePlus("U", FloatProcessor(imp2.height, imp2.width, L1[1]))
V = ImagePlus("V", FloatProcessor(imp2.height, imp2.width, L1[2]))
JNorm = ImagePlus("two channel diffuse color vector",
                  FloatProcessor(imp2.height, imp2.width, J))

#S.show()
#U.show()
#V.show()
JNorm.show()  # show the two channel diffuse color vector; uncomment the S/U/V .show() calls above for the SUV components
def testImageJ(imp):
    return ImagePlus("HSB stack", imp.getProcessor().getHSBStack())
Example #48
if not os.path.exists(resultPrefix):
    resultPrefix = IJ.getDirectory("home")
resultPrefix = resultPrefix + "/Results"

# for all the frames in the movie, run  "Analyze Skeleton (2D/3D)" (maybe
# later in multiple threads)

newStack = ImageStack(imp.getWidth(), imp.getHeight())

# getNFrames not getNSlices since input data is a 2D time series stack
# NOT a 3D stack.
frameCount = imp.getNFrames()

# Remember: a python range (1, 10) is the numbers 1 to 9!
for i in range(1, frameCount + 1):
    slice = ImagePlus(str(i), stack.getProcessor(i))
    # Execute plugin exactly on this slice i
    IJ.run(slice, "Analyze Skeleton (2D/3D)", "")
    image = WindowManager.getCurrentImage()
    IJ.saveAs("Measurements", resultPrefix + str(i) + ".xls")

    # concatenate the new tagged image onto the end of the tagged image stack we
    # want in the end, but not if it's the 1st one!
    newStack.addSlice("", image.getProcessor().getPixels())
    if i == 1:
        newStack.setColorModel(image.getProcessor().getColorModel())
    image.close()

image = ImagePlus("TaggedMovie", newStack)
image.show()
Example #49
yoffset = imp.getHeight()
ip = out.getProcessor()

size = 5
off = int(Math.floor(size/2) + 1)
for i in range(len(data)):
	if i < 1:
		continue
#	if i > 20:
#		break
	frame = float(data.get(i)[2])
#	nextframe = float(data.get(i+1)[2])
#	if nextframe - frame < 1:
#		print str(i), 'trackend'
#	else:
		#print str(i), 'in track'
	x1 = int(round(float(data.get(i)[13]) / xscale))
	y1 = int(round(float(data.get(i)[14]) / xscale))
	z1 = int(round(float(data.get(i)[15]) / xscale))
	ip.setColor(Color(255, 100, 100))
	ip.drawOval(x1-off, y1-off, size, size)
	ip.drawOval(x1-off, yoffset + z1 -off, size, size)
	ip.drawOval(xoffset + z1 -off, y1 - off, size, size)		
# plot 
outimp = ImagePlus(os.path.basename(filename)+'_Out.tif', ip)
outimp.show()




# Assumes all files have the same size
ImStack = None
for root, directories, filenames in os.walk(sourceDir):
	for filename in filenames:
		# Skip non-TIFF files
		if not filename.endswith(".tif"):
			continue
		path = os.path.join(root, filename)
		# Upon finding the first image, initialize the VirtualStack
		if ImStack is None:
			imp = IJ.openImage(path)
			ImStack = VirtualStack(imp.width, imp.height, None, sourceDir)
		# Add a slice, relative to the sourceDir
		ImStack.addSlice(path[len(sourceDir):])

OnscreenImage = ImagePlus(sourceDir,ImStack)
OnscreenImage.show()

print "Generating MIP, waiting..."
outimp = maxZprojection(OnscreenImage)
outimp.show()
print "Max projection generated"

#@ String (label="Would you like to save the max proj? y = save, anything else = don't save", description="Save as") SaveQuery
	#@output String greeting
print SaveQuery
savename = sourceDir+"_MIP.tif"
if SaveQuery.upper() == 'Y':

	#@ String (label="Would you like to rename the max proj? (if not, will use directory name+MIP). y = yes, otherwise no", description="Save as rename") RenameQuery
	#@output String greeting
Example #51
0
        else:
            xradius = gd.getNextNumber()
            yradius = gd.getNextNumber()
            zradius = gd.getNextNumber()
 
    return canProceed


# Get active image and store it in the variable 'img'
img = WindowManager.getCurrentImage()

# Can we proceed?
if getSettings(img):

    # Get the image stack within the ImagePlus of img
    stack = img.getStack()

    # Instantiate ij.plugin.Filters3D
    f3d = Filters3D()

    # Retrieve filtered stack
    newStack = f3d.filter(stack, f3d.MEDIAN, xradius, yradius, zradius)

    # Construct a new ImagePlus from the stack
    fImg = ImagePlus("Filtered_"+img.getTitle(), newStack);

    # Other processing could go here (...)
    #IJ.run(fImg, "Shen-Castan Edge Detector", "coefficient=0.50");

    # Display result
    fImg.show()
Example #52
0
def from32To8Bit(imPath):
	im = IJ.openImage(imPath)
	im.getProcessor().setMinAndMax(0,255)
	im = ImagePlus(im.getTitle(),im.getProcessor().convertToByteProcessor())
	IJ.save(im,imPath)
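
# Example usage (the path below is purely illustrative):
# from32To8Bit("/path/to/a_32bit_image.tif")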
Example #53
0
def PrepareDatabase(minw, maxw, baseDir, aspectRatio, majorWidth, majorHeight):
	outputpath = baseDir + "/" + str(majorWidth) + "_" + str(majorHeight) + "_orig.tif"
	#initialize stacks and labels
	stackScaled = []
	stackOrig = ImageStack(majorWidth, majorHeight)
	imageNames = []
	for i in range(minw, maxw+1):
		stackScaled.append(ImageStack(i, int(round(i/aspectRatio, 0))))
		imageNames.append('')

	counter = 0

	# initialize zip file for originals
	zf = zipfile.ZipFile(baseDir + "/originals.zip", mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=1)
	zf.writestr('from_string.txt', 'hello')
	zf.close()
	zf = zipfile.ZipFile(baseDir + "/originals.zip", mode='a', compression=zipfile.ZIP_DEFLATED, allowZip64=1)

	for root, dirs, files in os.walk(str(baseDir)):
		for f1 in files:
			if f1.endswith(".jpg") or f1.endswith(".jpe") or f1.endswith(".jpeg"):
				id = root + "/" +  f1
				IJ.redirectErrorMessages()
				IJ.redirectErrorMessages(1)
				imp = IJ.openImage(id)
				if imp is None:
					print "Couldn\'t open image from file:", id
					continue
				# skip non-RGB images
				if imp.getProcessor().getNChannels() != 3:
					print "Converting non RGB image:", id
					if imp.getStackSize() > 1:
						StackConverter(imp).convertToRGB()
					else:
						ImageConverter(imp).convertToRGB()
				#skip images with different aspect ratio
				width = imp.getWidth()
				height = imp.getHeight()
				ratio = round(float(width)/float(height), 2) # this makes the ratio filtering approximate; minor variations in image dimensions are ignored
				if ratio != aspectRatio:
					IJ.log("Skipping image of size: " + str(width) + "," + str(height))
					continue
				# now scale the image within a given range
				scale = Scale(imp.getProcessor())
				IJ.log("Scaling image " + str(counter) + " " + str(id))
				for i in range(minw, maxw+1):
					stackScaled[i-minw].addSlice(None, ScaleImageToSize(scale, i, int(round(i/aspectRatio, 0))))
					imageNames[i-minw] += str(id) + ";"
				# save the originals to a temp directory
				scaledOrig = ImagePlus(None, ScaleImageToSize(scale, majorWidth, majorHeight))
				SaveToZip(zf, scaledOrig, baseDir, counter)
				counter += 1
	zf.close()
	# save the stacks
	for i in range(minw, maxw+1):
		impScaled = ImagePlus(str(minw) + "_" + str(int(round(i/aspectRatio, 0))), stackScaled[i-minw])
		impScaled.show()
		#print imageNames
		impScaled.setProperty('Info', imageNames[i-minw][:-1])
		fs = FileSaver(impScaled)
		filepath = baseDir + "/" + str(i) + "_" + str(int(round(i/aspectRatio, 0))) + ".tif"
		IJ.log("Saving output stack" + str(filepath))
		fs.saveAsTiffStack(filepath)
		#IJ.save(impScaled, filepath);
		IJ.log("Done")
Example #54
0
def CreateCover(ip, width, height, dbpath):
    # split input image into appropriate tiles
    stackt = SplitImage(ip, width, height)
    impt = ImagePlus("template", stackt)
    nSlicestmp = impt.getNSlices()

    # open the preprocessed database
    print dbpath
    impd = IJ.openImage(dbpath)
    stackd = impd.getImageStack()
    nSlicesdb = impd.getNSlices()

    #associate index with image names
    imageNames = impd.getProperty('Info')
    imageList = imageNames.split(';')

    # set up preview output
    outputip = ColorProcessor(ip.width, ip.height)
    outputimp = ImagePlus("output", outputip)
    outputimp.show()

    cols = ip.width / width
    rows = ip.height / height

    print str(cols) + "," + str(rows)

    x = 0
    y = 0

    arrays = [None, None]  # a list of two elements
    tileNames = {}
    tileIndex = {}
    placed = {}
    used = {}

    while len(placed) < nSlicestmp:
        randomTileIndex = random.randint(1, nSlicestmp)
        if randomTileIndex in placed:
            continue
        # transform to row and column coordinates
        if randomTileIndex % rows == 0:
            y = rows - 1
            x = (randomTileIndex / rows) - 1
        else:
            y = (randomTileIndex % rows) - 1
            x = int(randomTileIndex / rows)

        pixelst = stackt.getPixels(randomTileIndex)
        minimum = Float.MAX_VALUE
        #iterate through database images
        j = 1
        indexOfBestMatch = 0
        arrays[0] = pixelst
        while j < nSlicesdb:
            if j in used:
                j += 1
                continue
            arrays[1] = stackd.getPixels(j)
            diff = CoverMakerUtils.tileTemplateDifference(arrays)
            if diff < minimum:
                minimum = diff
                indexOfBestMatch = j
            j += 1
        ip = stackd.getProcessor(indexOfBestMatch)
        outputip.copyBits(ip, x * width, y * height, 0)
        used[indexOfBestMatch] = 1
        tileNames[randomTileIndex] = imageList[indexOfBestMatch - 1]
        tileIndex[randomTileIndex] = indexOfBestMatch - 1
        outputimp.draw()
        placed[randomTileIndex] = 1

    return tileNames, tileIndex, cols, rows
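
# SplitImage() is not defined in this snippet; a minimal sketch (an assumption,
# consistent with the column-major tile ordering used above, and assuming
# ImageStack is imported) could be:
def SplitImage(ip, width, height):
    # chop the processor into width x height tiles, column by column
    stack = ImageStack(width, height)
    for x in range(0, ip.width - ip.width % width, width):
        for y in range(0, ip.height - ip.height % height, height):
            ip.setRoi(x, y, width, height)
            stack.addSlice(None, ip.crop())
    return stack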
Example #55
0
from ij import IJ, ImagePlus  
from ij.process import FloatProcessor  
from array import zeros  
from random import random  
from ij.gui import Roi, PolygonRoi  
  
# Create a new ImagePlus filled with noise  
width = 1024  
height = 1024  
pixels = zeros('f', width * height)  
  
for i in xrange(len(pixels)):  
  pixels[i] = random()  
  
fp = FloatProcessor(width, height, pixels, None)  
imp = ImagePlus("Random", fp)  
  
# Fill a rectangular region of interest  
# with a value of 2:  
roi = Roi(400, 200, 400, 300)  
fp.setRoi(roi)  
fp.setValue(2.0)  
fp.fill()  
  
# Fill a polygonal region of interest  
# with a value of -3  
xs = [234, 174, 162, 102, 120, 123, 153, 177, 171,  
      60, 0, 18, 63, 132, 84, 129, 69, 174, 150,  
      183, 207, 198, 303, 231, 258, 234, 276, 327,  
      378, 312, 228, 225, 246, 282, 261, 252]  
ys = [48, 0, 60, 18, 78, 156, 201, 213, 270, 279,  
# Duplicate a hyperstack reducing dimensionality in the Z axis,
# opening a new stack with XYT (no Z).

from ij import IJ, ImagePlus, ImageStack

imp = IJ.getImage()
stack1 = imp.getStack()

# Fixed Z position
slice_index = imp.getSlice()  # 1-based

stack2 = ImageStack(imp.getWidth(), imp.getHeight())

for frame_index in xrange(imp.getNFrames()):
  i = frame_index * imp.getNSlices() + slice_index
  stack2.addSlice(str(frame_index), stack1.getPixels(i))

ImagePlus(imp.getTitle() + " - fixed Z=" + str(slice_index), stack2).show()
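
# Note: the flat index above assumes a single-channel stack. For a multi-channel
# hyperstack a safer variant (assuming channel 1 is the one wanted) would be:
#   i = imp.getStackIndex(1, slice_index, frame_index + 1)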
def main():
    try:
        # Retrieve valid data
        rt = Utils.getTable();
        start = time.time()

        # Retrieve x,y,t positions (all in uncalibrated units)
        x = getColumn(rt, X_POS_HEADING)
        y = getColumn(rt, Y_POS_HEADING)
        t = getColumn(rt, T_POS_HEADING)

        # Retrieve the total n. of tracks
        track_ids = getColumn(rt, ID_HEADING)
        track_ids = [int(i) for i in track_ids]
        n_tracks = track_ids[-1]
        log("Tracks to be analyzed: ", n_tracks)
    except:
        IJ.error("Invalid Results Table")
        return

    # Create "nan"-padded tables to hold results
    detail_rt = new_Table()

    # Extract individual tracks and determine the longest track
    # (i.e., the one with the highest number of rows)
    track_row = 0
    max_track_row = 0
    for i in range(0, rt.getCounter()-1):

        track_label = str(track_ids[i])
        if (track_ids[i]==track_ids[i+1]):
            dx = (x[i+1]-x[i])**2
            dy = (y[i+1]-y[i])**2
            dt = t[i+1]-t[i]
            dis = math.sqrt(dx+dy)
            vel = dis/dt
            if (track_row>max_track_row):
                max_track_row = track_row

            # Log to "detailed" table
            if (i<=max_track_row):
                detail_rt.incrementCounter()
            detail_rt.setValue("Dis_" + track_label, track_row, dis)
            detail_rt.setValue("Vel_" + track_label, track_row, vel)
            detail_rt.setValue("Dur_" + track_label, track_row, dt)
            detail_rt.setValue("Flag_" + track_label, track_row,
                RESTING_FLAG if vel < restingVelocity else MOVING_FLAG)
            track_row += 1
        else:
            # Analyzed track just ended: Reset loop variables and create column
            # to hold bout flags
            track_row = 0
            detail_rt.setValue("BoutFlag_" + track_label, 0, float("nan"))
            detail_rt.setValue("Mov_Dur_" + track_label, 0, float("nan"))
            detail_rt.setValue("Rest_Dur_" + track_label, 0, float("nan"))
            log("Extracting track ", track_label)


    listOfRasterPaths = [] # List holding raster tracks

    # Loop through individual tracks and tag each datapoint (i.e., each row)
    for track in range(0, n_tracks):

        durHeading = "Dur_" + str(track)
        fFlagHeading = "Flag_" + str(track)
        bFlagHeading = "BoutFlag_" + str(track)
        mDurHeading = "Mov_Dur_" + str(track)
        rDurHeading = "Rest_Dur_" + str(track)

        durations  = getColumn(detail_rt, durHeading)
        fFlags = getColumn(detail_rt, fFlagHeading)
        bFlags = getColumn(detail_rt, bFlagHeading)
        nDataPoints = findLastNonNumberIdx(durations) + 1

        log("Tagging track ", track, ": ", nDataPoints , " positions")
        for row in range(0, nDataPoints):

            # Define the boundaries of the moving window. "Stopping flags"
            # within this window are monitored to define a motionless bout.
            # NB: Boundaries are defined from the rows of the input table. This
            # works only when the time elapsed between two rows is a single frame,
            # so we also have to monitor the actual time that has elapsed within
            # the boundaries of the window
            lower_bound = max(0, row - neighborhood + 1)
            upper_bound = min(nDataPoints, row+neighborhood)
            sum_of_flags = 0
            sum_of_frames = 0
            neighborhood_sum = upper_bound - lower_bound

            for i in xrange(lower_bound, upper_bound):
                if isNumber(durations[i]) and isNumber(fFlags[i]):
                    sum_of_flags += (fFlags[i] * durations[i])
                    sum_of_frames += durations[i]
                if sum_of_frames >= neighborhood_sum:
                    break

            # Assign this tracked point to its bout
            moving_bout_duration = float("nan")
            resting_bout_duration = float("nan")
            bout_flag = float("nan")
            if sum_of_flags >= neighborhood_sum:
                bout_flag = MOVING_FLAG
                moving_bout_duration = durations[row]
            else:
                bout_flag = RESTING_FLAG
                resting_bout_duration = durations[row]
            detail_rt.setValue(bFlagHeading, row, bout_flag)
            detail_rt.setValue(mDurHeading, row, moving_bout_duration)
            detail_rt.setValue(rDurHeading, row, resting_bout_duration)

        if generateRasterTracks:
            # Generate raster column if path is long enough
            if nDataPoints > shortestRasterTrack:

                # Retrieve updated column of bout flags
                bFlags = getColumn(detail_rt, bFlagHeading)

                # Generate raster column (motion-flags temporally aligned, all 1
                # frame apart) until the path duration reaches the maximum limit
                keepGrowingRasterPath = True
                for idx, duration in enumerate(durations):
                    if (keepGrowingRasterPath):
                        flag = bFlags[idx]
                        for insertIdx in range(1, int(duration)):
                            if (len(bFlags)==longestRasterTrack):
                                keepGrowingRasterPath = False
                                break
                            bFlags.insert(idx+insertIdx, flag)

                # Store only lists without NaN values
                listOfRasterPaths.append(bFlags[:findLastNonNumberIdx(bFlags)])

        # Allow analysis to be interrupted
        if IJ.escapePressed():
            break

    # Display table. Displaying it now may ensure all tracks are padded with "NaN"
    if (displayDetailedTable):
        detail_rt.show("Track_Details["+ str(restingVelocity) +"-"+ str(neighborhood) +"]")


    # Now that all paths are contained in listOfRasterPaths. Sort them by length of track
    listOfRasterPaths = sorted(listOfRasterPaths, key = len)

    # Create Image of analysis. We'll create it from a ResultsTable. It would be much
    # more efficient to generate a text image directly, but this allows the table to be
    # processed elsewhere if needed. In IJ1, column headings of a ResultsTable must be
    # unique, so we will use distinct identifiers
    if generateRasterTracks:
        raster_rt = new_Table()
        log('Tracks to be rendered:', len(listOfRasterPaths))
        for rasterPath in xrange(len(listOfRasterPaths)):

            log("Rendering track ", rasterPath)
            for row, raster_flag in enumerate(listOfRasterPaths[rasterPath]):

                if not isNumber(raster_flag):
                    break
                if (row>raster_rt.getCounter()-1):
                    raster_rt.incrementCounter()

                # Create upper border: 1 px-wide
                bColor = borderColor if isNumber(raster_flag) else backgroundColor
                raster_rt.setValue("Delim1_" + str(rasterPath), row, bColor)

                # Create raster path: 18 px wide
                raster_flag_color = colorizeFlag(raster_flag)
                for i in 'abcdefghijklmnopq':
                    raster_rt.setValue("Raster_" + str(rasterPath) + str(i), row, raster_flag_color)

                # Create lower border: 1 px-wide
                raster_rt.setValue("Delim2_" + str(rasterPath), row, bColor)

                # Append padding space between tracks: 10px wide
                for j in 'abcdefghij':
                    raster_rt.setValue("Space_" + str(rasterPath) + str(j), row, backgroundColor)

            # Allow analysis to be interrupted
            if IJ.escapePressed():
                break

        # Display table of rasterized tracks
        if displayRasterTable:
            raster_rt.show("RasterTracks["+ str(restingVelocity) +"-"+ str(neighborhood ) +"]")

        # Display image of rasterized tracks
        ip = raster_rt.getTableAsImage().rotateLeft()
        paintNaNpixels(ip, backgroundColor)
        ip = ip.convertToByte(False)
        imp = ImagePlus("RasterTracks["+ str(restingVelocity) +"-"+ str(neighborhood ) +"]", ip)
        imp.show()

        ## Add scale-bar for time
        IJ.run(imp, "Set Scale...", "distance=1 known="+ str(frameCal[0]) +" unit="+ frameCal[1]);
        IJ.run(imp, "Scale Bar...", "width=10 color=Black location=[Lower Right] overlay");


    # Loop through individual tracks and extract some basic statistics. Most of these parameters
    # are already provided by TrackMate; we calculate them here just for convenience
    track_ids = []      # List holding the track identifier
    sum_distances = []  # List holding the track's total distance
    sum_durations = []  # List holding the track's total duration
    max_speeds = []     # List holding the track's Max speed
    min_speeds = []     # List holding the track's Min speed
    sum_n_rests = []    # List holding the number of resting bouts in each track
    sum_n_moves = []    # List holding the number of moving bouts in each track
    sum_dur_rests = []  # List holding the total resting time of each tracked object
    sum_dur_moves = []  # List holding the total moving time of each tracked object


    log("Logging Summaries...")
    summary_rt = new_Table()
    for track in range(0, n_tracks):

        # Retrieve and store the track identifier
        track_id = str(track)
        track_ids.insert(track, "Track_"+track_id)

        # Retrieve tracking data
        distances = getColumn(detail_rt, "Dis_" + track_id)
        durations = getColumn(detail_rt, "Dur_" + track_id)
        velocities = getColumn(detail_rt, "Vel_" + track_id)
        mov_durations = getColumn(detail_rt, "Mov_Dur_" + track_id)
        rest_durations = getColumn(detail_rt, "Rest_Dur_" + track_id)

        # Reset stats for this track
        track_sum_dis = 0
        track_sum_dur = 0
        track_max_vel = 0
        track_min_vel = sys.maxint
        track_sum_move = 0
        track_sum_rest = 0
        track_n_moves  = 0
        track_n_rests  = 0

        # Compute basic stats and store them in dedicated lists
        nDataPoints = findLastNonNumberIdx(distances) + 1
        for row in xrange(nDataPoints):
            track_sum_dis += distances[row]
            track_sum_dur += durations[row]
            if (velocities[row]>track_max_vel):
                track_max_vel = velocities[row]
            if (velocities[row]<track_min_vel):
                track_min_vel = velocities[row]
            if isNumber(mov_durations[row]):
                track_sum_move += mov_durations[row]
            if isNumber(rest_durations[row]):
                track_sum_rest += rest_durations[row]

        sum_distances.insert(track, track_sum_dis)
        sum_durations.insert(track, track_sum_dur)
        max_speeds.insert(track, track_max_vel)
        min_speeds.insert(track, track_min_vel)
        sum_dur_moves.insert(track, track_sum_move)
        sum_dur_rests.insert(track, track_sum_rest)

        # Assess the number of moving/resting bouts in this track
        for row in xrange(nDataPoints-1):
            if isNumber(mov_durations[row]) and not isNumber(mov_durations[row+1]):
                track_n_moves += 1
            if isNumber(rest_durations[row]) and not isNumber(rest_durations[row+1]):
                track_n_rests += 1

        # Predict cases in which bouts lasted entire track duration
        if track_n_moves==0 and track_sum_dur==track_sum_move:
            track_n_moves += 1
        if track_n_rests==0 and track_sum_dur==track_sum_rest:
            track_n_rests += 1

        sum_n_moves.insert(track, track_n_moves)
        sum_n_rests.insert(track, track_n_rests)

    # Log summary data
    for i in range(0, n_tracks):

        # Ignore tracks shorter than neighborhood
        if hideShortTracks and sum_durations[i]<neighborhood:
            continue

        row = summary_rt.getCounter()
        summary_rt.incrementCounter()
        summary_rt.setLabel(track_ids[i], row)
        summary_rt.setValue("Total dx", row, sum_distances[i])
        summary_rt.setValue("Duration", row, sum_durations[i])
        summary_rt.setValue("Max speed", row, max_speeds[i])
        summary_rt.setValue("Min speed", row, min_speeds[i])
        summary_rt.setValue("Moving dur", row, sum_dur_moves[i])
        summary_rt.setValue("Resting dur", row, sum_dur_rests[i])
        summary_rt.setValue("Resting %", row, 100 * sum_dur_rests[i] / sum_durations[i])
        summary_rt.setValue("Moving bouts", row, sum_n_moves[i])
        summary_rt.setValue("Resting bouts", row, sum_n_rests[i])
        if sum_n_moves[i]!=0:
            summary_rt.setValue("Avg moving bout dur", row, sum_dur_moves[i] / sum_n_moves[i])
        if sum_n_rests[i]!=0:
            summary_rt.setValue("Avg resting bout dur", row, sum_dur_rests[i] / sum_n_rests[i])

    summary_rt.show("Track_Summaries["+ str(restingVelocity) +"-"+ str(neighborhood) +"]")
    log("Finished: ", time.time()-start, " seconds")
Example #58
0
# Input parameters
N = 500     # number of pixels
L = 532*0.000000001      # wavelength in meters (532 nm)
w = 0.002  # width of the field of view in meters
z = 0.05  # z step in meters

imp = IJ.getImage()

#synthetic amplitude and phase
am = [math.exp(-1.6*val) for val in imp.getProcessor().getPixels()] 
ph = [-3*val for val in imp.getProcessor().getPixels()]

t = [x*cmath.exp(-1j*y) for x, y in zip(am, ph)] 

real = ImagePlus("real", FloatProcessor(N,N,[val.real for val in t]))
imaginary = ImagePlus("IMAGINARY", FloatProcessor(N,N,[val.imag for val in t]))


fft_1 = fft.fft(real,imaginary,N,N)

#projection to detector plane
z = Propagator.Zone(L, N, z, w)
real2 = [(x * y).real for x , y in zip(fft_1, [val.conjugate() for val in z])]
imag2 = [(x * y).imag for x , y in zip(fft_1, [val.conjugate() for val in z])]
R2 = ImagePlus("real", FloatProcessor(N, N, real2, None))
I2 = ImagePlus("imaginary", FloatProcessor(N, N, imag2, None))

ifft_1 = fft.ifft(R2,I2,N,N)

#amplitude at detector plane
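
# The snippet ends here; a minimal sketch of the missing amplitude step, assuming
# fft.ifft returns a list of complex values like fft.fft above, could be:
# amplitude = ImagePlus("amplitude", FloatProcessor(N, N, [abs(val) for val in ifft_1], None))
# amplitude.show()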
Example #59
0
def create_registered_hyperstack(imp, channel, target_folder, virtual):
  """ Takes the imp, determines the x,y,z drift for each pair of time points, using the preferred given channel,
  and outputs as a hyperstack."""
  shifts = compute_frame_translations(imp, channel)
  # Make shifts relative to 0,0,0 of the original imp:
  shifts = concatenate_shifts(shifts)
  print "shifts concatenated:"
  for s in shifts:
    print s.x, s.y, s.z
  # Compute bounds of the new volume,
  # which accounts for all translations:
  minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
  # Make shifts relative to new canvas dimensions
  # so that the min values become 0,0,0
  for shift in shifts:
    shift.x -= minx
    shift.y -= miny
    shift.z -= minz
  print "shifts relative to new dimensions:"
  for s in shifts:
    print s.x, s.y, s.z
  # new canvas dimensions:
  width = imp.width + maxx - minx
  height = maxy - miny + imp.height
  slices = maxz - minz + imp.getNSlices()

  print "New dimensions:", width, height, slices
  # Prepare empty slice to pad in Z when necessary
  empty = imp.getProcessor().createProcessor(width, height)

  # if it's RGB, fill the empty slice with blackness
  if isinstance(empty, ColorProcessor):
    empty.setValue(0)
    empty.fill()
  # Write all slices to files:
  stack = imp.getStack()

  if virtual is False:
    registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
  names = []
  for frame in range(1, imp.getNFrames()+1):
    shift = shifts[frame-1]
    fr = "t" + zero_pad(frame, len(str(imp.getNFrames())))
    # Pad with empty slices before reaching the first slice
    for s in range(shift.z):
      ss = "_z" + zero_pad(s + 1, len(str(slices))) # slices start at 1
      for ch in range(1, imp.getNChannels()+1):
        name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
        names.append(name)

        if virtual is True:
          currentslice = ImagePlus("", empty)
          currentslice.setCalibration(imp.getCalibration().copy())
          currentslice.setProperty("Info", imp.getProperty("Info"))
          FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
        else:
          empty = imp.getProcessor().createProcessor(width, height)
          registeredstack.addSlice(str(name), empty)
    # Add all proper slices
    stack = imp.getStack()
    for s in range(1, imp.getNSlices()+1):
      ss = "_z" + zero_pad(s + shift.z, len(str(slices)))
      for ch in range(1, imp.getNChannels()+1):
         ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
         ip2 = ip.createProcessor(width, height) # potentially larger
         ip2.insert(ip, shift.x, shift.y)
         name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
         names.append(name)

         if virtual is True:
           currentslice = ImagePlus("", ip2)
           currentslice.setCalibration(imp.getCalibration().copy())
           currentslice.setProperty("Info", imp.getProperty("Info"));
           FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
         else:
           registeredstack.addSlice(str(name), ip2)

    # Pad the end
    for s in range(shift.z + imp.getNSlices(), slices):
      ss = "_z" + zero_pad(s + 1, len(str(slices)))
      for ch in range(1, imp.getNChannels()+1):
        name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
        names.append(name)

        if virtual is True:
          currentslice = ImagePlus("", empty)
          currentslice.setCalibration(imp.getCalibration().copy())
          currentslice.setProperty("Info", imp.getProperty("Info"))
          FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
        else:
          registeredstack.addSlice(str(name), empty)

  if virtual is True:
      # Create virtual hyper stack with the result
      registeredstack = VirtualStack(width, height, None, target_folder)
      for name in names:
        registeredstack.addSlice(name)
      registeredstack_imp = ImagePlus("registered time points", registeredstack)
      registeredstack_imp.setDimensions(imp.getNChannels(), len(names) / (imp.getNChannels() * imp.getNFrames()), imp.getNFrames())
      registeredstack_imp.setCalibration(imp.getCalibration().copy())
      registeredstack_imp.setOpenAsHyperStack(True)

  else:
    registeredstack_imp = ImagePlus("registered time points", registeredstack)
    registeredstack_imp.setCalibration(imp.getCalibration().copy())
    registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
    registeredstack_imp.setDimensions(imp.getNChannels(), len(names) / (imp.getNChannels() * imp.getNFrames()), imp.getNFrames())
    registeredstack_imp.setOpenAsHyperStack(True)
    if 1 == registeredstack_imp.getNChannels():
      return registeredstack_imp
  IJ.log("\nHyperstack dimensions: time frames:" + str(registeredstack_imp.getNFrames()) + ", slices: " + str(registeredstack_imp.getNSlices()) + ", channels: " + str(registeredstack_imp.getNChannels()))

  # Else, as composite
  mode = CompositeImage.COLOR;
  if isinstance(imp, CompositeImage):
    mode = imp.getMode()
  else:
    return registeredstack_imp
  return CompositeImage(registeredstack_imp, mode)
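
# Example usage (a sketch; the channel number and output folder are illustrative,
# and compute_frame_translations/concatenate_shifts/compute_min_max/zero_pad are
# assumed to be defined in the surrounding registration script):
# imp = IJ.getImage()
# registered = create_registered_hyperstack(imp, 1, "/path/to/output", False)
# registered.show()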
Example #60
0
def register_hyperstack(imp, channel, shifts, target_folder, virtual):
  """ Takes the imp, determines the x,y,z drift for each pair of time points, using the preferred given channel,
  and outputs as a hyperstack."""
  # Compute bounds of the new volume,
  # which accounts for all translations:
  minx, miny, minz, maxx, maxy, maxz = compute_min_max(shifts)
  # Make shifts relative to new canvas dimensions
  # so that the min values become 0,0,0
  for shift in shifts:
    shift.x -= minx
    shift.y -= miny
    shift.z -= minz
  #print "shifts relative to new dimensions:"
  #for s in shifts:
  #  print s.x, s.y, s.z
  # new canvas dimensions:
  width = imp.width + maxx - minx
  height = maxy - miny + imp.height
  slices = maxz - minz + imp.getNSlices()

  print "New dimensions:", width, height, slices
  # Prepare empty slice to pad in Z when necessary
  empty = imp.getProcessor().createProcessor(width, height)

  # if it's RGB, fill the empty slice with blackness
  if isinstance(empty, ColorProcessor):
    empty.setValue(0)
    empty.fill()
  # Write all slices to files:
  stack = imp.getStack()

  if virtual is False:
    registeredstack = ImageStack(width, height, imp.getProcessor().getColorModel())
  names = []
  
  for frame in range(1, imp.getNFrames()+1):
 
    shift = shifts[frame-1]
    
    print "frame",frame,"correcting drift",-shift.x-minx,-shift.y-miny,-shift.z-minz
    IJ.log("    frame "+str(frame)+" correcting drift "+str(-shift.x-minx)+","+str(-shift.y-miny)+","+str(-shift.z-minz))
    
    fr = "t" + zero_pad(frame, len(str(imp.getNFrames())))
    # Pad with empty slices before reaching the first slice
    for s in range(shift.z):
      ss = "_z" + zero_pad(s + 1, len(str(slices))) # slices start at 1
      for ch in range(1, imp.getNChannels()+1):
        name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
        names.append(name)

        if virtual is True:
          currentslice = ImagePlus("", empty)
          currentslice.setCalibration(imp.getCalibration().copy())
          currentslice.setProperty("Info", imp.getProperty("Info"))
          FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
        else:
          empty = imp.getProcessor().createProcessor(width, height)
          registeredstack.addSlice(str(name), empty)
    
    
    # Add all proper slices
    stack = imp.getStack()
    for s in range(1, imp.getNSlices()+1):
      ss = "_z" + zero_pad(s + shift.z, len(str(slices)))
      for ch in range(1, imp.getNChannels()+1):
         ip = stack.getProcessor(imp.getStackIndex(ch, s, frame))
         ip2 = ip.createProcessor(width, height) # potentially larger
         ip2.insert(ip, shift.x, shift.y)
         name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
         names.append(name)

         if virtual is True:
           currentslice = ImagePlus("", ip2)
           currentslice.setCalibration(imp.getCalibration().copy())
           currentslice.setProperty("Info", imp.getProperty("Info"));
           FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
         else:
           registeredstack.addSlice(str(name), ip2)

    # Pad the end
    for s in range(shift.z + imp.getNSlices(), slices):
      ss = "_z" + zero_pad(s + 1, len(str(slices)))
      for ch in range(1, imp.getNChannels()+1):
        name = fr + ss + "_c" + zero_pad(ch, len(str(imp.getNChannels()))) +".tif"
        names.append(name)

        if virtual is True:
          currentslice = ImagePlus("", empty)
          currentslice.setCalibration(imp.getCalibration().copy())
          currentslice.setProperty("Info", imp.getProperty("Info"))
          FileSaver(currentslice).saveAsTiff(target_folder + "/" + name)
        else:
          registeredstack.addSlice(str(name), empty)

  if virtual is True:
      # Create virtual hyper stack with the result
      registeredstack = VirtualStack(width, height, None, target_folder)
      for name in names:
        registeredstack.addSlice(name)
      registeredstack_imp = ImagePlus("registered time points", registeredstack)
      registeredstack_imp.setDimensions(imp.getNChannels(), len(names) / (imp.getNChannels() * imp.getNFrames()), imp.getNFrames())
      registeredstack_imp.setCalibration(imp.getCalibration().copy())
      registeredstack_imp.setOpenAsHyperStack(True)
  else:
    registeredstack_imp = ImagePlus("registered time points", registeredstack)
    registeredstack_imp.setCalibration(imp.getCalibration().copy())
    registeredstack_imp.setProperty("Info", imp.getProperty("Info"))
    registeredstack_imp.setDimensions(imp.getNChannels(), len(names) / (imp.getNChannels() * imp.getNFrames()), imp.getNFrames())
    registeredstack_imp.setOpenAsHyperStack(True)
    if 1 == registeredstack_imp.getNChannels():
      return registeredstack_imp
  #IJ.log("\nHyperstack dimensions: time frames:" + str(registeredstack_imp.getNFrames()) + ", slices: " + str(registeredstack_imp.getNSlices()) + ", channels: " + str(registeredstack_imp.getNChannels()))

  # Else, as composite
  mode = CompositeImage.COLOR;
  if isinstance(imp, CompositeImage):
    mode = imp.getMode()
  else:
    return registeredstack_imp
  return CompositeImage(registeredstack_imp, mode)