def translate_single_stack_using_imglib2(imp, dx, dy, dz):
  """ Translate a single-channel 2D or 3D ImagePlus by subpixel amounts
      (dx, dy[, dz]) using imglib2 n-linear interpolation, and return a new
      ImagePlus of the same bit depth.
      Returns None for unsupported dimensionality or bit depth. """
  # wrap into a float imglib2 and translate
  # conversion into float is necessary due to "overflow of n-linear
  # interpolation due to accuracy limits of unsigned bytes"
  # see: https://github.com/fiji/fiji/issues/136#issuecomment-173831951
  img = ImagePlusImgs.from(imp.duplicate())
  extended = Views.extendBorder(img)
  converted = Converters.convert(extended, RealFloatSamplerConverter())
  interpolant = Views.interpolate(converted, NLinearInterpolatorFactory())
  # translate
  if imp.getNDimensions()==3:
    transformed = RealViews.affine(interpolant, Translation3D(dx, dy, dz))
  elif imp.getNDimensions()==2:
    transformed = RealViews.affine(interpolant, Translation2D(dx, dy))
  else:
    IJ.log("Can only work on 2D or 3D stacks")
    return None
  # Crop back to the original interval (discards the border extension)
  cropped = Views.interval(transformed, img)
  # wrap back into bit depth of input image and return
  bd = imp.getBitDepth()
  if bd==8:
    return(ImageJFunctions.wrapUnsignedByte(cropped,"imglib2"))
  elif bd == 16:
    return(ImageJFunctions.wrapUnsignedShort(cropped,"imglib2"))
  elif bd == 32:
    return(ImageJFunctions.wrapFloat(cropped,"imglib2"))
  else:
    return None
def transformedView(img, matrix):
  """ Return a 3D view of img rendered through the inverse of the affine
      encoded by 'matrix', n-linearly interpolated over a zero-extended
      source, bounded by the image's own dimensions. """
  forward = AffineTransform3D()
  forward.set(*matrix)
  # The matrix encodes a forward transform: invert it for rendering
  backward = forward.inverse()
  interpolated = Views.interpolate(Views.extendZero(img),
                                   NLinearInterpolatorFactory())
  rendered = RealViews.transform(interpolated, backward)
  maxima = [img.dimension(d) for d in xrange(3)]
  return Views.interval(rendered, [0, 0, 0], maxima)
def dropSlices(img, nth):
  """ Keep only every nth slice along dimension 2 (Z): a slice at index i is
      retained when its 1-based index (i + 1) is a multiple of nth; all other
      slices are dropped. Calibration is to be multiplied by nth for Z.
      NOTE(review): with this 1-based counting the first slice (index zero) is
      kept only when nth == 1 -- the original docstring claimed the opposite;
      confirm intent against callers. """
  return Views.stack([Views.hyperSlice(img, 2, i)
                      for i in xrange(img.dimension(2)) if 0 == (i + 1) % nth])
def viewTransformed(img, affine):
  """ View img through the given affine transform, n-linearly interpolated
      over a zero-extended source, bounded to the original dimensions. """
  interpolated = Views.interpolate(Views.extendZero(img),
                                   NLinearInterpolatorFactory())
  moved = RealViews.transform(interpolated, affine)
  upper = [img.dimension(d) - 1 for d in xrange(img.numDimensions())]
  return Views.interval(moved, [0, 0, 0], upper)
def getPixels(self, n):
  """ Return the primitive pixel array for slice 'n' (1-based) of a 4D image
      (X, Y, Z, T) viewed as a flat sequence of Z-slices grouped by time point. """
  # 'n' is 1-based
  aimg = ArrayImgs.unsignedShorts(self.dimensions[0:2])
  #computeInto(ImgSource(Views.hyperSlice(self.img4d, 2, n-1)), aimg)
  nZ = self.img4d.dimension(2)
  # Time point: which block of nZ slices the 1-based index n falls into
  fixedT = Views.hyperSlice(self.img4d, 3, int((n - 1) / nZ)) # Z blocks
  # Z index within that time point
  fixedZ = Views.hyperSlice(fixedT, 2, (n - 1) % nZ)
  # NOTE(review): 'w' is defined elsewhere in the file (presumably a compiled
  # cursor-to-cursor copier) -- confirm before reuse.
  w.copy(fixedZ.cursor(), aimg.cursor())
  return aimg.update(None).getCurrentStorageArray()
def getViewFromImp(imp, r=None):
  """ Wrap an 8-bit ImagePlus and return a zero-min view of the region
      described by r (a java.awt.Rectangle), or of the whole image when
      r is None. """
  wrapped = IL.wrapByte(imp)
  if r is None:
    r = Rectangle(0, 0, imp.getWidth(), imp.getHeight())
  top_left = [r.x, r.y]
  bottom_right = [r.x + r.width - 1, r.y + r.height - 1]
  return Views.zeroMin(Views.interval(wrapped, top_left, bottom_right))
def updatePixels(self):
  """ Refresh the backing pixel array: copy the current Z slice of the source
      3D image, cropped to the 2D interval, into this object's pixels. """
  # Copy interval into pixels
  view = Views.interval(
    Views.extendZero(Views.hyperSlice(self.img3D, 2, self.indexZ)),
    self.interval2D)
  # Wrap the existing float pixel array so the copy writes in place
  aimg = ArrayImgs.floats(
    self.getPixels(),
    [self.interval2D.dimension(0), self.interval2D.dimension(1)])
  ImgUtil.copy(view, aimg)
def get(self, path):
  """ Load the image at 'path', apply its registered transform, and return a
      zero-min bounded view (optionally cropped to self.roi), or an Img when
      self.asImg is true. """
  transform = self.transformsDict[path]
  img = self.loader.get(path)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, transform)
  # Bounds: either the stored ROI ([minCoords, maxCoords]) or the full extent
  minC = self.roi[0] if self.roi else [0] * img.numDimensions()
  maxC = self.roi[1] if self.roi else [img.dimension(d) -1 for d in xrange(img.numDimensions())]
  imgO = Views.zeroMin(Views.interval(imgT, minC, maxC))
  return ImgView.wrap(imgO, img.factory()) if self.asImg else imgO
def projectMax(img, minC, maxC, reduce_max):
  """ Max-project the [minC, maxC] interval of img by collapsing its last
      dimension with the given reducer, returning a new unsigned-short
      ArrayImg.
      Fixes: 'ArrayImgs.unsignedSorts' typo (no such method; should be
      unsignedShorts) and the use of 'imgC' before it was ever defined --
      the cropped interval view is now created explicitly. """
  # The interval to project (previously referenced as an undefined 'imgC')
  imgC = Views.interval(img, minC, maxC)
  imgA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgC))
  # NOTE(review): 'imglibtype' is expected to be defined at module level
  ImgUtil.copy(
    ImgView.wrap(
      convert(Views.collapseReal(imgC),
              reduce_max.newInstance(),
              imglibtype),
      img.factory()),
    imgA)
  return imgA
def viewTransformed(img, matrix):
  """ Render img through the inverse of the 3D affine encoded by 'matrix',
      keeping the original image dimensions. """
  forward = AffineTransform3D()
  forward.set(*matrix)
  # It's a forward transform: invert for pulling pixel values
  backward = forward.inverse()
  source = Views.interpolate(Views.extendZero(img),
                             NLinearInterpolatorFactory())
  moved = RealViews.transform(source, backward)
  # Same dimensions as the input image
  return Views.interval(moved, img)
def transformedView(img, transform, interval=None):
  """ View img through 'transform' (n-linear interpolation over a
      zero-extended source), bounded by 'interval' when given, otherwise by
      the image's own dimensions. """
  rendered = RealViews.transform(
    Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
    transform)
  if interval:
    return Views.interval(rendered, interval)
  upper = [img.dimension(d) - 1 for d in xrange(img.numDimensions())]
  return Views.interval(rendered, [0, 0, 0], upper)
def makeCell(self, index): self.preloadCells(index) # preload others in the background img = self.loadImg(self.filepaths[index]) affine = AffineTransform2D() affine.set(self.matrices[index]) imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()) imgA = RealViews.transform(imgI, affine) imgT = Views.zeroMin(Views.interval(imgA, self.interval)) aimg = img.factory().create(self.interval) ImgUtil.copy(ImgView.wrap(imgT, aimg.factory()), aimg) return Cell(self.cell_dimensions, [0, 0, index], aimg.update(None))
def scale3D(img, x=1.0, y=1.0, z=1.0):
  """ Return a view of img scaled by the given per-axis factors
      (n-linear interpolation over a zero-extended source), with
      dimensions rounded to the nearest pixel. """
  scaling = AffineTransform3D()
  scaling.set(x, 0, 0, 0,
              0, y, 0, 0,
              0, 0, z, 0)
  scaled = RealViews.transform(
    Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
    scaling)
  # Rounded scaled extents define the new bounds
  upper = [int(img.dimension(d) * k + 0.5) - 1 for d, k in enumerate([x, y, z])]
  return Views.interval(scaled, [0, 0, 0], upper)
def keyPressed(self, ke):
  """ Handle Ctrl+arrow (and Ctrl+Shift+arrow for 10x steps) by shifting the
      displayed crop window along the corresponding axis; forward every other
      key event to the previously installed listeners.
      NOTE(review): mins, maxs, fsource, stack, imgE, imp and kls are captured
      from the enclosing scope -- confirm they are in scope where this class
      is defined. """
  keyCode = ke.getKeyCode()
  if ke.isControlDown() and keyCode in Navigator.moves:
    d, sign = Navigator.moves[keyCode]
    # Shift makes the step 10x larger
    inc = 200 if ke.isShiftDown() else 20
    mins[d] += sign * inc
    maxs[d] += sign * inc
    # Replace source with shifted cropped volume
    fsource.set(stack, Views.zeroMin(Views.interval(imgE, FinalInterval(mins, maxs))))
    imp.updateVirtualSlice()
    return
  # Else, pass the event onto other listeners
  for kl in kls:
    kl.keyPressed(ke)
def crop(event):
  """ Read the six min/max coordinate text fields, clamp them to the bounds
      of the first image, crop all images to that ROI, and show the result as
      a stack with the original display range.
      NOTE(review): textfields, images, imp, storeRoi, showAsStack and
      cropContinuationFn come from the enclosing scope. """
  global cropped, cropped_imp
  coords = [int(float(tf.getText())) for tf in textfields]
  # Clamp to valid pixel coordinates
  minC = [max(0, c) for c in coords[0:3]]
  maxC = [min(d -1, c) for d, c in izip(Intervals.dimensionsAsLongArray(images[0]), coords[3:6])]
  storeRoi(minC, maxC)
  print "ROI min and max coordinates"
  print minC
  print maxC
  cropped = [Views.zeroMin(Views.interval(img, minC, maxC)) for img in images]
  cropped_imp = showAsStack(cropped, title="cropped")
  cropped_imp.setDisplayRange(imp.getDisplayRangeMin(), imp.getDisplayRangeMax())
  # Allow chaining further processing onto the cropped result
  if cropContinuationFn:
    cropContinuationFn(images, minC, maxC, cropped, cropped_imp)
def scale(img, calibration):
  """ Return a view of img scaled per axis by the three calibration factors
      (n-linear interpolation over a zero-extended source). """
  scaling = AffineTransform3D()
  scaling.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  rendered = RealViews.transform(
    Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()),
    scaling)
  # Bounds of the calibrated space
  upper = [int(img.dimension(d) * k) - 1 for d, k in enumerate(calibration)]
  return Views.interval(rendered, [0, 0, 0], upper)
def pyramidAreaAveraging(img, top_level, min_width=32, sumType=UnsignedLongType,
                         mathType=UnsignedLongType,
                         converter=Util.genericIntegerTypeConverter()):
  """ Return a list of image views, one per scale level of the image pyramid,
      except for level zero (the first image) which is the provided img.
      All images are of the same type as the source img.
      Based on an integral image for fast computation. """
  img_type = img.randomAccess().get().createVariable()
  # Create an integral image in longs
  alg = IntegralImg(img, sumType(), converter)
  alg.process()
  integralImg = alg.getResult()
  # Create an image pyramid as views, with ImgMath and imglib2,
  # which amounts to scale area averaging sped up by the integral image
  # and generated on demand whenever each pyramid level is read.
  width = img.dimension(0)
  imgE = Views.extendBorder(integralImg)
  blockSide = 1
  level_index = 1
  # Corners for level 1: a box of 2x2
  corners = [[0, 0], [1, 0], [0, 1], [1, 1]]
  pyramid = [img]
  while width > min_width and level_index <= top_level:
    blockSide *= 2
    # Python 2 semantics: '/=' on integers floors, halving the width per level
    width /= 2
    # Scale the corner coordinates to make the block larger
    cs = [[c * blockSide for c in corner] for corner in corners]
    blockRead = div(block(imgE, cs), pow(blockSide, 2)) # the op
    # a RandomAccessibleInterval view of the op, computed with shorts but seen as bytes
    view = blockRead.view(mathType(), img_type.createVariable())
    # Views.subsample by 2 will turn a 512-pixel width to a 257 width,
    # so crop to proper interval 256
    level = Views.interval(Views.subsample(view, blockSide),
                           [0] * img.numDimensions(), # min
                           [img.dimension(d) / blockSide -1
                            for d in xrange(img.numDimensions())]) # max
    pyramid.append(level)
    level_index += 1 # for next iteration
  return pyramid
def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins, slope, matrices, index, imp): sp = imp.getProcessor() # ShortProcessor # Crop to interval if needed x = interval.min(0) y = interval.min(1) width = interval.max(0) - interval.min(0) + 1 height = interval.max(1) - interval.min(1) + 1 if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight( ) != height: sp.setRoi(x, y, width, height) sp = sp.crop() if invert: sp.invert() CLAHE.run( ImagePlus("", sp), blockRadius, n_bins, slope, None ) # far less memory requirements than NormalizeLocalContrast, and faster. minimum, maximum = autoAdjust(sp) # Transform and convert image to 8-bit, mapping to display range img = ArrayImgs.unsignedShorts( sp.getPixels(), [sp.getWidth(), sp.getHeight()]) sp = None imp = None # Must use linear interpolation for subpixel precision affine = AffineTransform2D() affine.set(matrices[index]) imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory()) imgA = RealViews.transform(imgI, affine) imgT = Views.zeroMin(Views.interval(imgA, img)) # Convert to 8-bit imgMinMax = convert2(imgT, RealUnsignedByteConverter(minimum, maximum), UnsignedByteType, randomAccessible=False) # use IterableInterval aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img)) # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg) # Single-threaded copy copier = createBiConsumerTypeSet(UnsignedByteType) LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier) img = imgI = imgA = imgMinMax = imgT = None return aimg
def MakeMultiChannelPhantom(ops, size):
  """ Create a float phantom image of the given size (X, Y, Z[, C]) and draw
      a centered sphere into each channel, shifting the X location by 10 per
      channel. Duplicate of the identically named function below. """
  numChannels = size[3] if len(size) > 3 else 1
  image = ops.run("create", size, FloatType())
  axes = [Axes.X, Axes.Y, Axes.Z, Axes.CHANNEL]
  imgPlus = ImgPlus(image, "phantom", axes)
  location = zeros(3, 'i')
  location[0] = 40
  location[1] = size[1] / 2
  location[2] = size[2] / 2
  #ops.run("addsphere", image, location, radius, 1.0)
  #ops.run("addassymetricspherel", image, location, 1.0, radius1, radius2)
  shapes = Add3DShapes(ops, size)

  def AddShapes(hyperSlice):
    #shapes.addRandomPointsInROI(hyperSlice, 100.0, 20)
    shapes.addCenterSphere(hyperSlice, 5.0, 20)

  if numChannels > 1:
    for channel in range(0, numChannels):
      AddShapes(Views.hyperSlice(image, 3, channel))
      location[0] += 10
  else:
    AddShapes(image)
  return imgPlus
def MakeMultiChannelPhantom(ops, size):
  """ Create a float phantom image of the given size (X, Y, Z[, C]) and draw
      a centered sphere into each channel, shifting the X location by 10 per
      channel.
      NOTE(review): this duplicates the identically named function above --
      only one definition survives at import time; consider removing one. """
  if len(size) > 3:
    numChannels = size[3]
  else:
    numChannels = 1
  image = ops.run("create", size, FloatType())
  ax = [Axes.X, Axes.Y, Axes.Z, Axes.CHANNEL]
  imgPlus = ImgPlus(image, "phantom", ax)
  location = zeros(3, 'i')
  location[0] = 40
  location[1] = size[1] / 2
  location[2] = size[2] / 2
  #ops.run("addsphere", image, location, radius, 1.0)
  #ops.run("addassymetricspherel", image, location, 1.0, radius1, radius2)
  shapes = Add3DShapes(ops, size)

  def AddShapes(hyperSlice):
    #shapes.addRandomPointsInROI(hyperSlice, 100.0, 20)
    shapes.addCenterSphere(hyperSlice, 5.0, 20)

  if (numChannels > 1):
    for d in range(0, numChannels):
      hyperSlice = Views.hyperSlice(image, 3, d)
      AddShapes(hyperSlice)
      location[0] += 10
  else:
    AddShapes(image)
  return imgPlus
def test(iraf):
  """ Exercise a 2D RandomAccessibleInterval: print its dimensions, iterate it
      with a Cursor and a RandomAccess, print the source global 'img' to show
      it is untouched, and display the middle 2x2 interval view. """
  # Test dimensions: should be the same as the one input image
  print "Dimensions:", Intervals.dimensionsAsLongArray(iraf)
  # Test Cursor
  c = iraf.cursor()
  pos = zeros(2, 'l')
  while c.hasNext():
    c.fwd()
    c.localize(pos)
    print "Cursor:", pos, "::", c.get()
  # Test RandomAccess, positioned by the cursor
  ra = iraf.randomAccess()
  c = iraf.cursor()
  while c.hasNext():
    c.fwd()
    ra.setPosition(c)
    c.localize(pos)
    print "RandomAccess:", pos, "::", ra.get()
  # Test source img: should be untouched (NOTE: 'img' is a global)
  c = img.cursor()
  while c.hasNext():
    print "source:", c.next()
  # Test interval view: the middle 2x2 square
  v = Views.interval(iraf, [1, 1], [2, 2])
  IL.wrap(v, "+2 view").show()
def dog_detection(overlay,img, imp, cal):
  """ Detect cell-sized intensity minima with a Difference of Gaussians and,
      for each peak inside the ImagePlus' current ROI (if any), add a red oval
      of one cell diameter to the given overlay. """
  # Create a variable of the correct type (UnsignedByteType) for the value-extended view
  zero = img.randomAccess().get().createVariable()
  # Run the difference of Gaussian
  cell = 8.0 # microns in diameter
  min_peak = 2.0 # min intensity for a peak to be considered
  dog = DogDetection(Views.extendValue(img, zero), img,
                     [cal.pixelWidth, cal.pixelHeight,cal.pixelDepth],
                     cell / 2, cell,
                     DogDetection.ExtremaType.MINIMA,
                     min_peak, False,
                     DoubleType())
  peaks = dog.getPeaks()
  # NOTE(review): 'roi' is assigned but never used below
  roi = OvalRoi(0, 0, cell/cal.pixelWidth, cell/cal.pixelHeight)
  print ('Number of cells = ', len(peaks))
  p = zeros(img.numDimensions(), 'i')
  boundRect = imp.getRoi()
  for peak in peaks:
    # Read peak coordinates into an array of integers: XYZ location of spots
    peak.localize(p)
    print(p)
    if(boundRect is not None and boundRect.contains(p[0], p[1])):
      oval = OvalRoi(p[0], p[1],cell/cal.pixelWidth, cell/cal.pixelHeight)
      oval.setColor(Color.RED)
      overlay.add(oval)
def filterBankPatch(img, width=5):
  """ Returns the raw pixel value of a square block of pixels (a patch)
      centered at each pixel, as one offset view per patch position. """
  half = width / 2 # e.g. for 5, it's 2
  bordered = Views.extendBorder(img)
  views = []
  for dx in xrange(-half, half + 1):
    for dy in xrange(-half, half + 1):
      views.append(offset(bordered, [dx, dy]))
  return views
def showAsStack(images, title=None, show=True):
  """ Stack the given images into one ImagePlus (via the local wrap helper)
      and optionally show it; returns the ImagePlus. """
  stack_title = title if title else "Stack of %i images" % len(images)
  imp = wrap(Views.stack(images), stack_title)
  if show:
    imp.show()
  return imp
def wrap(img, title="", n_channels=1):
  """ Like ImageJFunctions.wrap but, when n_channels=1 (the default),
      then a new dimension of size 1 is inserted at position 2 to prevent the
      Z axis from showing as the channels axis.
      To enable ImageJFunctions.wrap default behavior, set n_channels to a
      value other than 1. """
  if 1 == n_channels:
    # Append a dimension of size 1 at the end
    # and permute it iteratively so that it becomes the channels dimension (d=2)
    img = Views.addDimension(img, 1, 1)
    d = img.numDimensions() - 1 # starts with the last: the new one of size 1
    while d > 2:
      img = Views.permute(img, d, d - 1)
      d -= 1
  #
  return IL.wrap(img, title)
def findNucleiOverTime(img4D, params, show=True):
  """ Detect nuclei across time as merged, filtered DoG peak detections.
      params["frames"]: number of time frames to average
      params["calibration"]: e.g. [1.0, 1.0, 1.0]
      params["somaDiameter"]: width of a soma, in pixels
      params["minPeakValue"]: determine it by hand with e.g. difference of Gaussians sigma=somaDiameter/4 minus sigma=somaDiameter/2
      params["sigmaSmaller"]: for difference of Gaussian to detect somas. Recommended somaDiameter / 4.0 -- in pixels
      params["sigmaLarger"]: for difference of Gaussian to detect somas. Recommended somaDiameter / 2.0 -- in pixels
      params["searchRadius"]: for finding nearby DoG peaks which are actually the same soma. Recommended somaDiameter / 3.0 -- in pixels
      params["min_count"]: to consider only somas detected in at least min_count time points, i.e. their coordinates are the average of at least min_count independent detections.
      Returns (peaks, mergedPeaks, nuclei); when show is True, additionally
      returns the spheres RAI and the ImagePlus displaying it. """
  peaks = findPeaks(img4D, params)
  mergedPeaks = mergePeaks(peaks, params)
  nuclei = filterNuclei(mergedPeaks, params)
  # Show as a 3D volume with spheres
  if show:
    spheresRAI = virtualPointsRAI(nuclei, params["somaDiameter"] / 2.0, Views.hyperSlice(img4D, 3, 1))
    imp = showStack(spheresRAI, title="nuclei (min_count=%i)" % params["min_count"])
    return peaks, mergedPeaks, nuclei, spheresRAI, imp
  return peaks, mergedPeaks, nuclei
def looping(img, center):
  """ For every Z slice, set to one each pixel inside an open sphere at
      'center' whose radius shrinks with depth. """
  for depth in xrange(img.dimension(2)):
    shrinking_radius = img.dimension(0) * 0.5 / (depth + 1)
    sphere = GeomMasks.openSphere(center, shrinking_radius)
    plane = Views.hyperSlice(img, 2, depth)
    # Works, explicit iteration of every pixel inside the mask
    for pixel in Regions.sample(sphere, plane):
      pixel.setOne()
def readPatch(img, width=5):
  """ Return one offset view per position of a width x width neighborhood,
      so reading all of them yields the raw patch centered on each pixel. """
  half = width / 2 # e.g. for 5, it's 2
  extended = Views.extendBorder(img)
  span = xrange(-half, half + 1)
  return [offset(extended, [x, y]) for x in span for y in span]
def viewTransformed(img, calibration, affine):
  """ View img transformed to isotropy (via the calibration)
      and transformed by the affine. """
  scale3d = AffineTransform3D()
  scale3d.set(calibration[0], 0, 0, 0,
              0, calibration[1], 0, 0,
              0, 0, calibration[2], 0)
  # Compose: calibration scaling is applied first, then the given affine
  transform = affine.copy()
  transform.concatenate(scale3d)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, transform)
  # dimensions of the calibrated (isotropic) space
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) -1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  return imgB
def roi(mask, image):
  """ Return an iterable region: the discrete mask bounded to the image's
      interval. """
  # Convert ROI from R^n to Z^n.
  #discreteROI = Views.raster(Masks.toRealRandomAccessible(mask))
  # Apply finite bounds to the discrete ROI, then make it iterable.
  return Regions.iterable(Views.interval(mask, image))
def peakData():
  """ A generator function that returns all peaks and their pixel sum,
      one by one.
      NOTE(review): peaks, p, minC, maxC, centerAt and img are captured from
      the enclosing scope; the yielded 'p' is the same mutable array each
      time -- copy it if you keep references. """
  for peak in peaks:
    peak.localize(p)
    # Window centered on the peak, bounded by minC/maxC
    minCoords, maxCoords = centerAt(p, minC, maxC)
    fov = Views.interval(img, minCoords, maxCoords)
    s = sum(t.getInteger() for t in fov)
    yield p, s
def get(self, path):
  """ Load a KLB volume, render it through the inverse of its stored forward
      transform composed with the calibration scaling, and return the bounded
      view wrapped as an Img (required by Load.lazyStack).
      NOTE(review): scale3d and calibration are module-level globals --
      confirm they are defined before use. """
  img = self.klb.readFull(path)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  affine = AffineTransform3D()
  affine.set(self.transforms[path])
  affine = affine.inverse() # it's a forward transform: must invert
  affine.concatenate(scale3d) # calibrated space: isotropic
  imgT = RealViews.transform(imgI, affine)
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  # View a RandomAccessibleInterval as an Img, required by Load.lazyStack
  return ImgView.wrap(imgB, img.factory())
def translate_using_imglib2(imp, dx, dy, dz): print "imp channels",imp.getNChannels() # todo: # if multiple channels use Duplicator to translate each channel individually ## wrap # http://javadoc.imagej.net/ImgLib2/net/imglib2/img/imageplus/ImagePlusImg.html img = ImagePlusImgs.from(imp.duplicate()) print "dimensions:",img.numDimensions() print img.getChannels() ## prepare image print "img",img ddd extended = Views.extendBorder(img) #print "extended",extended #print "extended",extended.dimension(1) dims = zeros(4, 'l') img.dimensions(dims) print "dims",dims converted = Converters.convert(extended, RealFloatSamplerConverter()) composite = Views.collapseReal(converted, imp.getNChannels()) print "composite",composite interpolant = Views.interpolate(composite, NLinearInterpolatorFactory()) #print "interpolant",interpolant transformed = RealViews.affine(interpolant, Translation3D(dx, dy, dz)) print "transformed", transformed cropped = Views.interval(transformed, img) print "cropped.numDimensions()", cropped.numDimensions() print "cropped",cropped ## wrap back and return bd = imp.getBitDepth() # maybe simply wrap works? if bd==8: return(ImageJFunctions.wrapUnsignedByte(cropped,"imglib2")) elif bd == 16: return(ImageJFunctions.wrapUnsignedShort(cropped,"imglib2")) elif bd == 32: return(ImageJFunctions.wrapFloat(cropped,"imglib2")) else: return None
def getManders(self, imp, cell):
  """ Compute Manders colocalization coefficients for the given cell.
      Returns (channels, threshold images, thresholds, raw coefficients,
      thresholded coefficients), or None when the cell mask yields no crop. """
  ### Crop channels according to cell mask
  channels = self.getCroppedChannels(imp, cell)
  if channels is None:
    return None
  ### Calculate channel thresholds
  thrs = []
  thrimps = []
  for c, method in enumerate(self.methods):
    if method != "None":
      thr, thrimp = self.getThreshold(channels[c], method)
    else:
      thr, thrimp = None, None
    thrs.append(thr)
    thrimps.append(thrimp)
  ### Calculate manders colocalization
  manders = MandersColocalization()
  raws = []
  thrds = []
  for chA, chB in self.pairs:
    # Channel numbers in self.pairs are 1-based
    container = self.getContainer(channels[chA - 1], channels[chB - 1])
    img1 = container.getSourceImage1()
    img2 = container.getSourceImage2()
    mask = container.getMask()
    # Twin cursor walks both channels in lockstep within the mask
    cursor = TwinCursor(img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor())
    rtype = img1.randomAccess().get().createVariable()
    raw = manders.calculateMandersCorrelation(cursor, rtype)
    rthr1 = rtype.copy()
    rthr2 = rtype.copy()
    rthr1.set(thrs[chA - 1])
    rthr2.set(thrs[chB - 1])
    # Re-use the same cursor for the thresholded pass
    cursor.reset()
    thrd = manders.calculateMandersCorrelation(cursor, rthr1, rthr2, ThresholdMode.Above)
    raws.append(raw)
    thrds.append(thrd)
  return (channels, thrimps, thrs, raws, thrds)
# NOTE(review): script fragment that rotates every TIME frame of 'data' about
# 'center'; affine, angle_rad, center, ops, data, ds and crop_along_one_axis
# are defined earlier in the script -- confirm before reuse.
affine.rotate(angle_rad)
affine.translate(center)
# Get the interpolator
interpolator = LanczosInterpolatorFactory()
# Iterate over all frames in the stack along the TIME axis
axis = Axes.TIME
output = []
for d in range(data.dimension(axis)):
  # Get the current frame
  frame = crop_along_one_axis(ops, data, [d, d], "TIME")
  # Get the interpolated view of the frame
  extended = ops.run("transform.extendZeroView", frame)
  interpolant = ops.run("transform.interpolateView", extended, interpolator)
  # Apply the transformation to it
  rotated = RealViews.affine(interpolant, affine)
  # Set the intervals back to the frame's bounds
  rotated = ops.transform().offset(rotated, frame)
  output.append(rotated)
output = Views.stack(output)
# Create output Dataset
output = ds.create(output)
# NOTE(review): fragment of a job-splitting loop; start, stop, c, overlap,
# interval, step, root, timestamp, options, wholeStrip and
# make_sure_path_exists come from earlier in the script.
threads = []
upper = start
while upper < stop:
  correlationRange = int(c)
  # Overlap consecutive ranges so estimates agree at the seams
  lower = max( 0, upper - overlap )
  upper = lower + interval
  # Absorb a final sliver smaller than one step into this chunk
  if upper + step >= stop:
    upper = min( stop, upper + step )
  home = root.rstrip('/') + '/range=%d_%s/lower=%d_upper=%d'
  home = home % ( correlationRange, timestamp, lower, upper )
  make_sure_path_exists( home.rstrip('/') + '/' )
  options.comparisonRange = int(c)
  # View of rows [lower, upper) of the strip, converted to doubles
  subStrip = ConvertedRandomAccessibleInterval(
    Views.interval( wholeStrip, [long(0), long(lower)], [long(wholeStrip.dimension(0)-1), long(upper-1)] ),
    RealDoubleConverter(),
    DoubleType() )
  gitCommitInfoFile = '%s/commitHash' % home.rstrip('/')
  #with open( gitCommitInfoFile, 'w' ) as f:
  #  f.write( '%s\n' % utility.gitcommit.getCommit( thickness_estimation_repo_dir ) )
  gitDiffFile = '%s/gitDiff' % home.rstrip('/')
  #with open( gitDiffFile, 'w' ) as f:
  #  f.write( '%s\n' % utility.gitcommit.getDiff( thickness_estimation_repo_dir ) )
  # Record the options used for this chunk
  optionsFile = '%s/options' % home.rstrip('/')
  with open( optionsFile, 'w' ) as f:
    f.write( '%s\n' % options.toString() )
# NOTE(review): colocalization script fragment; title, image, preview, imp1,
# imp2, rm, methods, createContainer, calculateThreshold, manders and results
# come from earlier in the script.
title = title[:title.rfind('.')]
image.close()
preview.close()
ch1 = ImagePlusAdapter.wrap(imp1)
ch2 = ImagePlusAdapter.wrap(imp2)
for roi in rm.getRoisAsArray():
  container = createContainer(roi, ch1, ch2)
  img1 = container.getSourceImage1()
  img2 = container.getSourceImage2()
  mask = container.getMask()
  thr1, thrimp1 = calculateThreshold(imp1, roi, methods[0])
  thr2, thrimp2 = calculateThreshold(imp2, roi, methods[1])
  # Twin cursor walks both channels in lockstep within the mask
  cursor = TwinCursor(img1.randomAccess(), img2.randomAccess(), Views.iterable(mask).localizingCursor())
  rtype = img1.randomAccess().get().createVariable()
  raw = manders.calculateMandersCorrelation(cursor, rtype)
  rthr1 = rtype.copy()
  rthr2 = rtype.copy()
  rthr1.set(thr1)
  rthr2.set(thr2)
  # Re-use the cursor for the thresholded pass
  cursor.reset()
  thrd = manders.calculateMandersCorrelation(cursor, rthr1, rthr2, ThresholdMode.Above)
  print "Results are: %f %f %f %f" % (raw.m1, raw.m2, thrd.m1, thrd.m2)
  results.incrementCounter()
  rowno = results.getCounter() - 1
  results.setValue("Cell", rowno, int(rowno))
  results.setValue("Threshold 1", rowno, int(thr1))
  results.setValue("Threshold 2", rowno, int(thr2))
# NOTE(review): output fragment of a Z-spacing estimation run; home, result
# and wrappedImage come from earlier in the script.
resultFileName = '%s/result.tif' % home.rstrip('/')
imp = ImageJFunctions.wrap( result, 'result' )
IJ.saveAsTiff(imp.duplicate(), resultFileName)
# Subtract each pixel's own Z position to obtain the relative displacement
relativeResult = result.copy()
c = relativeResult.cursor()
while c.hasNext():
  c.fwd()
  cur = c.get()
  val = cur.get()
  cur.set( val - c.getDoublePosition( 2 ) )
relativeResultFileName = '%s/relativeResult.tif' % home.rstrip('/')
imp = ImageJFunctions.wrap( relativeResult, 'relative result' )
IJ.saveAsTiff(imp.duplicate(), relativeResultFileName)
# Scale factors between the original image and the (possibly smaller) result grid
ratio = [ wrappedImage.dimension( 0 )*1.0/result.dimension( 0 ), wrappedImage.dimension( 1 )*1.0/result.dimension( 1 ) ]
shift = [ 0.0, 0.0 ]
lutField = SingleDimensionLUTGrid(3, 3, result, 2, ratio, shift )
# Re-render the source image through the Z lookup-table deformation field
transformed = Views.interval( Views.raster( RealViews.transformReal( Views.interpolate( Views.extendBorder( wrappedImage ), NLinearInterpolatorFactory() ), lutField ) ), wrappedImage )
imp = ImageJFunctions.wrap( transformed, 'transformed' )
transformedFileName = '%s/transformed.tif' % home.rstrip('/')
IJ.saveAsTiff( imp.duplicate(), transformedFileName )
# result = inference.estimateZCoordinates( 0, 0, startingCoordinates, matrixTracker, options )