def test(img):
  imgT = TransformView.transformView(img, aff, interval,
                                     MultiViewDeconvolution.minValueImg,
                                     MultiViewDeconvolution.outsideValueImg,
                                     1) # 1: linear interpolation
  imgA = ArrayImgs.floats(dimensions)
  ImgUtil.copy(ImgView.wrap(imgT, imgA.factory()), imgA)
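For comparison, here is a sketch of the same transform-and-copy written with plain ImgLib2 views, assuming the same aff, interval and dimensions globals as above (the function name testViews is illustrative, and this does not reproduce TransformView's min/outside value handling):

from net.imglib2.view import Views
from net.imglib2.realtransform import RealViews
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.img.array import ArrayImgs
from net.imglib2.img import ImgView
from net.imglib2.util import ImgUtil

def testViews(img):
  # Extend, interpolate, transform, then crop to the target interval
  imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory())
  imgT = Views.interval(RealViews.transform(imgI, aff), interval)
  imgA = ArrayImgs.floats(dimensions)
  ImgUtil.copy(ImgView.wrap(imgT, imgA.factory()), imgA)
  return imgA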
def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                     slope, matrices, copy_threads, index, imp):
  sp = imp.getProcessor() # ShortProcessor
  sp.setRoi(interval.min(0), interval.min(1),
            interval.max(0) - interval.min(0) + 1,
            interval.max(1) - interval.min(1) + 1)
  sp = sp.crop()
  if invert:
    sp.invert()
  # Far lower memory requirements than NormalizeLocalContrast, and faster
  CLAHE.run(ImagePlus("", sp), blockRadius, n_bins, slope, None)
  minimum, maximum = autoAdjust(sp)
  # Transform and convert image to 8-bit, mapping to display range
  img = ArrayImgs.unsignedShorts(sp.getPixels(), [sp.getWidth(), sp.getHeight()])
  sp = None
  affine = AffineTransform2D()
  affine.set(matrices[index])
  imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory())
  imgA = RealViews.transform(imgI, affine)
  imgT = Views.zeroMin(Views.interval(imgA, img))
  imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum), UnsignedByteType)
  aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
  ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg, copy_threads)
  img = imgI = imgA = imgT = imgMinMax = None
  return aimg
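A hypothetical invocation, assuming imp is one open 16-bit section, interval is a 2D crop (e.g. a FinalInterval), and matrices holds one flattened 2D affine per section; the CLAHE parameters (blockRadius 127, 256 bins, slope 3.0) and the thread count are illustrative values:

from net.imglib2.img.display.imagej import ImageJFunctions as IL

aimg = asNormalizedUnsignedByteArrayImg(interval, False, 127, 256, 3.0,
                                        matrices, 2, 0, imp)
IL.wrap(aimg, "section 0: CLAHE + auto-adjust + transform, 8-bit").show()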
def twoStep(index=0):
  # The current way:
  img = klb.readFull(filepaths[index]) # klb_loader.get(filepaths[index])
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
  imgB = Views.zeroMin(Views.interval(imgT, roi[0], roi[1])) # bounded: crop with ROI
  imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
  ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
  imgP = prepareImgForDeconvolution(imgBA,
                                    affine3D(fineTransformsPostROICrop[index]).inverse(),
                                    FinalInterval([0, 0, 0],
                                                  [imgB.dimension(d) - 1 for d in xrange(3)]))
  # Copy transformed view into ArrayImg for best performance in deconvolution
  imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
  ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
  IL.wrap(imgA, "two step").show()
def get(self, path):
  transform = self.transformsDict[path]
  img = self.loader.get(path)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, transform)
  minC = self.roi[0] if self.roi else [0] * img.numDimensions()
  maxC = self.roi[1] if self.roi else [img.dimension(d) - 1
                                       for d in xrange(img.numDimensions())]
  imgO = Views.zeroMin(Views.interval(imgT, minC, maxC))
  return ImgView.wrap(imgO, img.factory()) if self.asImg else imgO
def makeCell(self, index):
  self.preloadCells(index) # preload others in the background
  img = self.loadImg(self.filepaths[index])
  affine = AffineTransform2D()
  affine.set(self.matrices[index])
  imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory())
  imgA = RealViews.transform(imgI, affine)
  imgT = Views.zeroMin(Views.interval(imgA, self.interval))
  aimg = img.factory().create(self.interval)
  ImgUtil.copy(ImgView.wrap(imgT, aimg.factory()), aimg)
  return Cell(self.cell_dimensions,
              [0, 0, index],
              aimg.update(None))
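A makeCell like this is the kind of method that backs a lazily computed cell image, with one cell per 2D section. A sketch of the wiring, assuming the enclosing class implements LazyCellImg.Get and the sections are 8-bit (the class and variable names here are illustrative):

from net.imglib2.img.cell import LazyCellImg, CellGrid
from net.imglib2.type.numeric.integer import UnsignedByteType
from net.imglib2.img.display.imagej import ImageJFunctions as IL

# One cell per section: the grid spans width x height x number of sections
grid = CellGrid([width, height, len(filepaths)], [width, height, 1])
getter = SectionCellLoader() # hypothetical: its get(index) calls makeCell as above
lazyimg = LazyCellImg(grid, UnsignedByteType(), getter)
IL.wrap(lazyimg, "lazy aligned sections").show()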
def get(self, path):
  img = self.klb.readFull(path)
  imgE = Views.extendZero(img)
  imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
  affine = AffineTransform3D()
  affine.set(self.transforms[path])
  affine = affine.inverse() # it's a forward transform: must invert
  affine.concatenate(scale3d) # calibrated space: isotropic
  imgT = RealViews.transform(imgI, affine)
  minC = [0, 0, 0]
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  imgB = Views.interval(imgT, minC, maxC)
  # View a RandomAccessibleInterval as an Img, required by Load.lazyStack
  return ImgView.wrap(imgB, img.factory())
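This get() relies on two globals defined elsewhere in the script: calibration and scale3d. A sketch with hypothetical values for an anisotropic volume whose Z steps are 5x the XY pixel size:

from net.imglib2.realtransform import Scale3D

calibration = [1.0, 1.0, 5.0] # X, Y, Z scaling factors (illustrative values)
scale3d = Scale3D(calibration[0], calibration[1], calibration[2]) # into isotropic, calibrated space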
def oneStep(index=0):
  # Combining transforms into one, via a translation to account for the ROI crop
  img = klb.readFull(filepaths[index]) # klb_loader.get(filepaths[index])
  t1 = cmIsotropicTransforms[index]
  t2 = affine3D([1, 0, 0, -roi[0][0],
                 0, 1, 0, -roi[0][1],
                 0, 0, 1, -roi[0][2]])
  t3 = affine3D(fineTransformsPostROICrop[index]).inverse()
  aff = AffineTransform3D()
  aff.set(t1)
  aff.preConcatenate(t2)
  aff.preConcatenate(t3)
  # Final interval is now rooted at 0,0,0 given that the transform includes the translation
  imgP = prepareImgForDeconvolution(img, aff,
                                    FinalInterval([0, 0, 0],
                                                  [maxC - minC for minC, maxC
                                                   in izip(roi[0], roi[1])]))
  # Copy transformed view into ArrayImg for best performance in deconvolution
  imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
  ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
  IL.wrap(imgA, "one step index %i" % index).show()
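Assuming the script's globals (klb, filepaths, roi, cmIsotropicTransforms, fineTransformsPostROICrop) are in place, the two approaches can be compared directly; the one-step version concatenates all transforms, so the pixels are interpolated and copied only once. A quick timing sketch:

from java.lang import System

for fn in (twoStep, oneStep):
  t0 = System.nanoTime()
  fn(0)
  print fn.__name__, "took %.1f ms" % ((System.nanoTime() - t0) / 1e6)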
def testJython():
  ImgUtil.copy(
    ImgView.wrap(
      Converters.convertRandomAccessibleIterableInterval(
        img1, UnsignedByteToFloatSamplerConverter()),
      img1.factory()),
    img2)
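testJython assumes an UnsignedByteToFloatSamplerConverter written in Jython. A minimal sketch of such a read/write converter, assuming ImgLib2's SamplerConverter and FloatAccess interfaces: the returned FloatType is backed by an access that reads and writes through the sampler, so the converted view remains writable.

from net.imglib2.converter.readwrite import SamplerConverter
from net.imglib2.img.basictypeaccess import FloatAccess
from net.imglib2.type.numeric.real import FloatType

class UnsignedByteToFloatAccess(FloatAccess):
  def __init__(self, sampler):
    self.sampler = sampler # the sampler tracks the cursor position
  def getValue(self, index):
    return self.sampler.get().getRealFloat()
  def setValue(self, index, value):
    self.sampler.get().setReal(value)

class UnsignedByteToFloatSamplerConverter(SamplerConverter):
  def convert(self, sampler):
    return FloatType(UnsignedByteToFloatAccess(sampler))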
def testASM():
  ImgUtil.copy(
    ImgView.wrap(
      Converters.convertRandomAccessibleIterableInterval(
        img1, samplerClass.newInstance()),
      img1.factory()),
    img2)
def testASMLongs():
  img2 = ArrayImgs.unsignedLongs(dimensions)
  ImgUtil.copy(
    ImgView.wrap(
      Converters.convertRandomAccessibleIterableInterval(
        img1s, sampler_conv_longs),
      img1.factory()),
    img2)
def testASMDoubles():
  img2 = ArrayImgs.doubles(dimensions)
  ImgUtil.copy(
    ImgView.wrap(
      Converters.convertRandomAccessibleIterableInterval(
        img1, sampler_conv_doubles),
      img1.factory()),
    img2)
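A rough benchmarking sketch for the four variants, assuming img1, img2 and the converter instances are set up as above (the repetition count is arbitrary, and JIT warm-up will dominate the first iterations):

from java.lang import System

def timeit(fn, n=20):
  t0 = System.nanoTime()
  for _ in xrange(n):
    fn()
  print fn.__name__, "averaged %.2f ms" % ((System.nanoTime() - t0) / 1e6 / n)

for fn in (testJython, testASM, testASMLongs, testASMDoubles):
  timeit(fn)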
def test(red, green, blue, easy=True):
  saturation = let("red", red,
                   "green", green,
                   "blue", blue,
                   "max", maximum("red", "green", "blue"),
                   "min", minimum("red", "green", "blue"),
                   IF(EQ(0, "max"),
                      THEN(0),
                      ELSE(div(sub("max", "min"), "max"))))
  brightness = div(maximum(red, green, blue), 255.0)
  hue = IF(EQ(0, saturation),
           THEN(0),
           ELSE(let("red", red,
                    "green", green,
                    "blue", blue,
                    "max", maximum("red", "green", "blue"),
                    "min", minimum("red", "green", "blue"),
                    "range", sub("max", "min"),
                    "redc", div(sub("max", "red"), "range"),
                    "greenc", div(sub("max", "green"), "range"),
                    "bluec", div(sub("max", "blue"), "range"),
                    "hue", div(IF(EQ("red", "max"),
                                  THEN(sub("bluec", "greenc")),
                                  ELSE(IF(EQ("green", "max"),
                                          THEN(sub(add(2, "redc"), "bluec")),
                                          ELSE(sub(add(4, "greenc"), "redc"))))),
                               6),
                    IF(LT("hue", 0),
                       THEN(add("hue", 1)),
                       ELSE("hue")))))
  #print hierarchy(hue)
  #print "hue view:", hue.view(FloatType()).iterationOrder()
  if easy:
    # About 26 ms
    """
    hsb = Views.stack( hue.view( FloatType() ),
                       saturation.view( FloatType() ),
                       brightness.view( FloatType() ) )
    """
    # About 13 ms: half! Still much worse than plain ImageJ,
    # but the source images are iterated 4 times, rather than just once,
    # and the saturation is computed twice,
    # and the min and max are computed 3 and 4 times, respectively.
    hsb = Views.stack(hue.viewDouble(FloatType()),
                      saturation.viewDouble(FloatType()),
                      brightness.viewDouble(FloatType()))
    """
    # Even worse: ~37 ms
    width, height = rgb.dimension(0), rgb.dimension(1)
    h = compute(hue).into(ArrayImgs.floats([width, height]))
    s = compute(saturation).into(ArrayImgs.floats([width, height]))
    b = compute(brightness).into(ArrayImgs.floats([width, height]))
    hsb = Views.stack( h, s, b )
    """
    imp = IL.wrap(hsb, "HSB view")
  else:
    # Tested it: takes more time (~40 ms vs 26 ms above)
    width, height = rgb.dimension(0), rgb.dimension(1)
    hb = zeros(width * height, 'f')
    sb = zeros(width * height, 'f')
    bb = zeros(width * height, 'f')
    h = ArrayImgs.floats(hb, [width, height])
    s = ArrayImgs.floats(sb, [width, height])
    b = ArrayImgs.floats(bb, [width, height])
    #print "ArrayImg:", b.iterationOrder()
    ImgUtil.copy(ImgView.wrap(hue.view(FloatType()), None), h)
    ImgUtil.copy(ImgView.wrap(saturation.view(FloatType()), None), s)
    ImgUtil.copy(ImgView.wrap(brightness.view(FloatType()), None), b)
    stack = ImageStack(width, height)
    stack.addSlice(FloatProcessor(width, height, hb, None))
    stack.addSlice(FloatProcessor(width, height, sb, None))
    stack.addSlice(FloatProcessor(width, height, bb, None))
    imp = ImagePlus("hsb", stack)
  return imp
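A usage sketch: the red, green and blue arguments are per-channel views of an ARGB image, which Converters.argbChannel provides. This assumes an RGB image is open in Fiji; note the rgb global referenced inside test must be this wrapped image:

from ij import IJ
from net.imglib2.converter import Converters
from net.imglib2.img.display.imagej import ImageJFunctions as IL

rgb = IL.wrap(IJ.getImage()) # an RGB ImagePlus as an Img of ARGBType
red = Converters.argbChannel(rgb, 1)
green = Converters.argbChannel(rgb, 2)
blue = Converters.argbChannel(rgb, 3)
test(red, green, blue, easy=True).show()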
# Write the IFDs and image data: one IFD plus one bit-packed plane per section
try:
  for z in xrange(img.dimension(2)):
    # Write NumDirEntries as 2 bytes: the number of tags
    ra.writeShort(8) # 7 in the tags dict plus tag 273, added later
    # Write each tag as 12 bytes each:
    # First all non-changing tags, constant for all IFDs
    ra.write(tags_bytes) # size of the IFD dict in number of bytes
    # Then the variable 273 tag: the offset to the image data array,
    # just after the IFD definition
    ra.write(asBytes(273, (9, 4, offset + n_bytes_IFD)))
    # Write NextIFDOffset as 4 bytes
    offset += n_bytes_IFD + tags[279][2] # i.e. StripByteCounts: the size of the image data in number of bytes
    ra.writeInt(0 if img.dimension(2) - 1 == z else offset)
    # Write the image plane.
    # The long[] array doesn't necessarily end sharply at image plane boundaries,
    # therefore each plane must first be copied into its own 2D ArrayImg of BitType
    ImgUtil.copy(ImgView.wrap(Views.hyperSlice(img, 2, z), None), plane_img)
    # Each long stores 64 bits, but from right to left, and we need left to right
    # (the 8 bytes of the 64-bit long are already left to right in little endian)
    longbits = array(imap(Long.reverse, plane_array), 'l')
    bb.rewind() # reset the position to zero
    bb.asLongBuffer().put(longbits) # a LongBuffer view of the ByteBuffer: writes into the ByteBuffer
    ra.write(bb.array())
finally:
  ra.close()

# Now read the file back as a stack using lib.io.TIFFSlices
slices = TIFFSlices(filepath, types={1: TIFFSlices.types[64][:2] + (BitType,)})
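On the Long.reverse call above: ImgLib2's LongArray-backed bit images fill each long from the least significant bit upward, whereas TIFF's default FillOrder puts the leftmost pixel in the most significant bit of each byte, hence the per-long bit reversal. A one-line check in the interpreter:

from java.lang import Long

print Long.toBinaryString(Long.reverse(1)) # bit 0 moves to bit 63: '1' followed by 63 '0's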
def get(self, path):
  img = self.klb_loader.load(path)
  return ImgView.wrap(
    viewTransformed(img, getCalibration(img), self.path_transforms[path]),
    img.factory())
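Here viewTransformed and getCalibration are helpers defined elsewhere in the script. A hypothetical sketch of what viewTransformed could look like, following the same view pattern as the get() methods above (whether the per-path transform must be inverted depends on how path_transforms was built):

from net.imglib2.view import Views
from net.imglib2.realtransform import RealViews, Scale3D
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory

def viewTransformed(img, calibration, transform):
  # Compose the registration transform with a scaling into calibrated space
  affine = transform.copy() # assume one AffineTransform3D per path
  affine.concatenate(Scale3D(calibration[0], calibration[1], calibration[2]))
  imgI = Views.interpolate(Views.extendZero(img), NLinearInterpolatorFactory())
  imgT = RealViews.transform(imgI, affine)
  # Bounds of the original image, scaled by the calibration
  maxC = [int(img.dimension(d) * cal) - 1 for d, cal in enumerate(calibration)]
  return Views.interval(imgT, [0, 0, 0], maxC)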