Example #1
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight() != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far less memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg
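
# A single-threaded alternative sketch: if the createBiConsumerTypeSet helper used
# above is not available, an equivalent pixel-wise copy can be done with paired
# cursors. This assumes source and target are IterableIntervals with the same flat
# iteration order (as imgMinMax and aimg are assumed to be here); LoopBuilder, as
# used above, does not need that assumption.
def copySingleThreaded(src, dst):
    cs = src.cursor()
    cd = dst.cursor()
    while cs.hasNext():
        cd.next().set(cs.next())

# e.g.: copySingleThreaded(imgMinMax, aimg)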
Example #2
# Imports assumed by this snippet
from java.io import RandomAccessFile
from java.nio import ByteBuffer, ByteOrder
from jarray import zeros
from net.imglib2.img.array import ArrayImgs
import operator

def readUnsignedShorts(path, dimensions, header=0, return_array=False, byte_order=ByteOrder.LITTLE_ENDIAN):
  """ Read a file as an ArrayImg of UnsignedShortType """
  size = reduce(operator.mul, dimensions)
  ra = RandomAccessFile(path, 'r')
  try:
    if header < 0:
      # Interpret from the end: useful for files with variable header lengths
      # such as some types of uncompressed TIFF formats
      header = ra.length() + header
    ra.skipBytes(header)
    bytes = zeros(size * 2, 'b')
    ra.read(bytes)
    shorts = zeros(size, 'h') # h is for short
    ByteBuffer.wrap(bytes).order(byte_order).asShortBuffer().get(shorts)
    return shorts if return_array else ArrayImgs.unsignedShorts(shorts, dimensions)
  finally:
    ra.close()
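
# Hypothetical usage sketch for readUnsignedShorts: the file path and dimensions
# below are placeholders, not values from the original script.
raw_path = "/path/to/volume.raw"  # placeholder: a headerless 16-bit little-endian raw file
imgRaw = readUnsignedShorts(raw_path, [512, 512, 100])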

def twoStep(index=0):
    # The current way. Note: 'klb', 'filepaths', 'cmIsotropicTransforms', 'roi',
    # 'fineTransformsPostROICrop' and the helper functions used below are defined
    # in the surrounding script.
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0],
                                        roi[1]))  # bounded: crop with ROI
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
Example #4
def projectLastDimension(img, showEarly=False):
    """
    Project the last dimension with a maximum reduction,
    e.g. a 4D image becomes a 3D maximum-intensity projection.
    """
    last_dimension = img.numDimensions() - 1
    # The collapsed image
    imgC = ArrayImgs.unsignedShorts(
        [img.dimension(d) for d in xrange(last_dimension)])

    if showEarly:
        showStack(
            imgC,
            title="projected")  # show it early, will be updated progressively

    if img.dimension(last_dimension) > 10:
        # one by one
        print "One by one"
        for i in xrange(img.dimension(last_dimension)):
            print i
            compute(maximum(imgC, Views.hyperSlice(img, last_dimension,
                                                   i))).into(imgC)
    else:
        # Each sample of imgV is a virtual vector over all time frames at that 3D coordinate:
        imgV = Views.collapseReal(img)
        # Reduce each vector to a single scalar, using a Converter
        # The Converter class
        reduce_max = makeCompositeToRealConverter(
            reducer_class=Math,
            reducer_method="max",
            reducer_method_signature="(DD)D")
        img3DC = convert(imgV, reduce_max.newInstance(),
                         img.randomAccess().get().getClass())
        ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgC)

    return imgC
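
# Hypothetical usage sketch: project the 4th dimension (e.g. time) of a 4D image
# down to 3D. Helpers such as showStack, compute, maximum, convert and
# makeCompositeToRealConverter are assumed to be provided by the surrounding library.
img4d = ArrayImgs.unsignedShorts([256, 256, 64, 20])
projected3d = projectLastDimension(img4d, showEarly=False)
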
    def getPixels(self, n):
        # 'n' is 1-based
        # Target 2D array img to copy data into
        aimg = ArrayImgs.unsignedShorts(self.dimensions[0:2])
        # The number of slices of the 3D volume of a single timepoint
        nZ = self.img4d.dimension(2)
        # The slice index as if there were a single channel
        slice_index = int((n - 1) / 2)  # 0-based, of the whole 4D series
        local_slice_index = slice_index % nZ  # 0-based, of the timepoint 3D volume
        timepoint_index = int(slice_index / nZ)  # Z blocks
        if 1 == n % 2:
            # Odd slice index: image channel
            fixedT = Views.hyperSlice(self.img4d, 3, timepoint_index)
            fixedZ = Views.hyperSlice(fixedT, 2, local_slice_index)
            # 'w' is a cursor-to-cursor pixel copier defined in the enclosing scope
            w.copy(fixedZ.cursor(), aimg.cursor())
        else:
            # Even slice index: spheres channel
            # 'radius', 'inside' and 'outside' are defined in the enclosing scope
            sd = SpheresData(self.kdtrees[timepoint_index], radius, inside,
                             outside)
            volume = Views.interval(Views.raster(sd), self.dimensions3d)
            plane = Views.hyperSlice(volume, 2, local_slice_index)
            w.copy(plane.cursor(), aimg.cursor())
        #
        return aimg.update(None).getCurrentStorageArray()
from random import random
from net.imglib2.algorithm.math import ImgMath
from net.imglib2.util import ImgUtil
from net.imglib2.img import ImgView
from net.imglib2.img.array import ArrayImgs
from net.imglib2.realtransform import AffineTransform3D
import sys
sys.path.append("/home/albert/lab/scripts/python/imagej/IsoView-GCaMP/")
from lib.util import timeit

roi = (
    [1, 228, 0],  # top-left coordinates
    [1 + 406 - 1, 228 + 465 - 1,
     0 + 325 - 1])  # bottom-right coordinates (inclusive, hence the -1)

dimensions = [maxC - minC + 1 for minC, maxC in zip(roi[0], roi[1])]

imgU = ArrayImgs.unsignedShorts(dimensions)
imgF = ArrayImgs.floats(dimensions)
#c = imgF.cursor()
#while c.hasNext():
#  c.next().set(random() * 65535)
ImgMath.compute(ImgMath.number(17)).into(imgF)
ImgMath.compute(ImgMath.img(imgF)).into(imgU)
aff = AffineTransform3D()
"""
aff.set(1, 0, 0, 0,
        0, 1, 0, 0,
        0, 0, 1, 0)
"""
aff.set(*[
    0.9999949529841275, -0.0031770224721305684, 2.3118912942710207e-05,
    -1.6032353998500826, 0.003177032139125933, 0.999994860398559,
Example #7
from net.imglib2.realtransform import RealViews, AffineTransform3D
from net.imglib2.interpolation.randomaccess import NLinearInterpolatorFactory
from net.imglib2.util import Intervals
# Imports assumed by this snippet
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.img.array import ArrayImgs
from net.imglib2.view import Views
from net.imglib2.algorithm.math import ImgMath
from net.imglib2.algorithm.math.ImgMath import img as ImgSource  # ImgMath.img wraps a RandomAccessibleInterval as a source term
from ij import IJ

img = IL.wrap(IJ.getImage())

# Cut out a cube
img1 = Views.zeroMin(
    Views.interval(img, [39, 49, 0], [39 + 378 - 1, 49 + 378 - 1, 378 - 1]))
print [img1.dimension(d) for d in xrange(img1.numDimensions())]

# Rotate the cube on the Y axis to the left
img2 = Views.rotate(img1, 2, 0)

# copy into ArrayImg
img1a = ArrayImgs.unsignedShorts([378, 378, 378])
ImgMath.compute(ImgSource(img1)).into(img1a)

img2a = ArrayImgs.unsignedShorts([378, 378, 378])
ImgMath.compute(ImgSource(img2)).into(img2a)

img1 = img1a
img2 = img2a

IL.wrap(img1, "cube").show()
IL.wrap(img2, "cube rotated").show()

# Now register them

# IMPORTANT PARAMETERS
raw_calibration = [1.0, 1.0, 1.0]  # micrometers per pixel
# 2022 I2K example
# Albert Cardona 2022
# Create an ImgLib2 ArrayImg

# 1. With ArrayImgs: trivial
from net.imglib2.img.array import ArrayImgs

img = ArrayImgs.unsignedShorts([512, 512])

# Or reusing the pixel array from e.g., an open ImageJ image:
from net.imglib2.img.array import ArrayImgs
from ij import IJ
from ij.process import ShortProcessor

imp = IJ.getImage()  # most recently activated ImageJ image window
ip = imp.getProcessor()
if isinstance(ip, ShortProcessor):
    width, height = imp.getWidth(), imp.getHeight()
    pixelsU16 = ip.getPixels()
    img = ArrayImgs.unsignedShorts(pixelsU16, [width, height])

# Or creating a new pixel array from scratch
from net.imglib2.img.array import ArrayImgs
from jarray import zeros

pixelsU16 = zeros(512 * 512, 'h')  # 'h' means short[]
img = ArrayImgs.unsignedShorts(pixelsU16, [512, 512])

# 2. With ArrayImgFactory: unnecessary, use ArrayImgs
from net.imglib2.img.array import ArrayImgFactory
from net.imglib2.type.numeric.integer import UnsignedShortType
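
# A minimal completion sketch (the snippet is cut short above): assuming the
# type-based ArrayImgFactory constructor, the equivalent of the ArrayImgs call is:
factory = ArrayImgFactory(UnsignedShortType())
img = factory.create([512, 512])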
Example #9
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
  last_dimension = img.numDimensions() -1

  if "1by1" == strategy:
    exe = newFixedThreadPool()
    try:
      n_threads = exe.getCorePoolSize()
      imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]
      
      def mergeMax(img1, img2, imgT):
        return compute(maximum(img1, img2)).into(imgT)

      def hyperSlice(index):
        return Views.hyperSlice(img, last_dimension, index)

      # The first n_threads mergeMax:
      futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
                 for i in xrange(n_threads)]
      # As soon as one finishes, merge it with the next available hyperSlice
      next = n_threads
      while len(futures) > 0: # i.e. not empty
        imgT = futures.pop(0).get()
        if next < img.dimension(last_dimension):
          futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
          next += 1
        else:
          # Run out of hyperSlices to merge
          if 0 == len(futures):
            return imgT # done
          # Merge imgT to each other until none remain
          futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
    finally:
      exe.shutdownNow()
  else:
    # By chunks
    imglibtype =  img.randomAccess().get().getClass()
    # The Converter class
    reduce_max = makeCompositeToRealConverter(reducer_class=Math,
                                              reducer_method="max",
                                              reducer_method_signature="(DD)D")
    if chunk_size > 0:
      # map reduce approach
      exe = newFixedThreadPool()
      try:
        def projectMax(img, minC, maxC, reduce_max):
          # Dimensions of the projected chunk: all dimensions except the last one
          imgA = ArrayImgs.unsignedShorts([maxC[d] - minC[d] + 1 for d in xrange(last_dimension)])
          ImgUtil.copy(ImgView.wrap(convert(Views.collapseReal(Views.interval(img, minC, maxC)), reduce_max.newInstance(), imglibtype), img.factory()), imgA)
          return imgA
        
        # The min and max coordinates of all dimensions except the last one
        minCS = [0 for d in xrange(last_dimension)]
        maxCS = [img.dimension(d) -1 for d in xrange(last_dimension)]

        # Process every chunk in parallel
        futures = [exe.submit(Task(projectMax, img, minCS + [offset],
                                   maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) - 1],
                                   reduce_max))
                   for offset in xrange(0, img.dimension(last_dimension), chunk_size)]
        
        # Merge the chunk projections pairwise into the first one
        return reduce(lambda imgM, f: compute(maximum(imgM, f.get())).into(imgM),
                      futures[1:], futures[0].get())
      finally:
        exe.shutdownNow()
    else:
      # One chunk: all at once
      # Each sample of the collapsed view is a virtual vector over all frames of the last dimension
      # Reduce each vector to a single scalar, using a Converter
      img3DC = convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
      imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
      ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
      return imgA
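
# Hypothetical usage sketch: max-project the last (time) axis of a 4D (x, y, z, t)
# image with either strategy. Helpers such as newFixedThreadPool, Task, compute,
# maximum, convert and makeCompositeToRealConverter are assumed to come from the
# surrounding library.
img4d = ArrayImgs.unsignedShorts([64, 64, 8, 100])
proj1 = maxProjectLastDimension(img4d, strategy="1by1")
proj2 = maxProjectLastDimension(img4d, strategy="chunks", chunk_size=10)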
Example #10
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.view import Views
from net.imglib2.img.array import ArrayImgs
from net.imglib2.converter import Converters, RealFloatConverter
from ij import IJ
from ij.process import ByteProcessor, ShortProcessor, FloatProcessor


imp = IJ.getImage()
dimensions = [imp.getWidth(), imp.getHeight()]
ip = imp.getProcessor()
pixels = ip.getPixels()

# In practice, you never want to do this below,
# and instead you'd use the built-in wrapper: ImageJFunctions.wrap(imp)
# This is merely for illustration of how to use ArrayImgs with an existing pixel array
if isinstance(ip, ByteProcessor):
  img1 = ArrayImgs.unsignedBytes(pixels, dimensions)
elif isinstance(ip, ShortProcessor):
  img1 = ArrayImgs.unsignedShorts(pixels, dimensions)
elif isinstance(ip, FloatProcessor):
  img1 = ArrayImgs.floats(pixels, dimensions)
else:
  print "Can't handle image of type:", type(ip).getName()


# An empty image of float[]
img2 = ArrayImgs.floats(dimensions)

# View it as RandomAccessibleInterval<FloatType> by converting on the fly
# using a generic RealType to FloatType converter
floatView = Converters.convertRAI(img1, RealFloatConverter(), FloatType())

# The above 'floatView' can be used as an image: one that gets always converted on demand.
# If you only have to iterate over the pixels just once, there's no need to create a new image.
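
# A hedged sketch of the two uses described above: either materialize the converted
# view into img2 (when it will be read many times), or iterate it just once on the fly.
from net.imglib2.util import ImgUtil
from net.imglib2.img import ImgView

# Materialize: copy the on-the-fly converted pixels into the float[]-backed img2
ImgUtil.copy(ImgView.wrap(floatView, img2.factory()), img2)

# Or iterate once without creating any new image, e.g. to sum all pixel values
total = 0.0
for t in Views.iterable(floatView):
  total += t.getRealFloat()
print "Sum of pixels:", total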
Example #11
def loadUnsignedShort(filepath):
    imp = loadImp(filepath)
    return ArrayImgs.unsignedShorts(
        imp.getProcessor().getPixels(),
        [imp.getWidth(), imp.getHeight()])
Example #12
def asArrayImg(img1):
  dimensions1 = [img1.dimension(d) for d in xrange(img1.numDimensions())]
  img2 = ArrayImgs.unsignedShorts(dimensions1)
  ImgMath.compute(ImgSource(img1)).into(img2)
  return img2
def loadUnsignedShort(filepath):
    imp = loadImp(filepath)  # an instance of an ij.ImagePlus
    return ArrayImgs.unsignedShorts(
        imp.getProcessor().getPixels(),
        [imp.getWidth(), imp.getHeight()])