def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
    """Crop 'imp' to 'interval', contrast-normalize with CLAHE, apply the
    affine transform matrices[index], and materialize the result as an
    8-bit ArrayImg (pixel values mapped from the auto-adjusted display range).

    interval: 2D interval to crop from the image.
    invert: when True, invert the cropped processor before CLAHE.
    blockRadius, n_bins, slope: CLAHE parameters.
    matrices: list of 2D affine matrices, indexed by 'index'.
    copy_threads: number of threads for the final multi-threaded copy.
    imp: the ImagePlus holding a ShortProcessor.
    """
    proc = imp.getProcessor()  # ShortProcessor
    proc.setRoi(interval.min(0), interval.min(1),
                interval.max(0) - interval.min(0) + 1,
                interval.max(1) - interval.min(1) + 1)
    proc = proc.crop()
    if invert:
        proc.invert()
    # CLAHE: far less memory requirements than NormalizeLocalContrast, and faster.
    CLAHE.run(ImagePlus("", proc), blockRadius, n_bins, slope, None)
    minimum, maximum = autoAdjust(proc)

    # Transform and convert image to 8-bit, mapping to display range
    source = ArrayImgs.unsignedShorts(proc.getPixels(),
                                      [proc.getWidth(), proc.getHeight()])
    proc = None  # allow the processor to be collected
    affine = AffineTransform2D()
    affine.set(matrices[index])
    interpolated = Views.interpolate(Views.extendZero(source),
                                     NLinearInterpolatorFactory())
    transformed = Views.zeroMin(
        Views.interval(RealViews.transform(interpolated, affine), source))
    as8bit = convert(transformed,
                     RealUnsignedByteConverter(minimum, maximum),
                     UnsignedByteType)
    target = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(source))
    ImgUtil.copy(ImgView.wrap(as8bit, target.factory()), target, copy_threads)
    return target
def test(img):
    """Transform 'img' with the module-level 'aff' over 'interval' and
    materialize the view into a float ArrayImg of 'dimensions'."""
    view = TransformView.transformView(img, aff, interval,
                                       MultiViewDeconvolution.minValueImg,
                                       MultiViewDeconvolution.outsideValueImg,
                                       1)  # 1: linear interpolation
    target = ArrayImgs.floats(dimensions)
    ImgUtil.copy(ImgView.wrap(view, target.factory()), target)
Beispiel #3
0
 def updatePixels(self):
     """Copy the current Z slice of self.img3D, bounded by self.interval2D,
     into this processor's pixel array."""
     slice2D = Views.hyperSlice(self.img3D, 2, self.indexZ)
     view = Views.interval(Views.extendZero(slice2D), self.interval2D)
     dims = [self.interval2D.dimension(0),
             self.interval2D.dimension(1)]
     target = ArrayImgs.floats(self.getPixels(), dims)
     ImgUtil.copy(view, target)
Beispiel #4
0
 def projectMax(img, minC, maxC, reduce_max):
     """Reduce the last dimension of the [minC, maxC] interval of 'img'
     to a single scalar per coordinate (e.g. the max over that dimension)
     and materialize the result as an unsigned-short ArrayImg.

     img: the source image.
     minC, maxC: inclusive min/max coordinates of the interval to project.
     reduce_max: a Converter factory whose newInstance() reduces each
                 collapsed vector to one scalar of 'imglibtype'.
     Returns the materialized, projected ArrayImg.
     """
     # Each sample of the collapsed view is a virtual vector over the last
     # dimension, reduced to one scalar by the converter.
     collapsed = convert(
         Views.collapseReal(Views.interval(img, minC, maxC)),
         reduce_max.newInstance(), imglibtype)
     # FIX: was ArrayImgs.unsignedSorts(...) (a typo) over an undefined
     # 'imgC'; allocate from the dimensions of the collapsed view instead.
     imgA = ArrayImgs.unsignedShorts(
         Intervals.dimensionsAsLongArray(collapsed))
     ImgUtil.copy(ImgView.wrap(collapsed, img.factory()), imgA)
     return imgA
 def makeCell(self, index):
     """Load the image for 'index', apply its 2D affine transform, and
     return the materialized result wrapped as a Cell at Z position 'index'."""
     self.preloadCells(index)  # preload others in the background
     source = self.loadImg(self.filepaths[index])
     transform = AffineTransform2D()
     transform.set(self.matrices[index])
     interpolated = Views.interpolate(Views.extendZero(source),
                                      NLinearInterpolatorFactory())
     bounded = Views.zeroMin(
         Views.interval(RealViews.transform(interpolated, transform),
                        self.interval))
     materialized = source.factory().create(self.interval)
     ImgUtil.copy(ImgView.wrap(bounded, materialized.factory()), materialized)
     return Cell(self.cell_dimensions, [0, 0, index],
                 materialized.update(None))
Beispiel #6
0
 def prepare(index):
     """Prepare the img at 'index' for deconvolution:
     0. Transform in one step.
     1. Ensure its pixel values conform to expectations (no zeros inside).
     2. Copy it into an ArrayImg for faster recurrent retrieval of same pixels.
     Returns (index, prepared ArrayImg of FloatType).
     """
     syncPrint("Preparing %s CM0%i for deconvolution" % (tm_dirname, index))
     img = klb_loader.get(filepaths[index])  # of UnsignedShortType
     imgP = prepareImgForDeconvolution(img, transforms[index],
                                       target_interval)  # returns of FloatType
     # Materialize the transformed view into an ArrayImg for best
     # performance in deconvolution, copying in parallel.
     imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
     ImgUtil.copy(imgP, imgA, n_threads / 2)
     syncPrint("--Completed preparing %s CM0%i for deconvolution" %
               (tm_dirname, index))
     # Drop references early to help the garbage collector.
     imgP = None
     img = None
     return (index, imgA)
def twoStep(index=0):
    """The current way: transform, crop with the ROI, materialize, then
    apply the fine post-crop transform in a second step, and show the result."""
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    interpolated = Views.interpolate(Views.extendZero(img),
                                     NLinearInterpolatorFactory())
    transformed = RealViews.transform(interpolated,
                                      cmIsotropicTransforms[index])
    # bounded: crop with ROI
    cropped = Views.zeroMin(Views.interval(transformed, roi[0], roi[1]))
    croppedA = ArrayImgs.unsignedShorts(
        Intervals.dimensionsAsLongArray(cropped))
    ImgUtil.copy(ImgView.wrap(cropped, croppedA.factory()), croppedA)
    fine = affine3D(fineTransformsPostROICrop[index]).inverse()
    bounds = FinalInterval([0, 0, 0],
                           [cropped.dimension(d) - 1 for d in xrange(3)])
    imgP = prepareImgForDeconvolution(croppedA, fine, bounds)
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
def oneStep(index=0):
    """Combine the isotropic transform, the ROI-crop translation and the
    fine post-crop transform into a single affine, then transform,
    materialize and show the result in one step."""
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    isotropic = cmIsotropicTransforms[index]
    # Translation accounting for the ROI crop
    toROIOrigin = affine3D(
        [1, 0, 0, -roi[0][0], 0, 1, 0, -roi[0][1], 0, 0, 1, -roi[0][2]])
    fine = affine3D(fineTransformsPostROICrop[index]).inverse()
    aff = AffineTransform3D()
    aff.set(isotropic)
    aff.preConcatenate(toROIOrigin)
    aff.preConcatenate(fine)
    # Final interval is now rooted at 0,0,0 given that the transform includes the translation
    bounds = FinalInterval([0, 0, 0],
                           [maxC - minC for minC, maxC in izip(roi[0], roi[1])])
    imgP = prepareImgForDeconvolution(img, aff, bounds)
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "one step index %i" % index).show()
Beispiel #9
0
def projectLastDimension(img, showEarly=False):
    """
  Project the last dimension, e.g. a 4D image becomes a 3D image,
  using the provided reducing function (e.g. min, max, sum).
  """
    last_dimension = img.numDimensions() - 1
    # The collapsed image
    imgC = ArrayImgs.unsignedShorts(
        [img.dimension(d) for d in xrange(last_dimension)])

    if showEarly:
        showStack(
            imgC,
            title="projected")  # show it early, will be updated progressively

    if img.dimension(last_dimension) > 10:
        # one by one
        print "One by one"
        for i in xrange(img.dimension(last_dimension)):
            print i
            compute(maximum(imgC, Views.hyperSlice(img, last_dimension,
                                                   i))).into(imgC)
    else:
        # Each sample of img3DV is a virtual vector over all time frames at that 3D coordinate:
        imgV = Views.collapseReal(img)
        # Reduce each vector to a single scalar, using a Converter
        # The Converter class
        reduce_max = makeCompositeToRealConverter(
            reducer_class=Math,
            reducer_method="max",
            reducer_method_signature="(DD)D")
        img3DC = convert(imgV, reduce_max.newInstance(),
                         img.randomAccess().get().getClass())
        ImgUtil.copy(ImgView.wrap(imgV, img.factory()), imgC)

    return imgC
Beispiel #10
0
        # Write NumDirEntries as 2 bytes: the number of tags
        ra.writeShort(8)  # 7 in tags dict plus tag 273 added later
        # Write each tag as 12 bytes each:
        # First all non-changing tags, constant for all IFDs
        ra.write(tags_bytes)
        # Size of IFD dict in number of bytes
        # Then the variable 273 tag: the offset to the image data array, just after the IFD definition
        ra.write(asBytes(273, (9, 4, offset + n_bytes_IFD)))
        # Write NextIFDOffset as 4 bytes
        offset += n_bytes_IFD + tags[279][
            2]  # i.e. StripByteCounts: the size of the image data in number of bytes
        ra.writeInt(0 if img.dimension(2) - 1 == z else offset)
        # Write image plane
        # The long[] array doesn't necessarily end sharply at image plane boundaries
        # Therefore must copy plane into another, 2D ArrayImg of bit type
        ImgUtil.copy(ImgView.wrap(Views.hyperSlice(img, 2, z), None),
                     plane_img)
        # Each long stores 64 bits but from right to left, and we need left to right
        # (the 8 bytes of the 64-bit long are already left to right in little endian)
        longbits = array(imap(Long.reverse, plane_array), 'l')
        bb.rewind()  # bring mark to zero
        bb.asLongBuffer().put(
            longbits
        )  # a LongBuffer view of the ByteBuffer, writes to the ByteBuffer
        ra.write(bb.array())
finally:
    ra.close()

# Now read the file back as a stack using lib.io.TIFFSlices
# Map bit depth 1 to a slice type built from the 64-bit entry's first two
# fields plus BitType (each 64-bit long packs 64 binary pixels).
slices = TIFFSlices(filepath,
                    types={1: TIFFSlices.types[64][:2] + (BitType, )})
# Lazily-loaded, cached cell image backed by the TIFF slices.
img2 = slices.asLazyCachedCellImg()
Beispiel #11
0
def testJython():
    """Copy img1 into img2 through an on-the-fly unsigned-byte-to-float
    converting view (pure Jython sampler converter)."""
    floatView = Converters.convertRandomAccessibleIterableInterval(
        img1, UnsignedByteToFloatSamplerConverter())
    ImgUtil.copy(ImgView.wrap(floatView, img1.factory()), img2)
Beispiel #12
0
def testASM():
    """Copy img1 into img2 through a converting view whose sampler converter
    class was generated with ASM."""
    converted = Converters.convertRandomAccessibleIterableInterval(
        img1, samplerClass.newInstance())
    ImgUtil.copy(ImgView.wrap(converted, img1.factory()), img2)
def testASMLongs():
    """Copy img1s into a fresh unsigned-long ArrayImg through an
    ASM-generated long sampler converter."""
    img2 = ArrayImgs.unsignedLongs(dimensions)
    # NOTE(review): wraps a view over img1s but borrows img1.factory() --
    # confirm the factory mismatch is intentional.
    longView = Converters.convertRandomAccessibleIterableInterval(
        img1s, sampler_conv_longs)
    ImgUtil.copy(ImgView.wrap(longView, img1.factory()), img2)
def testASMDoubles():
    """Copy img1 into a fresh double ArrayImg through an ASM-generated
    double sampler converter."""
    img2 = ArrayImgs.doubles(dimensions)
    doubleView = Converters.convertRandomAccessibleIterableInterval(
        img1, sampler_conv_doubles)
    ImgUtil.copy(ImgView.wrap(doubleView, img1.factory()), img2)
Beispiel #15
0
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
  """Maximum-intensity projection of the last dimension of img.

  strategy: "1by1" merges hyperslices pairwise in a thread pool;
            anything else collapses the last dimension with a Converter,
            in parallel chunks of chunk_size when chunk_size > 0,
            or all at once otherwise.
  Returns the projected image (one dimension fewer than img).
  """
  last_dimension = img.numDimensions() -1

  if "1by1" == strategy:
    exe = newFixedThreadPool()
    try:
      n_threads = exe.getCorePoolSize()
      # One scratch target per thread, with the last dimension dropped
      imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]

      def mergeMax(img1, img2, imgT):
        # Pixel-wise max of img1 and img2, written into imgT
        return compute(maximum(img1, img2)).into(imgT)

      def hyperSlice(index):
        return Views.hyperSlice(img, last_dimension, index)

      # The first n_threads mergeMax:
      futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
                 for i in xrange(n_threads)]
      # As soon as one finishes, merge it with the next available hyperSlice
      next = n_threads
      while len(futures) > 0: # i.e. not empty
        imgT = futures.pop(0).get()
        if next < img.dimension(last_dimension):
          futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
          next += 1
        else:
          # Run out of hyperSlices to merge
          if 0 == len(futures):
            return imgT # done
          # Merge the partial results with each other until none remain
          futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
    finally:
      exe.shutdownNow()
  else:
    # By chunks
    imglibtype = img.randomAccess().get().getClass()
    # The Converter class: reduces each collapsed vector to its max
    reduce_max = makeCompositeToRealConverter(reducer_class=Math,
                                              reducer_method="max",
                                              reducer_method_signature="(DD)D")
    if chunk_size > 0:
      # map reduce approach
      exe = newFixedThreadPool()
      try:
        def projectMax(img, minC, maxC, reduce_max):
          # FIX: was ArrayImgs.unsignedSorts(...) (typo) over an undefined
          # 'imgC'; allocate from the collapsed view's own dimensions.
          collapsed = convert(Views.collapseReal(Views.interval(img, minC, maxC)),
                              reduce_max.newInstance(), imglibtype)
          imgA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(collapsed))
          ImgUtil.copy(ImgView.wrap(collapsed, img.factory()), imgA)
          return imgA

        # The min and max coordinates of all dimensions except the last one
        minCS = [0 for d in xrange(last_dimension)]
        maxCS = [img.dimension(d) -1 for d in xrange(last_dimension)]

        # Process every chunk in parallel
        # FIX: pass reduce_max through to projectMax (it was missing).
        futures = [exe.submit(Task(projectMax, img, minCS + [offset],
                                   maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) -1],
                                   reduce_max))
                   for offset in xrange(0, img.dimension(last_dimension), chunk_size)]

        # FIX: 'futures' was misplaced inside into(...), and the reduce
        # accumulator mixed Future with Img; resolve all futures first,
        # then merge the chunk projections pairwise into the first one.
        chunks = [f.get() for f in futures]
        return reduce(lambda a, b: compute(maximum(a, b)).into(a), chunks)
      finally:
        exe.shutdownNow()
    else:
      # One chunk: all at once
      # Each sample of the collapsed view is a virtual vector over all time
      # frames at that coordinate; reduce each vector to a single scalar.
      img3DC = convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
      imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
      # FIX: copy the reduced view img3DC (was an undefined name 'imgV').
      ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
      return imgA
def test(red, green, blue, easy=True):
    """Build HSB channel views from the red/green/blue channel operands and
    wrap them as an ImagePlus.

    red, green, blue: channel operands usable by the img-math functions
      (let, maximum, minimum, div, sub, add, IF/THEN/ELSE, EQ, LT).
    easy: when True, stack lazy viewDouble views (the faster path measured
      below); when False, compute into pre-allocated float arrays and build
      an ImageJ FloatProcessor stack manually.
    Returns the resulting ImagePlus.
    """
    # Saturation = (max - min) / max, or 0 when max == 0.
    saturation = let(
        "red", red, "green", green, "blue", blue, "max",
        maximum("red", "green", "blue"), "min",
        minimum("red", "green", "blue"),
        IF(EQ(0, "max"), THEN(0), ELSE(div(sub("max", "min"), "max"))))

    # Brightness: max channel scaled by 255 (presumably 8-bit channels -- confirm).
    brightness = div(maximum(red, green, blue), 255.0)

    # Hue from the dominant channel, normalized into [0, 1); 0 when gray.
    hue = IF(
        EQ(0, saturation), THEN(0),
        ELSE(
            let(
                "red", red, "green", green, "blue", blue, "max",
                maximum("red", "green", "blue"), "min",
                minimum("red", "green", "blue"), "range", sub("max", "min"),
                "redc", div(sub("max", "red"), "range"), "greenc",
                div(sub("max", "green"), "range"), "bluec",
                div(sub("max", "blue"), "range"), "hue",
                div(
                    IF(
                        EQ("red", "max"), THEN(sub("bluec", "greenc")),
                        ELSE(
                            IF(EQ("green", "max"),
                               THEN(sub(add(2, "redc"), "bluec")),
                               ELSE(sub(add(4, "greenc"), "redc"))))), 6),
                IF(LT("hue", 0), THEN(add("hue", 1)), ELSE("hue")))))

    #print hierarchy(hue)

    #print "hue view:", hue.view( FloatType() ).iterationOrder()

    if easy:
        # About 26 ms
        """
    hsb = Views.stack( hue.view( FloatType() ),
                       saturation.view( FloatType() ),
                       brightness.view( FloatType() ) )
    """

        # About 13 ms: half! Still much worse than plain ImageJ,
        # but the source images are iterated 4 times, rather than just once,
        # and the saturation is computed twice,
        # and the min, max is computed 3 and 4 times, respectively.
        hsb = Views.stack(hue.viewDouble(FloatType()),
                          saturation.viewDouble(FloatType()),
                          brightness.viewDouble(FloatType()))
        """
    # Even worse: ~37 ms
    width, height = rgb.dimension(0), rgb.dimension(1)
    h = compute(hue).into(ArrayImgs.floats([width, height]))
    s = compute(saturation).into(ArrayImgs.floats([width, height]))
    b = compute(brightness).into(ArrayImgs.floats([width, height]))
    hsb = Views.stack( h, s, b )
    """

        imp = IL.wrap(hsb, "HSB view")
    else:
        # Tested it: takes more time (~40 ms vs 26 ms above)
        width, height = rgb.dimension(0), rgb.dimension(1)
        # Primitive float arrays shared between the ArrayImgs and the
        # FloatProcessors below, so the copies land directly in the stack.
        hb = zeros(width * height, 'f')
        sb = zeros(width * height, 'f')
        bb = zeros(width * height, 'f')
        h = ArrayImgs.floats(hb, [width, height])
        s = ArrayImgs.floats(sb, [width, height])
        b = ArrayImgs.floats(bb, [width, height])
        #print "ArrayImg:", b.iterationOrder()
        ImgUtil.copy(ImgView.wrap(hue.view(FloatType()), None), h)
        ImgUtil.copy(ImgView.wrap(saturation.view(FloatType()), None), s)
        ImgUtil.copy(ImgView.wrap(brightness.view(FloatType()), None), b)
        stack = ImageStack(width, height)
        stack.addSlice(FloatProcessor(width, height, hb, None))
        stack.addSlice(FloatProcessor(width, height, sb, None))
        stack.addSlice(FloatProcessor(width, height, bb, None))
        imp = ImagePlus("hsb", stack)
    return imp
Beispiel #17
0
            Views.extendZero(Converters.argbChannel(imgSlice1, i)),
            NLinearInterpolatorFactory()) for i in [1, 2, 3]
    ]
    # ARGBType 2D view of the transformed color channels
    imgSlice2 = Converters.mergeARGB(
        Views.stack(
            Views.interval(RealViews.transform(channel, transform),
                           sliceInterval)
            for channel in channels), ColorChannelOrder.RGB)
    slices2.append(imgSlice2)

# Transformed view: stack the per-slice transformed 2D views
viewImg2 = Views.stack(slices2)
# Materialized image: allocate an ARGB ArrayImg of the target interval's size
img2 = ArrayImgs.argbs(Intervals.dimensionsAsLongArray(interval2))
# Execute all the per-pixel view recipes by copying into the ArrayImg
ImgUtil.copy(viewImg2, img2)

imp4 = IL.wrap(img2, "imglib2-transformed RGB (pull)")
imp4.show()

# Fourth approach: pull (CORRECT!), and much faster (delegates pixel-wise operations
# to java libraries and delegates RGB color handling altogether)
# Defines a list of views (recipes, really) for transforming every stack slice
# and then materializes the view by copying it in a multi-threaded way into an ArrayImg.
# Now without separating the color channels: will use the NLinearInterpolatorARGB
# In practice, it's a tad slower than the third approach: also processes the alpha channel in ARGB
# even though we know it is empty. Its conciseness adds clarity and is a win.
"""
# Procedural code:
slices3 = []
for index in xrange(img1.dimension(2)):
Beispiel #18
0
pixels = ip.getPixels()  # the processor's backing primitive array

# In practice, you never want to do this below,
# and instead you'd use the built-in wrapper: ImageJFunctions.wrap(imp)
# This is merely for illustration of how to use ArrayImgs with an existing pixel array
if isinstance(ip, ByteProcessor):
  img1 = ArrayImgs.unsignedBytes(pixels, dimensions)
elif isinstance(ip, ShortProcessor):
  img1 = ArrayImgs.unsignedShorts(pixels, dimensions)
elif isinstance(ip, FloatProcessor):
  img1 = ArrayImgs.floats(pixels, dimensions)
else:
  # NOTE(review): type(ip).getName() is a Java-style call on what Jython
  # returns from type() -- confirm it resolves as intended.
  print "Can't handle image of type:", type(ip).getName()


# An empty image of float[]
img2 = ArrayImgs.floats(dimensions)

# View it as RandomAccessibleInterval<FloatType> by converting on the fly
# using a generic RealType to FloatType converter
floatView = Converters.convertRAI(img1, RealFloatConverter(), FloatType())

# The above 'floatView' can be used as an image: one that gets always converted on demand.
# If you only have to iterate over the pixels just once, there's no need to create a new image.
IL.show(floatView, "32-bit view of the 8-bit")

# Copy one into the other: both are of the same type
ImgUtil.copy(floatView, img2)

IL.show(img2, "32-bit copy")
img = IL.wrap(IJ.getImage())  # wrap the active ImageJ image as imglib2

pyramid = [img]  # level 0 is the image itself

# Create levels of a pyramid with interpolation
width = img.dimension(0)
min_width = 32  # stop once a level would be narrower than this

# Halve each dimension per level; the -0.25 shift offsets the sampling grid
# (presumably to center samples between source pixels -- confirm).
s = [0.5 for d in xrange(img.numDimensions())]
t = [-0.25 for d in xrange(img.numDimensions())]
while width > min_width:
    width /= 2
    imgE = Views.interpolate(Views.extendBorder(img),
                             NLinearInterpolatorFactory())
    # A scaled-down view of the imgR
    level = Views.interval(
        RealViews.transform(imgE, ScaleAndTranslation(s, t)),
        FinalInterval([
            int(img.dimension(d) * 0.5) for d in xrange(img.numDimensions())
        ]))
    # Create a new image for this level
    scaledImg = img.factory().create(level)  # of dimensions as of level
    ImgUtil.copy(level, scaledImg)  # copy the scaled down view into scaledImg
    pyramid.append(scaledImg)
    # Prepare for next iteration
    img = scaledImg  # for the dimensions of the level in the next iteration

for i, imgScaled in enumerate(pyramid):
    IL.wrap(imgScaled, str(i + 1)).show()
Beispiel #20
0
# Let's pick a pixel coordinate in 2D
pos[0] = 128
pos[1] = 200

ra = img.randomAccess()
ra.setPosition(pos)
t = ra.get()  # returns the Type class, which could be e.g. UnsignedByteType
# which provides access to the pixel at that position
print type(t)  # Print the Type class

# To print the pixel value, it's one level of indirection away, so do any of:
print t.get()  # the native primitive type, e.g., byte
print t.getRealFloat()
print t.getRealDouble()

# To copy two images that are compatible in their iteration order, use cursors:
cursor = img.cursor()
cursor2 = img2.cursor()
for t in cursor:
    cursor2.next().setReal(t.getRealFloat())

# The above is very slow in jython due to iteration overheads.
# Instead, do: (as fast as possible, even multithreaded)
ImgUtil.copy(img, img2)

ImageJFunctions.show(img2, "copied")

# For low-level operations in jython, you can inline small snippets of java code using the Weaver.
# Search for "Weaver" in the tutorial for several examples:
# https://syn.mrc-lmb.cam.ac.uk/acardona/fiji-tutorial