Example #1
 def get(self, index):
     img = CellLoader.klb.readFull(timepoint_paths[index]).getImg()
     # Each cell has "1" as its dimension in the last axis (time)
     # and index as its min coordinate in the last axis (time)
     return Cell(
         Intervals.dimensionsAsIntArray(img) + array([1], 'i'),
         Intervals.minAsLongArray(img) + array([index], 'l'),
         extractArrayAccess(img))
Example #2
def makeInterpolatedImage(img1, img2, weight):
    """ weight: float between 0 and 1 """
    edge_pix1 = findEdgePixels(img1)
    kdtree1 = KDTree(edge_pix1, edge_pix1)
    search1 = NearestNeighborSearchOnKDTree(kdtree1)
    edge_pix2 = findEdgePixels(img2)
    kdtree2 = KDTree(edge_pix2, edge_pix2)
    search2 = NearestNeighborSearchOnKDTree(kdtree2)
    img3 = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img1))
    c1 = img1.cursor()
    c2 = img2.cursor()
    c3 = img3.cursor()
    pos = zeros(img1.numDimensions(), 'l')
    while c3.hasNext():
        t1 = c1.next()
        t2 = c2.next()
        t3 = c3.next()
        sign1 = -1 if 0 == t1.get() else 1
        sign2 = -1 if 0 == t2.get() else 1
        search1.search(c1)
        search2.search(c2)
        value1 = sign1 * search1.getDistance() * weight
        value2 = sign2 * search2.getDistance() * (1 - weight)
        if value1 + value2 > 0:
            t3.setOne()
    return img3
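A minimal usage sketch, assuming img1 and img2 are binary 8-bit masks of equal dimensions and that findEdgePixels plus the ImgLib2 classes used above are already in scope; the weights and display calls are illustrative only.
# Usage sketch (hypothetical): blend two binary masks at a few weights and show them.
from net.imglib2.img.display.imagej import ImageJFunctions as IL

for w in [0.25, 0.5, 0.75]:
    blended = makeInterpolatedImage(img1, img2, w)
    IL.wrap(blended, "interpolated at weight %.2f" % w).show()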
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, copy_threads, index,
                                         imp):
        sp = imp.getProcessor()  # ShortProcessor
        sp.setRoi(interval.min(0), interval.min(1),
                  interval.max(0) - interval.min(0) + 1,
                  interval.max(1) - interval.min(1) + 1)
        sp = sp.crop()
        if invert:
            sp.invert()
        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far lower memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        imgMinMax = convert(imgT, RealUnsignedByteConverter(minimum, maximum),
                            UnsignedByteType)
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg,
                     copy_threads)
        img = imgI = imgA = imgT = imgMinMax = None
        return aimg
Example #4
def crop_along_one_axis(ops, data, intervals, axis_type):
    """Crop along a single axis using Views.

    Parameters
    ----------
    intervals : List with two values specifying the start and the end of the interval.
    axis_type : Along which axis to crop. Can be ["X", "Y", "Z", "TIME", "CHANNEL"]
    """

    axis = get_axis(axis_type)
    interval_start = [
        data.min(d) if d != data.dimensionIndex(axis) else intervals[0]
        for d in range(0, data.numDimensions())
    ]
    interval_end = [
        data.max(d) if d != data.dimensionIndex(axis) else intervals[1]
        for d in range(0, data.numDimensions())
    ]

    interval = interval_start + interval_end
    interval = Intervals.createMinMax(*interval)

    output = ops.run("transform.crop", data, interval, True)

    return output
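A hedged usage example: assuming ops, data and the get_axis helper come from the surrounding script, this crops the dataset to positions 10 through 50 along X.
# Usage sketch (assumes ops, data and get_axis are defined by the surrounding script)
cropped_x = crop_along_one_axis(ops, data, [10, 50], "X")
print "Cropped dimensions:", Intervals.dimensionsAsLongArray(cropped_x)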
Example #5
def test(iraf):

    # Test dimensions: should be the same as the one input image
    print "Dimensions:", Intervals.dimensionsAsLongArray(iraf)

    # Test Cursor
    c = iraf.cursor()
    pos = zeros(2, 'l')
    while c.hasNext():
        c.fwd()
        c.localize(pos)
        print "Cursor:", pos, "::", c.get()

    # Test RandomAccess
    ra = iraf.randomAccess()
    c = iraf.cursor()
    while c.hasNext():
        c.fwd()
        ra.setPosition(c)
        c.localize(pos)
        print "RandomAccess:", pos, "::", ra.get()

    # Test source img: should be untouched
    c = img.cursor()
    while c.hasNext():
        print "source:", c.next()

    # Test interval view: the middle 2x2 square
    v = Views.interval(iraf, [1, 1], [2, 2])
    IL.wrap(v, "+2 view").show()
Example #6
def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):
  """ Return a rotated view of the image, around the Z axis,
      with an expanded (or reduced) interval view so that all pixels are exactly included.

      img: a RandomAccessibleInterval
      angle: in degrees
  """
  cx = img.dimension(0) / 2.0
  cy = img.dimension(1) / 2.0
  toCenter = AffineTransform2D()
  toCenter.translate(-cx, -cy)
  rotation = AffineTransform2D()
  # Step 1: place origin of rotation at the center of the image
  rotation.preConcatenate(toCenter)
  # Step 2: rotate around the Z axis
  rotation.rotate(radians(angle))
  # Step 3: undo translation to the center
  rotation.preConcatenate(toCenter.inverse())
  rotated = RV.transform(Views.interpolate(extend(img),
                                           NLinearInterpolatorFactory()), rotation)
  if enlarge:
    # Bounds:
    bounds = repeat((sys.maxint, 0)) # initial upper- and lower-bound values  
                                     # for min, max to compare against  
    transformed = zeros(2, 'f')
    for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):
      rotation.apply(corner, transformed)
      bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))
                for (vmin, vmax), v in zip(bounds, transformed)]
    minC, maxC = map(list, zip(*bounds)) # transpose list of 2 pairs
                                         # into 2 lists of 2 values
    imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))
  else:
    imgRot = Views.interval(rotated, img)
  return imgRot
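A short usage sketch, assuming img is a 2D RandomAccessibleInterval and that ImageJFunctions is imported as IL, as in the other examples.
# Usage sketch: rotate by 30 degrees around the image center.
imgRot30 = rotatedView(img, 30.0)                      # enlarged so no pixel is lost
imgRot30same = rotatedView(img, 30.0, enlarge=False)   # keep the original interval
IL.wrap(imgRot30, "rotated 30 degrees").show()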
def twoStep(index=0):
    # The current way:
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    imgE = Views.extendZero(img)
    imgI = Views.interpolate(imgE, NLinearInterpolatorFactory())
    imgT = RealViews.transform(imgI, cmIsotropicTransforms[index])
    imgB = Views.zeroMin(Views.interval(imgT, roi[0],
                                        roi[1]))  # bounded: crop with ROI
    imgBA = ArrayImgs.unsignedShorts(Intervals.dimensionsAsLongArray(imgB))
    ImgUtil.copy(ImgView.wrap(imgB, imgBA.factory()), imgBA)
    imgP = prepareImgForDeconvolution(
        imgBA,
        affine3D(fineTransformsPostROICrop[index]).inverse(),
        FinalInterval([0, 0, 0], [imgB.dimension(d) - 1 for d in xrange(3)]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "two step").show()
 def translate(self, dx, dy):
     a = zeros(2, 'l')
     self.interval.min(a)
     width = self.cell_dimensions[0]
     height = self.cell_dimensions[1]
     x0 = max(0, min(a[0] + dx, self.img_dimensions[0] - width))
     y0 = max(0, min(a[1] + dy, self.img_dimensions[1] - height))
     self.interval = FinalInterval([x0, y0],
                                   [x0 + width - 1, y0 + height - 1])
     syncPrintQ(str(Intervals.dimensionsAsLongArray(self.interval)))
     self.cache.clear()
Example #9
 def projectMax(img, minC, maxC, reduce_max):
     # Dimensions of the projection: all axes except the last (collapsed) one
     imgA = ArrayImgs.unsignedShorts(
         [maxC[d] - minC[d] + 1 for d in xrange(len(minC) - 1)])
     ImgUtil.copy(
         ImgView.wrap(
             convert(
                 Views.collapseReal(
                     Views.interval(img, minC, maxC)),
                 reduce_max.newInstance(), imglibtype),
             img.factory()), imgA)
     return imgA
def makeImg(filepaths, pixelType, loadImg, img_dimensions, matrices,
            cropInterval, preload):
    dims = Intervals.dimensionsAsLongArray(cropInterval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]
    grid = CellGrid(voldims, cell_dimensions)
    cellGet = TranslatedSectionGet(filepaths,
                                   loadImg,
                                   matrices,
                                   img_dimensions,
                                   cell_dimensions,
                                   cropInterval,
                                   preload=preload)
    return LazyCellImg(grid, pixelType(), cellGet), cellGet
Example #11
 def crop(event):
   global cropped, cropped_imp
   coords = [int(float(tf.getText())) for tf in textfields]
   minC = [max(0, c) for c in coords[0:3]]
   maxC = [min(d -1, c) for d, c in izip(Intervals.dimensionsAsLongArray(images[0]), coords[3:6])]
   storeRoi(minC, maxC)
   print "ROI min and max coordinates"
   print minC
   print maxC
   cropped = [Views.zeroMin(Views.interval(img, minC, maxC)) for img in images]
   cropped_imp = showAsStack(cropped, title="cropped")
   cropped_imp.setDisplayRange(imp.getDisplayRangeMin(), imp.getDisplayRangeMax())
   if cropContinuationFn:
     cropContinuationFn(images, minC, maxC, cropped, cropped_imp)
def populateInstances(instances, synth_imgs, class_index, mins, maxs):
    # Populate the training data: create the filter bank for each feature image
    # by reading values from the interval defined by mins and maxs
    target = ArrayImgs.floats([width, height])
    interval = FinalInterval(mins, maxs)
    n_samples = Intervals.numElements(interval)
    for img in synth_imgs:
        vectors = [zeros(len(attributes), 'd') for _ in xrange(n_samples)]
        for k, op in enumerate(filterBank(img, sumType=DoubleType())):
            imgOp = compute(op).into(target)
            for i, v in enumerate(Views.interval(imgOp, interval)):
                vectors[i][k] = v.getRealDouble()
        for vector in vectors:
            vector[-1] = class_index
            instances.add(DenseInstance(1.0, vector))
    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight() != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far lower memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg
Example #14
 def prepare(index):
     # Prepare the img for deconvolution:
     # 0. Transform in one step.
     # 1. Ensure its pixel values conform to expectations (no zeros inside)
     # 2. Copy it into an ArrayImg for faster recurrent retrieval of same pixels
     syncPrint("Preparing %s CM0%i for deconvolution" % (tm_dirname, index))
     img = klb_loader.get(filepaths[index])  # of UnsignedShortType
     imgP = prepareImgForDeconvolution(
         img, transforms[index], target_interval)  # returns of FloatType
     # Copy transformed view into ArrayImg for best performance in deconvolution
     imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
     #ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
     ImgUtil.copy(imgP, imgA, n_threads / 2)  # parallel copying
     syncPrint("--Completed preparing %s CM0%i for deconvolution" %
               (tm_dirname, index))
     imgP = None
     img = None
     return (index, imgA)
Example #15
def crop_along_one_axis(ops, data, intervals, axis_type):
    """Crop along a single axis using Views.
 
    Parameters
    ----------
    intervals : List with two values specifying the start and the end of the interval.
    axis_type : Along which axis to crop. Can be ["X", "Y", "Z", "TIME", "CHANNEL"]
    """
 
    axis = get_axis(axis_type)
    interval_start = [data.min(d) if d != data.dimensionIndex(axis) else intervals[0] for d in range(0, data.numDimensions())]
    interval_end = [data.max(d) if d != data.dimensionIndex(axis) else intervals[1] for d in range(0, data.numDimensions())]
 
    interval = interval_start + interval_end
    interval = Intervals.createMinMax(*interval)
 
    output = ops.run("transform.crop", data, interval, True)
 
    return output
Example #16
def makeInterpolatedImage(img1, search1, img2, search2, weight):
  """ weight: float between 0 and 1 """
  img3 = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img1))
  c1 = img1.cursor()
  c2 = img2.cursor()
  c3 = img3.cursor()
  while c3.hasNext():
    t1 = c1.next()
    t2 = c2.next()
    t3 = c3.next()
    sign1 = -1 if 0 == t1.get() else 1
    sign2 = -1 if 0 == t2.get() else 1
    search1.search(c1)
    search2.search(c2)
    value1 = sign1 * search1.getDistance() * (1 - weight)
    value2 = sign2 * search2.getDistance() * weight
    if value1 + value2 > 0:
      t3.setOne()
  return img3
def oneStep(index=0):
    # Combining transforms into one, via a translation to account of the ROI crop
    img = klb.readFull(filepaths[index])  # klb_loader.get(filepaths[index])
    t1 = cmIsotropicTransforms[index]
    t2 = affine3D(
        [1, 0, 0, -roi[0][0], 0, 1, 0, -roi[0][1], 0, 0, 1, -roi[0][2]])
    t3 = affine3D(fineTransformsPostROICrop[index]).inverse()
    aff = AffineTransform3D()
    aff.set(t1)
    aff.preConcatenate(t2)
    aff.preConcatenate(t3)
    # Final interval is now rooted at 0,0,0 given that the transform includes the translation
    imgP = prepareImgForDeconvolution(
        img, aff,
        FinalInterval([0, 0, 0],
                      [maxC - minC for minC, maxC in izip(roi[0], roi[1])]))
    # Copy transformed view into ArrayImg for best performance in deconvolution
    imgA = ArrayImgs.floats(Intervals.dimensionsAsLongArray(imgP))
    ImgUtil.copy(ImgView.wrap(imgP, imgA.factory()), imgA)
    IL.wrap(imgA, "one step index %i" % index).show()
Example #18
def filterBankRotations(img,
                        angles=xrange(0, 46, 9), # sequence, in degrees
                        filterBankFn=filterBank, # function that takes an img as sole positional argument
                        outputType=FloatType()):
  """ img: a RandomAccessibleInterval.
      filterBankFn: the function from which to obtain a sequence of ImgMath ops.
      angles: a sequence of angles in degrees.
      outputType: for materializing rotated operations and rotating them back.

      For every angle, will prepare a rotated view of the image,
      then create a list of ops on the basis of that rotated view,
      then materialize each op into an image so that an unrotated view
      can be returned back.

      returns a list of unrotated views, each containing the values of applying
      each op to the rotated view. 
  """
  ops_rotations = []
  
  for angle in angles:
    imgRot = img if 0 == angle else rotatedView(img, angle)
    ops = filterBankFn(imgRot)

    # Materialize these two combination ops and rotate them back (rather, a rotated view)
    interval = Intervals.translate(img, [(imgRot.dimension(d) - img.dimension(d)) / 2
                                         for d in xrange(img.numDimensions())])
    for op in ops:
      imgOpRot = compute(op).intoArrayImg(outputType)
      if 0 == angle:
        ops_rotations.append(imgOpRot)
        continue
      # Rotate them back and crop view
      imgOpUnrot = rotatedView(imgOpRot, -angle, enlarge=False)
      imgOp = Views.zeroMin(Views.interval(imgOpUnrot, interval))
      #if angle == 0 or angle == 45:
      #  IL.wrap(imgOpRot, "imgOpRot angle=%i" % angle).show()
      #  IL.wrap(imgOpUnrot, "imgOpUnrot angle=%i" % angle).show()
      #  IL.wrap(imgOp, "imgOp angle=%i" % angle).show()
      ops_rotations.append(imgOp)
  
  return ops_rotations
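A brief usage sketch, assuming img and the filterBank function referenced above are in scope.
# Usage sketch: filter responses for rotations every 9 degrees between 0 and 45.
responses = filterBankRotations(img, angles=xrange(0, 46, 9))
print "Computed", len(responses), "rotated filter responses"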
Example #19
def crop(ops, data, intervals):
    """Crop along a one or more axis.
 
    Parameters
    ----------
    intervals : Dict specifying which axis to crop and with what intervals.
                Example :
                intervals = {'X' : [0, 50],
                             'Y' : [0, 50]}
    """

    intervals_start = [data.min(d) for d in range(0, data.numDimensions())]
    intervals_end = [data.max(d) for d in range(0, data.numDimensions())]

    for axis_type, interval in intervals.items():
        index = data.dimensionIndex(get_axis(axis_type))
        intervals_start[index] = interval[0]
        intervals_end[index] = interval[1]

    intervals = Intervals.createMinMax(*intervals_start + intervals_end)

    output = ops.run("transform.crop", data, intervals, True)

    return output
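A hedged usage example, assuming ops, data and get_axis come from the surrounding script.
# Usage sketch: keep only the first 51 pixels along X and Y, all other axes untouched.
cropped = crop(ops, data, {'X': [0, 50], 'Y': [0, 50]})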
Example #20
def crop(ops, data, intervals):
    """Crop along a one or more axis.
 
    Parameters
    ----------
    intervals : Dict specifying which axis to crop and with what intervals.
                Example :
                intervals = {'X' : [0, 50],
                             'Y' : [0, 50]}
    """
 
    intervals_start = [data.min(d) for d in range(0, data.numDimensions())]
    intervals_end = [data.max(d) for d in range(0, data.numDimensions())]
 
    for axis_type, interval in intervals.items():
        index = data.dimensionIndex(get_axis(axis_type))
        intervals_start[index] = interval[0]
        intervals_end[index] = interval[1]
 
    intervals = Intervals.createMinMax(*intervals_start + intervals_end)
 
    output = ops.run("transform.crop", data, intervals, True)
    
    return output
  aff = AffineTransform3D()
  aff.set(*matrix)
  return aff

# Transform the kernel for each view
kernels = [kernel,
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB1"])),
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB2"])),
           transformPSFKernelToView(kernel, affine3D(matrices["imgB0-imgB3"]))]

def deconvolve(images, kernels, name, n_iterations):
  # Bayesian-based multi-view deconvolution
  exe = newFixedThreadPool(Runtime.getRuntime().availableProcessors() -2)
  try:
    mylambda = 0.0006
    blockSize = Intervals.dimensionsAsIntArray(images[0]) # [128, 128, 128]
    cptf = ComputeBlockSeqThreadCPUFactory(exe, mylambda, blockSize, ArrayImgFactory(FloatType()))
    psiInitFactory = PsiInitBlurredFusedFactory() # PsiInitAvgPreciseFactory() fails with type mismatch: UnsignedByteType (?) vs FloatType
    weight = Views.interval(ConstantRandomAccessible(FloatType(1), images[0].numDimensions()), FinalInterval(images[0]))
    filterBlocksForContent = False # Run once with True, none were removed
    decon_views = DeconViews([DeconView(exe, img, weight, kernel, PSFTYPE.INDEPENDENT, blockSize, 1, filterBlocksForContent)
                              for img in images],
                             exe)
    #n_iterations = 10
    decon = MultiViewDeconvolutionSeq(decon_views, n_iterations, psiInitFactory, cptf, ArrayImgFactory(FloatType()))
    if not decon.initWasSuccessful():
      print "Something went wrong initializing MultiViewDeconvolution"
    else:
      decon.runIterations()
      img = decon.getPSI()
      imp = IL.wrap(img, name + "_deconvolved_" + str(n_iterations) + "_iterations")
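The snippet is cut off before the deconvolved image is shown or saved; a hedged invocation sketch, assuming images holds the four registered views that match the kernels above.
# Hypothetical call: multi-view deconvolution of the four views, 30 iterations.
deconvolve(images, kernels, "fused", n_iterations=30)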
Example #22
    print median, max_sum

    # Turns out the maximum is infinity.
    # Therefore, discard all infinity values, and also any above 1.5 * median
    threshold = median * 1.5

    filtered = [
        filename for filename, pixel_sum in sums if pixel_sum < threshold
    ]

    n_threads = Runtime.getRuntime().availableProcessors()
    threads = []
    chunk_size = len(filtered) / n_threads
    aimgs = []
    first = klb.readFull(os.path.join(srcDir, filtered[0]))
    dimensions = Intervals.dimensionsAsLongArray(first)

    for i in xrange(n_threads):
        m = Max(dimensions, filtered[i * chunk_size:(i + 1) * chunk_size])
        m.start()
        threads.append(m)

    # Await completion of all
    for m in threads:
        m.join()

    # Merge all results into a single maximum projection
    max_projection = computeInto(maximum([m.aimg for m in threads]),
                                 ArrayImgs.floats(dimensions))

    max3D = writeZip(max_projection,
from net.imglib2.util import Intervals
from net.imagej.axis import Axes
from net.imglib2.type.numeric.real import FloatType

from java.lang.Math import floor

# crop PSF to desired size, this makes decon run faster with little effect on final quality
psfX = psf.dimension(data.dimensionIndex(Axes.X))
psfY = psf.dimension(data.dimensionIndex(Axes.Y))
psfZ = psf.dimension(2)

psf_ = ops.transform().crop(
    psf.getImgPlus(),
    Intervals.createMinMax(psfX / 2 - psfXSize / 2, psfY / 2 - psfYSize / 2, 0,
                           psfX / 2 + psfXSize / 2 - 1,
                           psfY / 2 + psfYSize / 2 - 1, psfZ - 1))

psf_ = ops.convert().float32(psf_)

maxPSF = ops.stats().max(psf_).getRealFloat()
psfBackground = psfBackgroundPercent * maxPSF

# subtract background from psf
for t in psf_:
    val = t.getRealFloat() - psfBackground
    if val < 0:
        val = 0
    t.setReal(val)

# normalize psf
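The snippet stops at the normalization comment; a minimal sketch of the step that typically follows, assuming the goal is a PSF whose pixel values sum to 1 before deconvolution.
# Hedged sketch: divide every PSF pixel by the total sum so the PSF sums to 1.
sumPSF = ops.stats().sum(psf_).getRealFloat()
for t in psf_:
    t.setReal(t.getRealFloat() / sumPSF)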
Example #24
IL.wrap(img_sub, "LoopBuilder").show()
"""

# Example 2b: with ImgLib2 LoopBuilder using a clojure-defined TriConsumer
from net.imglib2.img.display.imagej import ImageJFunctions as IL
from net.imglib2.converter import Converters
from net.imglib2.img.array import ArrayImgs
from net.imglib2.util import Intervals
from net.imglib2.loops import LoopBuilder
from org.scijava.plugins.scripting.clojure import ClojureScriptEngine

img = IL.wrap(imp_rgb) # an ARGBType Img
red   = Converters.argbChannel(img, 1) # a view of the ARGB red channel
green = Converters.argbChannel(img, 2) # a view of the ARGB green channel
img_sub = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))    # the img to store the result

code = """
(deftype Consumer [^long threshold]
  %s
  (accept [self red green result] ; can't type-hint, doesn't find matching method
    (let [^%s r red
          ^%s g green
          ^%s s result]
      (.setInteger s (if (>= (.getInteger r) threshold)
                       (.getInteger g)
                       0)))))
""" % ((LoopBuilder.TriConsumer.getName(),) \
      + tuple(a.randomAccess().get().getClass().getName() for a in [red, green, img_sub]))

clj = ClojureScriptEngine()
Example #25
def makeCropUI(imp, images, tgtDir, panel=None, cropContinuationFn=None):
  """ imp: the ImagePlus to work on.
      images: the list of ImgLib2 images, one per frame, not original but already isotropic.
              (These are views that use a nearest neighbor interpolation using the calibration to scale to isotropy.)
      tgtDir: the target directory where e.g. CSV files will be stored, for ROI, features, pointmatches.
      panel: optional, a JPanel controlled by a GridBagLayout.
      cropContinuationFn: optional, a function to execute after cropping,
                          which is given as arguments the original images,
                          minC, maxC (both define a ROI), and the cropped images. """
  independent = None == panel
  if not panel:
    panel = JPanel()
    panel.setBorder(BorderFactory.createEmptyBorder(10,10,10,10))
    gb = GridBagLayout()
    gc = GBC()
  else:
    gb = panel.getLayout()
    # Constraints of the last component
    gc = gb.getConstraints(panel.getComponent(panel.getComponentCount() - 1))
    
    # Horizontal line to separate prior UI components from crop UI
    gc.gridx = 0
    gc.gridy += 1
    gc.gridwidth = 4
    gc.anchor = GBC.WEST
    gc.fill = GBC.HORIZONTAL
    sep = JSeparator()
    sep.setMinimumSize(Dimension(200, 10))
    gb.setConstraints(sep, gc)
    panel.add(sep)

  # ROI UI header
  title = JLabel("ROI controls:")
  gc.gridy +=1
  gc.anchor = GBC.WEST
  gc.gridwidth = 4
  gb.setConstraints(title, gc)
  panel.add(title)

  # Column labels for the min and max coordinates
  gc.gridy += 1
  gc.gridwidth = 1
  for i, title in enumerate(["", "X", "Y", "Z"]):
    gc.gridx = i
    gc.anchor = GBC.CENTER
    label = JLabel(title)
    gb.setConstraints(label, gc)
    panel.add(label)

  textfields = []
  rms = []

  # Load stored ROI if any
  roi_path = path = os.path.join(tgtDir, "crop-roi.csv")
  if os.path.exists(roi_path):
    with open(roi_path, 'r') as csvfile:
      reader = csv.reader(csvfile, delimiter=',', quotechar="\"")
      reader.next() # header
      minC = map(int, reader.next()[1:])
      maxC = map(int, reader.next()[1:])
      # Place the ROI over the ImagePlus
      imp.setRoi(Roi(minC[0], minC[1], maxC[0] + 1 - minC[0], maxC[1] + 1 - minC[1]))
  else:
    # Use whole image dimensions
    minC = [0, 0, 0]
    maxC = [v -1 for v in Intervals.dimensionsAsLongArray(images[0])]

  # Text fields for the min and max coordinates
  for rowLabel, coords in izip(["min coords: ", "max coords: "],
                               [minC, maxC]):
    gc.gridx = 0
    gc.gridy += 1
    label = JLabel(rowLabel)
    gb.setConstraints(label, gc)
    panel.add(label)
    for i in xrange(3):
      gc.gridx += 1
      tf = JTextField(str(coords[i]), 10)
      gb.setConstraints(tf, gc)
      panel.add(tf)
      textfields.append(tf)
      listener = RoiMaker(imp, textfields, len(textfields) -1)
      rms.append(listener)
      tf.addKeyListener(listener)
      tf.addMouseWheelListener(listener)

  # Listen to changes in the ROI of imp
  rfl = RoiFieldListener(imp, textfields)
  Roi.addRoiListener(rfl)
  # ... and enable cleanup
  ImagePlus.addImageListener(FieldDisabler(rfl, rms))

  # Functions for cropping images
  cropped = None
  cropped_imp = None

  def storeRoi(minC, maxC):
    if os.path.exists(roi_path):
      # Load ROI
      with open(path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar="\"")
        reader.next() # header
        same = True
        for a, b in izip(minC + maxC, map(int, reader.next()[1:] + reader.next()[1:])):
          if a != b:
            same = False
            # Invalidate any CSV files for features and pointmatches: different cropping
            for filename in os.listdir(tgtDir):
              if filename.endswith("features.csv") or filename.endswith("pointmatches.csv"):
                os.remove(os.path.join(tgtDir, filename))
            break
        if same:
          return
    # Store the ROI as crop-roi.csv
    with open(roi_path, 'w') as csvfile:
      w = csv.writer(csvfile, delimiter=',', quotechar="\"", quoting=csv.QUOTE_NONNUMERIC)
      w.writerow(["coords", "x", "y", "z"])
      w.writerow(["min"] + map(int, minC))
      w.writerow(["max"] + map(int, maxC))
  
  def crop(event):
    global cropped, cropped_imp
    coords = [int(float(tf.getText())) for tf in textfields]
    minC = [max(0, c) for c in coords[0:3]]
    maxC = [min(d -1, c) for d, c in izip(Intervals.dimensionsAsLongArray(images[0]), coords[3:6])]
    storeRoi(minC, maxC)
    print "ROI min and max coordinates"
    print minC
    print maxC
    cropped = [Views.zeroMin(Views.interval(img, minC, maxC)) for img in images]
    cropped_imp = showAsStack(cropped, title="cropped")
    cropped_imp.setDisplayRange(imp.getDisplayRangeMin(), imp.getDisplayRangeMax())
    if cropContinuationFn:
      cropContinuationFn(images, minC, maxC, cropped, cropped_imp)

  # Buttons to create a ROI and to crop to ROI,
  # which when activated enables the fine registration buttons
  crop_button = JButton("Crop to ROI")
  crop_button.addActionListener(crop)
  gc.gridx = 0
  gc.gridy += 1
  gc.gridwidth = 4
  gc.anchor = GBC.WEST
  buttons_panel = JPanel()
  buttons_panel.add(crop_button)
  gb.setConstraints(buttons_panel, gc)
  panel.add(buttons_panel)

  if independent:
    frame = JFrame("Crop by ROI")
    frame.getContentPane().add(panel)
    frame.pack()
    frame.setDefaultCloseOperation(JFrame.DO_NOTHING_ON_CLOSE)
    frame.addWindowListener(CloseControl(destroyables=rms + [rfl]))
    frame.setVisible(True)
  else:
    # Re-pack the JFrame
    parent = panel.getParent()
    while not isinstance(parent, JFrame) and parent is not None:
      parent = parent.getParent()

    if parent:
      frame = parent
      frame.pack()
      found = False
      for wl in frame.getWindowListeners():
        if isinstance(wl, CloseControl):
          wl.addDestroyables(rms + [rfl])
          found = True
          break
      if not found:
        frame.addWindowListener(CloseControl(destroyables=rms + [rfl]))
      frame.setVisible(True)

  return panel
Example #26
def maxProjectLastDimension(img, strategy="1by1", chunk_size=0):
  last_dimension = img.numDimensions() -1

  if "1by1" == strategy:
    exe = newFixedThreadPool()
    try:
      n_threads = exe.getCorePoolSize()
      imgTs = [ArrayImgs.unsignedShorts(list(Intervals.dimensionsAsLongArray(img))[:-1]) for i in xrange(n_threads)]
      
      def mergeMax(img1, img2, imgT):
        return compute(maximum(img1, img2)).into(imgT)

      def hyperSlice(index):
        return Views.hyperSlice(img, last_dimension, index)

      # The first n_threads mergeMax:
      futures = [exe.submit(Task(mergeMax, hyperSlice(i*2), hyperSlice(i*2 +1), imgTs[i]))
                 for i in xrange(n_threads)]
      # As soon as one finishes, merge it with the next available hyperSlice
      next = n_threads
      while len(futures) > 0: # i.e. not empty
        imgT = futures.pop(0).get()
        if next < img.dimension(last_dimension):
          futures.append(exe.submit(Task(mergeMax, imgT, hyperSlice(next), imgT)))
          next += 1
        else:
          # Run out of hyperSlices to merge
          if 0 == len(futures):
            return imgT # done
          # Merge imgT to each other until none remain
          futures.append(exe.submit(Task(mergeMax, imgT, futures.pop(0).get(), imgT)))
    finally:
      exe.shutdownNow()
  else:
    # By chunks
    imglibtype =  img.randomAccess().get().getClass()
    # The Converter class
    reduce_max = makeCompositeToRealConverter(reducer_class=Math,
                                              reducer_method="max",
                                              reducer_method_signature="(DD)D")
    if chunk_size > 0:
      # map reduce approach
      exe = newFixedThreadPool()
      try:
        def projectMax(img, minC, maxC, reduce_max):
          # Dimensions of the projection: all axes except the last (collapsed) one
          imgA = ArrayImgs.unsignedShorts([maxC[d] - minC[d] + 1 for d in xrange(len(minC) - 1)])
          ImgUtil.copy(ImgView.wrap(convert(Views.collapseReal(Views.interval(img, minC, maxC)), reduce_max.newInstance(), imglibtype), img.factory()), imgA)
          return imgA
        
        # The min and max coordinates of all dimensions except the last one
        minCS = [0 for d in xrange(last_dimension)]
        maxCS = [img.dimension(d) -1 for d in xrange(last_dimension)]

        # Process every chunk in parallel
        futures = [exe.submit(Task(projectMax, img, minCS + [offset],
                                   maxCS + [min(offset + chunk_size, img.dimension(last_dimension)) -1],
                                   reduce_max))
                   for offset in xrange(0, img.dimension(last_dimension), chunk_size)]
        
        # Merge all partial projections pairwise into the first one
        imgs = [f.get() for f in futures]
        return reduce(lambda a, b: compute(maximum(a, b)).into(a), imgs)
      finally:
        exe.shutdownNow()
    else:
      # One chunk: all at once
      # Each sample of img3DV is a virtual vector over all time frames at that 3D coordinate
      # Reduce each vector to a single scalar, using a Converter
      img3DC = convert(Views.collapseReal(img), reduce_max.newInstance(), imglibtype)
      imgA = ArrayImgs.unsignedShorts([img.dimension(d) for d in xrange(last_dimension)])
      ImgUtil.copy(ImgView.wrap(img3DC, img.factory()), imgA)
      return imgA
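A usage sketch, assuming img4d is a hypothetical 4D (x, y, z, t) UnsignedShortType image and IL is ImageJFunctions, as elsewhere in these examples.
# Maximum projection over the last (time) axis of a hypothetical 4D volume.
maxProj = maxProjectLastDimension(img4d, strategy="1by1")
IL.wrap(maxProj, "max projection over time").show()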
# first take a look at the size and type of each dimension
for d in range(data.numDimensions()):
    print "axis d: type: " + str(data.axis(d).type()) + " length: " + str(
        data.dimension(d))

img = data.getImgPlus()

xLen = data.dimension(data.dimensionIndex(Axes.X))
yLen = data.dimension(data.dimensionIndex(Axes.Y))
zLen = data.dimension(data.dimensionIndex(Axes.Z))
cLen = data.dimension(data.dimensionIndex(Axes.CHANNEL))

# crop a channel
c0 = ops.transform().crop(
    img, Intervals.createMinMax(0, 0, 0, 0, xLen - 1, yLen - 1, 0, zLen - 1))
c0.setName("c0")

# crop both channels at z=12
z12 = ops.transform().crop(
    img, Intervals.createMinMax(0, 0, 0, 12, xLen - 1, yLen - 1, cLen - 1, 12))
z12.setName("z12")

# crop channel 0 at z=12
c0z12 = ops.transform().crop(
    img, Intervals.createMinMax(0, 0, 0, 12, xLen - 1, yLen - 1, 0, 12))
c0z12.setName("c0z12")

# crop an roi at channel 0, z=12
roiC0z12 = ops.transform().crop(
    img, Intervals.createMinMax(150, 150, 0, 12, 200, 200, 0, 12))
from net.imglib2.util import Intervals
from net.imagej.axis import Axes

# first take a look at the size and type of each dimension
for d in range(data.numDimensions()):
	print "axis d: type: "+str(data.axis(d).type())+" length: "+str(data.dimension(d))

img=data.getImgPlus()

xLen = data.dimension(data.dimensionIndex(Axes.X))
yLen = data.dimension(data.dimensionIndex(Axes.Y))
zLen = data.dimension(data.dimensionIndex(Axes.Z))
cLen = data.dimension(data.dimensionIndex(Axes.CHANNEL))

# crop a channel
c0=ops.transform().crop(img, Intervals.createMinMax(0, 0, 0,0,xLen-1, yLen-1, 0, zLen-1))
c0.setName("c0")

# crop both channels at z=12
z12=ops.transform().crop(img, Intervals.createMinMax(0,0,0,12, xLen-1, yLen-1, cLen-1, 12))
z12.setName("z12")

# crop channel 0 at z=12
c0z12=ops.transform().crop(img, Intervals.createMinMax(0,0,0,12, xLen-1, yLen-1, 0, 12))
c0z12.setName("c0z12")

# crop an roi at channel 0, z=12
roiC0z12=ops.transform().crop(img, Intervals.createMinMax(150,150,0,12, 200, 200, 0, 12))
roiC0z12.setName("roiC0z12")
#@ OpService ops
#@ SCIFIO scifio
#@ DatasetService datasetService
#@ String input_path
#@ String output_path
#@ UIService ui

import os
from net.imglib2.util import Intervals
from net.imglib2.view import IterableRandomAccessibleInterval

try:
    os.unlink(output_path)
except OSError:
    pass

input = scifio.datasetIO().open(input_path)
dims = Intervals.dimensionsAsLongArray(input)
output_dims = dims[:-1]
output = ops.create().img(output_dims)
ops.transform().project(IterableRandomAccessibleInterval(output), input,
                        ops.op('stats.max', input.getImgPlus()),
                        len(output_dims))

scifio.datasetIO().save(datasetService.create(output), output_path)
ui.show(output)
print("OK")
def export8bitN5(
        filepaths,
        loadFn,
        img_dimensions,
        matrices,
        name,
        exportDir,
        interval,
        gzip_compression=6,
        invert=True,
        CLAHE_params=[400, 256, 3.0],
        n5_threads=0,  # 0 means as many as CPU cores
        block_size=[128, 128, 128]):
    """
  Export into an N5 volume, in parallel, in 8-bit.

  filepaths: the ordered list of filepaths, one per serial section.
  loadFn: a function to load a filepath into an ImagePlus.
  name: name to assign to the N5 volume.
  matrices: the list of transformation matrices (each one is an array), one per section
  exportDir: the directory into which to save the N5 volume.
  interval: for cropping.
  gzip_compression: defaults to 6 as suggested by Saalfeld. 0 means no compression.
  invert: defaults to True (necessary for FIBSEM). Whether to invert the images upon loading.
  CLAHE_params: defaults to [400, 256, 3.0]. If not None, a list of the 3 parameters needed for the CLAHE filter applied to each image.
  n5_threads: defaults to 0, meaning as many as CPU cores.
  block_size: defaults to 128x128x128 px. A list of 3 integer numbers, the dimensions of each individual block.
  """

    dims = Intervals.dimensionsAsLongArray(interval)
    voldims = [dims[0], dims[1], len(filepaths)]
    cell_dimensions = [dims[0], dims[1], 1]

    def asNormalizedUnsignedByteArrayImg(interval, invert, blockRadius, n_bins,
                                         slope, matrices, index, imp):
        sp = imp.getProcessor()  # ShortProcessor
        # Crop to interval if needed
        x = interval.min(0)
        y = interval.min(1)
        width = interval.max(0) - interval.min(0) + 1
        height = interval.max(1) - interval.min(1) + 1
        if 0 != x or 0 != y or sp.getWidth() != width or sp.getHeight() != height:
            sp.setRoi(x, y, width, height)
            sp = sp.crop()

        if invert:
            sp.invert()

        CLAHE.run(
            ImagePlus("", sp), blockRadius, n_bins, slope, None
        )  # far lower memory requirements than NormalizeLocalContrast, and faster.
        minimum, maximum = autoAdjust(sp)

        # Transform and convert image to 8-bit, mapping to display range
        img = ArrayImgs.unsignedShorts(
            sp.getPixels(), [sp.getWidth(), sp.getHeight()])
        sp = None
        imp = None
        # Must use linear interpolation for subpixel precision
        affine = AffineTransform2D()
        affine.set(matrices[index])
        imgI = Views.interpolate(Views.extendZero(img),
                                 NLinearInterpolatorFactory())
        imgA = RealViews.transform(imgI, affine)
        imgT = Views.zeroMin(Views.interval(imgA, img))
        # Convert to 8-bit
        imgMinMax = convert2(imgT,
                             RealUnsignedByteConverter(minimum, maximum),
                             UnsignedByteType,
                             randomAccessible=False)  # use IterableInterval
        aimg = ArrayImgs.unsignedBytes(Intervals.dimensionsAsLongArray(img))
        # ImgUtil copies multi-threaded, which is not appropriate here as there are many other images being copied too
        #ImgUtil.copy(ImgView.wrap(imgMinMax, aimg.factory()), aimg)

        # Single-threaded copy
        copier = createBiConsumerTypeSet(UnsignedByteType)
        LoopBuilder.setImages(imgMinMax, aimg).forEachPixel(copier)

        img = imgI = imgA = imgMinMax = imgT = None
        return aimg

    blockRadius, n_bins, slope = CLAHE_params

    # A CacheLoader that interprets the list of filepaths as a 3D volume: a stack of 2D slices
    loader = SectionCellLoader(
        filepaths,
        asArrayImg=partial(asNormalizedUnsignedByteArrayImg, interval, invert,
                           blockRadius, n_bins, slope, matrices),
        loadFn=loadFn)

    # How to preload block_size[2] files at a time? Or at least as many as numCPUs()?
    # One possibility is to query the SoftRefLoaderCache.map for its entries, using a ScheduledExecutorService,
    # and preload sections ahead for the whole blockSize[2] dimension.

    cachedCellImg = lazyCachedCellImg(loader, voldims, cell_dimensions,
                                      UnsignedByteType, BYTE)

    exe_preloader = newFixedThreadPool(n_threads=min(
        block_size[2], n5_threads if n5_threads > 0 else numCPUs()),
                                       name="preloader")

    def preload(cachedCellImg, loader, block_size, filepaths, exe):
        """
    Find the last cell index in the cache, identify which block
    (given block_size[2], i.e. the Z dimension) that index belongs to,
    and concurrently load all cells (sections) that the Z extent of that block will need.
    If they are already loaded, these operations are insignificant.
    """
        try:
            # The SoftRefLoaderCache.map is a ConcurrentHashMap with Long keys, aka numbers
            cache = cachedCellImg.getCache()
            f1 = cache.getClass().getDeclaredField(
                "cache")  # LoaderCacheAsCacheAdapter.cache
            f1.setAccessible(True)
            softCache = f1.get(cache)
            cache = None
            f2 = softCache.getClass().getDeclaredField(
                "map")  # SoftRefLoaderCache.map
            f2.setAccessible(True)
            keys = sorted(f2.get(softCache).keySet())
            if 0 == len(keys):
                return
            first = max(0, keys[-1] - (keys[-1] % block_size[2]))
            last = min(len(filepaths), first + block_size[2]) - 1
            keys = None
            syncPrintQ("### Preloading %i-%i ###" % (first, last))
            futures = []
            for index in xrange(first, last + 1):
                futures.append(
                    exe.submit(TimeItTask(softCache.get, index, loader)))
            softCache = None
            # Wait for all
            loaded_any = False
            count = 0
            while len(futures) > 0:
                r, t = futures.pop(0).get()  # waits for the image to load
                if t > 1000:  # in milliseconds; less than this is almost surely a cache hit, more likely a cache miss and a reload
                    loaded_any = True
                r = None
                # t in milliseconds
                syncPrintQ("preloaded index %i in %f ms" % (first + count, t))
                count += 1
            if not loaded_any:
                syncPrintQ("Completed preloading %i-%i" %
                           (first, first + block_size[2] - 1))
        except:
            syncPrintQ(sys.exc_info())

    preloader = Executors.newSingleThreadScheduledExecutor()
    preloader.scheduleWithFixedDelay(
        RunTask(preload, cachedCellImg, loader, block_size, filepaths,
                exe_preloader), 10, 60, TimeUnit.SECONDS)

    try:
        syncPrint("N5 directory: " + exportDir + "\nN5 dataset name: " + name +
                  "\nN5 blockSize: " + str(block_size))
        writeN5(cachedCellImg,
                exportDir,
                name,
                block_size,
                gzip_compression_level=gzip_compression,
                n_threads=n5_threads)
    finally:
        preloader.shutdown()
        exe_preloader.shutdown()
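A hypothetical invocation sketch; filepaths, loadImageFn, img_dimensions and matrices would come from the surrounding alignment pipeline, and the interval crops each 2D section before export.
# Hypothetical call: export aligned 2D sections as an 8-bit N5 volume.
from net.imglib2 import FinalInterval

interval = FinalInterval([0, 0], [img_dimensions[0] - 1, img_dimensions[1] - 1])
export8bitN5(filepaths, loadImageFn, img_dimensions, matrices,
             "sections-8bit", "/path/to/export.n5/", interval,
             gzip_compression=6, invert=True, CLAHE_params=[400, 256, 3.0],
             n5_threads=0, block_size=[128, 128, 128])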
# @Dataset data
# @Dataset mask
# @OUTPUT Dataset output

# Given a mask (binary image) and a raw image, remove background pixels from the raw
# image by keeping only those inside the mask (different from 0).

# Note: as pointed out by @stelfrich on Gitter, the particular case where foreground pixels
# are 1 and background pixels are 0 can be written more simply as a multiplication of the two
# images (a sketch of that variant follows this snippet).

from net.imglib2.util import Intervals

# Check dimensions are the same for 'data' and 'mask'

if not Intervals.equalDimensions(data, mask):
    raise Exception("Dimensions from input dataset does not match.")

# Create the cursors
output = data.duplicate() 
targetCursor = output.localizingCursor()
dataRA = data.randomAccess()
maskRA = mask.randomAccess()

# Iterate over each pixel of the datasets
while targetCursor.hasNext():
    targetCursor.fwd()
    dataRA.setPosition(targetCursor)
    maskRA.setPosition(targetCursor)
 
    if maskRA.get().get() == 0:
        targetCursor.get().setZero()
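As the note above mentions, a strictly 0/1 mask allows the same result by pixel-wise multiplication; a minimal sketch of that variant, assuming the same data and mask Datasets.
# Alternative for a 0/1 mask: multiply each output pixel by the mask value.
output2 = data.duplicate()
c = output2.localizingCursor()
mra = mask.randomAccess()
while c.hasNext():
    c.fwd()
    mra.setPosition(c)
    c.get().mul(mra.get().getRealDouble())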
Example #32
 def get(self, index):
     img = self.asArrayImg(index, self.loadFn(self.filepaths[index]))
     dims = Intervals.dimensionsAsLongArray(img)
     return Cell(
         list(dims) + [1], [0] * img.numDimensions() + [index],
         img.update(None))
Example #33
# by using the 'transformed' float array.

# We compute the bounds by, for every corner, checking if the floor of each dimension
# of a corner coordinate is smaller than the previously found minimum value,
# and by checking if the ceil of each corner coordinate is larger than the
# previously found value, packing the new pair of minimum and maximum values
# into the list of pairs that is 'bounds'.

# Notice the min coordinates can have negative values, as the rotated image
# has pixels now somewhere to the left and up from the top-left 0,0,0 origin
# of coordinates. That's why we use Views.zeroMin, to ensure that downstream
# uses of the transformed image see it as fitting within bounds that start at 0,0,0.

bounds = repeat(
    (sys.maxint, 0)
)  # initial upper- and lower-bound values for min, max to compare against
transformed = zeros(img.numDimensions(), 'f')

for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):
    rotation.apply(corner, transformed)
    bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))
              for (vmin, vmax), v in zip(bounds, transformed)]

minC, maxC = map(list, zip(*bounds))  # transpose list of lists
imgRot2dFit = IL.wrap(Views.zeroMin(Views.interval(rotated, minC, maxC)),
                      imp.getTitle() + " - rot2dFit")
imgRot2dFit.show()

matrix = rotation.getRowPackedCopy()
pprint([list(matrix[i:i + 4]) for i in xrange(0, 12, 4)])
converted = ops.convert().float32(data)
 
# Get the first frame (TODO: find a more convenient way !)
t_dim = data.dimensionIndex(Axes.TIME)
interval_start = []
interval_end = []
for d in range(0, data.numDimensions()):
    if d != t_dim:
        interval_start.append(0)
        interval_end.append(data.dimension(d) - 1)
    else:
        interval_start.append(0)
        interval_end.append(0)
         
intervals = interval_start + interval_end
intervals = Intervals.createMinMax(*intervals)
 
first_frame = ops.transform().crop(converted, intervals)
 
# Allocate output memory (wait for hybrid CF version of slice)
subtracted = ops.create().img(converted)
 
# Create the op
sub_op = ops.op("math.subtract", first_frame, first_frame)
 
# Setup the fixed axis
fixed_axis = [d for d in range(0, data.numDimensions()) if d != t_dim]
 
# Run the op
ops.slice(subtracted, converted, sub_op, fixed_axis)